Here are examples of the Python API shutil.rmtree, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
160 Examples
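Before the project examples, here is a minimal, self-contained sketch of the call itself. The directory tree is created just for the demo, so the deletion is safe to run:

import os
import shutil
import tempfile

# Create a throwaway directory tree so the deletion below is safe to run.
root = tempfile.mkdtemp(prefix='rmtree-demo-')
os.makedirs(os.path.join(root, 'nested', 'deeper'))

# shutil.rmtree removes the directory and everything underneath it.
shutil.rmtree(root)

# With ignore_errors=True the call is a no-op when the path is already
# gone, which makes cleanup code safe to repeat.
shutil.rmtree(root, ignore_errors=True)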
Example 51
Project: cstar_perf Source File: stress_compare.py
def stress_compare(revisions,
title,
log,
operations = [],
subtitle = '',
capture_fincore=False,
initial_destroy=True,
leave_data=False,
keep_page_cache=False,
git_fetch_before_test=True,
bootstrap_before_test=True,
teardown_after_test=True
):
"""
Run Stress on multiple C* branches and compare them.
revisions - List of dictionaries that contain cluster configurations
to trial. This is combined with the default config.
title - The title of the comparison
subtitle - A subtitle for more information (displayed smaller underneath)
log - The json file path to record stats to
operations - List of dictionaries indicating the operations. Example:
[# cassandra-stress command, node defaults to cluster defined 'stress_node'
{'type': 'stress',
'command': 'write n=19M -rate threads=50',
'node': 'node1',
'wait_for_compaction': True},
# nodetool command to run in parallel on nodes:
{'type': 'nodetool',
'command': 'decommission',
'nodes': ['node1','node2']},
# cqlsh script, node defaults to cluster defined 'stress_node'
{'type': 'cqlsh',
'script': "use my_ks; INSERT INTO blah (col1, col2) VALUES (val1, val2);",
'node': 'node1'}
]
capture_fincore - Enables capturing of linux-fincore logs of C* data files.
initial_destroy - Destroy all data before the first revision is run.
leave_data - Whether to leave the Cassandra data/commitlog/etc directories intact between revisions.
keep_page_cache - Whether to leave the linux page cache intact between revisions.
git_fetch_before_test (bool): If True, will update the cassandra.git with fab_common.git_repos
bootstrap_before_test (bool): If True, will bootstrap DSE / C* before running the operations
teardown_after_test (bool): If True, will shutdown DSE / C* after all of the operations
"""
validate_revisions_list(revisions)
validate_operations_list(operations)
pristine_config = copy.copy(fab_config)
# initial_destroy and git_fetch_before_test can be set in the job configuration,
# or manually in the call to this function.
# Either is fine, but they shouldn't conflict. If they do, a ValueError is raised.
initial_destroy = get_bool_if_method_and_config_values_do_not_conflict('initial_destroy',
initial_destroy,
pristine_config,
method_name='stress_compare')
if initial_destroy:
logger.info("Cleaning up from prior runs of stress_compare ...")
teardown(destroy=True, leave_data=False)
# https://datastax.jira.com/browse/CSTAR-633
git_fetch_before_test = get_bool_if_method_and_config_values_do_not_conflict('git_fetch_before_test',
git_fetch_before_test,
pristine_config,
method_name='stress_compare')
stress_shas = maybe_update_cassandra_git_and_setup_stress(operations, git_fetch=git_fetch_before_test)
# Flamegraph Setup
if flamegraph.is_enabled():
execute(flamegraph.setup)
with GracefulTerminationHandler() as handler:
for rev_num, revision_config in enumerate(revisions):
config = copy.copy(pristine_config)
config.update(revision_config)
revision = revision_config['revision']
config['log'] = log
config['title'] = title
config['subtitle'] = subtitle
product = dse if config.get('product') == 'dse' else cstar
# leave_data, bootstrap_before_test, and teardown_after_test can be set in the job configuration,
# or manually in the call to this function.
# Either is fine, but they shouldn't conflict. If they do, a ValueError is raised.
leave_data = get_bool_if_method_and_config_values_do_not_conflict('leave_data',
leave_data,
revision_config,
method_name='stress_compare')
# https://datastax.jira.com/browse/CSTAR-638
bootstrap_before_test = get_bool_if_method_and_config_values_do_not_conflict('bootstrap_before_test',
bootstrap_before_test,
revision_config,
method_name='stress_compare')
# https://datastax.jira.com/browse/CSTAR-639
teardown_after_test = get_bool_if_method_and_config_values_do_not_conflict('teardown_after_test',
teardown_after_test,
revision_config,
method_name='stress_compare')
logger.info("Bringing up {revision} cluster...".format(revision=revision))
# Drop the page cache between each revision, especially
# important when leave_data=True :
if not keep_page_cache:
drop_page_cache()
# Only fetch from git on the first run and if git_fetch_before_test is True
git_fetch_before_bootstrap = rev_num == 0 and git_fetch_before_test
if bootstrap_before_test:
revision_config['git_id'] = git_id = bootstrap(config,
destroy=initial_destroy,
leave_data=leave_data,
git_fetch=git_fetch_before_bootstrap)
else:
revision_config['git_id'] = git_id = config['revision']
if flamegraph.is_enabled(revision_config):
execute(flamegraph.ensure_stopped_perf_agent)
execute(flamegraph.start_perf_agent, rev_num)
if capture_fincore:
start_fincore_capture(interval=10)
last_stress_operation_id = 'None'
for operation_i, operation in enumerate(operations, 1):
try:
start = datetime.datetime.now()
stats = {
"id": str(uuid.uuid1()),
"type": operation['type'],
"revision": revision,
"git_id": git_id,
"start_date": start.isoformat(),
"label": revision_config.get('label', revision_config['revision']),
"test": '{operation_i}_{operation}'.format(
operation_i=operation_i,
operation=operation['type'])
}
if operation['type'] == 'stress':
last_stress_operation_id = stats['id']
# Default to all the nodes of the cluster if no
# nodes were specified in the command:
if 'nodes' in operation:
cmd = "{command} -node {hosts}".format(
command=operation['command'],
hosts=",".join(operation['nodes']))
elif '-node' in operation['command']:
cmd = operation['command']
else:
cmd = "{command} -node {hosts}".format(
command=operation['command'],
hosts=",".join([n for n in fab_config['hosts']]))
stats['command'] = cmd
stats['intervals'] = []
stats['test'] = '{operation_i}_{operation}'.format(
operation_i=operation_i, operation=cmd.strip().split(' ')[0]).replace(" ", "_")
logger.info('Running stress operation : {cmd} ...'.format(cmd=cmd))
# Run stress:
# (stress takes the stats as a parameter, and adds
# more as it runs):
stress_sha = stress_shas[operation.get('stress_revision', 'default')]
stats = stress(cmd, revision, stress_sha, stats=stats)
# Wait for all compactions to finish (unless disabled):
if operation.get('wait_for_compaction', True):
compaction_throughput = revision_config.get("compaction_throughput_mb_per_sec", 16)
wait_for_compaction(compaction_throughput=compaction_throughput)
elif operation['type'] == 'nodetool':
if 'nodes' not in operation:
operation['nodes'] = 'all'
if operation['nodes'] in ['all','ALL']:
nodes = [n for n in fab_config['hosts']]
else:
nodes = operation['nodes']
set_nodetool_path(os.path.join(product.get_bin_path(), 'nodetool'))
logger.info("Running nodetool on {nodes} with command: {command}".format(nodes=operation['nodes'], command=operation['command']))
stats['command'] = operation['command']
output = nodetool_multi(nodes, operation['command'])
stats['output'] = output
logger.info("Nodetool command finished on all nodes")
elif operation['type'] == 'cqlsh':
logger.info("Running cqlsh commands on {node}".format(node=operation['node']))
set_cqlsh_path(os.path.join(product.get_bin_path(), 'cqlsh'))
output = cqlsh(operation['script'], operation['node'])
stats['output'] = output.split("\n")
stats['command'] = operation['script']
logger.info("Cqlsh commands finished")
elif operation['type'] == 'bash':
nodes = operation.get('nodes', [n for n in fab_config['hosts']])
logger.info("Running bash commands on: {nodes}".format(nodes=nodes))
stats['output'] = bash(operation['script'], nodes)
stats['command'] = operation['script']
logger.info("Bash commands finished")
elif operation['type'] == 'spark_cassandra_stress':
nodes = operation.get('nodes', [n for n in fab_config['hosts']])
stress_node = config.get('stress_node', None)
# Note: once we have https://datastax.jira.com/browse/CSTAR-617, we should fix this to use
# client-tool when DSE_VERSION >= 4.8.0
# https://datastax.jira.com/browse/DSP-6025: dse client-tool
master_regex = re.compile(r"(.|\n)*(?P<master>spark:\/\/\d+.\d+.\d+.\d+:\d+)(.|\n)*")
master_out = dsetool_cmd(nodes[0], options='sparkmaster')[nodes[0]]
master_match = master_regex.match(master_out)
if not master_match:
raise ValueError('Could not find master address from "dsetool sparkmaster" cmd\n'
'Found output: {f}'.format(f=master_out))
master_string = master_match.group('master')
build_spark_cassandra_stress = bool(distutils.util.strtobool(
str(operation.get('build_spark_cassandra_stress', 'True'))))
remove_existing_spark_data = bool(distutils.util.strtobool(
str(operation.get('remove_existing_spark_data', 'True'))))
logger.info("Running spark_cassandra_stress on {stress_node} "
"using spark.cassandra.connection.host={node} and "
"spark-master {master}".format(stress_node=stress_node,
node=nodes[0],
master=master_string))
output = spark_cassandra_stress(operation['script'], nodes, stress_node=stress_node,
master=master_string,
build_spark_cassandra_stress=build_spark_cassandra_stress,
remove_existing_spark_data=remove_existing_spark_data)
stats['output'] = output.get('output', 'No output captured')
stats['spark_cass_stress_time_in_seconds'] = output.get('stats', {}).get('TimeInSeconds', 'No time captured')
stats['spark_cass_stress_ops_per_second'] = output.get('stats', {}).get('OpsPerSecond', 'No ops/s captured')
logger.info("spark_cassandra_stress finished")
elif operation['type'] == 'ctool':
logger.info("Running ctool with parameters: {command}".format(command=operation['command']))
ctool = Ctool(operation['command'], common.config)
output = execute(ctool.run)
stats['output'] = output
logger.info("ctool finished")
elif operation['type'] == 'dsetool':
if 'nodes' not in operation:
operation['nodes'] = 'all'
if operation['nodes'] in ['all','ALL']:
nodes = [n for n in fab_config['hosts']]
else:
nodes = operation['nodes']
dsetool_options = operation['script']
logger.info("Running dsetool {command} on {nodes}".format(nodes=operation['nodes'], command=dsetool_options))
stats['command'] = dsetool_options
output = dsetool_cmd(nodes=nodes, options=dsetool_options)
stats['output'] = output
logger.info("dsetool command finished on all nodes")
elif operation['type'] == 'dse':
logger.info("Running dse command on {node}".format(node=operation['node']))
output = dse_cmd(node=operation['node'], options=operation['script'])
stats['output'] = output.split("\n")
stats['command'] = operation['script']
logger.info("dse commands finished")
end = datetime.datetime.now()
stats['end_date'] = end.isoformat()
stats['op_duration'] = str(end - start)
log_stats(stats, file=log)
finally:
# Copy node logs:
retrieve_logs_and_create_tarball(job_id=stats['id'])
revision_config['last_log'] = stats['id']
if capture_fincore:
stop_fincore_capture()
log_dir = os.path.join(CSTAR_PERF_LOGS_DIR, stats['id'])
retrieve_fincore_logs(log_dir)
# Restart fincore capture if this is not the last
# operation:
if operation_i < len(operations):
start_fincore_capture(interval=10)
if flamegraph.is_enabled(revision_config):
# Generate and Copy node flamegraphs
execute(flamegraph.stop_perf_agent)
execute(flamegraph.generate_flamegraph, rev_num)
flamegraph_dir = os.path.join(os.path.expanduser('~'),'.cstar_perf', 'flamegraph')
flamegraph_test_dir = os.path.join(flamegraph_dir, last_stress_operation_id)
retrieve_flamegraph(flamegraph_test_dir, rev_num+1)
sh.tar('cfvz', "{}.tar.gz".format(stats['id']), last_stress_operation_id, _cwd=flamegraph_dir)
shutil.rmtree(flamegraph_test_dir)
log_add_data(log, {'title':title,
'subtitle': subtitle,
'revisions': revisions})
if teardown_after_test:
if revisions[-1].get('leave_data', leave_data):
teardown(destroy=False, leave_data=True)
else:
kill_delay = 300 if profiler.yourkit_is_enabled(revision_config) else 0
teardown(destroy=True, leave_data=False, kill_delay=kill_delay)
if profiler.yourkit_is_enabled(revision_config):
yourkit_config = profiler.yourkit_get_config()
yourkit_dir = os.path.join(os.path.expanduser('~'),'.cstar_perf', 'yourkit')
yourkit_test_dir = os.path.join(yourkit_dir, last_stress_operation_id)
retrieve_yourkit(yourkit_test_dir, rev_num+1)
sh.tar('cfvz', "{}.tar.gz".format(stats['id']),
last_stress_operation_id, _cwd=yourkit_dir)
shutil.rmtree(yourkit_test_dir)
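The flamegraph and yourkit blocks at the end of this example tarball a per-test directory and then delete it with shutil.rmtree. A minimal sketch of that archive-then-remove pattern, using the standard tarfile module in place of the sh.tar wrapper (function name and paths are illustrative):

import os
import shutil
import tarfile

def archive_and_remove(test_dir, archive_path):
    # Pack the directory into a gzipped tarball, then reclaim the space.
    with tarfile.open(archive_path, 'w:gz') as tar:
        tar.add(test_dir, arcname=os.path.basename(test_dir))
    shutil.rmtree(test_dir)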
Example 52
Project: akvo-rsr Source File: test_iati_org_export.py
def test_complete_org_export(self):
"""
Test the export of a fully filled organisation.
"""
# Create organisation
organisation = Organisation.objects.create(
name="Test Org Export",
long_name="Test Organisation for IATI org export",
iati_org_id="NL-KVK-OrgExport",
can_create_projects=True,
new_organisation_type=22,
logo=SimpleUploadedFile(
name='test_image.jpg',
content=open(self.image_path, 'rb').read(),
content_type='image/jpeg'
),
)
# Add organisation document, with category and country
doc = OrganisationDocument.objects.create(
organisation=organisation,
document=SimpleUploadedFile(
name='test_image.jpg',
content=open(self.image_path, 'rb').read(),
content_type='image/jpeg'
),
format="application/jpg",
title="Title",
title_language="en",
language="en",
document_date=datetime.date.today(),
)
OrganisationDocumentCategory.objects.create(
document=doc,
category="A1",
)
OrganisationDocumentCountry.objects.create(
document=doc,
country="NL",
text="The Netherlands",
)
# Add total expenditure, with expense line
expenditure = OrganisationTotalExpenditure.objects.create(
organisation=organisation,
period_start=datetime.date.today(),
period_end=datetime.date.today() + datetime.timedelta(days=1),
value=1,
value_date=datetime.date.today(),
currency="EUR",
)
OrganisationExpenseLine.objects.create(
expenditure=expenditure,
reference="ref",
value=1,
currency="EUR",
value_date=datetime.date.today(),
text="Text",
)
# Add total budget, with budget line
budget = OrganisationTotalBudget.objects.create(
organisation=organisation,
status="1",
period_start=datetime.date.today(),
period_end=datetime.date.today() + datetime.timedelta(days=1),
value=1,
value_date=datetime.date.today(),
currency="EUR",
)
OrganisationTotalBudgetLine.objects.create(
budget=budget,
reference="ref",
value=1,
currency="EUR",
value_date=datetime.date.today(),
text="Text",
)
# Add recipient country budget
country_budget = OrganisationCountryBudget.objects.create(
organisation=organisation,
status="1",
country="NL",
text="The Netherlands",
period_start=datetime.date.today(),
period_end=datetime.date.today() + datetime.timedelta(days=1),
value=1,
value_date=datetime.date.today(),
currency="EUR",
)
OrganisationCountryBudgetLine.objects.create(
budget=country_budget,
reference="ref",
value=1,
currency="EUR",
value_date=datetime.date.today(),
text="Text",
)
# Add recipient region budget
region_budget = OrganisationRegionBudget.objects.create(
organisation=organisation,
status="1",
region="110",
region_vocabulary="1",
region_vocabulary_uri="http://akvo.org/",
text="Region",
period_start=datetime.date.today(),
period_end=datetime.date.today() + datetime.timedelta(days=1),
value=1,
value_date=datetime.date.today(),
currency="EUR",
)
OrganisationRegionBudgetLine.objects.create(
budget=region_budget,
reference="ref",
value=1,
currency="EUR",
value_date=datetime.date.today(),
text="Text",
)
# Add recipient org budget
org_budget = OrganisationRecipientOrgBudget.objects.create(
organisation=organisation,
status="1",
recipient_organisation=organisation,
period_start=datetime.date.today(),
period_end=datetime.date.today() + datetime.timedelta(days=1),
value=1,
value_date=datetime.date.today(),
currency="EUR",
)
OrganisationRecipientOrgBudgetLine.objects.create(
budget=org_budget,
reference="ref",
value=1,
currency="EUR",
value_date=datetime.date.today(),
text="Text",
)
# Remove folder
media_root = '/var/akvo/rsr/mediaroot/'
directory = 'db/org/{}/iati-org/'.format(str(organisation.pk))
if os.path.exists(media_root + directory):
shutil.rmtree(media_root + directory)
# Run IATI export
iati_org_export = IatiOrgXML(self.request, [organisation])
iati_org_export.save_file(str(organisation.pk), 'test-org-iati.xml')
# In order to easily access the XML file, generate the IATI file again
iati_org_xml = etree.tostring(iati_org_export.iati_organisations)
# Perform checks on IATI XML file
root_test = self.assertXmlDocument(iati_org_xml)
self.assertXmlNode(root_test, tag="iati-organisations")
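This test guards shutil.rmtree with os.path.exists so that repeated runs do not fail. Passing ignore_errors=True is a one-call alternative; a small sketch with a hypothetical path:

import os
import shutil

export_dir = '/tmp/iati-org-export'  # hypothetical path

# One-call equivalent of the exists() guard above: do nothing if absent.
shutil.rmtree(export_dir, ignore_errors=True)

# The explicit form used by the test, for comparison:
if os.path.exists(export_dir):
    shutil.rmtree(export_dir)

The two are not identical: ignore_errors=True also swallows permission and I/O errors, so the explicit exists() check is the stricter form.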
Example 53
Project: cgat Source File: runZinba.py
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-f", "--input-format", dest="input_format",
type="choice",
choices=("bed", "bam"),
help="input file format [default=%default].")
parser.add_option("-s", "--fragment-size", dest="fragment_size",
type="int",
help="fragment size, used for the extension parameter "
"in Zinba [default=%default].")
parser.add_option("-m", "--zinba-mappability-dir", dest="mappability_dir",
type="string",
help="mappability_dir [default=%default].")
parser.add_option("-b", "--bit-file", dest="bit_filename",
type="string",
help="2bit genome filename [default=%default].")
parser.add_option("-c", "--control-filename", dest="control_filename",
type="string",
help="filename of input/control data in bed format "
"[default=%default].")
parser.add_option("-i", "--zinba-index-dir", dest="index_dir", type="string",
help="index directory [default=%default].")
parser.add_option("-t", "--threads", dest="threads", type="int",
help="number of threads to use [default=%default].")
parser.add_option("-q", "--fdr-threshold", dest="fdr_threshold",
type="float",
help="fdr threshold [default=%default].")
parser.add_option("-a", "--zinba-alignability-threshold",
dest="alignability_threshold", type="int",
help="alignability threshold [default=%default].")
parser.add_option("-p", "--aggregate-by-contig", dest="per_contig",
action="store_true",
help="run analysis per chromosome [default=%default]")
parser.add_option("-w", "--temp-dir", dest="tempdir", type="string",
help="use existing directory as temporary directory "
"[default=%default].")
parser.add_option("--keep-temp", dest="keep_temp", action="store_true",
help="keep temporary directory [default=%default]")
parser.add_option("--action", dest="action", type="choice",
choices=("full", "count", "predict", "model"),
help="action to perform [default=%default]")
parser.add_option("--zinba-improvement", dest="improvement", type="float",
help="relative improvement of likelihood until "
"convergence [default=%default]")
parser.add_option("--min-insert-size", dest="min_insert_size", type="int",
help="minimum insert size for paired end data "
"[default=%default]")
parser.add_option("--max-insert-size", dest="max_insert_size", type="int",
help="maximum insert size for paired end data "
"[default=%default]")
parser.set_defaults(
input_format="bed",
fragment_size=200,
mappability_dir=None,
threads=1,
alignability_threshold=1,
bit_filename=None,
fdr_threshold=0.05,
tempdir=None,
winsize=250,
offset=125,
cnvWinSize=1e+05,
cnvOffset=2500,
per_contig=False,
keep_temp=False,
min_insert_size=0,
max_insert_size=1000,
filelist="files.list",
selectchr="chr19",
action="full",
improvement=0.00001,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
if len(args) != 2:
raise ValueError(
"please specify a filename with sample data and an output file")
filename_sample, filename_output = args[0], args[1]
filename_sample = os.path.abspath(filename_sample)
filename_output = os.path.abspath(filename_output)
if options.control_filename:
filename_control = os.path.abspath(options.control_filename)
else:
filename_control = None
# load Zinba
R.library('zinba')
if not options.tempdir:
tmpdir = tempfile.mkdtemp()
else:
tmpdir = options.tempdir
E.info("changing to temporary directory %s" % tmpdir)
os.chdir(tmpdir)
if options.input_format == "bam":
E.info("converting bam files to bed")
if not os.path.exists(os.path.join(tmpdir, "sample.bed")):
filename_sample = bamToBed(
filename_sample,
os.path.join(tmpdir, "sample.bed"))
else:
E.info("using existing file %(tmpdir)s/sample.bed" %
locals())
filename_sample = os.path.join(
tmpdir, "sample.bed")
if filename_control:
if not os.path.exists(os.path.join(tmpdir, "control.bed")):
filename_control = bamToBed(
filename_control,
os.path.join(tmpdir, "control.bed"))
else:
E.info("using existing file %(tmpdir)s/control.bed" %
locals())
filename_control = os.path.join(
tmpdir, "control.bed")
fragment_size = options.fragment_size
threads = options.threads
bit_filename = options.bit_filename
mappability_dir = options.mappability_dir
fdr_threshold = options.fdr_threshold
tol = options.improvement
contigs = E.run(
"twoBitInfo %(bit_filename)s %(tmpdir)s/contig_sizes" % locals())
contig2size = dict(
[x.split() for x in IOTools.openFile(
os.path.join(tmpdir, "contig_sizes"))])
outdir = filename_output + "_files"
E.info('saving intermediate results in %s' % outdir)
if not os.path.exists(outdir):
os.mkdir(outdir)
filelist = os.path.join(outdir, filename_output + ".list")
modelfile = os.path.join(outdir, filename_output + ".model")
winfile = os.path.join(outdir, filename_output + ".wins")
winSize = 250
offset = 125
cnvWinSize = 100000
cnvOffset = 0
winGap = 0
peakconfidence = 1.0 - fdr_threshold
selectchr = options.selectchr
if not os.path.exists(os.path.join(tmpdir, "basecount")):
E.info("computing counts")
R('''basealigncount(inputfile='%(filename_sample)s',
outputfile='%(tmpdir)s/basecount',
extension=%(fragment_size)i,
filetype='bed',
twoBitFile='%(bit_filename)s' )
''' % locals())
else:
E.info("using existing counts")
# tried incremental updates
# for contig, size in contig2size.iteritems():
# for size in
# fn = os.path.join( tmpdir, "sample_%(contig)s_win%(size)ibp_offset%(offset)ibp.txt" % locals() )
if options.action == "count":
E.info("computing window counts only - saving results in %s" % outdir)
R('''buildwindowdata(
seq='%(filename_sample)s',
align='%(mappability_dir)s',
input='%(filename_control)s',
twoBit='%(bit_filename)s',
winSize=%(winSize)i,
offset=%(offset)i,
cnvWinSize=%(cnvWinSize)i,
cnvOffset=%(cnvOffset)i,
filelist='%(filelist)s',
filetype='bed',
extension=%(fragment_size)s,
outdir='%(outdir)s/') ''' % locals())
elif options.action == "model":
# The important option is buildwin = 0
# parameterized for broad == FALSE and input present
# see zinba.R
# model selection only on chr19.
R('''run.zinba(
filelist='%(filelist)s',
formula=NULL,formulaE=NULL,formulaZ=NULL,
outfile='%(filename_output)s',
seq='%(filename_sample)s',
input='%(filename_control)s',
filetype='bed',
align='%(mappability_dir)s',
twoBit='%(bit_filename)s',
extension=%(fragment_size)s,
winSize=%(winSize)i,
offset=%(offset)i,
cnvWinSize=%(cnvWinSize)i,
cnvOffset=%(cnvOffset)i,
basecountfile='%(tmpdir)s/basecount',
buildwin=0,
threshold=%(fdr_threshold)f,
pquant=1,
peakconfidence=%(peakconfidence)f,
winGap=%(winGap)i,
tol=%(tol)f,
initmethod="count",
method="mixture",
numProc=%(threads)i,
printFullOut=1,
interaction=FALSE,
selectmodel=TRUE,
selectchr='%(selectchr)s',
selectcovs=c("input_count"),
selecttype="complete",
FDR=TRUE)''' % locals())
elif options.action == "predict":
# The important option is buildwin = 0 and selectmodel = FALSE
# parameterized for broad == FALSE and input present
# see zinba.R
# model selection only on chr19.
if not os.path.exists(modelfile):
raise OSError("model file %s does not exist" % modelfile)
E.info("reading model from %s" % modelfile)
R('''
final=read.table('%(modelfile)s', header=T, sep="\t")
final=final[final$fail==0,]
bestBIC=which.min(final$BIC)
formula=as.formula(paste("exp_count~",final$formula[bestBIC]))
formulaE=as.formula(paste("exp_count~",final$formulaE[bestBIC]))
formulaZ=as.formula(paste("exp_count~",final$formulaZ[bestBIC]))
cat("Background formula is:\n\t")
print(formula)
cat("Enrichment formula is:\n\t")
print(formulaE)
cat("Zero-inflated formula is:\n\t")
print(formulaZ)
''' % locals())
E.info("predicting peaks")
R('''run.zinba(
filelist='%(filelist)s',
outfile='%(filename_output)s',
seq='%(filename_sample)s',
input='%(filename_control)s',
filetype='bed',
align='%(mappability_dir)s',
twoBit='%(bit_filename)s',
extension=%(fragment_size)s,
winSize=%(winSize)i,
offset=%(offset)i,
cnvWinSize=%(cnvWinSize)i,
cnvOffset=%(cnvOffset)i,
basecountfile='%(tmpdir)s/basecount',
buildwin=0,
threshold=%(fdr_threshold)f,
pquant=1,
winGap=%(winGap)i,
initmethod="count",
selectchr='%(selectchr)s',
tol=%(tol)f,
method="mixture",
numProc=%(threads)i,
printFullOut=1,
interaction=FALSE,
selectmodel=FALSE,
formula=formula,
formulaE=formulaE,
formulaZ=formulaZ,
peakconfidence=%(peakconfidence)f,
FDR=TRUE)''' % locals())
elif options.action == "per_contig":
E.info("processing per chromosome")
for contig, size in contig2size.items():
if contig not in ("chr16",):
continue
E.info("processing contig %s" % contig)
filename_sample_contig = filename_sample + "_%s" % contig
filename_control_contig = filename_control + "_%s" % contig
if not os.path.exists(filename_output + "_files"):
os.mkdir(filename_output + "_files")
filename_output_contig = os.path.join(
filename_output + "_files", contig)
filename_basecounts_contig = os.path.join(
tmpdir, "basecount_%s" % contig)
E.run(
"grep %(contig)s < %(filename_sample)s > %(filename_sample_contig)s" % locals())
E.run(
"grep %(contig)s < %(filename_control)s > %(filename_control_contig)s" % locals())
if not os.path.exists(filename_basecounts_contig):
E.info("computing counts")
R('''basealigncount( inputfile='%(filename_sample_contig)s',
outputfile='%(filename_basecounts_contig)s',
extension=%(fragment_size)i,
filetype='bed',
twoBitFile='%(bit_filename)s' )
''' % locals())
else:
E.info("using existing counts")
# run zinba, do not build window data
R('''zinba( refinepeaks=1,
seq='%(filename_sample_contig)s',
input='%(filename_control_contig)s',
filetype='bed',
align='%(mappability_dir)s',
twoBit='%(bit_filename)s',
outfile='%(filename_output_contig)s',
extension=%(fragment_size)s,
basecountfile='%(filename_basecounts_contig)s',
numProc=%(threads)i,
threshold=%(fdr_threshold)f,
broad=FALSE,
printFullOut=0,
interaction=FALSE,
mode='peaks',
FDR=TRUE) ''' % locals())
elif options.action == "full":
# run zinba, build window data and refine peaks
# Note that zinba() uses 'chr22' to select model
# which is not present in mouse. So call run.zinba
# directly.
R('''run.zinba(
refinepeaks=1,
buildwin=1,
seq='%(filename_sample)s',
input='%(filename_control)s',
filetype='bed',
align='%(mappability_dir)s',
twoBit='%(bit_filename)s',
outfile='%(filename_output)s',
extension=%(fragment_size)s,
winSize=%(winSize)i,
offset=%(offset)i,
basecountfile='%(tmpdir)s/basecount',
numProc=%(threads)i,
threshold=%(fdr_threshold)f,
pquant=1,
winGap=%(winGap)i,
selectchr='%(selectchr)s',
interaction=FALSE,
method="mixture",
cnvWinSize=%(cnvWinSize)i,
cnvOffset=%(cnvOffset)i,
selectmodel=TRUE,
selectcovs=c("input_count"),
selecttype="complete",
initmethod="count",
printFullOut=1,
diff=0,
pWinSize=200,
peakconfidence=%(peakconfidence)f,
FDR=TRUE) ''' % locals())
if not (options.tempdir or options.keep_temp):
shutil.rmtree(tmpdir)
# write footer and output benchmark information.
E.Stop()
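The script above only deletes the temporary directory when it created it itself and --keep-temp is not set. A compact sketch of that lifecycle with tempfile.mkdtemp and a finally block, under the same assumptions (function names are illustrative):

import shutil
import tempfile

def run_with_scratch(work, keep_temp=False):
    tmpdir = tempfile.mkdtemp()
    try:
        return work(tmpdir)  # do the real processing inside tmpdir
    finally:
        # Mirror the script's rule: only delete what we created ourselves,
        # and only when the caller has not asked to keep it for debugging.
        if not keep_temp:
            shutil.rmtree(tmpdir)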
Example 54
Project: Bluto Source File: output.py
def action_output_vuln_zone(google_results, bing_results, linkedin_results, time_spent_email, time_spent_total, clean_dump, sub_intrest, domain, report_location, company, data_mine):
info('Output action_output_vuln_zone: Start')
linkedin_evidence_results = []
email_evidence_results = []
email_results = []
email_seen = []
url_seen = []
person_seen = []
final_emails = []
for email, url in google_results:
try:
e1, e2 = email.split(',')
if url not in email_seen:
email_seen.append(url)
email_evidence_results.append((str(e2).replace(' ',''),url))
email_evidence_results.append((str(e1).replace(' ',''),url))
email_results.append((str(e2).replace(' ','')))
email_results.append((str(e1).replace(' ','')))
except ValueError:
if url not in email_seen:
email_seen.append(url)
email_evidence_results.append((str(email).replace(' ',''),url))
email_results.append(str(email).replace(' ',''))
for e, u in bing_results:
email_results.append(e)
if u not in url_seen:
url_seen.append(u)
email_evidence_results.append((e, u))
for url, person, description in linkedin_results:
if person not in person_seen:
person_seen.append(person)
linkedin_evidence_results.append((url, person, description))
linkedin_evidence_results.sort(key=lambda tup: tup[1])
sorted_email = set(sorted(email_results))
for email in sorted_email:
if email == '[]':
pass
elif email == '@' + domain:
pass
else:
final_emails.append(email)
email_count = len(final_emails)
staff_count = len(person_seen)
f_emails = sorted(final_emails)
pwned_results = action_pwned(f_emails)
c_accounts = len(pwned_results)
print '\n\nEmail Addresses:\n'
write_html(email_evidence_results, linkedin_evidence_results, pwned_results, report_location, company, data_mine)
if f_emails:
for email in f_emails:
print str(email).replace("u'","").replace("'","").replace('[','').replace(']','')
else:
print '\tNo Data To Be Found'
print '\nCompromised Accounts:\n'
if pwned_results:
sorted_pwned = sorted(pwned_results)
for account in sorted_pwned:
print 'Account: \t{}'.format(account[0])
print 'Domain: \t{}'.format(account[1])
print 'Date: \t{}\n'.format(account[3])
else:
print '\tNo Data To Be Found'
print '\nLinkedIn Results:\n'
sorted_person = sorted(person_seen)
if sorted_person:
for person in sorted_person:
print person
else:
print '\tNo Data To Be Found'
if data_mine is not None:
user_names = data_mine[0]
software_list = data_mine[1]
download_count = data_mine[2]
download_list = data_mine[3]
username_count = len(user_names)
software_count = len(software_list)
print '\nData Found In Document MetaData'
print '\nPotential Usernames:\n'
if user_names:
for user in user_names:
print '\t' + colored(user, 'red')
else:
print '\tNo Data To Be Found'
print '\nSoftware And Versions Found:\n'
if software_list:
for software in software_list:
print '\t' + colored(software, 'red')
else:
print '\tNo Data To Be Found'
else:
user_names = []
software_list = []
download_count = 0
username_count = len(user_names)
software_count = len(software_list)
target_dict = dict((x.split(' ') for x in clean_dump))
clean_target = collections.OrderedDict(sorted(target_dict.items()))
print "\nProcessed Dump\n"
bruted_count = len(clean_target)
for item in clean_target:
if item in sub_intrest:
print colored(item, 'red'), colored("\t" + clean_target[item], 'red')
else:
print item, "\t" + target_dict[item]
time_spent_email_f = str(datetime.timedelta(seconds=(time_spent_email))).split('.')[0]
time_spent_total_f = str(datetime.timedelta(seconds=(time_spent_total))).split('.')[0]
print '\nHosts Identified: {}' .format(str(bruted_count))
print 'Potential Emails Found: {}' .format(str(email_count))
print 'Potential Staff Members Found: {}' .format(str(staff_count))
print 'Compromised Accounts: {}' .format(str(c_accounts))
print 'Potential Usernames Found: {}'.format(username_count)
print 'Potential Software Found: {}'.format(software_count)
print 'Documents Downloaded: {}'.format(download_count)
print "Email Enumeration:", time_spent_email_f
print "Total Time:", time_spent_total_f
info('Hosts Identified: {}' .format(str(bruted_count)))
info("Total Time:" .format(str(time_spent_total_f)))
info("Email Enumeration: {}" .format(str(time_spent_email_f)))
info('Compromised Accounts: {}' .format(str(c_accounts)))
info('Potential Usernames Found: {}'.format(username_count))
info('Potential Software Found: {}'.format(software_count))
info('Documents Downloaded: {}'.format(download_count))
info('Potential Staff Members Found: {}' .format(str(staff_count)))
info('Potential Emails Found: {}' .format(str(email_count)))
info('DNS Vuln Run completed')
info('Output action_output_vuln_zone: Complete')
domain_r = domain.split('.')
docs = os.path.expanduser('~/Bluto/doc/{}/'.format(domain_r[0]))
answers = ['no','n','y','yes']
while True:
answer = raw_input("\nWould you like to keep all local data?\n(Local Logs, Downloded Docuements, HTML Evidence Report)\n\nYes|No:").lower()
if answer in answers:
if answer == 'y' or answer == 'yes':
print '\nThe documents are located here: {}'.format(docs)
print 'The logs are located here: {}.'.format(LOG_DIR)
print "\nAn evidence report has been written to {}\n".format(report_location)
while True:
answer = raw_input("Would you like to open this report now? ").lower()
if answer in answers:
if answer == 'y' or answer == 'yes':
print '\nOpening {}' .format(report_location)
webbrowser.open('file://' + str(report_location))
break
else:
break
else:
print 'Your answer needs to be either yes|y|no|n rather than, {}' .format(answer)
break
else:
shutil.rmtree(docs)
shutil.rmtree(LOG_DIR)
os.remove(report_location)
break
else:
print '\tYour answer needs to be either yes|y|no|n rather than, {}' .format(answer)
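The cleanup branch above removes two directory trees and one file, any of which may be missing after a partial run. A more defensive sketch (function name is illustrative):

import os
import shutil

def purge_local_data(paths):
    # Remove a mix of directories and files, skipping anything missing.
    for path in paths:
        if os.path.isdir(path):
            shutil.rmtree(path, ignore_errors=True)
        elif os.path.isfile(path):
            os.remove(path)

# Hypothetical usage mirroring the cleanup branch above:
# purge_local_data([docs, LOG_DIR, report_location])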
Example 55
Project: Customizer Source File: rebuild.py
def main():
common.check_filesystem()
# Basic sanity checks of files and paths that absolutely need to exist.
message.sub_info('Doing sanity checks')
lsb_file = misc.join_paths(config.FILESYSTEM_DIR, 'etc/lsb-release')
if not os.path.isfile(lsb_file):
raise(message.exception(lsb_file + ' does not exist'))
isolinux_dir = misc.join_paths(config.ISO_DIR, 'isolinux')
if not os.path.isdir(isolinux_dir):
raise(message.exception(isolinux_dir + ' does not exist'))
if misc.search_file('999:999', misc.join_paths(config.FILESYSTEM_DIR, 'etc/passwd')):
raise(message.exception('User with UID 999 exists, this means automatic login will fail'))
elif misc.search_file('999:999', misc.join_paths(config.FILESYSTEM_DIR, 'etc/group')):
raise(message.exception('Group with GID 999 exists, this means automatic login will fail'))
casper_dir = misc.join_paths(config.ISO_DIR, 'casper')
if not os.path.isdir(casper_dir):
message.sub_debug('Creating', casper_dir)
os.makedirs(casper_dir)
base_file = misc.join_paths(config.ISO_DIR, '.disk/base_installable')
if os.path.isfile(misc.join_paths(config.FILESYSTEM_DIR, 'usr/bin/ubiquity')):
if not os.path.isfile(base_file):
message.sub_debug('Creating', base_file)
misc.write_file(base_file, '')
elif os.path.isfile(base_file):
message.sub_debug('Removing', base_file)
os.unlink(base_file)
# Acquire distribution information from the FileSystem
message.sub_info('Gathering information')
arch = misc.chroot_exec(('dpkg', '--print-architecture'), prepare=False, \
mount=False, output=True)
distrib = common.get_value(config.FILESYSTEM_DIR + '/etc/lsb-release', \
'DISTRIB_ID=')
release = common.get_value(config.FILESYSTEM_DIR + '/etc/lsb-release', \
'DISTRIB_RELEASE=')
message.sub_debug('Architecture', arch)
message.sub_debug('Distribution (DISTRIB_ID)', distrib)
message.sub_debug('Release (DISTRIB_RELEASE)', release)
# Remove files, by name, that we know we must repopulate if they exist.
message.sub_info('Cleaning up')
cleanup_files = ['casper/filesystem.squashfs', 'casper/initrd.lz', \
'casper/vmlinuz', 'casper/vmlinuz.efi', 'casper/filesystem.manifest', \
'casper/filesystem.size']
cleanup_files.extend(glob.glob('.disk/casper-uuid-*'))
for sfile in cleanup_files:
full_file = misc.join_paths(config.ISO_DIR, sfile)
if os.path.exists(full_file):
message.sub_debug('Removing', full_file)
os.unlink(full_file)
# Define the checksum files, and the ISO filename.
md5sum_iso_file = misc.join_paths(config.WORK_DIR, 'md5sum')
sha1sum_iso_file = misc.join_paths(config.WORK_DIR, 'sha1sum')
sha256sum_iso_file = misc.join_paths(config.WORK_DIR, 'sha256sum')
iso_file = '%s/%s-%s-%s.iso' % (config.WORK_DIR, distrib, arch, release)
if os.path.exists(iso_file):
message.sub_debug('Removing', iso_file)
os.unlink(iso_file)
if os.path.exists(md5sum_iso_file):
message.sub_debug('Removing', md5sum_iso_file)
os.unlink(md5sum_iso_file)
if os.path.exists(sha1sum_iso_file):
message.sub_debug('Removing', sha1sum_iso_file)
os.unlink(sha1sum_iso_file)
if os.path.exists(sha256sum_iso_file):
message.sub_debug('Removing', sha256sum_iso_file)
os.unlink(sha256sum_iso_file)
# Detect files needed for booting, the kernel, initramfs, xen and anything else.
detect_boot()
if not vmlinuz:
message.sub_info('Re-installing kernel')
misc.chroot_exec(('apt-get', 'purge', '--yes', 'linux-image*', '-q'))
misc.chroot_exec(('apt-get', 'install', '--yes', \
'linux-image-generic', '-q'))
misc.chroot_exec(('apt-get', 'clean'))
else:
message.sub_info('Updating initramfs')
misc.chroot_exec(('update-initramfs', '-k', 'all', '-t', '-u'))
detect_boot()
if not initrd or not vmlinuz:
raise(message.exception('Missing boot file (initrd or vmlinuz)'))
else:
message.sub_info('Copying boot files')
message.sub_debug('Initrd', initrd)
message.sub_debug('Vmlinuz', vmlinuz)
misc.copy_file(initrd, misc.join_paths(config.ISO_DIR, 'casper/initrd.lz'))
# FIXME: extend to support grub
efi_boot_entry = False
isolinux_dir = config.ISO_DIR + '/isolinux'
if os.path.isdir(isolinux_dir):
for sfile in os.listdir(isolinux_dir):
if sfile.endswith('.cfg') and misc.search_file('vmlinuz.efi', isolinux_dir + '/' + sfile):
message.sub_debug('Found EFI entry in isolinux conf', sfile)
efi_boot_entry = True
if os.path.isdir(misc.join_paths(config.ISO_DIR, 'efi/boot')) or \
efi_boot_entry:
message.sub_debug('Copying EFI vmlinuz')
misc.copy_file(vmlinuz, misc.join_paths(config.ISO_DIR, \
'casper/vmlinuz.efi'))
os.link(misc.join_paths(config.ISO_DIR, \
'casper/vmlinuz.efi'), misc.join_paths(config.ISO_DIR, \
'casper/vmlinuz'))
# EFI Kernels are still loadable by grub, modern ISOs lack a bare vmlinuz.
# mkisofs/genisoimage -cache-inodes reuses hard linked inodes.
else:
misc.copy_file(vmlinuz, misc.join_paths(config.ISO_DIR, 'casper/vmlinuz'))
# We only need to copy the bare kernel if we're not using EFI at all.
# Copy optional boot-enablement packages onto the ISO, if found.
if mt86plus:
message.sub_debug('Memtest86+ kernel', mt86plus)
misc.copy_file(mt86plus, misc.join_paths(config.ISO_DIR, 'install/mt86plus'))
if xen_kernel:
message.sub_debug('Xen kernel', xen_kernel)
misc.copy_file(xen_kernel, \
misc.join_paths(config.ISO_DIR, 'casper/' + os.path.basename(xen_kernel)))
if xen_efi:
message.sub_debug('Xen EFI kernel', xen_efi)
misc.copy_file(xen_efi, \
misc.join_paths(config.ISO_DIR, 'casper/' + os.path.basename(xen_efi)))
if ipxe_kernel:
message.sub_debug('iPXE kernel', ipxe_kernel)
misc.copy_file(ipxe_kernel, \
misc.join_paths(config.ISO_DIR, 'casper/' + os.path.basename(ipxe_kernel)))
if ipxe_efi:
message.sub_debug('iPXE EFI kernel', ipxe_efi)
misc.copy_file(ipxe_efi, \
misc.join_paths(config.ISO_DIR, 'casper/' + os.path.basename(ipxe_efi)))
message.sub_info('Extracting casper UUID')
confdir = config.FILESYSTEM_DIR + '/conf'
if os.path.isdir(confdir):
shutil.rmtree(confdir)
os.makedirs(confdir)
try:
misc.chroot_exec('zcat ' + initrd.replace(config.FILESYSTEM_DIR, '') + ' | cpio --quiet -id conf/uuid.conf', \
shell=True, cwd=config.FILESYSTEM_DIR)
kernel = re.search('initrd.img-*.*.*-*-(.*)', initrd).group(1)
message.sub_debug('Kernel', kernel)
misc.copy_file(confdir + '/uuid.conf', misc.join_paths(config.ISO_DIR, \
'.disk/casper-uuid-' + kernel))
finally:
shutil.rmtree(confdir)
# Define some default compression parameters, including a 1MB blocksize for all compressors.
compression_parameters = ('-b', '1048576', '-comp', config.COMPRESSION)
if config.COMPRESSION == 'xz': # Append additional compression parameters for xz.
# Using the branch-call-jump filter provides a compression boost with executable code.
# This can save a hundred megabytes easily, on an 800MB ISO. The dictionary size must
# match the block size, and it's advisable to use larger block sizes, like 1MB or 4MB.
compression_parameters += ('-Xbcj', 'x86', '-Xdict-size', '100%')
message.sub_info('SquashFS Compression parameters', compression_parameters)
# Create the compressed filesystem
message.sub_info('Creating SquashFS compressed filesystem')
make_squash_fs = ('mksquashfs', config.FILESYSTEM_DIR, \
misc.join_paths(config.ISO_DIR, 'casper/filesystem.squashfs'), \
'-wildcards', '-no-recovery', '-noappend', \
'-ef', os.path.join(sys.prefix, 'share/customizer/exclude.list'))
misc.system_command(make_squash_fs + compression_parameters)
message.sub_info('Checking SquashFS filesystem size')
sfs_size = os.path.getsize(misc.join_paths(config.ISO_DIR, \
'casper/filesystem.squashfs'))
message.sub_debug('SquashFS filesystem size', sfs_size)
if sfs_size > 4000000000:
raise(message.exception('The SquashFS filesystem size is greater than 4GB'))
message.sub_info('Creating filesystem.size')
fs_size = 0
for root, subdirs, files in os.walk(config.FILESYSTEM_DIR):
for sfile in files:
sfull = os.path.join(root, sfile)
if os.path.islink(sfull):
continue
# FIXME: respect ignored files from exclude.list
fs_size += os.path.getsize(sfull)
message.sub_debug('Root filesystem size', fs_size)
misc.write_file(misc.join_paths(config.ISO_DIR, \
'casper/filesystem.size'), str(fs_size))
message.sub_info('Creating filesystem.manifest')
lpackages = misc.chroot_exec(('dpkg-query', '-W', \
'--showformat=${Package} ${Version}\\n'), prepare=False, mount=False, \
output=True)
message.sub_debug('Packages', lpackages)
misc.write_file(misc.join_paths(config.ISO_DIR, \
'casper/filesystem.manifest'), lpackages)
# FIXME: do some kung-fu to check if packages are installed
# and remove them from filesystem.manifest-remove if they are not
# Creating a md5sum.txt file fixes lubuntu's integrity check.
md5sums_file = misc.join_paths(config.ISO_DIR, 'md5sum.txt')
if os.path.isfile(md5sums_file):
message.sub_info('Creating md5sum.txt')
misc.write_file(md5sums_file, '')
for sfile in misc.list_files(config.ISO_DIR):
if sfile.endswith('md5sum.txt'):
continue
if sfile.endswith('SHA256SUMS'):
continue
message.sub_debug('MD5 Checksumming', sfile)
checksum = misc.generate_hash_for_file('md5', sfile)
misc.append_file(md5sums_file, checksum + ' .' + \
sfile.replace(config.ISO_DIR, '') +'\n')
# Creating a SHA256SUMS file fixes ubuntu-mini-remix's integrity check.
shasums_file = misc.join_paths(config.ISO_DIR, 'SHA256SUMS')
if os.path.isfile(shasums_file):
message.sub_info('Creating SHA256SUMS')
misc.write_file(shasums_file, '')
for sfile in misc.list_files(config.ISO_DIR):
if sfile.endswith('md5sum.txt'):
continue
if sfile.endswith('SHA256SUMS'):
continue
message.sub_debug('SHA256 Checksumming', sfile)
checksum = misc.generate_hash_for_file('sha256', sfile)
misc.append_file(shasums_file, checksum + ' .' + \
sfile.replace(config.ISO_DIR, '') +'\n')
# Create the ISO filesystem
message.sub_info('Creating ISO')
os.chdir(config.ISO_DIR)
misc.system_command(('xorriso', '-as', 'mkisofs', '-r', '-V', \
distrib + '-' + arch + '-' + release, '-b', 'isolinux/isolinux.bin', \
'-c', 'isolinux/boot.cat', '-J', '-l', '-no-emul-boot', \
'-boot-load-size', '4', '-boot-info-table', '-o', iso_file, \
'-cache-inodes', '-input-charset', 'utf-8', '.'))
message.sub_info('Creating ISO checksums')
md5checksum = misc.generate_hash_for_file('md5', iso_file)
message.sub_info('ISO md5 checksum', md5checksum)
misc.append_file(md5sum_iso_file, md5checksum + ' .' + \
iso_file.replace(config.WORK_DIR, '') +'\n')
sha1checksum = misc.generate_hash_for_file('sha1', iso_file)
message.sub_info('ISO sha1 checksum', sha1checksum)
misc.append_file(sha1sum_iso_file, sha1checksum + ' .' + \
iso_file.replace(config.WORK_DIR, '') +'\n')
sha256checksum = misc.generate_hash_for_file('sha256', iso_file)
message.sub_info('ISO sha256 checksum', sha256checksum)
misc.append_file(sha256sum_iso_file, sha256checksum + ' .' + \
iso_file.replace(config.WORK_DIR, '') +'\n')
message.sub_info('Successfully created ISO image', iso_file)
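Two rmtree idioms appear in this example: the conf/ scratch directory is wiped and recreated before the casper UUID extraction, and a finally block removes it again even on failure. The reset step, factored into a small helper (name is illustrative):

import os
import shutil

def reset_dir(path):
    # Start from an empty directory whether or not it existed before.
    if os.path.isdir(path):
        shutil.rmtree(path)
    os.makedirs(path)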
Example 56
Project: pth-toolkit Source File: backend.py
def provision(self):
from samba.provision import ProvisioningError, setup_path
# Wipe the directories so we can start
shutil.rmtree(os.path.join(self.ldapdir, "db"), True)
# Allow the test scripts to turn off fsync() for OpenLDAP as for TDB
# and LDB
nosync_config = ""
if self.nosync:
nosync_config = "dbnosync"
lnkattr = self.schema.linked_attributes()
refint_attributes = ""
memberof_config = "# Generated from Samba4 schema\n"
for att in lnkattr.keys():
if lnkattr[att] is not None:
refint_attributes = refint_attributes + " " + att
memberof_config += read_and_sub_file(
setup_path("memberof.conf"), {
"MEMBER_ATTR": att,
"MEMBEROF_ATTR" : lnkattr[att] })
refint_config = read_and_sub_file(setup_path("refint.conf"),
{ "LINK_ATTRS" : refint_attributes})
attrs = ["linkID", "lDAPDisplayName"]
res = self.schema.ldb.search(expression="(&(objectclass=attributeSchema)(searchFlags:1.2.840.113556.1.4.803:=1))", base=self.names.schemadn, scope=SCOPE_ONELEVEL, attrs=attrs)
index_config = ""
for i in range (0, len(res)):
index_attr = res[i]["lDAPDisplayName"][0]
if index_attr == "objectGUID":
index_attr = "entryUUID"
index_config += "index " + index_attr + " eq\n"
# generate serverids, ldap-urls and syncrepl-blocks for mmr hosts
mmr_on_config = ""
mmr_replicator_acl = ""
mmr_serverids_config = ""
mmr_syncrepl_schema_config = ""
mmr_syncrepl_config_config = ""
mmr_syncrepl_user_config = ""
if self.ol_mmr_urls is not None:
# For now, make these equal
mmr_pass = self.ldapadminpass
url_list = filter(None,self.ol_mmr_urls.split(','))
for url in url_list:
self.logger.info("Using LDAP-URL: "+url)
if len(url_list) == 1:
raise ProvisioningError("At least 2 LDAP-URLs needed for MMR!")
mmr_on_config = "MirrorMode On"
mmr_replicator_acl = " by dn=cn=replicator,cn=samba read"
serverid = 0
for url in url_list:
serverid = serverid + 1
mmr_serverids_config += read_and_sub_file(
setup_path("mmr_serverids.conf"), {
"SERVERID": str(serverid),
"LDAPSERVER": url })
rid = serverid * 10
rid = rid + 1
mmr_syncrepl_schema_config += read_and_sub_file(
setup_path("mmr_syncrepl.conf"), {
"RID" : str(rid),
"MMRDN": self.names.schemadn,
"LDAPSERVER" : url,
"MMR_PASSWORD": mmr_pass})
rid = rid + 1
mmr_syncrepl_config_config += read_and_sub_file(
setup_path("mmr_syncrepl.conf"), {
"RID" : str(rid),
"MMRDN": self.names.configdn,
"LDAPSERVER" : url,
"MMR_PASSWORD": mmr_pass})
rid = rid + 1
mmr_syncrepl_user_config += read_and_sub_file(
setup_path("mmr_syncrepl.conf"), {
"RID" : str(rid),
"MMRDN": self.names.domaindn,
"LDAPSERVER" : url,
"MMR_PASSWORD": mmr_pass })
# OpenLDAP cn=config initialisation
olc_syncrepl_config = ""
olc_mmr_config = ""
# if mmr = yes, generate cn=config-replication directives
# and olc_seed.lif for the other mmr-servers
if self.ol_mmr_urls is not None:
serverid = 0
olc_serverids_config = ""
olc_syncrepl_seed_config = ""
olc_mmr_config += read_and_sub_file(
setup_path("olc_mmr.conf"), {})
rid = 500
for url in url_list:
serverid = serverid + 1
olc_serverids_config += read_and_sub_file(
setup_path("olc_serverid.conf"), {
"SERVERID" : str(serverid), "LDAPSERVER" : url })
rid = rid + 1
olc_syncrepl_config += read_and_sub_file(
setup_path("olc_syncrepl.conf"), {
"RID" : str(rid), "LDAPSERVER" : url,
"MMR_PASSWORD": mmr_pass})
olc_syncrepl_seed_config += read_and_sub_file(
setup_path("olc_syncrepl_seed.conf"), {
"RID" : str(rid), "LDAPSERVER" : url})
setup_file(setup_path("olc_seed.ldif"), self.olcseedldif,
{"OLC_SERVER_ID_CONF": olc_serverids_config,
"OLC_PW": self.ldapadminpass,
"OLC_SYNCREPL_CONF": olc_syncrepl_seed_config})
# end olc
setup_file(setup_path("slapd.conf"), self.slapdconf,
{"DNSDOMAIN": self.names.dnsdomain,
"LDAPDIR": self.ldapdir,
"DOMAINDN": self.names.domaindn,
"CONFIGDN": self.names.configdn,
"SCHEMADN": self.names.schemadn,
"MEMBEROF_CONFIG": memberof_config,
"MIRRORMODE": mmr_on_config,
"REPLICATOR_ACL": mmr_replicator_acl,
"MMR_SERVERIDS_CONFIG": mmr_serverids_config,
"MMR_SYNCREPL_SCHEMA_CONFIG": mmr_syncrepl_schema_config,
"MMR_SYNCREPL_CONFIG_CONFIG": mmr_syncrepl_config_config,
"MMR_SYNCREPL_USER_CONFIG": mmr_syncrepl_user_config,
"OLC_SYNCREPL_CONFIG": olc_syncrepl_config,
"OLC_MMR_CONFIG": olc_mmr_config,
"REFINT_CONFIG": refint_config,
"INDEX_CONFIG": index_config,
"NOSYNC": nosync_config})
self.setup_db_config(os.path.join(self.ldapdir, "db", "user"))
self.setup_db_config(os.path.join(self.ldapdir, "db", "config"))
self.setup_db_config(os.path.join(self.ldapdir, "db", "schema"))
if not os.path.exists(os.path.join(self.ldapdir, "db", "samba", "cn=samba")):
os.makedirs(os.path.join(self.ldapdir, "db", "samba", "cn=samba"), 0700)
setup_file(setup_path("cn=samba.ldif"),
os.path.join(self.ldapdir, "db", "samba", "cn=samba.ldif"),
{ "UUID": str(uuid.uuid4()),
"LDAPTIME": timestring(int(time.time()))} )
setup_file(setup_path("cn=samba-admin.ldif"),
os.path.join(self.ldapdir, "db", "samba", "cn=samba", "cn=samba-admin.ldif"),
{"LDAPADMINPASS_B64": b64encode(self.ldapadminpass),
"UUID": str(uuid.uuid4()),
"LDAPTIME": timestring(int(time.time()))} )
if self.ol_mmr_urls is not None:
setup_file(setup_path("cn=replicator.ldif"),
os.path.join(self.ldapdir, "db", "samba", "cn=samba", "cn=replicator.ldif"),
{"MMR_PASSWORD_B64": b64encode(mmr_pass),
"UUID": str(uuid.uuid4()),
"LDAPTIME": timestring(int(time.time()))} )
mapping = "schema-map-openldap-2.3"
backend_schema = "backend-schema.schema"
f = open(setup_path(mapping), 'r')
try:
backend_schema_data = self.schema.convert_to_openldap(
"openldap", f.read())
finally:
f.close()
assert backend_schema_data is not None
f = open(os.path.join(self.ldapdir, backend_schema), 'w')
try:
f.write(backend_schema_data)
finally:
f.close()
# now we generate the needed strings to start slapd automatically,
if self.ldap_backend_extra_port is not None:
# When we use MMR, we can't use 0.0.0.0 as it uses the name
# specified there as part of its clue as to its own name,
# and not to replicate to itself
if self.ol_mmr_urls is None:
server_port_string = "ldap://0.0.0.0:%d" % self.ldap_backend_extra_port
else:
server_port_string = "ldap://%s.%s:%d" (self.names.hostname,
self.names.dnsdomain, self.ldap_backend_extra_port)
else:
server_port_string = ""
# Prepare the 'result' information - the commands to return in
# particular
self.slapd_provision_command = [self.slapd_path, "-F" + self.olcdir,
"-h"]
# copy this command so we have two version, one with -d0 and only
# ldapi (or the forced ldap_uri), and one with all the listen commands
self.slapd_command = list(self.slapd_provision_command)
self.slapd_provision_command.extend([self.ldap_uri, "-d0"])
uris = self.ldap_uri
if server_port_string != "":
uris = uris + " " + server_port_string
self.slapd_command.append(uris)
# Set the username - done here because Fedora DS still uses the admin
# DN and simple bind
self.credentials.set_username("samba-admin")
# Wipe the old sam.ldb databases away
shutil.rmtree(self.olcdir, True)
os.makedirs(self.olcdir, 0770)
# If we were just looking for crashes up to this point, it's a
# good time to exit before we realise we don't have OpenLDAP on
# this system
if self.ldap_dryrun_mode:
sys.exit(0)
slapd_cmd = [self.slapd_path, "-Ttest", "-n", "0", "-f",
self.slapdconf, "-F", self.olcdir]
retcode = subprocess.call(slapd_cmd, close_fds=True, shell=False)
if retcode != 0:
self.logger.error("conversion from slapd.conf to cn=config failed slapd started with: %s" % "\'" + "\' \'".join(slapd_cmd) + "\'")
raise ProvisioningError("conversion from slapd.conf to cn=config failed")
if not os.path.exists(os.path.join(self.olcdir, "cn=config.ldif")):
raise ProvisioningError("conversion from slapd.conf to cn=config failed")
# Don't confuse the admin by leaving the slapd.conf around
os.remove(self.slapdconf)
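Note the two-argument calls shutil.rmtree(path, True) in this example: the positional True is ignore_errors, so a missing directory is not an error. A sketch of the equivalent, more readable keyword form (the path is hypothetical):

import shutil

# Positional form, as used twice in the example above:
shutil.rmtree('/tmp/ldap-db', True)

# Equivalent keyword form, which reads more clearly:
shutil.rmtree('/tmp/ldap-db', ignore_errors=True)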
Example 57
Project: butterflow Source File: cli.py
def main():
par = argparse.ArgumentParser(usage='butterflow [options] [video]',
add_help=False)
req = par.add_argument_group('Required arguments')
gen = par.add_argument_group('General options')
dsp = par.add_argument_group('Display options')
vid = par.add_argument_group('Video options')
mux = par.add_argument_group('Muxing options')
fgr = par.add_argument_group('Advanced options')
req.add_argument('video', type=str, nargs='?', default=None,
help='Specify the input video')
gen.add_argument('-h', '--help', action='help',
help='Show this help message and exit')
gen.add_argument('--version', action='store_true',
help='Show program\'s version number and exit')
gen.add_argument('-d', '--devices', action='store_true',
help='Show detected OpenCL devices and exit')
gen.add_argument('-sw', action='store_true',
help='Set to force software rendering')
gen.add_argument('-c', '--cache', action='store_true',
help='Show cache information and exit')
gen.add_argument('--rm-cache', action='store_true',
help='Set to clear the cache and exit')
gen.add_argument('-prb', '--probe', action='store_true',
help='Show media file information and exit')
gen.add_argument('-v', '--verbosity', action='count',
help='Set to increase output verbosity')
gen.add_argument('-q', '--quiet', action='store_true',
help='Set to suppress console output')
dsp.add_argument('-p', '--show-preview', action='store_true',
help='Set to show video preview')
dsp.add_argument('-a', '--add-info', action='store_true',
help='Set to embed debugging info into the output video')
dsp.add_argument('-tt', '--text-type',
choices=['light', 'dark', 'stroke'],
default=settings['text_type'],
help='Specify text type for debugging info, '
'(default: %(default)s)')
dsp.add_argument('-mrk', '--mark-frames', action='store_true',
help='Set to mark interpolated frames')
vid.add_argument('-o', '--output-path', type=str,
default=settings['out_path'],
help='Specify path to the output video')
vid.add_argument('-r', '--playback-rate', type=str,
help='Specify the playback rate as an integer or a float '
'Fractional forms are acceptable, e.g., 24/1.001 is the '
'same as 23.976. To use a multiple of the source '
'video\'s rate, follow a number with `x`, e.g., "2x" '
'will double the frame rate. The original rate will be '
'used by default if nothing is specified.')
vid.add_argument('-s', '--subregions', type=str,
help='Specify rendering subregions in the form: '
'"a=TIME,b=TIME,TARGET=VALUE" where TARGET is either '
'`fps`, `dur`, `spd`. Valid TIME syntaxes are [hr:m:s], '
'[m:s], [s], [s.xxx], or `end`, which signifies to the '
'end the video. You can specify multiple subregions by '
'separating them with a colon `:`. A special subregion '
'format that conveniently describes the entire clip is '
'available in the form: "full,TARGET=VALUE".')
vid.add_argument('-k', '--keep-subregions', action='store_true',
help='Set to render subregions that are not explicitly '
'specified')
vid.add_argument('-vs', '--video-scale', type=str,
default=str(settings['video_scale']),
help='Specify output video size in the form: '
'"WIDTH:HEIGHT" or by using a factor. To keep the '
'aspect ratio only specify one component, either width '
'or height, and set the other component to -1, '
'(default: %(default)s)')
vid.add_argument('-l', '--lossless', action='store_true',
help='Set to use lossless encoding settings')
vid.add_argument('-sm', '--smooth-motion', action='store_true',
help='Set to tune for smooth motion. This mode yields '
'artifact-less frames by emphasizing blended frames over '
'warping pixels.')
mux.add_argument('-mux', action='store_true',
help='Set to mux the source audio with the output video')
fgr.add_argument('--fast-pyr', action='store_true',
help='Set to use fast pyramids')
fgr.add_argument('--pyr-scale', type=float,
default=settings['pyr_scale'],
help='Specify pyramid scale factor, '
'(default: %(default)s)')
fgr.add_argument('--levels', type=int,
default=settings['levels'],
help='Specify number of pyramid layers, '
'(default: %(default)s)')
fgr.add_argument('--winsize', type=int,
default=settings['winsize'],
help='Specify averaging window size, '
'(default: %(default)s)')
fgr.add_argument('--iters', type=int,
default=settings['iters'],
help='Specify number of iterations at each pyramid '
'level, (default: %(default)s)')
fgr.add_argument('--poly-n', type=int,
choices=settings['poly_n_choices'],
default=settings['poly_n'],
help='Specify size of pixel neighborhood, '
'(default: %(default)s)')
fgr.add_argument('--poly-s', type=float,
default=settings['poly_s'],
help='Specify standard deviation to smooth derivatives, '
'(default: %(default)s)')
fgr.add_argument('-ff', '--flow-filter', choices=['box', 'gaussian'],
default=settings['flow_filter'],
help='Specify which filter to use for optical flow '
'estimation, (default: %(default)s)')
for i, arg in enumerate(sys.argv):
if arg[0] == '-' and arg[1].isdigit():
sys.argv[i] = ' '+arg
args = par.parse_args()
fmt = '[butterflow:%(filename)s:%(funcName)s.%(levelname)s]: %(message)s'
logging.basicConfig(level=settings['loglevel_0'], format=fmt)
log = logging.getLogger('butterflow')
if args.verbosity == 1:
log.setLevel(settings['loglevel_1'])
if args.verbosity >= 2:
log.setLevel(settings['loglevel_2'])
if args.quiet:
log.setLevel(settings['loglevel_quiet'])
settings['quiet'] = True
if args.version:
print(__version__)
return 0
cachedir = settings['tempdir']
if args.cache:
nfiles = 0
sz = 0
for dirpath, dirnames, filenames in os.walk(cachedir):
if dirpath == settings['clbdir']:
continue
for filename in filenames:
nfiles += 1
fp = os.path.join(dirpath, filename)
sz += os.path.getsize(fp)
sz = sz / 1024.0**2
print('{} files, {:.2f} MB'.format(nfiles, sz))
print('cache @ '+cachedir)
return 0
if args.rm_cache:
if os.path.exists(cachedir):
import shutil
shutil.rmtree(cachedir)
print('cache deleted, done.')
return 0
if args.devices:
ocl.print_ocl_devices()
return 0
if not args.video:
print('no file specified, use: -h for help')
return 1
elif not os.path.exists(args.video):
print('file does not exist')
return 1
if args.probe:
avinfo.print_av_info(args.video)
return 0
extension = os.path.splitext(os.path.basename(args.output_path))[1].lower()
if extension[1:] != 'mp4':
print('bad out file extension')
return 0
av_info = avinfo.get_av_info(args.video)
use_sw_interpolate = args.sw or not ocl.compat_ocl_device_available()
if use_sw_interpolate:
log.warn('not using opencl, ctrl+c to quit')
if args.flow_filter == 'gaussian':
args.flow_filter = cv2.OPTFLOW_FARNEBACK_GAUSSIAN
else:
args.flow_filter = 0
if args.smooth_motion:
args.poly_s = 0.01
def optflow_fn(x, y,
pyr=args.pyr_scale, levels=args.levels,
winsize=args.winsize, iters=args.iters, polyn=args.poly_n,
polys=args.poly_s, fast=args.fast_pyr,
filt=args.flow_filter):
if use_sw_interpolate:
return cv2.calcOpticalFlowFarneback(
x, y, pyr, levels, winsize, iters, polyn, polys, filt)
else:
return motion.ocl_farneback_optical_flow(
x, y, pyr, levels, winsize, iters, polyn, polys, fast, filt)
interpolate_fn = None
if use_sw_interpolate:
from butterflow.interpolate import sw_interpolate_flow
interpolate_fn = sw_interpolate_flow
else:
interpolate_fn = motion.ocl_interpolate_flow
try:
w, h = w_h_from_input_str(args.video_scale, av_info['w'], av_info['h'])
sequence = sequence_from_input_str(args.subregions,
av_info['duration'],
av_info['frames'])
rate = rate_from_input_str(args.playback_rate, av_info['rate'])
except (ValueError, AttributeError) as error:
print('error: '+str(error))
return 1
def nearest_even_int(x):
return x & ~1
w1, h1 = av_info['w'], av_info['h']
w2, h2 = nearest_even_int(w), nearest_even_int(h)
if w1*h1 > w2*h2:
scaling_method = settings['scaler_dn']
elif w1*h1 < w2*h2:
scaling_method = settings['scaler_up']
else:
scaling_method = None
rnd = Renderer(args.video,
args.output_path,
sequence,
rate,
optflow_fn,
interpolate_fn,
w2,
h2,
scaling_method,
args.lossless,
args.keep_subregions,
args.show_preview,
args.add_info,
args.text_type,
args.mark_frames,
args.mux)
motion.set_num_threads(settings['ocv_threads'])
log.info('will render:\n' + str(rnd.sequence))
success = True
total_time = 0
try:
import timeit
total_time = timeit.timeit(rnd.render,
setup='import gc;gc.enable()',
number=1)
except (KeyboardInterrupt, SystemExit):
success = False
if success:
log.info('made: '+args.output_path)
out_sz = os.path.getsize(args.output_path) / 1024.0**2
log.info('write ratio: {}/{}, ({:.2f}%) {:.2f} MB'.format(
rnd.frs_written,
rnd.frs_to_render,
rnd.frs_written*100.0/rnd.frs_to_render,
out_sz))
txt = 'frames: {} real, +{} interpolated, +{} dupe, -{} drop'
if not settings['quiet']:
print(txt.format(rnd.source_frs,
rnd.frs_interpolated,
rnd.frs_duped,
rnd.frs_dropped))
log.info('butterflow took {:.3g} mins, done.'.format(total_time / 60))
return 0
else:
log.warn('quit unexpectedly')
log.warn('files left in cache @ '+settings['tempdir'])
return 1
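
The --rm-cache branch above is the core shutil.rmtree idiom in this example: check that the directory exists, then remove the whole tree in one call. Here is a minimal self-contained sketch of the same pattern; the cache location is a hypothetical stand-in, not butterflow's real settings['tempdir']:

import os
import shutil
import tempfile

cache_dir = os.path.join(tempfile.gettempdir(), 'example_cache')  # hypothetical path

def clear_cache(path):
    # shutil.rmtree raises OSError on a missing path, so guard first
    if os.path.exists(path):
        shutil.rmtree(path)

clear_cache(cache_dir)
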
0
Example 58
Project: WAPT Source File: gencache.py
def EnsureModule(typelibCLSID, lcid, major, minor, progressInstance = None, bValidateFile=not is_readonly, bForDemand = bForDemandDefault, bBuildHidden = 1):
"""Ensure Python support is loaded for a type library, generating if necessary.
Given the IID, LCID and version information for a type library, check and if
necessary (re)generate, then import the necessary support files. If we regenerate the file, there
is no way to totally snuff out all instances of the old module in Python, and thus we will regenerate the file more often than necessary,
unless makepy/genpy is modified accordingly.
Returns the Python module. No exceptions are caught during the generate process.
Params
typelibCLSID -- IID of the type library.
major -- Integer major version.
minor -- Integer minor version
lcid -- Integer LCID for the library.
progressInstance -- Instance to use as progress indicator, or None to
use the GUI progress bar.
bValidateFile -- Whether or not to perform cache validation
bForDemand -- Should a complete generation happen now, or on demand?
bBuildHidden -- Should hidden members/attributes etc be generated?
"""
bReloadNeeded = 0
try:
try:
module = GetModuleForTypelib(typelibCLSID, lcid, major, minor)
except ImportError:
# If we get an ImportError
# We may still find a valid cache file under a different MinorVersion #
# (which windows will search out for us)
#print "Loading reg typelib", typelibCLSID, major, minor, lcid
module = None
try:
tlbAttr = pythoncom.LoadRegTypeLib(typelibCLSID, major, minor, lcid).GetLibAttr()
# if the above line doesn't throw a pythoncom.com_error, check if
# it is actually a different lib than we requested, and if so, suck it in
if tlbAttr[1] != lcid or tlbAttr[4]!=minor:
#print "Trying 2nd minor #", tlbAttr[1], tlbAttr[3], tlbAttr[4]
try:
module = GetModuleForTypelib(typelibCLSID, tlbAttr[1], tlbAttr[3], tlbAttr[4])
except ImportError:
# We don't have a module, but we do have a better minor
# version - remember that.
minor = tlbAttr[4]
# else module remains None
except pythoncom.com_error:
# couldn't load any typelib - mod remains None
pass
if module is not None and bValidateFile:
assert not is_readonly, "Can't validate in a read-only gencache"
try:
typLibPath = pythoncom.QueryPathOfRegTypeLib(typelibCLSID, major, minor, lcid)
# windows seems to add an extra \0 (via the underlying BSTR)
# The mainwin toolkit does not add this erroneous \0
if typLibPath[-1]=='\0':
typLibPath=typLibPath[:-1]
suf = getattr(os.path, "supports_unicode_filenames", 0)
if not suf:
# can't pass unicode filenames directly - convert
try:
typLibPath=typLibPath.encode(sys.getfilesystemencoding())
except AttributeError: # no sys.getfilesystemencoding
typLibPath=str(typLibPath)
tlbAttributes = pythoncom.LoadRegTypeLib(typelibCLSID, major, minor, lcid).GetLibAttr()
except pythoncom.com_error:
# We have a module, but no type lib - we should still
# run with what we have though - the typelib may not be
# deployed here.
bValidateFile = 0
if module is not None and bValidateFile:
assert not is_readonly, "Can't validate in a read-only gencache"
filePathPrefix = "%s\\%s" % (GetGeneratePath(), GetGeneratedFileName(typelibCLSID, lcid, major, minor))
filePath = filePathPrefix + ".py"
filePathPyc = filePathPrefix + ".py"
if __debug__:
filePathPyc = filePathPyc + "c"
else:
filePathPyc = filePathPyc + "o"
# Verify that type library is up to date.
# If we have a differing MinorVersion or genpy has bumped versions, update the file
import genpy
if module.MinorVersion != tlbAttributes[4] or genpy.makepy_version != module.makepy_version:
#print "Version skew: %d, %d" % (module.MinorVersion, tlbAttributes[4])
# try to erase the bad file from the cache
try:
os.unlink(filePath)
except os.error:
pass
try:
os.unlink(filePathPyc)
except os.error:
pass
if os.path.isdir(filePathPrefix):
import shutil
shutil.rmtree(filePathPrefix)
minor = tlbAttributes[4]
module = None
bReloadNeeded = 1
else:
minor = module.MinorVersion
filePathPrefix = "%s\\%s" % (GetGeneratePath(), GetGeneratedFileName(typelibCLSID, lcid, major, minor))
filePath = filePathPrefix + ".py"
filePathPyc = filePathPrefix + ".pyc"
#print "Trying py stat: ", filePath
fModTimeSet = 0
try:
pyModTime = os.stat(filePath)[8]
fModTimeSet = 1
except os.error, e:
# If .py file fails, try .pyc file
#print "Trying pyc stat", filePathPyc
try:
pyModTime = os.stat(filePathPyc)[8]
fModTimeSet = 1
except os.error, e:
pass
#print "Trying stat typelib", pyModTime
#print str(typLibPath)
typLibModTime = os.stat(typLibPath)[8]
if fModTimeSet and (typLibModTime > pyModTime):
bReloadNeeded = 1
module = None
except (ImportError, os.error):
module = None
if module is None:
# We need to build an item. If we are in a read-only cache, we
# can't/don't want to do this - so before giving up, check for
# a different minor version in our cache - according to COM, this is OK
if is_readonly:
key = str(typelibCLSID), lcid, major, minor
# If we have been asked before, get last result.
try:
return versionRedirectMap[key]
except KeyError:
pass
# Find other candidates.
items = []
for desc in GetGeneratedInfos():
if key[0]==desc[0] and key[1]==desc[1] and key[2]==desc[2]:
items.append(desc)
if items:
# Items are all identical, except for last tuple element
# We want the latest minor version we have - so just sort and grab last
items.sort()
new_minor = items[-1][3]
ret = GetModuleForTypelib(typelibCLSID, lcid, major, new_minor)
else:
ret = None
# remember and return
versionRedirectMap[key] = ret
return ret
#print "Rebuilding: ", major, minor
module = MakeModuleForTypelib(typelibCLSID, lcid, major, minor, progressInstance, bForDemand = bForDemand, bBuildHidden = bBuildHidden)
# If we replaced something, reload it
if bReloadNeeded:
module = reload(module)
AddModuleToCache(typelibCLSID, lcid, major, minor)
return module
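
EnsureModule reaches for shutil.rmtree in one specific case: when makepy generated a package directory rather than a flat .py file, the stale cache entry is a whole tree, not a single file. A minimal sketch of that cleanup, assuming a filePathPrefix-style base path:

import os
import shutil

def purge_generated(prefix):
    # Remove the flat .py/.pyc files if present...
    for ext in ('.py', '.pyc'):
        try:
            os.unlink(prefix + ext)
        except OSError:
            pass
    # ...and, if generation produced a package directory, the whole tree.
    if os.path.isdir(prefix):
        shutil.rmtree(prefix)
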
0
Example 59
Project: program.plexus Source File: autoconf.py
def configure_acestream(latest_version):
#Configuration for LINUX
if xbmc.getCondVisibility('system.platform.linux') and not xbmc.getCondVisibility('system.platform.Android'):
print("Detected OS: Linux")
if "arm" in os.uname()[4]:
print("Linux Arm")
if settings.getSetting('rpi2') == "true":
ACE_KIT = os.path.join(addonpath,acestream_rpi2.split("/")[-1])
download_tools().Downloader(acestream_rpi2,ACE_KIT,translate(30110),translate(30000))
if tarfile.is_tarfile(ACE_KIT):
path_libraries = os.path.join(pastaperfil)
download_tools().extract(ACE_KIT,path_libraries)
xbmc.sleep(500)
download_tools().remove(ACE_KIT)
#set chroot to executable
binary_path = os.path.join(pastaperfil,"acestream","chroot")
st = os.stat(binary_path)
import stat
os.chmod(binary_path, st.st_mode | stat.S_IEXEC)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
elif os.uname()[4] == "x86_64":
if settings.getSetting('openelecx86_64') == "true":
print("OpenELEC x86_64 Acestream configuration")
ACE_KIT = os.path.join(addonpath,openeelcx86_64_acestream.split("/")[-1])
download_tools().Downloader(openeelcx86_64_acestream ,ACE_KIT,translate(30110),translate(30000))
if tarfile.is_tarfile(ACE_KIT):
download_tools().extract(ACE_KIT,pastaperfil)
xbmc.sleep(500)
download_tools().remove(ACE_KIT)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
else:
print("64 bit Linux Disto Acestream Configuration")
ACE_KIT = os.path.join(addonpath,acestream_linux_x64_generic.split("/")[-1])
download_tools().Downloader(acestream_linux_x64_generic,ACE_KIT,translate(30110),translate(30000))
if tarfile.is_tarfile(ACE_KIT):
download_tools().extract(ACE_KIT,pastaperfil)
xbmc.sleep(500)
download_tools().remove(ACE_KIT)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
elif os.uname()[4] == "i386" or os.uname()[4] == "i686":
if settings.getSetting('openeleci386') == "true":
print("32 bit Openelec Acestream Configuration")
ACE_KIT = os.path.join(addonpath,openeelcxi386_acestream.split("/")[-1])
download_tools().Downloader(openeelcxi386_acestream,ACE_KIT,translate(30110),translate(30000))
if tarfile.is_tarfile(ACE_KIT):
download_tools().extract(ACE_KIT,pastaperfil)
xbmc.sleep(500)
download_tools().remove(ACE_KIT)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
else:
print("32 bit Linux general distro Acestream Configuration")
ACE_KIT = os.path.join(addonpath,acestream_linux_i386_generic.split("/")[-1])
download_tools().Downloader(acestream_linux_i386_generic,ACE_KIT,translate(30110),translate(30000))
if tarfile.is_tarfile(ACE_KIT):
download_tools().extract(ACE_KIT,pastaperfil)
xbmc.sleep(500)
download_tools().remove(ACE_KIT)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
elif xbmc.getCondVisibility('system.platform.windows'):
print("Detected OS: Windows")
if not os.path.exists(pastaperfil): xbmcvfs.mkdir(pastaperfil)
#Ace
SPSC_KIT = os.path.join(addonpath,acestream_windows.split("/")[-1])
download_tools().Downloader(acestream_windows,SPSC_KIT,translate(30110),translate(30000))
if os.path.exists(os.path.join(pastaperfil,"acestream")):
shutil.rmtree(os.path.join(pastaperfil,"acestream"))
if os.path.exists(os.path.join(pastaperfil,"player")):
shutil.rmtree(os.path.join(pastaperfil,"player"))
if tarfile.is_tarfile(SPSC_KIT):
path_libraries = os.path.join(pastaperfil)
download_tools().extract(SPSC_KIT,path_libraries)
download_tools().remove(SPSC_KIT)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
elif xbmc.getCondVisibility('System.Platform.OSX'):
print("Detected OS: Mac OSX")
available = False
if os.uname()[-1] == "x86_64":
mac_package = osx_x64_acestream
available = True
elif os.uname()[-1] == "i386":
mac_package = osx_i386_acestream
available = True
else:
available = False
if available == True:
MAC_KIT = os.path.join('/Applications',mac_package.split("/")[-1])
if not xbmcvfs.exists(os.path.join('/Applications','Ace Stream.app')):
download_tools().Downloader(mac_package,MAC_KIT,translate(30110),translate(30000))
if xbmcvfs.exists(MAC_KIT):
xbmc.sleep(1000)
cmd = 'unzip /Applications/AceStreamWineOSX.zip'
zipa = subprocess.Popen(cmd,shell=True)
cmd = 'chmod -R 755 /Applications/Ace\ Stream.app'
print cmd
chmod = subprocess.Popen(cmd,shell=True)
try: os.remove(MAC_KIT)
except: pass
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
else:
mensagemok(translate(30000),translate(30100))
return
elif xbmc.getCondVisibility('System.Platform.Android'):
print("Detected OS: Android")
print("Starting Acestream Configuration")
#acestream config for android
if not os.path.exists(pastaperfil): xbmcvfs.mkdir(pastaperfil)
#Hack to get xbmc app id
xbmcfolder=xbmc.translatePath(addonpath).split("/")
found = False
if settings.getSetting('auto_appid') == 'true':
i = 0
sopcast_installed = False
for folder in xbmcfolder:
if folder.count('.') >= 2 and folder != addon_id :
found = True
break
else:
i+=1
if found == True:
uid = os.getuid()
app_id = xbmcfolder[i]
else:
if settings.getSetting('custom_appid') != '':
uid = os.getuid()
app_id = settings.getSetting('custom_appid')
found = True
if found == True:
settings.setSetting('app_id',app_id)
#Acestream configuration for android starts here
if "arm" in os.uname()[4]:
acebundle = os.path.join(pastaperfil,android_aceengine_arm.split("/")[-1])
download_tools().Downloader(android_aceengine_arm,acebundle,translate(30111),translate(30000))
else:
acebundle = os.path.join(pastaperfil,android_aceengine_x86.split("/")[-1])
download_tools().Downloader(android_aceengine_x86,acebundle,translate(30111),translate(30000))
if tarfile.is_tarfile(acebundle):
download_tools().extract(acebundle,pastaperfil)
download_tools().remove(acebundle)
orgacestreamenginefolder = os.path.join(pastaperfil,"org.acestream.engine")
xbmc_data_path = os.path.join("/data", "data", app_id)
if os.path.exists(xbmc_data_path) and uid == os.stat(xbmc_data_path).st_uid:
android_binary_dir = os.path.join(xbmc_data_path, "files", "program.plexus")
if not os.path.exists(android_binary_dir): os.makedirs(android_binary_dir)
android_acestream_folder = os.path.join(android_binary_dir,"org.acestream.engine")
if not os.path.exists(android_acestream_folder): os.makedirs(android_acestream_folder)
else:
#clean install for android - delete old folder
print android_acestream_folder
try:
os.system("chmod -R 777 "+android_acestream_folder+"/*")
os.system("rm -r '"+android_acestream_folder+"'")
except: pass
try: os.makedirs(android_acestream_folder)
except: pass
xbmc.sleep(200)
#clean install in android - remove /sdcard/.ACEStream folder if it exists (to be enabled between versions if we need to remove older settings)
#if os.path.exists(os.path.join('/sdcard','.ACEStream')):
# try:
# hidden_ace = os.path.join('/sdcard','.ACEStream')
# os.system("chmod -R 777 "+hidden_ace+"/*")
# os.system("rm -r '"+hidden_ace+"'")
# except: pass
recursive_overwrite(orgacestreamenginefolder, android_acestream_folder, ignore=None)
pythonbin = os.path.join(android_acestream_folder,"files","python","bin","python")
st = os.stat(pythonbin)
import stat
os.chmod(pythonbin, st.st_mode | stat.S_IEXEC)
if os.path.exists(orgacestreamenginefolder):
try:
os.system("chmod -R 777 "+orgacestreamenginefolder+"/*")
os.system("rm -r '"+orgacestreamenginefolder+"'")
except: pass
try: xbmcvfs.mkdir(os.path.join('/sdcard','org.acestream.engine'))
except: pass
opcao= xbmcgui.Dialog().yesno(translate(30000), translate(30112),translate(30113))
if not opcao:
settings.setSetting('engine_app','0')
else:
mensagemok(translate(30000),translate(30114),translate(30115),translate(30116))
if os.path.exists(os.path.join("sdcard","Download")):
pasta = os.path.join("sdcard","Download")
if "arm" in os.uname()[4]: acefile = os.path.join("sdcard","Download",acestreamengine_apk_arm.split("/")[-1])
else: acefile = os.path.join("sdcard","Download",acestreamengine_apk_x86.split("/")[-1])
else:
dialog = xbmcgui.Dialog()
pasta = dialog.browse(int(0), translate(30105), 'myprograms')
if "arm" in os.uname()[4]: acefile = os.path.join(pasta,acestreamengine_apk_arm.split("/")[-1])
else: acefile = os.path.join(pasta,acestreamengine_apk_x86.split("/")[-1])
if "arm" in os.uname()[4]: download_tools().Downloader(acestreamengine_apk_arm,acefile,translate(30117),translate(30000))
else: download_tools().Downloader(acestreamengine_apk_x86,acefile,translate(30117),translate(30000))
if tarfile.is_tarfile(acefile):
download_tools().extract(acefile,pasta)
download_tools().remove(acefile)
xbmc.sleep(2000)
mensagemok(translate(30000),translate(30118),pasta,translate(30108))
mensagemok(translate(30000),translate(30119),translate(30120),translate(30121))
settings.setSetting('engine_app','1')
opcao= xbmcgui.Dialog().yesno(translate(30000), translate(30122),translate(30123))
if opcao:
if os.path.exists(os.path.join("sdcard","Download")):
pasta = os.path.join("sdcard","Download")
if "arm" in os.uname()[4]: acefile = os.path.join("sdcard","Download",android_aceplayer_arm.split("/")[-1])
else: acefile = os.path.join("sdcard","Download",android_aceplayer_x86.split("/")[-1])
else:
dialog = xbmcgui.Dialog()
pasta = dialog.browse(int(0), translate(30105), 'myprograms')
if "arm" in os.uname()[4]: acefile = os.path.join(pasta,acestreamengine_apk_arm.split("/")[-1])
else: acefile = os.path.join(pasta,acestreamengine_apk_x86.split("/")[-1])
if "arm" in os.uname()[4]: download_tools().Downloader(android_aceplayer_arm,acefile,translate(30124),translate(30000))
else: download_tools().Downloader(android_aceplayer_x86,acefile,translate(30124),translate(30000))
if tarfile.is_tarfile(acefile):
download_tools().extract(acefile,pasta)
download_tools().remove(acefile)
xbmc.sleep(2000)
mensagemok(translate(30000),translate(30125),pasta,translate(30108))
opcao= xbmcgui.Dialog().yesno(translate(30000), translate(30126))
if opcao:
settings.setSetting('engine_app','2')
if latest_version: settings.setSetting('acestream_version',value=latest_version)
mensagemok(translate(30000),translate(30127))
return
else:
mensagemok(translate(30000),translate(30109))
return
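
On the Windows path above, the example removes any previously extracted acestream and player folders before unpacking the new bundle, so files from an older install never survive an upgrade. A sketch of that remove-then-extract pattern, with hypothetical names:

import os
import shutil
import tarfile

def fresh_extract(archive, dest, subdirs=('acestream', 'player')):
    # Delete leftovers from a previous install first
    for name in subdirs:
        target = os.path.join(dest, name)
        if os.path.exists(target):
            shutil.rmtree(target)
    # Then unpack the new bundle into a clean destination
    if tarfile.is_tarfile(archive):
        with tarfile.open(archive) as tar:
            tar.extractall(dest)
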
0
Example 60
Project: pyvivado Source File: qa_axi_interconnect.py
def test_one(self):
logger.debug('Starting testone')
directory = os.path.abspath('proj_qa_axi_interconnect')
if os.path.exists(directory):
shutil.rmtree(directory)
os.mkdir(directory)
slave_ids = (13, 15, pow(2, 16)-7)
invalid_slave_id = 14
interface = axi_interconnect.get_axi_interconnect_interface(
params={'slave_ids': slave_ids})
wait_data = []
wait_lines = 20
input_data = []
expected_data = []
make_m2s = axi_utils.make_empty_axi4lite_m2s_dict
make_s2m = axi_utils.make_empty_axi4lite_s2m_dict
for i in range(wait_lines):
wait_data.append({
'reset': 1,
'i_m': make_m2s(),
'i_s': [make_s2m() for i in slave_ids],
})
# Now write to an invalid address.
invalid_write_d = make_m2s()
local_address = 18
write_data = 3456
invalid_write_d['awaddr'] = invalid_slave_id * pow(2, 16) + local_address
invalid_write_d['awvalid'] = 1
invalid_write_d['wvalid'] = 1
invalid_write_d['wdata'] = write_data
input_data.append({
'reset': 0,
'i_m': invalid_write_d,
'i_s': [make_s2m() for slave_id in slave_ids],
})
input_data.append({
'reset': 0,
'i_m': make_m2s(),
'i_s': [make_s2m() for slave_id in slave_ids],
})
e_m2s = make_m2s()
e_m2s['awaddr'] = local_address
e_m2s['wdata'] = write_data
expected_data.append({
'o_m': make_s2m(),
'o_s': [e_m2s] * len(slave_ids),
})
e_s2m = make_s2m()
e_s2m['bresp'] = axi_utils.DECERR
e_s2m['bvalid'] = 1
expected_data.append({
'o_m': e_s2m,
'o_s': [make_m2s() for i in slave_ids],
})
# Now write to a valid address.
valid_write_d = make_m2s()
local_address = 490
write_data = 4567
valid_write_d['awaddr'] = slave_ids[1] * pow(2, 16) + local_address
valid_write_d['awvalid'] = 1
valid_write_d['wvalid'] = 1
valid_write_d['wdata'] = write_data
input_data.append({
'reset': 0,
'i_m': valid_write_d,
'i_s': [make_s2m() for slave_id in slave_ids],
})
d_s2m = make_s2m()
d_s2m['bvalid'] = 1
bresp = 2
d_s2m['bresp'] = bresp
input_data.append({
'reset': 0,
'i_m': make_m2s(),
'i_s': [make_s2m(), d_s2m, make_s2m()],
})
e_m2s_valid = make_m2s()
e_m2s_valid['awaddr'] = local_address
e_m2s_valid['awvalid'] = 1
e_m2s_valid['wdata'] = write_data
e_m2s_valid['wvalid'] = 1
e_m2s_invalid = make_m2s()
e_m2s_invalid['awaddr'] = local_address
e_m2s_invalid['wdata'] = write_data
expected_data.append({
'o_m': make_s2m(),
'o_s': [e_m2s_invalid, e_m2s_valid, e_m2s_invalid],
})
e_s2m = make_s2m()
e_s2m['bresp'] = bresp
e_s2m['bvalid'] = 1
expected_data.append({
'o_m': e_s2m,
'o_s': [make_m2s() for i in slave_ids],
})
p = project.FileTestBenchProject.create(
interface=interface, directory=directory,
board=config.default_board,
part=config.default_part,
)
t = p.wait_for_most_recent_task()
errors = t.get_errors_and_warnings()
logger.debug('errors are {}'.format(errors))
self.assertEqual(len(errors), 0)
# Run the simulation
runtime = '{} ns'.format((len(wait_data+input_data) + 20) * 10)
errors, output_data = p.run_hdl_simulation(
input_data=wait_data+input_data, runtime=runtime)
self.assertEqual(len(errors), 0)
latency = 0
delay = wait_lines + latency + 1
for data in (output_data, expected_data):
for d in data:
pass
self.check_output(output_data[delay:], expected_data)
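
test_one begins by wiping and recreating its project directory, which is the standard way to make a filesystem-heavy test repeatable. The same idea as a reusable setUp, sketched here with a hypothetical directory name:

import os
import shutil
import unittest

class FreshDirTestCase(unittest.TestCase):
    directory = os.path.abspath('proj_qa_scratch')  # hypothetical name

    def setUp(self):
        # Every run starts from a clean, empty workspace
        if os.path.exists(self.directory):
            shutil.rmtree(self.directory)
        os.mkdir(self.directory)
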
0
Example 61
Project: arkos Source File: websites.py
def _install(self, extra_vars, enable, nthread):
nthread.title = "Installing website"
msg = Notification("info", "Webs", "Preparing to install...")
nthread.update(msg)
# Make sure the chosen port is indeed open
if not tracked_services.is_open_port(self.port, self.domain):
cname = "({0})".format(self.app.id)
raise errors.InvalidConfigError(cname, nthread)\
from tracked_services.PortConflictError(self.port, self.domain)
# Set some metadata values
specialmsg, dbpasswd = "", ""
site_dir = config.get("websites", "site_dir")
path = (self.path or os.path.join(site_dir, self.id))
self.path = path
self.php = extra_vars.get("php") or self.php \
or self.app.uses_php or False
self.version = self.app.version.rsplit("-", 1)[0] \
if self.app.website_updates else None
# Classify the source package type
if not self.app.download_url:
ending = ""
elif self.app.download_url.endswith(".tar.gz"):
ending = ".tar.gz"
elif self.app.download_url.endswith(".tgz"):
ending = ".tgz"
elif self.app.download_url.endswith(".tar.bz2"):
ending = ".tar.bz2"
elif self.app.download_url.endswith(".zip"):
ending = ".zip"
elif self.app.download_url.endswith(".git"):
ending = ".git"
else:
raise errors.InvalidConfigError(
"Invalid source archive format in {0}".format(self.app.id))
msg = "Running pre-installation..."
uid, gid = users.get_system("http").uid, groups.get_system("http").gid
nthread.update(Notification("info", "Webs", msg))
# Call website type's pre-install hook
self.pre_install(extra_vars)
# If needs DB and user didn't select an engine, choose one for them
if len(self.app.database_engines) > 1 \
and extra_vars.get("dbengine", None):
self.app.selected_dbengine = extra_vars.get("dbengine")
if not getattr(self.app, "selected_dbengine", None)\
and self.app.database_engines:
self.app.selected_dbengine = self.app.database_engines[0]
# Create DB and/or DB user as necessary
if getattr(self.app, "selected_dbengine", None):
msg = "Creating database..."
nthread.update(Notification("info", "Webs", msg))
mgr = databases.get_managers(self.app.selected_dbengine)
if not mgr:
estr = "No manager found for {0}"
raise errors.InvalidConfigError(
estr.format(self.app.selected_dbengine))
# Make sure DB daemon is running if it has one
if not mgr.state:
svc = services.get(mgr.meta.database_service)
svc.restart()
self.db = mgr.add_db(self.id)
if hasattr(self.db, "path"):
os.chmod(self.db.path, 0o660)
os.chown(self.db.path, -1, gid)
# If multiuser DB type, create user
if mgr.meta.database_multiuser:
dbpasswd = random_string(16)
db_user = mgr.add_user(self.id, dbpasswd)
db_user.chperm("grant", self.db)
# Make sure the target directory exists, but is empty
pkg_path = os.path.join("/tmp", self.id + ending)
if os.path.isdir(self.path):
shutil.rmtree(self.path)
os.makedirs(self.path)
# Download and extract the source repo / package
msg = "Downloading website source..."
nthread.update(Notification("info", "Webs", msg))
if self.app.download_url and ending == ".git":
g = git.Repo.clone_from(self.app.download_url, self.path)
if hasattr(self.app, "download_at_tag"):
g = git.Git(self.path)
g.checkout(self.app.download_git_tag)
elif self.app.download_url:
download(self.app.download_url, file=pkg_path, crit=True)
# Format extraction command according to type
msg = "Extracting source..."
nthread.update(Notification("info", "Webs", msg))
if ending in [".tar.gz", ".tgz", ".tar.bz2"]:
arch = tarfile.open(pkg_path, "r:*")
r = (x for x in arch.getnames() if re.match("^[^/]*$", x))
toplvl = next(r, None)
if not toplvl:
raise errors.OperationFailedError(
"Malformed source archive")
arch.extractall(site_dir)
os.rename(os.path.join(site_dir, toplvl), self.path)
else:
arch = zipfile.ZipFile(pkg_path)
r = (x for x in arch.namelist() if re.match("^[^/]*/$", x))
toplvl = next(r, None)
if not toplvl:
raise errors.OperationFailedError(
"Malformed source archive")
arch.extractall(site_dir)
os.rename(os.path.join(site_dir, toplvl.rstrip("/")),
self.path)
os.remove(pkg_path)
# Set proper starting permissions on source directory
os.chmod(self.path, 0o755)
os.chown(self.path, uid, gid)
for r, d, f in os.walk(self.path):
for x in d:
os.chmod(os.path.join(r, x), 0o755)
os.chown(os.path.join(r, x), uid, gid)
for x in f:
os.chmod(os.path.join(r, x), 0o644)
os.chown(os.path.join(r, x), uid, gid)
# If there is a custom path for the data directory, set it up
if getattr(self.app, "website_datapaths", None) \
and extra_vars.get("datadir"):
self.data_path = extra_vars["datadir"]
if not os.path.exists(self.data_path):
os.makedirs(self.data_path)
os.chmod(self.data_path, 0o755)
os.chown(self.data_path, uid, gid)
elif hasattr(self, "website_default_data_subdir"):
self.data_path = os.path.join(self.path,
self.website_default_data_subdir)
else:
self.data_path = self.path
# Create the nginx serverblock
addtoblock = self.addtoblock or []
if extra_vars.get("addtoblock"):
addtoblock += nginx.loads(extra_vars.get("addtoblock"), False)
default_index = "index."+("php" if self.php else "html")
if hasattr(self.app, "website_root"):
webroot = os.path.join(self.path, self.app.website_root)
else:
webroot = self.path
block = nginx.Conf()
server = nginx.Server(
nginx.Key("listen", str(self.port)),
nginx.Key("listen", "[::]:" + str(self.port)),
nginx.Key("server_name", self.domain),
nginx.Key("root", webroot),
nginx.Key("index", getattr(self.app, "website_index", None) or
default_index),
nginx.Location(
"/.well-known/acme-challenge/",
nginx.Key("root", self.path)
)
)
if addtoblock:
server.add(*[x for x in addtoblock])
block.add(server)
nginx.dumpf(block, os.path.join("/etc/nginx/sites-available",
self.id))
challenge_dir = os.path.join(self.path, ".well-known/acme-challenge/")
if not os.path.exists(challenge_dir):
os.makedirs(challenge_dir)
# Create arkOS metadata file
meta = configparser.SafeConfigParser()
meta.add_section("website")
meta.set("website", "id", self.id)
meta.set("website", "app", self.app.id)
meta.set("website", "ssl", self.cert.id if getattr(self, "cert", None)
else "None")
meta.set("website", "version", self.version or "None")
if getattr(self.app, "website_datapaths", None) \
and self.data_path:
meta.set("website", "data_path", self.data_path)
meta.set("website", "dbengine", "")
meta.set("website", "dbengine",
getattr(self.app, "selected_dbengine", ""))
with open(os.path.join(self.path, ".arkos"), "w") as f:
meta.write(f)
# Call site type's post-installation hook
msg = "Running post-installation. This may take a few minutes..."
nthread.update(Notification("info", "Webs", msg))
specialmsg = self.post_install(extra_vars, dbpasswd)
# Cleanup and reload daemons
msg = "Finishing..."
nthread.update(Notification("info", "Webs", msg))
self.installed = True
storage.websites[self.id] = self
if self.port == 80:
cleanup_acme_dummy(self.domain)
signals.emit("websites", "site_installed", self)
if enable:
self.nginx_enable()
if enable and self.php:
php.open_basedir("add", "/srv/http/")
php_reload()
msg = "{0} site installed successfully".format(self.app.name)
nthread.complete(Notification("success", "Webs", msg))
if specialmsg:
return specialmsg
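
Before downloading the site source, _install guarantees the target directory exists but is empty: rmtree if it is already there, then makedirs. That two-step idiom is worth isolating; a minimal sketch:

import os
import shutil

def empty_dir(path):
    # Ensure path exists and contains nothing
    if os.path.isdir(path):
        shutil.rmtree(path)
    os.makedirs(path)
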
0
Example 62
Project: CouchPotatoServer Source File: runner.py
def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, Env = None, desktop = None):
try:
locale.setlocale(locale.LC_ALL, "")
encoding = locale.getpreferredencoding()
except (locale.Error, IOError):
encoding = None
# for OSes that are poorly configured I'll just force UTF-8
if not encoding or encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
encoding = 'UTF-8'
Env.set('encoding', encoding)
# Do db stuff
db_path = sp(os.path.join(data_dir, 'database'))
old_db_path = os.path.join(data_dir, 'couchpotato.db')
# Remove database folder if both exists
if os.path.isdir(db_path) and os.path.isfile(old_db_path):
db = SuperThreadSafeDatabase(db_path)
db.open()
db.destroy()
# Check if database exists
db = SuperThreadSafeDatabase(db_path)
db_exists = db.exists()
if db_exists:
# Backup before start and cleanup old backups
backup_path = sp(os.path.join(data_dir, 'db_backup'))
backup_count = 5
existing_backups = []
if not os.path.isdir(backup_path): os.makedirs(backup_path)
for root, dirs, files in os.walk(backup_path):
# Only consider files being a direct child of the backup_path
if root == backup_path:
for backup_file in sorted(files):
ints = re.findall('\d+', backup_file)
# Delete non zip files
if len(ints) != 1:
try: os.remove(os.path.join(root, backup_file))
except: pass
else:
existing_backups.append((int(ints[0]), backup_file))
else:
# Delete stray directories.
shutil.rmtree(root)
# Remove all but the last 5
for eb in existing_backups[:-backup_count]:
os.remove(os.path.join(backup_path, eb[1]))
# Create new backup
new_backup = sp(os.path.join(backup_path, '%s.tar.gz' % int(time.time())))
zipf = tarfile.open(new_backup, 'w:gz')
for root, dirs, files in os.walk(db_path):
for zfilename in files:
zipf.add(os.path.join(root, zfilename), arcname = 'database/%s' % os.path.join(root[len(db_path) + 1:], zfilename))
zipf.close()
# Open last
db.open()
else:
db.create()
# Force creation of cachedir
log_dir = sp(log_dir)
cache_dir = sp(os.path.join(data_dir, 'cache'))
python_cache = sp(os.path.join(cache_dir, 'python'))
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
if not os.path.exists(python_cache):
os.mkdir(python_cache)
session = requests.Session()
session.max_redirects = 5
# Register environment settings
Env.set('app_dir', sp(base_path))
Env.set('data_dir', sp(data_dir))
Env.set('log_path', sp(os.path.join(log_dir, 'CouchPotato.log')))
Env.set('db', db)
Env.set('http_opener', session)
Env.set('cache_dir', cache_dir)
Env.set('cache', FileSystemCache(python_cache))
Env.set('console_log', options.console_log)
Env.set('quiet', options.quiet)
Env.set('desktop', desktop)
Env.set('daemonized', options.daemon)
Env.set('args', args)
Env.set('options', options)
# Determine debug
debug = options.debug or Env.setting('debug', default = False, type = 'bool')
Env.set('debug', debug)
# Development
development = Env.setting('development', default = False, type = 'bool')
Env.set('dev', development)
# Disable logging for some modules
for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado', 'requests']:
logging.getLogger(logger_name).setLevel(logging.ERROR)
for logger_name in ['gntp']:
logging.getLogger(logger_name).setLevel(logging.WARNING)
# Disable SSL warning
disable_warnings()
# Use reloader
reloader = debug is True and development and not Env.get('desktop') and not options.daemon
# Logger
logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%m-%d %H:%M:%S')
level = logging.DEBUG if debug else logging.INFO
logger.setLevel(level)
logging.addLevelName(19, 'INFO')
# To screen
if (debug or options.console_log) and not options.quiet and not options.daemon:
hdlr = logging.StreamHandler(sys.stderr)
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
# To file
hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = Env.get('encoding'))
hdlr2.setFormatter(formatter)
logger.addHandler(hdlr2)
# Start logging & enable colors
# noinspection PyUnresolvedReferences
import color_logs
from couchpotato.core.logger import CPLog
log = CPLog(__name__)
log.debug('Started with options %s', options)
# Check soft-chroot dir exists:
try:
# Load Soft-Chroot
soft_chroot = Env.get('softchroot')
soft_chroot_dir = Env.setting('soft_chroot', section = 'core', default = None, type='unicode' )
soft_chroot.initialize(soft_chroot_dir)
except SoftChrootInitError as exc:
log.error(exc)
return
except:
log.error('Unable to check whether SOFT-CHROOT is defined')
return
# Check available space
try:
total_space, available_space = getFreeSpace(data_dir)
if available_space < 100:
log.error('Shutting down as CP needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left', available_space)
return
except:
log.error('Failed getting diskspace: %s', traceback.format_exc())
def customwarn(message, category, filename, lineno, file = None, line = None):
log.warning('%s %s %s line:%s', (category, message, filename, lineno))
warnings.showwarning = customwarn
# Create app
from couchpotato import WebHandler
web_base = ('/' + Env.setting('url_base').lstrip('/') + '/') if Env.setting('url_base') else '/'
Env.set('web_base', web_base)
api_key = Env.setting('api_key')
if not api_key:
api_key = uuid4().hex
Env.setting('api_key', value = api_key)
api_base = r'%sapi/%s/' % (web_base, api_key)
Env.set('api_base', api_base)
# Basic config
host = Env.setting('host', default = '0.0.0.0')
host6 = Env.setting('host6', default = '::')
config = {
'use_reloader': reloader,
'port': tryInt(Env.setting('port', default = 5050)),
'host': host if host and len(host) > 0 else '0.0.0.0',
'host6': host6 if host6 and len(host6) > 0 else '::',
'ssl_cert': Env.setting('ssl_cert', default = None),
'ssl_key': Env.setting('ssl_key', default = None),
}
# Load the app
application = Application(
[],
log_function = lambda x: None,
debug = config['use_reloader'],
gzip = True,
cookie_secret = api_key,
login_url = '%slogin/' % web_base,
)
Env.set('app', application)
# Request handlers
application.add_handlers(".*$", [
(r'%snonblock/(.*)(/?)' % api_base, NonBlockHandler),
# API handlers
(r'%s(.*)(/?)' % api_base, ApiHandler), # Main API handler
(r'%sgetkey(/?)' % web_base, KeyHandler), # Get API key
(r'%s' % api_base, RedirectHandler, {"url": web_base + 'docs/'}), # API docs
# Login handlers
(r'%slogin(/?)' % web_base, LoginHandler),
(r'%slogout(/?)' % web_base, LogoutHandler),
# Catch all webhandlers
(r'%s(.*)(/?)' % web_base, WebHandler),
(r'(.*)', WebHandler),
])
# Static paths
static_path = '%sstatic/' % web_base
for dir_name in ['fonts', 'images', 'scripts', 'style']:
application.add_handlers(".*$", [
('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {'path': sp(os.path.join(base_path, 'couchpotato', 'static', dir_name))})
])
Env.set('static_path', static_path)
# Load configs & plugins
loader = Env.get('loader')
loader.preload(root = sp(base_path))
loader.run()
# Fill database with needed stuff
fireEvent('database.setup')
if not db_exists:
fireEvent('app.initialize', in_order = True)
fireEvent('app.migrate')
# Go go go!
from tornado.ioloop import IOLoop
from tornado.autoreload import add_reload_hook
loop = IOLoop.current()
# Reload hook
def reload_hook():
fireEvent('app.shutdown')
add_reload_hook(reload_hook)
# Some logging and fire load event
try: log.info('Starting server on port %(port)s', config)
except: pass
fireEventAsync('app.load')
ssl_options = None
if config['ssl_cert'] and config['ssl_key']:
ssl_options = {
'certfile': config['ssl_cert'],
'keyfile': config['ssl_key'],
}
server = HTTPServer(application, no_keep_alive = True, ssl_options = ssl_options)
try_restart = True
restart_tries = 5
while try_restart:
try:
if config['host'].startswith('unix:'):
server.add_socket(bind_unix_socket(config['host'][5:]))
else:
server.listen(config['port'], config['host'])
if Env.setting('ipv6', default = False):
try: server.listen(config['port'], config['host6'])
except: log.info2('Tried to bind to IPV6 but failed')
loop.start()
server.close_all_connections()
server.stop()
loop.close(all_fds = True)
except Exception as e:
log.error('Failed starting: %s', traceback.format_exc())
try:
nr, msg = e
if nr == 48:
log.info('Port (%s) needed for CouchPotato is already in use, try %s more time after few seconds', (config.get('port'), restart_tries))
time.sleep(1)
restart_tries -= 1
if restart_tries > 0:
continue
else:
return
except ValueError:
return
except:
pass
raise
try_restart = False
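
While walking the backup directory, runCouchPotato deletes any subdirectory it encounters with shutil.rmtree, keeping only the numbered .tar.gz backups that live directly under backup_path. If only direct children matter, the same cleanup can be written without os.walk; a sketch:

import os
import shutil

def remove_stray_dirs(backup_path):
    # Keep direct files (the backups); delete any directories
    for entry in os.listdir(backup_path):
        full = os.path.join(backup_path, entry)
        if os.path.isdir(full):
            shutil.rmtree(full)
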
0
Example 63
Project: yunohost Source File: service.py
def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
list_pending=False):
"""
Regenerate the configuration file(s) for a service
Keyword argument:
names -- Services name to regenerate configuration of
with_diff -- Show differences in case of configuration changes
force -- Override all manual modifications in configuration files
dry_run -- Show what would have been regenerated
list_pending -- List pending configuration files and exit
"""
result = {}
# Return the list of pending conf
if list_pending:
pending_conf = _get_pending_conf(names)
if with_diff:
for service, conf_files in pending_conf.items():
for system_path, pending_path in conf_files.items():
pending_conf[service][system_path] = {
'pending_conf': pending_path,
'diff': _get_files_diff(
system_path, pending_path, True),
}
return pending_conf
# Clean pending conf directory
if os.path.isdir(pending_conf_dir):
if not names:
shutil.rmtree(pending_conf_dir, ignore_errors=True)
else:
for name in names:
shutil.rmtree(os.path.join(pending_conf_dir, name),
ignore_errors=True)
else:
filesystem.mkdir(pending_conf_dir, 0755, True)
# Format common hooks arguments
common_args = [1 if force else 0, 1 if dry_run else 0]
# Execute hooks for pre-regen
pre_args = ['pre',] + common_args
def _pre_call(name, priority, path, args):
# create the pending conf directory for the service
service_pending_path = os.path.join(pending_conf_dir, name)
filesystem.mkdir(service_pending_path, 0755, True, uid='admin')
# return the arguments to pass to the script
return pre_args + [service_pending_path,]
pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call)
# Update the services name
names = pre_result['succeed'].keys()
if not names:
raise MoulinetteError(errno.EIO,
m18n.n('service_regenconf_failed',
services=', '.join(pre_result['failed'])))
# Set the processing method
_regen = _process_regen_conf if not dry_run else lambda *a, **k: True
# Iterate over services and process pending conf
for service, conf_files in _get_pending_conf(names).items():
logger.info(m18n.n(
'service_regenconf_pending_applying' if not dry_run else \
'service_regenconf_dry_pending_applying',
service=service))
conf_hashes = _get_conf_hashes(service)
succeed_regen = {}
failed_regen = {}
for system_path, pending_path in conf_files.items():
logger.debug("processing pending conf '%s' to system conf '%s'",
pending_path, system_path)
conf_status = None
regenerated = False
# Get the diff between files
conf_diff = _get_files_diff(
system_path, pending_path, True) if with_diff else None
# Check if the conf must be removed
to_remove = True if os.path.getsize(pending_path) == 0 else False
# Retrieve and calculate hashes
system_hash = _calculate_hash(system_path)
saved_hash = conf_hashes.get(system_path, None)
new_hash = None if to_remove else _calculate_hash(pending_path)
# -> system conf does not exist
if not system_hash:
if to_remove:
logger.debug("> system conf is already removed")
os.remove(pending_path)
continue
if not saved_hash or force:
if force:
logger.debug("> system conf has been manually removed")
conf_status = 'force-created'
else:
logger.debug("> system conf does not exist yet")
conf_status = 'created'
regenerated = _regen(
system_path, pending_path, save=False)
else:
logger.warning(m18n.n(
'service_conf_file_manually_removed',
conf=system_path))
conf_status = 'removed'
# -> system conf is not managed yet
elif not saved_hash:
logger.debug("> system conf is not managed yet")
if system_hash == new_hash:
logger.debug("> no changes to system conf has been made")
conf_status = 'managed'
regenerated = True
elif force and to_remove:
regenerated = _regen(system_path)
conf_status = 'force-removed'
elif force:
regenerated = _regen(system_path, pending_path)
conf_status = 'force-updated'
else:
logger.warning(m18n.n('service_conf_file_not_managed',
conf=system_path))
conf_status = 'unmanaged'
# -> system conf has not been manually modified
elif system_hash == saved_hash:
if to_remove:
regenerated = _regen(system_path)
conf_status = 'removed'
elif system_hash != new_hash:
regenerated = _regen(system_path, pending_path)
conf_status = 'updated'
else:
logger.debug("> system conf is already up-to-date")
os.remove(pending_path)
continue
else:
logger.debug("> system conf has been manually modified")
if system_hash == new_hash:
logger.debug("> new conf is as current system conf")
conf_status = 'managed'
regenerated = True
elif force:
regenerated = _regen(system_path, pending_path)
conf_status = 'force-updated'
else:
logger.warning(m18n.n(
'service_conf_file_manually_modified',
conf=system_path))
conf_status = 'modified'
# Store the result
conf_result = {'status': conf_status}
if conf_diff is not None:
conf_result['diff'] = conf_diff
if regenerated:
succeed_regen[system_path] = conf_result
conf_hashes[system_path] = new_hash
if os.path.isfile(pending_path):
os.remove(pending_path)
else:
failed_regen[system_path] = conf_result
# Check for service conf changes
if not succeed_regen and not failed_regen:
logger.info(m18n.n('service_conf_up_to_date', service=service))
continue
elif not failed_regen:
logger.success(m18n.n(
'service_conf_updated' if not dry_run else \
'service_conf_would_be_updated',
service=service))
if succeed_regen and not dry_run:
_update_conf_hashes(service, conf_hashes)
# Append the service results
result[service] = {
'applied': succeed_regen,
'pending': failed_regen
}
# Return in case of dry run
if dry_run:
return result
# Execute hooks for post-regen
post_args = ['post',] + common_args
def _pre_call(name, priority, path, args):
# append comma-separated applied changes for the service
if name in result and result[name]['applied']:
regen_conf_files = ','.join(result[name]['applied'].keys())
else:
regen_conf_files = ''
return post_args + [regen_conf_files,]
hook_callback('conf_regen', names, pre_callback=_pre_call)
return result
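
service_regen_conf shows the ignore_errors=True variant: cleaning the pending conf directory must not fail just because a tree is already gone. A sketch of that tolerant cleanup (the directory path below is an assumption, not yunohost's actual constant):

import os
import shutil

PENDING_CONF_DIR = '/var/cache/example/pending'  # hypothetical path

def clean_pending(names=None):
    # No names: drop everything; otherwise drop per-service subtrees.
    # ignore_errors=True turns a missing tree into a no-op.
    if not names:
        shutil.rmtree(PENDING_CONF_DIR, ignore_errors=True)
        return
    for name in names:
        shutil.rmtree(os.path.join(PENDING_CONF_DIR, name), ignore_errors=True)
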
0
Example 64
Project: Bluto Source File: output.py
def action_output_wild_false(brute_results_dict, sub_intrest, google_results, bing_true_results, linkedin_results, check_count, domain, time_spent_email, time_spent_brute, time_spent_total, report_location, company, data_mine):
info('Output action_output_wild_false: Start')
linkedin_evidence_results = []
email_evidence_results = []
email_results = []
email_seen = []
url_seen = []
person_seen = []
final_emails = []
for email, url in google_results:
try:
e1, e2 = email.split(',')
if url not in email_seen:
email_seen.append(url)
email_evidence_results.append((str(e2).replace(' ',''),url))
email_evidence_results.append((str(e1).replace(' ',''),url))
email_results.append((str(e2).replace(' ','')))
email_results.append((str(e1).replace(' ','')))
except ValueError:
if url not in email_seen:
email_seen.append(url)
email_evidence_results.append((str(email).replace(' ',''),url))
email_results.append(str(email).replace(' ',''))
for e, u in bing_true_results:
email_results.append(e)
if u not in url_seen:
email_evidence_results.append((e, u))
for url, person, description in linkedin_results:
if person not in person_seen:
person_seen.append(person)
linkedin_evidence_results.append((url, person, description))
linkedin_evidence_results.sort(key=lambda tup: tup[1])
sorted_email = set(sorted(email_results))
for email in sorted_email:
if email == '[]':
pass
elif email == '@' + domain:
pass
else:
final_emails.append(email)
email_count = len(final_emails)
staff_count = len(person_seen)
f_emails = sorted(final_emails)
pwned_results = action_pwned(f_emails)
c_accounts = len(pwned_results)
print '\n\nEmail Addresses:\n'
write_html(email_evidence_results, linkedin_evidence_results, pwned_results, report_location, company, data_mine)
if f_emails:
for email in f_emails:
print str(email).replace("u'","").replace("'","").replace('[','').replace(']','')
else:
print '\tNo Data To Be Found'
print '\nCompromised Accounts:\n'
if pwned_results:
sorted_pwned = sorted(pwned_results)
for account in sorted_pwned:
print 'Account: \t{}'.format(account[0])
print 'Domain: \t{}'.format(account[1])
print 'Date: \t{}\n'.format(account[3])
else:
print '\tNo Data To Be Found'
print '\nLinkedIn Results:\n'
sorted_person = sorted(person_seen)
if sorted_person:
for person in sorted_person:
print person
else:
print '\tNo Data To Be Found'
if data_mine is not None:
user_names = data_mine[0]
software_list = data_mine[1]
download_count = data_mine[2]
download_list = data_mine[3]
username_count = len(user_names)
software_count = len(software_list)
print '\nData Found In Document MetaData'
print '\nPotential Usernames:\n'
if user_names:
for user in user_names:
print '\t' + colored(user, 'red')
else:
print '\tNo Data To Be Found'
print '\nSoftware And Versions Found:\n'
if software_list:
for software in software_list:
print '\t' + colored(software, 'red')
else:
print '\tNo Data To Be Found'
else:
user_names = []
software_list = []
download_count = 0
username_count = len(user_names)
software_count = len(software_list)
sorted_dict = collections.OrderedDict(sorted(brute_results_dict.items()))
bruted_count = len(sorted_dict)
print "\nBluto Results: \n"
for item in sorted_dict:
if item in sub_intrest:
print colored(item + "\t", 'red'), colored(sorted_dict[item], 'red')
else:
print item + "\t",sorted_dict[item]
time_spent_email_f = str(datetime.timedelta(seconds=(time_spent_email))).split('.')[0]
time_spent_brute_f = str(datetime.timedelta(seconds=(time_spent_brute))).split('.')[0]
time_spent_total_f = str(datetime.timedelta(seconds=(time_spent_total))).split('.')[0]
print '\nHosts Identified: {}' .format(str(bruted_count))
print 'Potential Emails Found: {}' .format(str(email_count))
print 'Potential Staff Members Found: {}' .format(str(staff_count))
print 'Compromised Accounts: {}' .format(str(c_accounts))
print 'Potential Usernames Found: {}'.format(username_count)
print 'Potential Software Found: {}'.format(software_count)
print 'Documents Downloaded: {}'.format(download_count)
print "Email Enumeration:", time_spent_email_f
print "Requests executed:", str(check_count) + " in ", time_spent_brute_f
print "Total Time:", time_spent_total_f
info('Hosts Identified: {}' .format(str(bruted_count)))
info("Email Enumeration: {}" .format(str(time_spent_email_f)))
info('Compromised Accounts: {}' .format(str(c_accounts)))
info('Potential Staff Members Found: {}' .format(str(staff_count)))
info('Potential Emails Found: {}' .format(str(email_count)))
info('Potential Usernames Found: {}'.format(username_count))
info('Potential Software Found: {}'.format(software_count))
info('Documents Downloaded: {}'.format(download_count))
info("Total Time:" .format(str(time_spent_total_f)))
info('DNS No Wild Cards + Email Hunter Run completed')
info('Output action_output_wild_false: Completed')
domain_r = domain.split('.')
docs = os.path.expanduser('~/Bluto/doc/{}/'.format(domain_r[0]))
answers = ['no','n','y','yes']
while True:
answer = raw_input("\nWould you like to keep all local data?\n(Local Logs, Downloded Docuements, HTML Evidence Report)\n\nYes|No:").lower()
if answer in answers:
if answer == 'y' or answer == 'yes':
domain
print '\nThe documents are located here: {}'.format(docs)
print 'The logs are located here: {}.'.format(LOG_DIR)
print "\nAn evidence report has been written to {}\n".format(report_location)
while True:
answer = raw_input("Would you like to open this report now? ").lower()
if answer in answers:
if answer == 'y' or answer == 'yes':
print '\nOpening {}' .format(report_location)
webbrowser.open('file://' + str(report_location))
break
else:
break
else:
print 'Your answer needs to be either yes|y|no|n rather than {}' .format(answer)
break
else:
shutil.rmtree(docs)
shutil.rmtree(LOG_DIR)
os.remove(report_location)
break
else:
print '\tYour answer needs to be either yes|y|no|n rather than {}' .format(answer)
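
The cleanup branch above removes both the document and log trees with bare rmtree calls, which will raise if a file inside is read-only (common on Windows). A defensive variant uses rmtree's onerror hook to clear the read-only bit and retry; a sketch with hypothetical paths:

import os
import shutil
import stat

def _force_writable(func, path, exc_info):
    # Called by rmtree on failure: make the entry writable and retry once
    os.chmod(path, stat.S_IWRITE)
    func(path)

def discard_local_data(*paths):
    for p in paths:
        if os.path.isdir(p):
            shutil.rmtree(p, onerror=_force_writable)
        elif os.path.isfile(p):
            os.remove(p)
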
0
Example 65
Project: plugin.video.streamondemand Source File: mct.py
def play(url, xlistitem, is_view=None, subtitle=""):
# -- Needed for some websites --------------------------------
if not url.endswith(".torrent") and not url.startswith("magnet"):
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
# -- Create two folders in downloads for the files ----------
save_path_videos = os.path.join( config.get_setting("downloadpath") , "torrent-videos" )
save_path_torrents = os.path.join( config.get_setting("downloadpath") , "torrent-torrents" )
if not os.path.exists( save_path_torrents ): os.mkdir(save_path_torrents)
# -- Use a torrent file from the web, a magnet, or local disk -
if not os.path.isfile(url) and not url.startswith("magnet"):
# -- http - create the torrent file ----------------------
data = url_get(url)
# -- The torrent name will be the one contained in the --
# -- data. -
re_name = urllib.unquote( scrapertools.get_match(data,':name\d+:(.*?)\d+:') )
#torrent_file = os.path.join(save_path_torrents, re_name+'.torrent')
torrent_file = filetools.join(save_path_torrents, unicode(re_name, "utf-8", errors="replace")+'.torrent')
f = open(torrent_file,'wb')
f.write(data)
f.close()
elif os.path.isfile(url):
# -- file - to use torrents from the local disk ----------
torrent_file = url
else:
# -- magnet ---------------------------------------------
torrent_file = url
# -----------------------------------------------------------
# -- MCT - MiniClienteTorrent -------------------------------
ses = lt.session()
print "### Init session ########"
print lt.version
print "#########################"
ses.add_dht_router("router.bittorrent.com",6881)
ses.add_dht_router("router.utorrent.com",6881)
ses.add_dht_router("router.bitcomet.com",554)
ses.add_dht_router("dht.transmissionbt.com",6881)
trackers = [
"http://exodus.desync.com:6969/announce",
"udp://tracker.publicbt.com:80/announce",
"udp://tracker.openbittorrent.com:80/announce",
"http://tracker.torrentbay.to:6969/announce",
"http://fr33dom.h33t.com:3310/announce",
"http://tracker.pow7.com/announce",
"udp://tracker.ccc.de:80/announce",
"http://tracker.bittorrent.am:80/announce",
"http://denis.stalker.h3q.com:6969/announce",
"udp://tracker.prq.to:80/announce",
"udp://tracker.istole.it:80/announce",
"udp://open.demonii.com:1337",
"http://9.rarbg.com:2710/announce",
"http://announce.torrentsmd.com:6969/announce",
"http://bt.careland.com.cn:6969/announce",
"http://explodie.org:6969/announce",
"http://mgtracker.org:2710/announce",
"http://tracker.best-torrents.net:6969/announce",
"http://tracker.tfile.me/announce",
"http://tracker.torrenty.org:6969/announce",
"http://tracker1.wasabii.com.tw:6969/announce",
"udp://9.rarbg.com:2710/announce",
"udp://9.rarbg.me:2710/announce",
"udp://coppersurfer.tk:6969/announce",
"udp://tracker.btzoo.eu:80/announce",
"http://www.spanishtracker.com:2710/announce",
"http://www.todotorrents.com:2710/announce",
]
video_file = ""
# -- magnet2torrent -----------------------------------------
if torrent_file.startswith("magnet"):
tempdir = tempfile.mkdtemp()
params = {
'save_path': tempdir,
'trackers':trackers,
'storage_mode': lt.storage_mode_t.storage_mode_allocate,
'paused': False,
'auto_managed': True,
'duplicate_is_error': True
}
h = lt.add_magnet_uri(ses, torrent_file, params)
dp = xbmcgui.DialogProgress()
dp.create('streamondemand-MCT')
while not h.has_metadata():
message, porcent, msg_file, s, download = getProgress(h, "Creating torrent from magnet")
dp.update(porcent, message, msg_file)
if s.state == 1: download = 1
if dp.iscanceled():
dp.close()
remove_files( download, torrent_file, video_file, ses, h )
return
dp.close()
info = h.get_torrent_info()
data = lt.bencode( lt.create_torrent(info).generate() )
#torrent_file = os.path.join(save_path_torrents, info.name() + ".torrent")
torrent_file = os.path.join(save_path_torrents, unicode(info.name(), "utf-8", errors="replace") + ".torrent")
f = open(torrent_file,'wb')
f.write(data)
f.close()
ses.remove_torrent(h)
shutil.rmtree(tempdir)
# -----------------------------------------------------------
# -- Torrent files -------------------------------------------
e = lt.bdecode(open(torrent_file, 'rb').read())
info = lt.torrent_info(e)
# -- The largest file, or one of the largest, is assumed -
# -- to be the video, or at least the video used as the -
# -- reference for the file type -
print "##### Archivos ## %s ##" % len(info.files())
_index_file, _video_file, _size_file = get_video_file(info)
_video_file_ext = os.path.splitext( _video_file )[1]
if _video_file_ext == ".avi" or _video_file_ext == ".mp4":
print "##### storage_mode_t.storage_mode_allocate ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_allocate } )
else:
print "##### storage_mode: none ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_sparse } )
# -----------------------------------------------------------
# -- Sequential download - piece 1, piece 2, ... ------------
h.set_sequential_download(True)
h.force_reannounce()
h.force_dht_announce()
# -- Prioritize/select the file -----------------------------
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1:
_index = _index_file
video_file = _video_file
video_size = _size_file
# -- Initialize variables for the automatic 'pause' when -
# -- the video approaches an incomplete piece -
is_greater_num_pieces = False
is_greater_num_pieces_plus = False
is_greater_num_pieces_pause = False
#porcent4first_pieces = int( video_size / 1073741824 )
porcent4first_pieces = int( video_size * 0.000000005 )
if porcent4first_pieces < 10: porcent4first_pieces = 10
if porcent4first_pieces > 100: porcent4first_pieces = 100
#num_pieces_to_resume = int( video_size / 1610612736 )
num_pieces_to_resume = int( video_size * 0.0000000025 )
if num_pieces_to_resume < 5: num_pieces_to_resume = 5
if num_pieces_to_resume > 25: num_pieces_to_resume = 25
print "##### porcent4first_pieces ## %s ##" % porcent4first_pieces
print "##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume
# -- Prioritize or select the pieces of the file to be -
# -- played, using 'file_priorities' -
piece_set = set_priority_pieces(h, _index, video_file, video_size)
# -- Create the progress dialog for the first loop ----------
dp = xbmcgui.DialogProgress()
dp.create('streamondemand-MCT')
_pieces_info = {}
# -- Two nested loops ----------------------------------------
# -- Download - first loop -
while not h.is_seed():
s = h.status()
xbmc.sleep(100)
# -- Fetch the progress data ----------------------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
# -- If the state is 'checking', a download exists ------
# -- 'download' is used to know whether there is -
# -- downloaded data for the 'remove_files' dialog -
if s.state == 1: download = 1
# -- Player - play --------------------------------------
# -- Check whether the pieces needed to start the -
# -- video have been completed ............... -
first_pieces = True
_p = ""
_c = 0
for i in range( piece_set[0], piece_set[porcent4first_pieces] ):
_p+= "[%s:%s]" % ( i, h.have_piece(i) )
first_pieces&= h.have_piece(i)
if h.have_piece(i): _c+= 1
_pieces_info = {'current': 0, 'continuous': "%s/%s" % (_c,porcent4first_pieces), 'have': h.status().num_pieces, 'len': len(piece_set)}
_p = "##### first_pieces [%s/%s][%s]: " % ( _c, porcent4first_pieces, len(piece_set) ) + _p
print _p
# -- -------------------------------------------------- -
if is_view != "Ok" and first_pieces:
print "##### porcent [%.2f%%]" % (s.progress * 100)
is_view = "Ok"
dp.close()
# -- Player - watch the video -----------------------
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
#ren_video_file = os.path.join( save_path_videos, video_file ).replace('\\','\\\\')
ren_video_file = os.path.join( save_path_videos, video_file )
playlist.add( ren_video_file, xlistitem )
#playlist.add( os.path.join( save_path_videos, video_file ), xlistitem )
#playlist.add( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20'), xlistitem )
player = play_video( xbmc.PLAYER_CORE_AUTO )
player.play(playlist)
'''
# -- Player - watch the video -----------------------
player = play_video()
#player.play( os.path.join( save_path_videos, video_file ) )
player.play( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20') )
'''
#player.play( os.path.join( save_path_videos, video_file ) )
# -- Cancellation counter for the automatic -
# -- 'pause' window -
is_greater_num_pieces_canceled = 0
continuous_pieces = 0
porcent_time = 0.00
current_piece = 0
# -- Prevent Kodi from resuming a file that was -
# -- played previously and then deleted, so that -
# -- playback does not restart at a piece that is -
# -- not yet complete and trigger the automatic -
# -- 'pause' -
not_resume = True
# -- Subtitles flag
_sub = False
# -- Second loop - Player - event handling ----------
while player.isPlaying():
xbmc.sleep(100)
# -- Add subtitles
if subtitle!="" and not _sub:
_sub = True
player.setSubtitles(subtitle)
# -- Prevent Kodi from resuming at the start of ---
# -- the download of a known file -
if not_resume:
player.seekTime(0)
not_resume = False
#xbmc.sleep(1000)
# -- Automatic 'pause' control -
continuous_pieces = count_completed_continuous_pieces(h, piece_set)
if xbmc.Player().isPlaying():
# -- Video progress percentage --------------
porcent_time = player.getTime() / player.getTotalTime() * 100
# -- Piece currently being played -----------
current_piece = int( porcent_time / 100 * len(piece_set) )
# -- Control flags --------------------------
is_greater_num_pieces = (current_piece > continuous_pieces - num_pieces_to_resume)
is_greater_num_pieces_plus = (current_piece + porcent4first_pieces > continuous_pieces)
is_greater_num_pieces_finished = (current_piece + porcent4first_pieces >= len(piece_set))
# -- Trigger the automatic 'pause' ----------
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
is_greater_num_pieces_pause = True
player.pause()
# -- Log ------------------------------------
_TotalTime = player.getTotalTime()
_Time = player.getTime()
_print_log = "\n##### Player ##################################"
_print_log+= "\nTamaño del vídeo: %s" % video_size
_print_log+= "\nTotal piezas: %s" % len(piece_set)
_print_log+= "\nPiezas contiguas: %s" % continuous_pieces
_print_log+= "\n-----------------------------------------------"
_print_log+= "\nVídeo-Total segundos: %s" % _TotalTime
_print_log+= "\nVídeo-Progreso segundos: %s" % _Time
_print_log+= "\nVídeo-Progreso porcentaje: %.2f%%" % porcent_time
_print_log+= "\n-----------------------------------------------"
_print_log+= "\ncurrent_piece: %s" % current_piece
_print_log+= "\nis_greater_num_pieces: %s" % is_greater_num_pieces
_print_log+= "\nis_greater_num_pieces_plus: %s" % is_greater_num_pieces_plus
_print_log+= "\nis_greater_num_pieces_pause: %s" % is_greater_num_pieces_pause
_print_log+= "\nis_greater_num_pieces_finished: %s" % is_greater_num_pieces_finished
_print_log+= "\nPieza que se está visionando: %.2f" % ( porcent_time / 100 * len(piece_set) )
_print_log+= "\nOffset que se está visionando: %.2f" % ( porcent_time / 100 * video_size )
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
_print_log+= "\n+++++++++++++++++++++++++++++++++++++++++++++++"
_print_log+= "\nPausa con:"
_print_log+= "\n current_piece = %s" % current_piece
_print_log+= "\n continuous_pieces = %s" % continuous_pieces
_print_log+= "\n###############################################"
print _print_log
# -------------------------------------------
_pieces_info = {'current': current_piece, 'continuous': continuous_pieces, 'have': h.status().num_pieces, 'len': len(piece_set)}
# -- Close the progress dialog ------------------
if player.resumed:
dp.close()
# -- Show the progress dialog -------------------
if player.paused:
# -- Create the dialog if it does not exist -
if not player.statusDialogoProgress:
dp = xbmcgui.DialogProgress()
dp.create('streamondemand-MCT')
player.setDialogoProgress()
# -- Status dialogs during playback ---------
if not h.is_seed():
# -- Fetch the progress data ------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
dp.update(porcent, message, msg_file)
else:
dp.update(100, "Download completo: " + video_file)
# -- Progress dialog cancelled during playback -
# -- Continue -
if dp.iscanceled():
dp.close()
player.pause()
# -- Progress dialog cancelled during playback -
# -- in the automatic 'pause' window. -
# -- Stop if the counter reaches 3 -
if dp.iscanceled() and is_greater_num_pieces_pause:
is_greater_num_pieces_canceled+= 1
if is_greater_num_pieces_canceled == 3:
player.stop()
# -- Disable automatic 'pause' and ----------
# -- reset the cancellation counter -
if not dp.iscanceled() and not is_greater_num_pieces_plus and is_greater_num_pieces_pause:
dp.close()
player.pause()
is_greater_num_pieces_pause = False
is_greater_num_pieces_canceled = 0
# -- The user cancelled playback ------------
# -- Finish -
if player.ended:
# -- Remove-files dialog ----------------
remove_files( download, torrent_file, video_file, ses, h )
return
# -- Kodi - playback was closed -------------------------
# -- Continue | Finish -
if is_view == "Ok" and not xbmc.Player().isPlaying():
if info.num_files() == 1:
# -- Continue-or-finish dialog ------------------
d = xbmcgui.Dialog()
ok = d.yesno('streamondemand-MCT', 'Kodi ha chiuso il video.', 'Continuare con la sessione?')
else: ok = False
# -- YES --------------------------------------------
if ok:
# -- Continue: ----------------------------------
is_view=None
else:
# -- Finish: ------------------------------------
# -- Check whether the video belongs to a ------
# -- list of files -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Remove-files dialog --------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- File list. Options dialog --------------
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('streamondemand-MCT')
# -- Show progress before playback ----------------------
if is_view != "Ok" :
dp.update(porcent, message, msg_file)
# -- Progress cancelled before playback -----------------
# -- Finish -
if dp.iscanceled():
dp.close()
# -- Check whether the video belongs to a list of -
# -- files -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Remove-files dialog ------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- File list. Options dialog ------------------
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('streamondemand-MCT')
# -- Kodi - Error? - Should never get here ------------------
if is_view == "Ok" and not xbmc.Player().isPlaying():
dp.close()
# -- Remove-files dialog --------------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
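Note on the pattern above: the magnet2torrent branch writes all of libtorrent's intermediate state into a directory created with tempfile.mkdtemp() and deletes the whole tree with shutil.rmtree() once the .torrent file has been written. Below is a minimal, standalone sketch of that scratch-directory idiom (not part of the example; the payload and file name are made up for illustration), with the rmtree moved into a finally block so the directory is also removed when the work fails:
import os
import shutil
import tempfile

def write_in_scratch_dir(payload):
    tempdir = tempfile.mkdtemp()  # private scratch directory
    try:
        path = os.path.join(tempdir, "work.bin")  # hypothetical work file
        with open(path, "wb") as f:
            f.write(payload)
        return os.path.getsize(path)
    finally:
        shutil.rmtree(tempdir)  # always remove the tree, even on error

print(write_in_scratch_dir(b"some bytes"))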
0
Example 66
def execute(**kargs):
app = kargs.get("app")
args = kargs.get("args")
play_env = kargs.get("env")
is_application = os.path.exists(os.path.join(app.path, 'conf', 'application.conf'))
if is_application:
app.check()
app.check_jpda()
modules = app.modules()
classpath = app.getClasspath()
# determine the name of the project
# if this is an application, the name of the project is in the application.conf file
# if this is a module, we infer the name from the path
application_name = app.readConf('application.name')
vm_arguments = app.readConf('jvm.memory')
# JDK 7 compat
vm_arguments = vm_arguments +' -XX:-UseSplitVerifier'
if application_name:
application_name = application_name.replace("/", " ")
else:
application_name = os.path.basename(app.path)
dotProject = os.path.join(app.path, '.project')
dotClasspath = os.path.join(app.path, '.classpath')
dotSettings = os.path.join(app.path, '.settings')
eclipse = os.path.join(app.path, 'eclipse')
if os.path.exists(eclipse):
shutil.rmtree(eclipse)
if os.name == 'nt':
time.sleep(1)
if os.path.exists(dotSettings):
shutil.rmtree(dotSettings)
if os.name == 'nt':
time.sleep(1)
shutil.copyfile(os.path.join(play_env["basedir"], 'resources/eclipse/.project'), dotProject)
shutil.copyfile(os.path.join(play_env["basedir"], 'resources/eclipse/.classpath'), dotClasspath)
if is_application:
shutil.copytree(os.path.join(play_env["basedir"], 'resources/eclipse'), eclipse)
shutil.copytree(os.path.join(play_env["basedir"], 'resources/eclipse/.settings'), dotSettings)
replaceAll(dotProject, r'%PROJECT_NAME%', application_name)
playJarPath = os.path.join(play_env["basedir"], 'framework', 'play-%s.jar' % play_env['version'])
playSourcePath = os.path.join(os.path.dirname(playJarPath), 'src')
if os.name == 'nt':
playSourcePath=playSourcePath.replace('\\','/').capitalize()
cpJarToSource = {}
lib_src = os.path.join(app.path, 'tmp/lib-src')
for el in classpath:
# library sources jars in the lib directory
if os.path.basename(el) != "conf" and el.endswith('-sources.jar'):
cpJarToSource[el.replace('-sources', '')] = el
# pointers to source jars produced by 'play deps'
src_file = os.path.join(lib_src, os.path.basename(el) + '.src')
if os.path.exists(src_file):
f = file(src_file)
cpJarToSource[el] = f.readline().rstrip()
f.close()
javadocLocation = {}
for el in classpath:
urlFile = el.replace(r'.jar','.docurl')
if os.path.basename(el) != "conf" and os.path.exists(urlFile):
javadocLocation[el] = urlFile
cpXML = ""
for el in classpath:
if os.path.basename(el) != "conf":
if el == playJarPath:
cpXML += '<classpathentry kind="lib" path="%s" sourcepath="%s" />\n\t' % (os.path.normpath(el) , playSourcePath)
else:
if cpJarToSource.has_key(el):
cpXML += '<classpathentry kind="lib" path="%s" sourcepath="%s"/>\n\t' % (os.path.normpath(el), cpJarToSource[el])
else:
if javadocLocation.has_key(el):
cpXML += '<classpathentry kind="lib" path="%s">\n\t\t' % os.path.normpath(el)
cpXML += '<attributes>\n\t\t\t'
f = file(javadocLocation[el])
url = f.readline()
f.close()
cpXML += '<attribute name="javadoc_location" value="%s"/>\n\t\t' % (url.strip())
cpXML += '</attributes>\n\t'
cpXML += '</classpathentry>\n\t'
else:
cpXML += '<classpathentry kind="lib" path="%s"/>\n\t' % os.path.normpath(el)
if not is_application:
cpXML += '<classpathentry kind="src" path="src"/>'
replaceAll(dotClasspath, r'%PROJECTCLASSPATH%', cpXML)
# generate source path for test folder if one exists
cpTEST = ""
if os.path.exists(os.path.join(app.path, 'test')):
cpTEST += '<classpathentry kind="src" path="test"/>'
replaceAll(dotClasspath, r'%TESTCLASSPATH%', cpTEST)
if len(modules):
lXML = ""
cXML = ""
for module in modules:
lXML += '<link><name>%s</name><type>2</type><location>%s</location></link>\n' % (os.path.basename(module), os.path.join(module, 'app').replace('\\', '/'))
if os.path.exists(os.path.join(module, "conf")):
lXML += '<link><name>conf/%s</name><type>2</type><location>%s/conf</location></link>\n' % (os.path.basename(module), module.replace('\\', '/'))
if os.path.exists(os.path.join(module, "public")):
lXML += '<link><name>public/%s</name><type>2</type><location>%s/public</location></link>\n' % (os.path.basename(module), module.replace('\\', '/'))
cXML += '<classpathentry kind="src" path="%s"/>\n\t' % (os.path.basename(module))
replaceAll(dotProject, r'%LINKS%', '<linkedResources>%s</linkedResources>' % lXML)
replaceAll(dotClasspath, r'%MODULES%', cXML)
else:
replaceAll(dotProject, r'%LINKS%', '')
replaceAll(dotClasspath, r'%MODULES%', '')
if is_application:
replaceAll(os.path.join(app.path, 'eclipse/debug.launch'), r'%PROJECT_NAME%', application_name)
replaceAll(os.path.join(app.path, 'eclipse/debug.launch'), r'%PLAY_BASE%', play_env["basedir"])
replaceAll(os.path.join(app.path, 'eclipse/debug.launch'), r'%PLAY_ID%', play_env["id"])
replaceAll(os.path.join(app.path, 'eclipse/debug.launch'), r'%JPDA_PORT%', str(app.jpda_port))
replaceAll(os.path.join(app.path, 'eclipse/debug.launch'), r'%PLAY_VERSION%', play_env["version"])
replaceAll(os.path.join(app.path, 'eclipse/debug.launch'), r'%VM_ARGUMENTS%', vm_arguments)
replaceAll(os.path.join(app.path, 'eclipse/test.launch'), r'%PROJECT_NAME%', application_name)
replaceAll(os.path.join(app.path, 'eclipse/test.launch'), r'%PLAY_BASE%', play_env["basedir"])
replaceAll(os.path.join(app.path, 'eclipse/test.launch'), r'%PLAY_ID%', play_env["id"])
replaceAll(os.path.join(app.path, 'eclipse/test.launch'), r'%JPDA_PORT%', str(app.jpda_port))
replaceAll(os.path.join(app.path, 'eclipse/test.launch'), r'%PLAY_VERSION%', play_env["version"])
replaceAll(os.path.join(app.path, 'eclipse/test.launch'), r'%VM_ARGUMENTS%', vm_arguments)
replaceAll(os.path.join(app.path, 'eclipse/connect.launch'), r'%PROJECT_NAME%', application_name)
replaceAll(os.path.join(app.path, 'eclipse/connect.launch'), r'%JPDA_PORT%', str(app.jpda_port))
os.rename(os.path.join(app.path, 'eclipse/connect.launch'), os.path.join(app.path, 'eclipse/Connect JPDA to %s.launch' % application_name))
os.rename(os.path.join(app.path, 'eclipse/test.launch'), os.path.join(app.path, 'eclipse/Test %s.launch' % application_name))
os.rename(os.path.join(app.path, 'eclipse/debug.launch'), os.path.join(app.path, 'eclipse/%s.launch' % application_name))
if is_application:
print "~ OK, the application \"%s\" is ready for eclipse" % application_name
else:
print "~ OK, the module \"%s\" is ready for eclipse" % application_name
print "~ Use File/Import/General/Existing project to import %s into eclipse" % os.path.normpath(app.path)
print "~"
print "~ Use eclipsify again when you want to update eclipse configuration files."
print "~ However, it's often better to delete and re-import the project into your workspace since eclipse keeps dirty caches..."
print "~"
0
Example 67
Project: cgat Source File: bam2wiggle.py
def main(argv=None):
"""script main.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-o", "--output-format", dest="output_format",
type="choice",
choices=(
"bedgraph", "wiggle", "bigbed",
"bigwig", "bed"),
help="output format [default=%default]")
parser.add_option("-s", "--shift-size", dest="shift", type="int",
help="shift reads by a certain amount (ChIP-Seq) "
"[%default]")
parser.add_option("-e", "--extend", dest="extend", type="int",
help="extend reads by a certain amount "
"(ChIP-Seq) [%default]")
parser.add_option("-p", "--wiggle-span", dest="span", type="int",
help="span of a window in wiggle tracks "
"[%default]")
parser.add_option("-m", "--merge-pairs", dest="merge_pairs",
action="store_true",
help="merge paired-ended reads into a single "
"bed interval [default=%default].")
parser.add_option("--scale-base", dest="scale_base", type="float",
help="number of reads/pairs to scale bigwig file to. "
"The default is to scale to 1M reads "
"[default=%default]")
parser.add_option("--scale-method", dest="scale_method", type="choice",
choices=("none", "reads",),
help="scale bigwig output. 'reads' will normalize by "
"the total number reads in the bam file that are used "
"to construct the bigwig file. If --merge-pairs is used "
"the number of pairs output will be used for "
"normalization. 'none' will not scale the bigwig file"
"[default=%default]")
parser.add_option("--max-insert-size", dest="max_insert_size",
type="int",
help="only merge if insert size less that "
"# bases. 0 turns of this filter "
"[default=%default].")
parser.add_option("--min-insert-size", dest="min_insert_size",
type="int",
help="only merge paired-end reads if they are "
"at least # bases apart. "
"0 turns of this filter. [default=%default]")
parser.set_defaults(
samfile=None,
output_format="wiggle",
shift=0,
extend=0,
span=1,
merge_pairs=None,
min_insert_size=0,
max_insert_size=0,
scale_method='none',
scale_base=1000000,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv, add_output_options=True)
if len(args) >= 1:
options.samfile = args[0]
if len(args) == 2:
options.output_filename_pattern = args[1]
if not options.samfile:
raise ValueError("please provide a bam file")
# Read BAM file using Pysam
samfile = pysam.Samfile(options.samfile, "rb")
# Create temporary files / folders
tmpdir = tempfile.mkdtemp()
E.debug("temporary files are in %s" % tmpdir)
tmpfile_wig = os.path.join(tmpdir, "wig")
tmpfile_sizes = os.path.join(tmpdir, "sizes")
# Create dictionary of contig sizes
contig_sizes = dict(list(zip(samfile.references, samfile.lengths)))
# write contig sizes
outfile_size = IOTools.openFile(tmpfile_sizes, "w")
for contig, size in sorted(contig_sizes.items()):
outfile_size.write("%s\t%s\n" % (contig, size))
outfile_size.close()
# Shift and extend only available for bigwig format
if options.shift or options.extend:
if options.output_format != "bigwig":
raise ValueError(
"shift and extend only available for bigwig output")
# Output filename required for bigwig / bigbed computation
if options.output_format == "bigwig":
if not options.output_filename_pattern:
raise ValueError(
"please specify an output file for bigwig computation.")
# Define executable to use for binary conversion
if options.output_format == "bigwig":
executable_name = "wigToBigWig"
else:
raise ValueError("unknown output format `%s`" %
options.output_format)
# check required executable file is in the path
executable = IOTools.which(executable_name)
if not executable:
raise OSError("could not find %s in path." % executable_name)
# Open output file
outfile = IOTools.openFile(tmpfile_wig, "w")
E.info("starting output to %s" % tmpfile_wig)
else:
outfile = IOTools.openFile(tmpfile_wig, "w")
E.info("starting output to stdout")
# Set up output write functions
if options.output_format in ("wiggle", "bigwig"):
# wiggle is one-based, so add 1, also step-size is 1, so need
# to output all bases
if options.span == 1:
outf = lambda outfile, contig, start, end, val: \
outfile.write(
"".join(["%i\t%i\n" % (x, val)
for x in range(start + 1, end + 1)]))
else:
outf = SpanWriter(options.span)
elif options.output_format == "bedgraph":
# bed is 0-based, open-closed
outf = lambda outfile, contig, start, end, val: \
outfile.write("%s\t%i\t%i\t%i\n" % (contig, start, end, val))
# initialise counters
ninput, nskipped, ncontigs = 0, 0, 0
# set output file name
output_filename_pattern = options.output_filename_pattern
if output_filename_pattern:
output_filename = os.path.abspath(output_filename_pattern)
# shift and extend or merge pairs. Output temporary bed file
if options.shift > 0 or options.extend > 0 or options.merge_pairs:
# Workflow 1: convert to bed intervals and use bedtools
# genomecov to build a coverage file.
# Convert to bigwig with UCSC tool bedGraphToBigWig
if options.merge_pairs:
# merge pairs using bam2bed
E.info("merging pairs to temporary file")
counter = _bam2bed.merge_pairs(
samfile,
outfile,
min_insert_size=options.min_insert_size,
max_insert_size=options.max_insert_size,
bed_format=3)
E.info("merging results: {}".format(counter))
if counter.output == 0:
raise ValueError("no pairs output after merging")
else:
# create bed file with shifted/extended tags
shift, extend = options.shift, options.extend
shift_extend = shift + extend
counter = E.Counter()
for contig in samfile.references:
E.debug("output for %s" % contig)
lcontig = contig_sizes[contig]
for read in samfile.fetch(contig):
pos = read.pos
if read.is_reverse:
start = max(0, read.pos + read.alen - shift_extend)
else:
start = max(0, read.pos + shift)
# intervals extending beyond contig are removed
if start >= lcontig:
continue
end = min(lcontig, start + extend)
outfile.write("%s\t%i\t%i\n" % (contig, start, end))
counter.output += 1
outfile.close()
if options.scale_method == "reads":
scale_factor = float(options.scale_base) / counter.output
E.info("scaling: method=%s scale_quantity=%i scale_factor=%f" %
(options.scale_method,
counter.output,
scale_factor))
scale = "-scale %f" % scale_factor
else:
scale = ""
# Convert bed file to coverage file (bedgraph)
tmpfile_bed = os.path.join(tmpdir, "bed")
E.info("computing coverage")
# calculate coverage - format is bedgraph
statement = """bedtools genomecov -bg -i %(tmpfile_wig)s %(scale)s
-g %(tmpfile_sizes)s > %(tmpfile_bed)s""" % locals()
E.run(statement)
# Convert bedgraph to bigwig
E.info("converting to bigwig")
tmpfile_sorted = os.path.join(tmpdir, "sorted")
statement = ("sort -k 1,1 -k2,2n %(tmpfile_bed)s > %(tmpfile_sorted)s;"
"bedGraphToBigWig %(tmpfile_sorted)s %(tmpfile_sizes)s "
"%(output_filename_pattern)s" % locals())
E.run(statement)
else:
# Workflow 2: use pysam column iterator to build a
# wig file. Then convert to bigwig of bedgraph file
# with UCSC tools.
def column_iter(iterator):
start = None
end = 0
n = None
for t in iterator:
if t.pos - end > 1 or n != t.n:
if start is not None:
yield start, end, n
start = t.pos
end = t.pos
n = t.n
end = t.pos
yield start, end, n
if options.scale_method != "none":
raise NotImplementedError(
"scaling not implemented for pileup method")
# Bedgraph track definition
if options.output_format == "bedgraph":
outfile.write("track type=bedGraph\n")
for contig in samfile.references:
# if contig != "chrX": continue
E.debug("output for %s" % contig)
lcontig = contig_sizes[contig]
# Write wiggle header
if options.output_format in ("wiggle", "bigwig"):
outfile.write("variableStep chrom=%s span=%i\n" %
(contig, options.span))
# Generate pileup per contig using pysam and iterate over columns
for start, end, val in column_iter(samfile.pileup(contig)):
# patch: there was a problem with bam files and reads
# overextending at the end. These are usually Ns, but
# need to check as otherwise wigToBigWig fails.
if lcontig <= end:
E.warn("read extending beyond contig: %s: %i > %i" %
(contig, end, lcontig))
end = lcontig
if start >= end:
continue
if val > 0:
outf(outfile, contig, start, end, val)
ncontigs += 1
# Close output file
if isinstance(outf, SpanWriter):
outf.flush(outfile)
else:
outfile.flush()
E.info("finished output")
# Report counters
E.info("ninput=%i, ncontigs=%i, nskipped=%i" %
(ninput, ncontigs, nskipped))
# Convert to binary formats
if options.output_format == "bigwig":
outfile.close()
E.info("starting %s conversion" % executable)
try:
retcode = subprocess.call(
" ".join((executable,
tmpfile_wig,
tmpfile_sizes,
output_filename_pattern)),
shell=True)
if retcode != 0:
E.warn("%s terminated with signal: %i" %
(executable, -retcode))
return -retcode
except OSError as msg:
E.warn("Error while executing bigwig: %s" % msg)
return 1
E.info("finished bigwig conversion")
else:
with open(tmpfile_wig) as inf:
sys.stdout.write(inf.read())
# Cleanup temp files
shutil.rmtree(tmpdir)
E.Stop()
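bam2wiggle keeps every intermediate (wig, sizes, bed, sorted) under a single tempfile.mkdtemp() directory, so the one shutil.rmtree(tmpdir) at the end removes them all. One caveat: because that cleanup is not in a finally block, an exception mid-run leaves the directory behind. A minimal sketch of the same layout with guaranteed cleanup (the file names and contents here are illustrative, not taken from the example):
import os
import shutil
import tempfile

tmpdir = tempfile.mkdtemp()
try:
    tmpfile_wig = os.path.join(tmpdir, "wig")
    tmpfile_sizes = os.path.join(tmpdir, "sizes")
    with open(tmpfile_wig, "w") as f:
        f.write("variableStep chrom=chr1 span=1\n")
    with open(tmpfile_sizes, "w") as f:
        f.write("chr1\t248956422\n")
finally:
    shutil.rmtree(tmpdir)  # one call removes every intermediate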
0
Example 68
Project: edx-platform Source File: import_export.py
def _import_handler(request, courselike_key, root_name, successful_url, context_name, courselike_module, import_func):
"""
Parameterized function containing the meat of import_handler.
"""
if not has_course_author_access(request.user, courselike_key):
raise PermissionDenied()
if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
raise NotImplementedError('coming soon')
else:
# Do everything in a try-except block to make sure everything is properly cleaned up.
try:
data_root = path(settings.GITHUB_REPO_ROOT)
subdir = base64.urlsafe_b64encode(repr(courselike_key))
course_dir = data_root / subdir
filename = request.FILES['course-data'].name
# Use sessions to keep info about import progress
session_status = request.session.setdefault("import_status", {})
courselike_string = unicode(courselike_key) + filename
_save_request_status(request, courselike_string, 0)
# If the course has an entrance exam, remove it and its corresponding
# milestone from the current course state before import.
if root_name == COURSE_ROOT:
if courselike_module.entrance_exam_enabled:
remove_entrance_exam_milestone_reference(request, courselike_key)
log.info(
"entrance exam milestone content reference for course %s has been removed",
courselike_module.id
)
if not filename.endswith('.tar.gz'):
_save_request_status(request, courselike_string, -1)
return JsonResponse(
{
'ErrMsg': _('We only support uploading a .tar.gz file.'),
'Stage': -1
},
status=415
)
temp_filepath = course_dir / filename
if not course_dir.isdir():
os.mkdir(course_dir)
logging.debug('importing course to {0}'.format(temp_filepath))
# Get upload chunks byte ranges
try:
matches = CONTENT_RE.search(request.META["HTTP_CONTENT_RANGE"])
content_range = matches.groupdict()
except KeyError: # Single chunk
# no Content-Range header, so make one that will work
content_range = {'start': 0, 'stop': 1, 'end': 2}
# stream out the uploaded files in chunks to disk
if int(content_range['start']) == 0:
mode = "wb+"
else:
mode = "ab+"
size = os.path.getsize(temp_filepath)
# Check to make sure we haven't missed a chunk
# This shouldn't happen, even if different instances are handling
# the same session, but it's always better to catch errors earlier.
if size < int(content_range['start']):
_save_request_status(request, courselike_string, -1)
log.warning(
"Reported range %s does not match size downloaded so far %s",
content_range['start'],
size
)
return JsonResponse(
{
'ErrMsg': _('File upload corrupted. Please try again'),
'Stage': -1
},
status=409
)
# The last request sometimes comes twice. This happens because
# nginx sends a 499 error code when the response takes too long.
elif size > int(content_range['stop']) and size == int(content_range['end']):
return JsonResponse({'ImportStatus': 1})
with open(temp_filepath, mode) as temp_file:
for chunk in request.FILES['course-data'].chunks():
temp_file.write(chunk)
size = os.path.getsize(temp_filepath)
if int(content_range['stop']) != int(content_range['end']) - 1:
# More chunks coming
return JsonResponse({
"files": [{
"name": filename,
"size": size,
"deleteUrl": "",
"deleteType": "",
"url": reverse_course_url('import_handler', courselike_key),
"thumbnailUrl": ""
}]
})
# Send errors to client with stage at which error occurred.
except Exception as exception: # pylint: disable=broad-except
_save_request_status(request, courselike_string, -1)
if course_dir.isdir():
shutil.rmtree(course_dir)
log.info("Course import %s: Temp data cleared", courselike_key)
log.exception(
"error importing course"
)
return JsonResponse(
{
'ErrMsg': str(exception),
'Stage': -1
},
status=400
)
# try-finally block for proper clean up after receiving last chunk.
try:
# This was the last chunk.
log.info("Course import %s: Upload complete", courselike_key)
_save_request_status(request, courselike_string, 1)
tar_file = tarfile.open(temp_filepath)
try:
safetar_extractall(tar_file, (course_dir + '/').encode('utf-8'))
except SuspiciousOperation as exc:
_save_request_status(request, courselike_string, -1)
return JsonResponse(
{
'ErrMsg': 'Unsafe tar file. Aborting import.',
'SuspiciousFileOperationMsg': exc.args[0],
'Stage': -1
},
status=400
)
finally:
tar_file.close()
log.info("Course import %s: Uploaded file extracted", courselike_key)
_save_request_status(request, courselike_string, 2)
# find the 'course.xml' file
def get_all_files(directory):
"""
For each file in the directory, yield a 2-tuple of (file-name,
directory-path)
"""
for dirpath, _dirnames, filenames in os.walk(directory):
for filename in filenames:
yield (filename, dirpath)
def get_dir_for_fname(directory, filename):
"""
Returns the dirpath for the first file found in the directory
with the given name. If there is no file in the directory with
the specified name, return None.
"""
for fname, dirpath in get_all_files(directory):
if fname == filename:
return dirpath
return None
dirpath = get_dir_for_fname(course_dir, root_name)
if not dirpath:
_save_request_status(request, courselike_string, -2)
return JsonResponse(
{
'ErrMsg': _('Could not find the {0} file in the package.').format(root_name),
'Stage': -2
},
status=415
)
dirpath = os.path.relpath(dirpath, data_root)
logging.debug('found %s at %s', root_name, dirpath)
log.info("Course import %s: Extracted file verified", courselike_key)
_save_request_status(request, courselike_string, 3)
with dog_stats_api.timer(
'courselike_import.time',
tags=[u"courselike:{}".format(courselike_key)]
):
courselike_items = import_func(
modulestore(), request.user.id,
settings.GITHUB_REPO_ROOT, [dirpath],
load_error_modules=False,
static_content_store=contentstore(),
target_id=courselike_key
)
new_location = courselike_items[0].location
logging.debug('new course at %s', new_location)
log.info("Course import %s: Course import successful", courselike_key)
_save_request_status(request, courselike_string, 4)
# Send errors to client with stage at which error occurred.
except Exception as exception: # pylint: disable=broad-except
log.exception(
"error importing course"
)
return JsonResponse(
{
'ErrMsg': str(exception),
'Stage': -session_status[courselike_string]
},
status=400
)
finally:
if course_dir.isdir():
shutil.rmtree(course_dir)
log.info("Course import %s: Temp data cleared", courselike_key)
# set failed stage number with negative sign in case of unsuccessful import
if session_status[courselike_string] != 4:
_save_request_status(request, courselike_string, -abs(session_status[courselike_string]))
# status == 4 represents that course has been imported successfully.
if session_status[courselike_string] == 4 and root_name == COURSE_ROOT:
# Reload the course so we have the latest state
course = modulestore().get_course(courselike_key)
if course.entrance_exam_enabled:
entrance_exam_chapter = modulestore().get_items(
course.id,
qualifiers={'category': 'chapter'},
settings={'is_entrance_exam': True}
)[0]
metadata = {'entrance_exam_id': unicode(entrance_exam_chapter.location)}
CourseMetadata.update_from_dict(metadata, course, request.user)
add_entrance_exam_milestone(course.id, entrance_exam_chapter)
log.info("Course %s Entrance exam imported", course.id)
return JsonResponse({'Status': 'OK'})
elif request.method == 'GET': # assume html
status_url = reverse_course_url(
"import_status_handler", courselike_key, kwargs={'filename': "fillerName"}
)
return render_to_response('import.html', {
context_name: courselike_module,
'successful_import_redirect_url': successful_url,
'import_status_url': status_url,
'library': isinstance(courselike_key, LibraryLocator)
})
else:
return HttpResponseNotFound()
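The import handler above calls shutil.rmtree(course_dir) from both its except block and its finally block, each time guarded by an isdir() check, so the cleanup is safe to reach twice and safe when the directory was never created. A minimal standalone version of that guard (using plain os.path instead of the path class in the example):
import os
import shutil

def clear_temp_data(course_dir):
    # Guarding with isdir() makes this idempotent: it can run from an
    # error path and again from a finally block without raising.
    if os.path.isdir(course_dir):
        shutil.rmtree(course_dir)
shutil.rmtree(path, ignore_errors=True) is a terser alternative when failures during removal are acceptable to ignore.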
0
Example 69
Project: skarphed Source File: template.py
@classmethod
def install_from_data(cls, data):
"""
Receives .tar.gz'ed data and generates template data from it.
First validates the data. While validating, it tracks all occurring
errors in the errorlog. If a severe error happens during validation,
the method stops before doing any write operations and returns
the errorlog to the client.
Otherwise, it executes the installation and returns all
non-severe errors (warnings).
def cleanup(path):
shutil.rmtree(path)
#TODO: Mutex this operation
errorlog = []
configuration = Configuration()
webpath = configuration.get_entry("core.webpath")
temp_installpath = webpath+"/tpl_install"
os.mkdir(temp_installpath)
tar = open(temp_installpath+"/tpl.tar.gz","wb")
tar.write(data)
tar.close()
tar = tarfile.open(temp_installpath+"/tpl.tar.gz","r:gz")
tar.extractall(temp_installpath)
tar.close()
os.unlink(temp_installpath+"/tpl.tar.gz")
manifest_file = open(temp_installpath+"/manifest.json","r")
try:
manifest = JSONDecoder().decode(manifest_file.read())
except ValueError,e:
errorlog.append({'severity':1,
'type':'PackageFile',
'msg':'JSON seems to be corrupt'})
cleanup(temp_installpath)
return errorlog
manifest_file.close()
#BEGIN TO VALIDATE DATA
try:
f = open(temp_installpath+"/general.css")
general_css = f.read()
f.close()
except IOError,e:
errorlog.append({'severity':1,
'type':'PackageFile',
'msg':'File not in Package general.css'})
css_manager = CSSManager()
general_csspropertyset = None
try:
general_csspropertyset = css_manager.create_csspropertyset_from_css(general_css)
general_csspropertyset.set_type_general()
except Exception, e:
errorlog.append({'severity':1,
'type':'CSS-Data',
'msg':'General CSS File does not Contain Valid CSS '+str(e)})
pagedata = [] # Prepared filedata for execution into Database
for page in manifest['pages']:
if page['filename'].endswith(".html"):
name = page['filename'].replace(".html","",1)
elif page['filename'].endswith(".htm"):
name = page['filename'].replace(".htm","",1)
else:
errorlog.append({'severity':1,
'type':'PageData',
'msg':'Invalid format (allowed are .html and .htm): '+page['filename']})
continue
try:
f = open(temp_installpath+"/"+page['filename'])
html = f.read()
f.close()
except IOError,e:
errorlog.append({'severity':1,
'type':'PageFile',
'msg':'File not in Package '+page['filename']})
continue
try:
f = open(temp_installpath+"/"+name+"_head.html","r")
html_head = f.read()
f.close()
except IOError,e:
errorlog.append({'severity':0,
'type':'PageFile',
'msg':'File not in Package '+name+"_head.html"})
html_head = ""
try:
f = open(temp_installpath+"/static/"+name+".css")
css = f.read()
f.close()
except IOError,e:
errorlog.append({'severity':1,
'type':'PageFile',
'msg':'File not in Package static/'+name+".css"})
continue
try:
f = open(temp_installpath+"/static/"+name+"_minimap.png","rb")
minimap = f.read()
f.close()
os.unlink(temp_installpath+"/static/"+name+"_minimap.png")
except IOError,e:
errorlog.append({'severity':0,
'type':'PageFile',
'msg':'File not in Package static/'+name+"_minimap.png"})
minimap = None
pagedata.append({'name':page['name'],
'desc':page['desc'],
'html_body':html,
'html_head':html_head,
'css':css,
'minimap':minimap,
'internal_name':name})
if len(errorlog) > 0:
is_severe_error = False
for error in errorlog:
if error['severity'] >= 1:
is_severe_error = True
break
if is_severe_error:
cleanup(temp_installpath)
return errorlog
# BEGIN TO WRITE DATA
#release maintenance mode at the end?
release_maintenance_mode = not cls.is_template_installed()
#uninstall old template
if cls.is_template_installed():
old_template = cls.get_current_template()
old_template.uninstall()
new_template = Template()
new_template.set_name(manifest['name'])
new_template.set_description(manifest['description'])
new_template.set_author(manifest['author'])
#create pages
for page in pagedata:
Page.create(page['name'],
page['internal_name'],
page['desc'],
page['html_body'],
page['html_head'],
page['css'],
page['minimap'])
#put binary into database
for bin_filename in os.listdir(temp_installpath+"/static"):
binary=None
try:
bin_file = open(temp_installpath+"/static/"+bin_filename,"rb")
bin_data = bin_file.read()
bin_file.close()
# TODO: Find more generic way to determine mimetype
if bin_filename.endswith(".png"):
binary = Binary.create("image/png", bin_data)
if bin_filename.endswith(".jpeg") or bin_filename.endswith(".jpg"):
binary = Binary.create("image/jpeg", bin_data)
else:
binary = Binary.create("application/octet-stream", bin_data)
if binary is not None:
binary.set_filename(bin_filename)
binary.store()
new_template.add_binary(binary.get_id())
except IOError, e:
errorlog.append({'severity':0,
'type':'PageFile',
'msg':'File seems broken static/'+bin_filename})
#read general.css into CSSPropertysets
general_csspropertyset.store()
new_template.store()
cleanup(temp_installpath)
#create a default view if there isn't one
View.create_default_view()
if release_maintenance_mode:
Core().deactivate_maintenance_mode()
return errorlog
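install_from_data defines a local cleanup(path) wrapper around shutil.rmtree and has to remember to call it before every early return. A context manager is one way to make that exit-path-proof; a minimal sketch under the assumption that the scratch directory can be created up front (names here are hypothetical):
import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def install_scratch():
    path = tempfile.mkdtemp(prefix="tpl_install")
    try:
        yield path
    finally:
        shutil.rmtree(path)  # runs on every exit path, early returns included

with install_scratch() as temp_installpath:
    open(os.path.join(temp_installpath, "manifest.json"), "w").close()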
0
Example 70
Project: pip-update-requirements Source File: install.py
def run(self, options, args):
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
if options.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.download_dir:
warnings.warn(
"pip install --download has been deprecated and will be "
"removed in the future. Pip now has a download command that "
"should be used instead.",
RemovedInPip10Warning,
)
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if options.prefix_path:
raise CommandError(
"Can not combine '--user' and '--prefix' as they imply "
"different installation locations"
)
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
install_options.append('--prefix=')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
with self._build_session(options) as session:
finder = self._build_package_finder(options, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
if options.cache_dir and not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"by the current user and caching wheels has been "
"disabled. check the permissions and owner of that "
"directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
require_hashes=options.require_hashes,
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
if (options.download_dir or not wheel or not
options.cache_dir):
# on -d don't do complex things like building
# wheels, and don't try to build wheels when wheel is
# not installed.
requirement_set.prepare_files(finder)
else:
# build wheels before install.
wb = WheelBuilder(
requirement_set,
finder,
build_options=[],
global_options=[],
)
# Ignore the result: a failed wheel will be
# installed from the sdist/vcs whatever.
wb.build(autobuilding=True)
if not options.download_dir:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
prefix=options.prefix_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
if options.target_dir:
ensure_dir(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
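The --target handling above distinguishes three cases before moving each item into place: symlinks are skipped with a warning, directories are removed with shutil.rmtree(), and plain files with os.remove(), since rmtree raises when pointed at a file or at a symlink. A condensed sketch of that dispatch (the paths are hypothetical, not pip's internals):
import os
import shutil

def replace_target_item(lib_dir, target_dir, item):
    target_item_dir = os.path.join(target_dir, item)
    if os.path.islink(target_item_dir):
        return  # never silently replace links
    if os.path.isdir(target_item_dir):
        shutil.rmtree(target_item_dir)  # directories need rmtree
    elif os.path.exists(target_item_dir):
        os.remove(target_item_dir)      # plain files need os.remove
    shutil.move(os.path.join(lib_dir, item), target_item_dir)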
0
Example 71
Project: EyeWitness Source File: EyeWitness.py
def create_cli_parser():
parser = argparse.ArgumentParser(
add_help=False, description="EyeWitness is a tool used to capture\
screenshots from a list of URLs")
parser.add_argument('-h', '-?', '--h', '-help',
'--help', action="store_true", help=argparse.SUPPRESS)
protocols = parser.add_argument_group('Protocols')
protocols.add_argument('--web', default=False, action='store_true',
help='HTTP Screenshot using Selenium')
protocols.add_argument('--headless', default=False, action='store_true',
help='HTTP Screenshot using PhantomJS Headless')
protocols.add_argument('--rdp', default=False, action='store_true',
help='Screenshot RDP Services')
protocols.add_argument('--vnc', default=False, action='store_true',
help='Screenshot Authless VNC services')
protocols.add_argument('--all-protocols', default=False,
action='store_true',
help='Screenshot all supported protocols, \
using Selenium for HTTP')
input_options = parser.add_argument_group('Input Options')
input_options.add_argument('-f', metavar='Filename', default=None,
help='Line-separated file containing URLs to \
capture')
input_options.add_argument('-x', metavar='Filename.xml', default=None,
help='Nmap XML or .Nessus file')
input_options.add_argument('--single', metavar='Single URL', default=None,
help='Single URL/Host to capture')
input_options.add_argument('--no-dns', default=False, action='store_true',
help='Skip DNS resolution when connecting to \
websites')
timing_options = parser.add_argument_group('Timing Options')
timing_options.add_argument('--timeout', metavar='Timeout', default=7, type=int,
help='Maximum number of seconds to wait while\
requesting a web page (Default: 7)')
timing_options.add_argument('--jitter', metavar='# of Seconds', default=0,
type=int, help='Randomize URLs and add a random\
delay between requests')
timing_options.add_argument('--threads', metavar='# of Threads', default=10,
type=int, help='Number of threads to use while using\
file based input')
report_options = parser.add_argument_group('Report Output Options')
report_options.add_argument('-d', metavar='Directory Name',
default=None,
help='Directory name for report output')
report_options.add_argument('--results', metavar='Hosts Per Page',
default=25, type=int, help='Number of Hosts per\
page of the report')
report_options.add_argument('--no-prompt', default=False,
action='store_true',
help='Don\'t prompt to open the report')
http_options = parser.add_argument_group('Web Options')
http_options.add_argument('--user-agent', metavar='User Agent',
default=None, help='User Agent to use for all\
requests')
http_options.add_argument('--cycle', metavar='User Agent Type',
default=None, help='User Agent Type (Browser, \
Mobile, Crawler, Scanner, Misc, All)')
http_options.add_argument('--difference', metavar='Difference Threshold',
default=50, type=int, help='Difference threshold\
when determining if user agent requests are\
close \"enough\" (Default: 50)')
http_options.add_argument('--proxy-ip', metavar='127.0.0.1', default=None,
help='IP of web proxy to go through')
http_options.add_argument('--proxy-port', metavar='8080', default=None,
type=int, help='Port of web proxy to go through')
http_options.add_argument('--show-selenium', default=False,
action='store_true', help='Show display for selenium')
http_options.add_argument('--resolve', default=False,
action='store_true', help=("Resolve IP/Hostname"
" for targets"))
http_options.add_argument('--add-http-ports', default=[],
type=lambda s:[int(i) for i in s.split(",")],
help=("Comma-seperated additional port(s) to assume "
"are http (e.g. '8018,8028')"))
http_options.add_argument('--add-https-ports', default=[],
type=lambda s:[int(i) for i in s.split(",")],
help=("Comma-seperated additional port(s) to assume "
"are https (e.g. '8018,8028')"))
http_options.add_argument('--only-ports', default=[],
type=lambda s:[int(i) for i in s.split(",")],
help=("Comma-seperated list of exclusive ports to "
"use (e.g. '80,8080')"))
http_options.add_argument('--prepend-https', default=False, action='store_true',
help='Prepend http:// and https:// to URLs without either')
http_options.add_argument('--vhost-name', default=None,metavar='hostname', help='Hostname to use in Host header (headless + single mode only)')
http_options.add_argument(
'--active-scan', default=False, action='store_true',
help='Perform live login attempts to identify credentials or login pages.')
resume_options = parser.add_argument_group('Resume Options')
resume_options.add_argument('--resume', metavar='ew.db',
default=None, help='Path to db file if you want to resume')
args = parser.parse_args()
args.date = time.strftime('%m/%d/%Y')
args.time = time.strftime('%H:%M:%S')
if args.h:
parser.print_help()
sys.exit()
if args.d is not None:
if args.d.startswith('/') or re.match(
'^[A-Za-z]:\\\\', args.d) is not None:
args.d = args.d.rstrip('/')
args.d = args.d.rstrip('\\')
else:
args.d = os.path.join(os.getcwd(), args.d)
if not os.access(os.path.dirname(args.d), os.W_OK):
print '[*] Error: Please provide a valid folder name/path'
parser.print_help()
sys.exit()
else:
if os.path.isdir(args.d):
overwrite_dir = raw_input(('Directory Exists! Do you want to '
'overwrite? [y/n] '))
overwrite_dir = overwrite_dir.lower().strip()
if overwrite_dir == 'n':
print('Quitting...Restart and provide the proper '
'directory to write to!')
sys.exit()
elif overwrite_dir == 'y':
shutil.rmtree(args.d)
pass
else:
print('Quitting since you didn\'t provide '
'a valid response...')
sys.exit()
else:
output_folder = args.date.replace(
'/', '') + '_' + args.time.replace(':', '')
args.d = os.path.join(os.getcwd(), output_folder)
args.log_file_path = os.path.join(args.d, 'logfile.log')
if args.f is None and args.single is None and args.resume is None and args.x is None:
print("[*] Error: You didn't specify a file! I need a file containing "
"URLs!")
parser.print_help()
sys.exit()
if not any((args.resume, args.web, args.vnc, args.rdp, args.all_protocols, args.headless)):
print "[*] Error: You didn't give me an action to perform."
print "[*] Error: Please use --web, --rdp, or --vnc!\n"
parser.print_help()
sys.exit()
if all((args.web, args.headless)):
print "[*] Error: Choose either web or headless"
parser.print_help()
sys.exit()
if args.vhost_name and not all((args.single, args.headless)):
print "[*] Error: vhostname can only be used in headless+single mode"
sys.exit()
if args.proxy_ip is not None and args.proxy_port is None:
print "[*] Error: Please provide a port for the proxy!"
parser.print_help()
sys.exit()
if args.proxy_port is not None and args.proxy_ip is None:
print "[*] Error: Please provide an IP for the proxy!"
parser.print_help()
sys.exit()
if args.resume:
if not os.path.isfile(args.resume):
print(" [*] Error: No valid DB file provided for resume!")
sys.exit()
if args.all_protocols:
args.web = True
args.vnc = True
args.rdp = True
args.ua_init = False
return args
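EyeWitness only calls shutil.rmtree(args.d) after an explicit 'y' at the overwrite prompt; any other answer exits without deleting anything. The same guarded-overwrite flow, reduced to a sketch (the function name and flag are made up for illustration):
import os
import shutil

def prepare_report_dir(path, overwrite=False):
    if os.path.isdir(path):
        if not overwrite:
            raise SystemExit("Directory exists; pass overwrite=True to replace it")
        shutil.rmtree(path)  # only reached after an explicit opt-in
    os.makedirs(path)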
0
Example 72
Project: barrista Source File: train.py
def _model(result_folder,
epoch_size,
model_name=None,
epoch=None,
write_every=10,
optimizer_name='sgd',
lr_param=0.01,
lr_decay_sched=None,
lr_decay_ratio=0.1,
mom_param=0.9,
wd_param=1E-4,
no_solver=False,
allow_overwrite=False):
"""Get a model and optimizer either loaded or created."""
if epoch is not None:
write_every = min(write_every, epoch)
optimizer_name = str(optimizer_name)
out_folder = os.path.join('results', result_folder)
if optimizer_name == 'sgd':
if lr_decay_sched is not None and lr_decay_sched != '':
lr_policy = 'multistep'
# Each value must be multiplied with the epoch size (possibly
# rounded). This is done later once the batch size is known.
lr_decay_sched = [int(val) for val in lr_decay_sched.split(',')]
else:
lr_policy = 'fixed'
optimizer = sv.SGDSolver(base_lr=lr_param,
momentum=mom_param,
weight_decay=wd_param,
lr_policy=lr_policy,
gamma=lr_decay_ratio,
stepvalue=lr_decay_sched,
snapshot_prefix=os.path.join(
str(out_folder), 'model'))
else:
assert lr_decay_sched is not None, (
"LR decay schedule only supported for SGD!")
optimizer = sv.AdamSolver(base_lr=lr_param, # pylint: disable=redefined-variable-type
weight_decay=wd_param,
snapshot_prefix=os.path.join(
str(out_folder), 'model'))
if os.path.exists(os.path.join('results', result_folder)) and (
not allow_overwrite or (allow_overwrite and model_name is None)):
assert model_name is None, (
"This result path already exists! "
"If you still want to use it, add the flag `--allow_overwrite`.")
logging.basicConfig(
level=logging.INFO,
format=LOGFORMAT,
filename=os.path.join('results', result_folder, 'train.log'),
filemode='a')
_LOGGER.info("Provided arguments: %s.", str(sys.argv))
# Load the data from there.
modelmod = imp.load_source(
'_modelmod',
os.path.join('results', result_folder, 'model.py'))
model = modelmod.MODEL
batch_size = model.blobs['data'].shape[0]
checkpoint_step = round_to_mbsize(epoch_size * write_every, batch_size) / batch_size
if epoch is None:
# Use the last one.
modelfiles = glob.glob(os.path.join('results',
result_folder,
'model_iter_*.caffemodel'))
if len(modelfiles) == 0:
raise Exception("No model found to resume from!")
lastm = natsorted(modelfiles)[-1]
batch_iters = int(os.path.basename(lastm).split('.')[0][11:])
base_iter = batch_iters * batch_size
cmfilename = lastm
ssfilename = cmfilename[:-10] + 'solverstate'
else:
assert epoch % write_every == 0, (
"Writing every %d epochs. Please use a multiple of it!" % write_every)
cmfilename = os.path.join('results',
result_folder,
'model_iter_%d.caffemodel' % (
epoch / write_every * checkpoint_step))
ssfilename = os.path.join('results',
result_folder,
'model_iter_%d.solverstate' % (
epoch / write_every * checkpoint_step))
base_iter = epoch * epoch_size
assert os.path.exists(cmfilename), (
"Could not find model parameter file at %s!" % (cmfilename))
assert os.path.exists(ssfilename), (
"Could not find solverstate file at %s!" % (ssfilename))
_LOGGER.info("Loading model from %s...", cmfilename)
model.load_blobs_from(str(cmfilename))
if not no_solver:
_LOGGER.info("Loading solverstate from %s...", ssfilename)
if lr_decay_sched is not None:
# pylint: disable=protected-access
optimizer._parameter_dict['stepvalue'] = [
round_to_mbsize(val * epoch_size, batch_size)
for val in lr_decay_sched]
optimizer.restore(str(ssfilename), model)
else:
# Create the result folder.
assert model_name is not None, (
"If a new result_folder is specified, a model name must be given!")
out_folder = os.path.join(RESULT_FOLDER, result_folder)
if os.path.exists(out_folder):
# Reset, because an overwrite was requested.
shutil.rmtree(out_folder)
os.mkdir(out_folder)
os.mkdir(os.path.join(out_folder, 'visualizations'))
logging.basicConfig(
level=logging.INFO,
format=LOGFORMAT,
filename=os.path.join(out_folder, 'train.log'),
filemode='w')
_LOGGER.info("Provided arguments: %s.", str(sys.argv))
_LOGGER.info("Result folder created: %s.", out_folder)
_LOGGER.info("Freezing experimental setup...")
# Copy the contents over.
shutil.copy2(os.path.join('models', model_name + '.py'),
os.path.join(out_folder, 'model.py'))
for pyfile in glob.glob(os.path.join(os.path.dirname(__file__),
'*.py')):
shutil.copy2(pyfile,
os.path.join(out_folder, os.path.basename(pyfile)))
_LOGGER.info("Creating model...")
# Get the model.
modelmod = imp.load_source('_modelmod',
os.path.join(out_folder, 'model.py'))
model = modelmod.MODEL
if not no_solver and lr_decay_sched is not None:
batch_size = model.blobs['data'].shape[0]
# pylint: disable=protected-access
optimizer._parameter_dict['stepvalue'] = [
round_to_mbsize(val * epoch_size, batch_size)
for val in lr_decay_sched]
base_iter = 0
if no_solver:
return model, None, out_folder, base_iter
else:
return model, optimizer, out_folder, base_iter
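When overwriting is allowed, the training script above resets the result folder with shutil.rmtree() and immediately rebuilds the directory layout it expects. A compact sketch of that reset step (the folder layout mirrors the example; the function name is made up). Note that rmtree(path, ignore_errors=True) would also swallow errors such as permission problems, so the strict default used here surfaces them instead:
import os
import shutil

def reset_result_folder(out_folder):
    if os.path.exists(out_folder):
        shutil.rmtree(out_folder)  # strict: raises if anything can't be removed
    os.mkdir(out_folder)
    os.mkdir(os.path.join(out_folder, 'visualizations'))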
0
Example 73
Project: ypkg Source File: main.py
def build_package(filename, outputDir):
""" Will in future be moved to a separate part of the module """
spec = YpkgSpec()
if not spec.load_from_path(filename):
print("Unable to continue - aborting")
sys.exit(1)
possibles = ["{}/.solus/packager", "{}/.evolveos/packager"]
packager_name = ypkg2.packager_name
packager_email = ypkg2.packager_email
dflt = True
for item in possibles:
fpath = item.format(os.path.expanduser("~"))
if not os.path.exists(fpath):
continue
try:
c = ConfigObj(fpath)
pname = c["Packager"]["Name"]
pemail = c["Packager"]["Email"]
packager_name = pname
packager_email = pemail
dflt = False
break
except Exception as e:
console_ui.emit_error("Config", "Error in packager config:")
print(e)
dflt = True
break
if dflt:
packager_name = ypkg2.packager_name
packager_email = ypkg2.packager_email
console_ui.emit_warning("Config", "Using default packager values")
print(" Name: {}".format(packager_name))
print(" Email: {}".format(packager_email))
spec.packager_name = packager_name
spec.packager_email = packager_email
# Try to load history
dirn = os.path.dirname(filename)
hist = os.path.join(dirn, "history.xml")
if os.path.exists(hist):
if not spec.load_history(hist):
sys.exit(1)
metadata.initialize_timestamp(spec)
manager = SourceManager()
if not manager.identify_sources(spec):
print("Unable to continue - aborting")
sys.exit(1)
# Dummy content
console_ui.emit_info("Info", "Building {}-{}".
format(spec.pkg_name, spec.pkg_version))
ctx = YpkgContext(spec)
need_verify = []
for src in manager.sources:
if src.cached(ctx):
need_verify.append(src)
continue
if not src.fetch(ctx):
console_ui.emit_error("Source", "Cannot continue without sources")
sys.exit(1)
need_verify.append(src)
for verify in need_verify:
if not verify.verify(ctx):
console_ui.emit_error("Source", "Cannot verify sources")
sys.exit(1)
steps = {
'setup': spec.step_setup,
'build': spec.step_build,
'install': spec.step_install,
'check': spec.step_check,
'profile': spec.step_profile,
}
r_runs = list()
# Before we get started, ensure PGOs are cleaned
if not ctx.clean_pgo():
console_ui.emit_error("Build", "Failed to clean PGO directories")
sys.exit(1)
if not ctx.clean_install():
console_ui.emit_error("Build", "Failed to clean install directory")
sys.exit(1)
if not ctx.clean_pkg():
console_ui.emit_error("Build", "Failed to clean pkg directory")
possible_sets = []
# Emul32 is *always* first
# AVX2 emul32 comes first too so "normal" emul32 can override it
if spec.pkg_emul32:
if spec.pkg_avx2:
# Emul32, avx2 build
possible_sets.append((True, True))
# Normal, no-avx2, emul32 build
possible_sets.append((True, False))
# Build AVX2 before native, but after emul32
if spec.pkg_avx2:
possible_sets.append((False, True))
# Main step, always last
possible_sets.append((False, False))
for emul32, avx2 in possible_sets:
r_steps = list()
c = YpkgContext(spec, emul32=emul32, avx2=avx2)
if spec.step_profile is not None:
c = YpkgContext(spec, emul32=emul32, avx2=avx2)
c.enable_pgo_generate()
r_steps.append(['setup', c])
r_steps.append(['build', c])
r_steps.append(['profile', c])
c = YpkgContext(spec, emul32=emul32, avx2=avx2)
c.enable_pgo_use()
r_steps.append(['setup', c])
r_steps.append(['build', c])
r_steps.append(['install', c])
r_steps.append(['check', c])
else:
c = YpkgContext(spec, emul32=emul32, avx2=avx2)
r_steps.append(['setup', c])
r_steps.append(['build', c])
r_steps.append(['install', c])
r_steps.append(['check', c])
r_runs.append((emul32, avx2, r_steps))
for emul32, avx2, run in r_runs:
if emul32:
console_ui.emit_info("Build", "Building for emul32")
else:
console_ui.emit_info("Build", "Building native package")
if avx2:
console_ui.emit_info("Build", "Building for AVX2 optimisations")
for step, context in run:
# When doing setup, always do pre-work by blasting away any
# existing build directories for the current context and then
# re-extracting sources
if step == "setup":
if not clean_build_dirs(context):
sys.exit(1)
# Only ever extract the primary source ourselves
if spec.pkg_extract:
src = manager.sources[0]
console_ui.emit_info("Source",
"Extracting source")
if not src.extract(context):
console_ui.emit_error("Source",
"Cannot extract sources")
sys.exit(1)
work_dir = manager.get_working_dir(context)
if not os.path.exists(work_dir):
try:
os.makedirs(work_dir, mode=0o0755)
except Exception as e:
console_ui.emit_error("Source", "Error creating directory")
print(e)
sys.exit(1)
r_step = steps[step]
if not r_step:
continue
console_ui.emit_info("Build", "Running step: {}".format(step))
if execute_step(context, r_step, step, work_dir):
console_ui.emit_success("Build", "{} successful".
format(step))
continue
console_ui.emit_error("Build", "{} failed".format(step))
sys.exit(1)
# Add user patterns - each consecutive package has higher priority than the
# package before it, ensuring correct levels of control
gene = PackageGenerator(spec)
count = 0
for pkg in spec.patterns:
for pt in spec.patterns[pkg]:
gene.add_pattern(pt, pkg, priority=PRIORITY_USER + count)
count += 1
idir = ctx.get_install_dir()
bad_dir = os.path.join(idir, "emul32")
if os.path.exists(bad_dir):
shutil.rmtree(bad_dir)
for root, dirs, files in os.walk(idir):
for f in files:
fpath = os.path.join(root, f)
localpath = remove_prefix(fpath, idir)
gene.add_file(localpath)
if len(dirs) == 0 and len(files) == 0:
console_ui.emit_warning("Package", "Including empty directory: {}".
format(remove_prefix(root, idir)))
gene.add_file(remove_prefix(root, idir))
# Handle symlinks to directories.
for d in dirs:
fpath = os.path.join(root, d)
if os.path.islink(fpath):
gene.add_file(remove_prefix(fpath, idir))
if not os.path.exists(ctx.get_packaging_dir()):
try:
os.makedirs(ctx.get_packaging_dir(), mode=0o0755)
except Exception as e:
console_ui.emit_error("Package", "Failed to create pkg dir")
print(e)
sys.exit(1)
exa = PackageExaminer()
exaResults = exa.examine_packages(ctx, gene.packages.values())
if exaResults is None:
console_ui.emit_error("Package", "Failed to correctly examine all "
"packages.")
sys.exit(1)
deps = DependencyResolver()
if not deps.compute_for_packages(ctx, gene, exaResults):
console_ui.emit_error("Dependencies", "Failed to compute all"
" dependencies")
sys.exit(1)
dbgs = ["/usr/lib64/debug", "/usr/lib/debug", "/usr/lib32/debug"]
if ctx.can_dbginfo:
for dbg in dbgs:
fpath = os.path.join(ctx.get_install_dir(), dbg[1:])
if not os.path.exists(fpath):
continue
for root, dirs, files in os.walk(fpath):
# Empty directories in dbginfo we don't care about.
for f in files:
fpath = os.path.join(root, f)
localpath = remove_prefix(fpath, idir)
gene.add_file(localpath)
if len(gene.packages) == 0:
console_ui.emit_error("Package", "No resulting packages found")
wk = "https://wiki.solus-project.com/Packaging"
print("Ensure your files end up in $installdir. Did you mean to "
"use %make_install?\n\nPlease see the wiki: {}".format(wk))
sys.exit(1)
gene.emit_packages()
# TODO: Ensure main is always first
for package in sorted(gene.packages):
pkg = gene.packages[package]
files = sorted(pkg.emit_files())
if len(files) == 0:
console_ui.emit_info("Package", "Skipping empty package: {}".
format(package))
continue
metadata.create_eopkg(ctx, gene, pkg, outputDir)
# Write out the final pspec
metadata.write_spec(ctx, gene, outputDir)
for pkg in spec.patterns:
if pkg in gene.packages:
continue
nm = spec.get_package_name(pkg)
console_ui.emit_warning("Package:{}".format(pkg),
"Did not produce {} by any pattern".format(nm))
# TODO: Consider warning about unused patterns
ctx.clean_pkg()
console_ui.emit_success("Package", "Building complete")
sys.exit(0)
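Note the pruning step midway through: a known-unwanted subtree is removed with shutil.rmtree before the install root is walked for packaging. A reduced sketch of just that step, with a placeholder path standing in for ctx.get_install_dir():

import os
import shutil

install_dir = 'install'  # placeholder for ctx.get_install_dir()
bad_dir = os.path.join(install_dir, 'emul32')
if os.path.exists(bad_dir):
    # Drop the leftover emul32 staging tree so it is never packaged.
    shutil.rmtree(bad_dir)
for root, dirs, files in os.walk(install_dir):
    for f in files:
        print(os.path.join(root, f))  # stand-in for gene.add_file(...)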
0
Example 74
Project: autospec Source File: tarball.py
def download_tarball(url_argument, name_argument, archives, target_dir):
global name
global rawname
global version
global url
global path
global tarball_prefix
global gcov_file
# go naming
global golibpath
global go_pkgname
url = url_argument
tarfile = os.path.basename(url)
pattern_options = [
r"(.*?)[\-_](v*[0-9]+[alpha\+_spbfourcesigedsvstableP0-9\.\-\~]*)\.src\.(tgz|tar|zip)",
r"(.*?)[\-_](v*[0-9]+[alpha\+_sbpfourcesigedsvstableP0-9\.\-\~]*)\.(tgz|tar|zip)",
r"(.*?)[\-_](v*[0-9]+[a-zalpha\+_spbfourcesigedsvstableP0-9\.\-\~]*)\.orig\.tar",
r"(.*?)[\-_](v*[0-9]+[\+_spbfourcesigedsvstableP0-9\.\~]*)(-.*?)?\.tar",
]
for pattern in pattern_options:
p = re.compile(pattern)
m = p.search(tarfile)
if m:
name = m.group(1).strip()
version = m.group(2).strip()
b = version.find("-")
if b >= 0:
version = version[:b]
break
rawname = name
# R package
if url_argument.find("cran.r-project.org") > 0 or url_argument.find("cran.rstudio.com") > 0:
buildpattern.set_build_pattern("R", 10)
files.want_dev_split = 0
buildreq.add_buildreq("clr-R-helpers")
p = re.compile(r"([A-Za-z0-9]+)_(v*[0-9]+[\+_spbfourcesigedsvstableP0-9\.\~\-]*)\.tar\.gz")
m = p.search(tarfile)
if m:
name = "R-" + m.group(1).strip()
rawname = m.group(1).strip()
version = m.group(2).strip()
b = version.find("-")
if b >= 0:
version = version[:b]
if url_argument.find("pypi.python.org") > 0:
buildpattern.set_build_pattern("distutils", 10)
url_argument = "http://pypi.debian.net/" + name + "/" + tarfile
if url_argument.find("pypi.debian.net") > 0:
buildpattern.set_build_pattern("distutils", 10)
if url_argument.find(".cpan.org/CPAN/") > 0:
buildpattern.set_build_pattern("cpan", 10)
if name:
name = "perl-" + name
if url_argument.find(".metacpan.org/") > 0:
buildpattern.set_build_pattern("cpan", 10)
if name:
name = "perl-" + name
if "github.com" in url_argument:
# golibpath = golang_libpath(url_argument)
# go_pkgname = golang_name(url_argument)
# define regex accepted for valid packages
github_patterns = [r"https://github.com/.*/(.*?)/archive/(.*)-final.tar",
r"https://github.com/.*/.*/archive/[0-9a-fA-F]{1,40}\/(.*)\-(.*).tar",
r"https://github.com/.*/(.*?)/archive/(.*).zip",
r"https://github.com/.*/(.*?)/archive/v?(.*).tar"]
for pattern in github_patterns:
p = re.compile(pattern)
m = p.search(url_argument)
if m:
name = m.group(1).strip()
version = m.group(2).strip()
b = version.find("-")
if b > 0:
version = version[:b]
break
if url_argument.find("bitbucket.org") > 0:
p = re.compile(r"https://bitbucket.org/.*/(.*?)/get/[a-zA-Z_-]*([0-9][0-9_.]*).tar")
m = p.search(url_argument)
if m:
name = m.group(1).strip()
version = m.group(2).strip().replace('_', '.')
else:
version = "1"
# ruby
if url_argument.find("rubygems.org/") > 0:
buildpattern.set_build_pattern("ruby", 10)
p = re.compile(r"(.*?)[\-_](v*[0-9]+[alpha\+_spbfourcesigedsvstableP0-9\.\-\~]*)\.gem")
m = p.search(tarfile)
if m:
buildreq.add_buildreq("ruby")
buildreq.add_buildreq("rubygem-rdoc")
name = "rubygem-" + m.group(1).strip()
rawname = m.group(1).strip()
version = m.group(2).strip()
b = version.find("-")
if b >= 0:
version = version[:b]
# override from commandline
if name_argument and name_argument[0] != name:
pattern = name_argument[0] + r"[\-]*(.*)\.(tgz|tar|zip)"
p = re.compile(pattern)
m = p.search(tarfile)
if m:
name = name_argument[0]
rawname = name
version = m.group(1).strip()
b = version.find("-")
if b >= 0 and version.find("-beta") < 0:
version = version[:b]
if version.startswith('.'):
version = version[1:]
else:
name = name_argument[0]
if not name:
split = url_argument.split('/')
if len(split) > 3 and split[-2] in ('archive', 'tarball'):
name = split[-3]
version = split[-1]
if version.startswith('v'):
version = version[1:]
# remove extension
version = '.'.join(version.split('.')[:-1])
if version.endswith('.tar'):
version = '.'.join(version.split('.')[:-1])
b = version.find("-")
if b >= 0 and version.find("-beta") < 0:
b = b + 1
version = version[b:]
if len(version) > 0 and version[0] in ['v', 'r']:
version = version[1:]
assert name != ""
if not target_dir:
build.download_path = os.getcwd() + "/" + name
else:
build.download_path = target_dir
call("mkdir -p %s" % build.download_path)
gcov_path = build.download_path + "/" + name + ".gcov"
if os.path.isfile(gcov_path):
gcov_file = name + ".gcov"
tarball_path = check_or_get_file(url, tarfile)
sha1 = get_sha1sum(tarball_path)
with open(build.download_path + "/upstream", "w") as file:
file.write(sha1 + "/" + tarfile + "\n")
tarball_prefix = name + "-" + version
if tarfile.lower().endswith('.zip'):
tarball_contents = subprocess.check_output(
["unzip", "-l", tarball_path], universal_newlines=True)
if tarball_contents and len(tarball_contents.splitlines()) > 3:
tarball_prefix = tarball_contents.splitlines()[3].rsplit("/")[0].split()[-1]
extract_cmd = "unzip -d {0} {1}".format(build.base_path, tarball_path)
elif tarfile.lower().endswith('.gem'):
tarball_contents = subprocess.check_output(
["gem", "unpack", "--verbose", tarball_path], universal_newlines=True)
extract_cmd = "gem unpack --target={0} {1}".format(build.base_path, tarball_path)
if tarball_contents:
tarball_prefix = tarball_contents.splitlines()[-1].rsplit("/")[-1]
if tarball_prefix.endswith("'"):
tarball_prefix = tarball_prefix[:-1]
else:
extract_cmd, tarball_prefix = build_untar(tarball_path)
if version == "":
version = "1"
print("\n")
print("Processing", url_argument)
print(
"=============================================================================================")
print("Name :", name)
print("Version :", version)
print("Prefix :", tarball_prefix)
with open(build.download_path + "/Makefile", "w") as file:
file.write("PKG_NAME := " + name + "\n")
file.write("URL := " + url_argument + "\n")
file.write("ARCHIVES :=")
for archive in archives:
file.write(" {}".format(archive))
file.write("\n")
file.write("\n")
file.write("include ../common/Makefile.common\n")
shutil.rmtree("{}".format(build.base_path), ignore_errors=True)
os.makedirs("{}".format(build.output_path))
call("mkdir -p %s" % build.download_path)
call(extract_cmd)
path = build.base_path + tarball_prefix
for archive, destination in zip(archives[::2], archives[1::2]):
source_tarball_path = check_or_get_file(archive, os.path.basename(archive))
if source_tarball_path.lower().endswith('.zip'):
tarball_contents = subprocess.check_output(
["unzip", "-l", source_tarball_path], universal_newlines=True)
if tarball_contents and len(tarball_contents.splitlines()) > 3:
source_tarball_prefix = tarball_contents.splitlines()[3].rsplit("/")[0].split()[-1]
extract_cmd = "unzip -d {0} {1}".format(build.base_path, source_tarball_path)
else:
extract_cmd, source_tarball_prefix = build_untar(source_tarball_path)
buildpattern.archive_details[archive + "prefix"] = source_tarball_prefix
call(extract_cmd)
tar_files = glob.glob("{0}{1}/*".format(build.base_path, source_tarball_prefix))
move_cmd = "mv "
for tar_file in tar_files:
move_cmd += tar_file + " "
move_cmd += '{0}/{1}'.format(path, destination)
mkdir_cmd = "mkdir -p "
mkdir_cmd += '{0}/{1}'.format(path, destination)
print("mkdir " + mkdir_cmd)
call(mkdir_cmd)
call(move_cmd)
sha1 = get_sha1sum(source_tarball_path)
with open(build.download_path + "/upstream", "a") as file:
file.write(sha1 + "/" + os.path.basename(archive) + "\n")
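This example uses the ignore_errors=True form of shutil.rmtree, which makes the cleanup idempotent: a missing or half-removed base path no longer raises. A minimal sketch of that call pattern (paths are illustrative stand-ins for build.base_path and build.output_path):

import os
import shutil

base_path = '/tmp/pkg-build/'    # stand-in for build.base_path
output_path = '/tmp/pkg-output'  # stand-in for build.output_path

# Does not raise if base_path is already gone.
shutil.rmtree(base_path, ignore_errors=True)
os.makedirs(output_path)  # raises if output_path already exists, as in the original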
0
Example 75
Project: pelisalacarta Source File: mct.py
def play(url, xlistitem, is_view=None, subtitle=""):
# -- Needed for some sites -----------------------------------
if not url.endswith(".torrent") and not url.startswith("magnet"):
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
# -- Create two folders in downloads for the files -----------
save_path_videos = os.path.join( config.get_setting("downloadpath") , "torrent-videos" )
save_path_torrents = os.path.join( config.get_setting("downloadpath") , "torrent-torrents" )
if not os.path.exists( save_path_torrents ): os.mkdir(save_path_torrents)
# -- Use a torrent file from the web, a magnet link or disk -
if not os.path.isfile(url) and not url.startswith("magnet"):
# -- http - create the torrent file ----------------------
data = url_get(url)
# -- The torrent name will be the one contained in the --
# -- downloaded data. -
re_name = urllib.unquote( scrapertools.get_match(data,':name\d+:(.*?)\d+:') )
torrent_file = filetools.join(save_path_torrents, filetools.encode(re_name + '.torrent'))
f = open(torrent_file,'wb')
f.write(data)
f.close()
elif os.path.isfile(url):
# -- file - to use torrents from the local disk ----------
torrent_file = url
else:
# -- magnet ---------------------------------------------
torrent_file = url
# -----------------------------------------------------------
# -- MCT - MiniClienteTorrent -------------------------------
ses = lt.session()
print "### Init session ########"
print lt.version
print "#########################"
ses.add_dht_router("router.bittorrent.com",6881)
ses.add_dht_router("router.utorrent.com",6881)
ses.add_dht_router("router.bitcomet.com",554)
ses.add_dht_router("dht.transmissionbt.com",6881)
trackers = [
"http://exodus.desync.com:6969/announce",
"udp://tracker.publicbt.com:80/announce",
"udp://tracker.openbittorrent.com:80/announce",
"http://tracker.torrentbay.to:6969/announce",
"http://fr33dom.h33t.com:3310/announce",
"http://tracker.pow7.com/announce",
"udp://tracker.ccc.de:80/announce",
"http://tracker.bittorrent.am:80/announce",
"http://denis.stalker.h3q.com:6969/announce",
"udp://tracker.prq.to:80/announce",
"udp://tracker.istole.it:80/announce",
"udp://open.demonii.com:1337",
"http://9.rarbg.com:2710/announce",
"http://announce.torrentsmd.com:6969/announce",
"http://bt.careland.com.cn:6969/announce",
"http://explodie.org:6969/announce",
"http://mgtracker.org:2710/announce",
"http://tracker.best-torrents.net:6969/announce",
"http://tracker.tfile.me/announce",
"http://tracker.torrenty.org:6969/announce",
"http://tracker1.wasabii.com.tw:6969/announce",
"udp://9.rarbg.com:2710/announce",
"udp://9.rarbg.me:2710/announce",
"udp://coppersurfer.tk:6969/announce",
"udp://tracker.btzoo.eu:80/announce",
"http://www.spanishtracker.com:2710/announce",
"http://www.todotorrents.com:2710/announce",
]
video_file = ""
# -- magnet2torrent -----------------------------------------
if torrent_file.startswith("magnet"):
try:
tempdir = tempfile.mkdtemp()
except IOError:
tempdir = os.path.join(save_path_torrents , "temp")
if not os.path.exists(tempdir): os.mkdir(tempdir)
params = {
'save_path': tempdir,
'trackers':trackers,
'storage_mode': lt.storage_mode_t.storage_mode_allocate,
'paused': False,
'auto_managed': True,
'duplicate_is_error': True
}
h = lt.add_magnet_uri(ses, torrent_file, params)
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
while not h.has_metadata():
message, porcent, msg_file, s, download = getProgress(h, "Creando torrent desde magnet")
dp.update(porcent, message, msg_file)
if s.state == 1: download = 1
if dp.iscanceled():
dp.close()
remove_files( download, torrent_file, video_file, ses, h )
return
dp.close()
info = h.get_torrent_info()
data = lt.bencode( lt.create_torrent(info).generate() )
#torrent_file = os.path.join(save_path_torrents, info.name() + ".torrent")
torrent_file = os.path.join(save_path_torrents, unicode(info.name(), "utf-8", errors="replace") + ".torrent")
f = open(torrent_file,'wb')
f.write(data)
f.close()
ses.remove_torrent(h)
shutil.rmtree(tempdir)
# -----------------------------------------------------------
# -- Torrent files -------------------------------------------
e = lt.bdecode(open(torrent_file, 'rb').read())
info = lt.torrent_info(e)
# -- The largest file (or one of the largest) is taken to -
# -- be the video, or at least the video used as the -
# -- reference for the file type -
print "##### Archivos ## %s ##" % len(info.files())
_index_file, _video_file, _size_file = get_video_file(info)
_video_file_ext = os.path.splitext( _video_file )[1]
if _video_file_ext == ".avi" or _video_file_ext == ".mp4":
print "##### storage_mode_t.storage_mode_allocate ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_allocate } )
else:
print "##### storage_mode: none ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_sparse } )
# -----------------------------------------------------------
# -- Sequential download - piece 1, piece 2, ... ------------
h.set_sequential_download(True)
h.force_reannounce()
h.force_dht_announce()
# -- Prioritize/select the file -----------------------------
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1:
_index = _index_file
video_file = _video_file
video_size = _size_file
# -- Initialize variables for the automatic 'pause' when -
# -- the video approaches an incomplete piece -
is_greater_num_pieces = False
is_greater_num_pieces_plus = False
is_greater_num_pieces_pause = False
#porcent4first_pieces = int( video_size / 1073741824 )
porcent4first_pieces = int( video_size * 0.000000005 )
if porcent4first_pieces < 10: porcent4first_pieces = 10
if porcent4first_pieces > 100: porcent4first_pieces = 100
#num_pieces_to_resume = int( video_size / 1610612736 )
num_pieces_to_resume = int( video_size * 0.0000000025 )
if num_pieces_to_resume < 5: num_pieces_to_resume = 5
if num_pieces_to_resume > 25: num_pieces_to_resume = 25
print "##### porcent4first_pieces ## %s ##" % porcent4first_pieces
print "##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume
# -- Prioritize or select the pieces of the file to be -
# -- played, using 'file_priorities' -
piece_set = set_priority_pieces(h, _index, video_file, video_size)
# -- Create the progress dialog for the first loop ----------
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
_pieces_info = {}
# -- Two nested loops ---------------------------------------
# -- Download - first loop -
while not h.is_seed():
s = h.status()
xbmc.sleep(100)
# -- Fetch the progress data ----------------------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
# -- If the state is 'checking', a download exists -------
# -- 'download' is used to know whether there is -
# -- downloaded data, for the 'remove_files' dialog -
if s.state == 1: download = 1
# -- Player - play --------------------------------------
# -- Check whether the pieces needed to start the -
# -- video have been completed ...... -
first_pieces = True
_p = ""
_c = 0
for i in range( piece_set[0], piece_set[porcent4first_pieces] ):
_p+= "[%s:%s]" % ( i, h.have_piece(i) )
first_pieces&= h.have_piece(i)
if h.have_piece(i): _c+= 1
_pieces_info = {'current': 0, 'continuous': "%s/%s" % (_c,porcent4first_pieces), 'have': h.status().num_pieces, 'len': len(piece_set)}
_p = "##### first_pieces [%s/%s][%s]: " % ( _c, porcent4first_pieces, len(piece_set) ) + _p
print _p
# -- -------------------------------------------------- -
if is_view != "Ok" and first_pieces:
print "##### porcent [%.2f%%]" % (s.progress * 100)
is_view = "Ok"
dp.close()
# -- Player - play the video ------------------------
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
#ren_video_file = os.path.join( save_path_videos, video_file ).replace('\\','\\\\')
ren_video_file = os.path.join( save_path_videos, video_file )
playlist.add( ren_video_file, xlistitem )
#playlist.add( os.path.join( save_path_videos, video_file ), xlistitem )
#playlist.add( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20'), xlistitem )
player = play_video( xbmc.PLAYER_CORE_AUTO )
player.play(playlist)
'''
# -- Player - play the video ------------------------
player = play_video()
#player.play( os.path.join( save_path_videos, video_file ) )
player.play( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20') )
'''
#player.play( os.path.join( save_path_videos, video_file ) )
# -- Cancellation counter for the automatic -
# -- 'pause' window -
is_greater_num_pieces_canceled = 0
continuous_pieces = 0
porcent_time = 0.00
current_piece = 0
# -- Prevent kodi from resuming a file that was ----
# -- played earlier and then deleted, so playback -
# -- is not attempted on a piece that has not yet -
# -- been completed, which would trigger the -
# -- automatic 'pause' -
not_resume = True
# -- Subtitles flag
_sub = False
# -- Second loop - Player - event handling ----------
while player.isPlaying():
xbmc.sleep(100)
# -- Add subtitles
if subtitle!="" and not _sub:
_sub = True
player.setSubtitles(subtitle)
# -- Prevent kodi from resuming at the start ----
# -- of the download of a known file -
if not_resume:
player.seekTime(0)
not_resume = False
#xbmc.sleep(1000)
# -- Automatic 'pause' control -
continuous_pieces = count_completed_continuous_pieces(h, piece_set)
if xbmc.Player().isPlaying():
# -- Video progress percentage --------------
porcent_time = player.getTime() / player.getTotalTime() * 100
# -- Piece currently being played -----------
current_piece = int( porcent_time / 100 * len(piece_set) )
# -- Control flags --------------------------
is_greater_num_pieces = (current_piece > continuous_pieces - num_pieces_to_resume)
is_greater_num_pieces_plus = (current_piece + porcent4first_pieces > continuous_pieces)
is_greater_num_pieces_finished = (current_piece + porcent4first_pieces >= len(piece_set))
# -- Trigger automatic 'pause' --------------
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
is_greater_num_pieces_pause = True
player.pause()
# -- Log ------------------------------------
_TotalTime = player.getTotalTime()
_Time = player.getTime()
_print_log = "\n##### Player ##################################"
_print_log+= "\nTamaño del vídeo: %s" % video_size
_print_log+= "\nTotal piezas: %s" % len(piece_set)
_print_log+= "\nPiezas contiguas: %s" % continuous_pieces
_print_log+= "\n-----------------------------------------------"
_print_log+= "\nVídeo-Total segundos: %s" % _TotalTime
_print_log+= "\nVídeo-Progreso segundos: %s" % _Time
_print_log+= "\nVídeo-Progreso porcentaje: %.2f%%" % porcent_time
_print_log+= "\n-----------------------------------------------"
_print_log+= "\ncurrent_piece: %s" % current_piece
_print_log+= "\nis_greater_num_pieces: %s" % is_greater_num_pieces
_print_log+= "\nis_greater_num_pieces_plus: %s" % is_greater_num_pieces_plus
_print_log+= "\nis_greater_num_pieces_pause: %s" % is_greater_num_pieces_pause
_print_log+= "\nis_greater_num_pieces_finished: %s" % is_greater_num_pieces_finished
_print_log+= "\nPieza que se está visionando: %.2f" % ( porcent_time / 100 * len(piece_set) )
_print_log+= "\nOffset que se está visionando: %.2f" % ( porcent_time / 100 * video_size )
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
_print_log+= "\n+++++++++++++++++++++++++++++++++++++++++++++++"
_print_log+= "\nPausa con:"
_print_log+= "\n current_piece = %s" % current_piece
_print_log+= "\n continuous_pieces = %s" % continuous_pieces
_print_log+= "\n###############################################"
print _print_log
# -------------------------------------------
_pieces_info = {'current': current_piece, 'continuous': continuous_pieces, 'have': h.status().num_pieces, 'len': len(piece_set)}
# -- Close the progress dialog ------------------
if player.resumed:
dp.close()
# -- Show the progress dialog -------------------
if player.paused:
# -- Create the dialog if it does not exist -
if not player.statusDialogoProgress:
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
player.setDialogoProgress()
# -- Status dialogs during playback ---------
if not h.is_seed():
# -- Fetch the progress data ------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
dp.update(porcent, message, msg_file)
else:
dp.update(100, "Descarga completa: " + video_file)
# -- Progress dialog cancelled during playback -
# -- Continue -
if dp.iscanceled():
dp.close()
player.pause()
# -- Progress dialog cancelled during playback -
# -- in the automatic 'pause' window. -
# -- Stop if the counter reaches 3 -
if dp.iscanceled() and is_greater_num_pieces_pause:
is_greater_num_pieces_canceled+= 1
if is_greater_num_pieces_canceled == 3:
player.stop()
# -- Disable automatic 'pause' and ----------
# -- reset the cancellation counter -
if not dp.iscanceled() and not is_greater_num_pieces_plus and is_greater_num_pieces_pause:
dp.close()
player.pause()
is_greater_num_pieces_pause = False
is_greater_num_pieces_canceled = 0
# -- The user cancelled playback ------------
# -- Finish -
if player.ended:
# -- Delete-files dialog ----------------
remove_files( download, torrent_file, video_file, ses, h )
return
# -- Kodi - the playback window was closed ---------------
# -- Continue | Finish -
if is_view == "Ok" and not xbmc.Player().isPlaying():
if info.num_files() == 1:
# -- Continue-or-finish dialog ------------------
d = xbmcgui.Dialog()
ok = d.yesno('pelisalacarta-MCT', 'XBMC-Kodi Cerró el vídeo.', '¿Continuar con la sesión?')
else: ok = False
# -- YES --------------------------------------------
if ok:
# -- Continue: ----------------------------------
is_view=None
else:
# -- Finish: ------------------------------------
# -- Check whether the video belongs to a -------
# -- list of files -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Delete-files dialog --------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- File list. Options dialog --------------
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
# -- Show progress before playback ------------------------
if is_view != "Ok" :
dp.update(porcent, message, msg_file)
# -- Progress dialog cancelled before playback -----------
# -- Finish -
if dp.iscanceled():
dp.close()
# -- Check whether the video belongs to a list of -
# -- files -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Delete-files dialog ------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- File list. Options dialog ------------------
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
# -- Kodi - Error? - Should never get here ------------------
if is_view == "Ok" and not xbmc.Player().isPlaying():
dp.close()
# -- Delete-files dialog --------------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
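The magnet branch above creates a scratch directory only long enough to resolve the torrent metadata, then deletes it with shutil.rmtree once the .torrent file has been written elsewhere. A stripped-down, libtorrent-free sketch of that lifecycle (with a try/finally added for safety over the original's inline call):

import shutil
import tempfile

tempdir = tempfile.mkdtemp()
try:
    pass  # resolve magnet metadata into tempdir here
finally:
    # The scratch directory is only needed while metadata is resolved.
    shutil.rmtree(tempdir)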
0
Example 76
Project: ck-wa Source File: module.py
def run(i):
"""
Input: {
(data_uoa) - workload to run (see "ck list wa").
(target) - machine UOA (see "ck list machine")
(record) - if 'yes', record result in repository in 'experiment' standard
(skip-record-raw) - if 'yes', skip record raw results
(overwrite) - if 'yes', do not record date and time in result directory, but overwrite wa-results
(repetitions) - statistical repetitions (default=3); for now statistical analysis is not used (TBD)
(config) - customize config
(params) - workload params
(scenario) - use pre-defined scenario (see ck list wa-scenario)
(keep) - if 'yes', keep tmp file in workload (program) directory
(cache) - if 'yes', cache params (to automate runs)
(cache_repo_uoa) - repo UOA where to cache params
(share) - if 'yes', share benchmarking results with public cknowledge.org/repo server
(our crowd-benchmarking demo)
(exchange_repo) - which repo to record/update info (remote-ck by default)
(exchange_subrepo) - if remote, remote repo UOA
(scenario_module_uoa) - UOA of the scenario (to share results)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
import copy
import time
import shutil
o=i.get('out','')
oo=''
if o=='con': oo=o
cur_dir=os.getcwd()
# Check if any input has . and convert to dict
for k in list(i.keys()):
if k.find('.')>0:
v=i[k]
kk='##'+k.replace('.','#')
del(i[k])
r=ck.set_by_flat_key({'dict':i, 'key':kk, 'value':v})
if r['return']>0: return r
# Check if share
share=i.get('share','')
user=i.get('user','')
smuoa=i.get('scenario_module_uoa','')
if smuoa=='': smuoa=cfg['module_deps']['experiment.bench.workload.android']
er=i.get('exchange_repo','')
if er=='': er=ck.cfg['default_exchange_repo_uoa']
esr=i.get('exchange_subrepo','')
if esr=='': esr=ck.cfg['default_exchange_subrepo_uoa']
# Get device and workload params
config=i.get('config',{})
params=i.get('params',{})
# Check scenarios
scenario=i.get('scenario','')
if scenario=='': scenario='-'
if scenario!='' and scenario!='-':
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['wa-scenario'],
'data_uoa':scenario})
if r['return']>0: return r
d=r['dict']
r=ck.merge_dicts({'dict1':config, 'dict2':d.get('config',{})})
if r['return']>0: return r
r=ck.merge_dicts({'dict1':params, 'dict2':d.get('params',{})})
if r['return']>0: return r
# Check workload(s)
duoa=i.get('data_uoa','')
if duoa!='':
duoa='wa-'+duoa
r=ck.access({'action':'search',
'module_uoa':cfg['module_deps']['program'],
'add_meta':'yes',
'data_uoa':duoa,
'tags':'wa'})
if r['return']>0: return r
lst=r['lst']
if len(lst)==0:
return {'return':1, 'error':'workload is not specified or found'}
record=i.get('record','')
skip_record_raw=i.get('skip-record-raw','')
overwrite=i.get('overwrite','')
repetitions=i.get('repetitions','')
if repetitions=='': repetitions=3
repetitions=int(repetitions)
cache=i.get('cache','')
# Get target features
target=i.get('target','')
if target=='':
# Check and possibly select target machines
r=ck.search({'module_uoa':cfg['module_deps']['machine'], 'data_uoa':target, 'add_meta':'yes'})
if r['return']>0: return r
dlst=r['lst']
# Prune search by only required devices
rdat=['wa_linux', 'wa_android']
xlst=[]
if len(rdat)==0:
xlst=dlst
else:
for q in dlst:
if q.get('meta',{}).get('access_type','') in rdat:
xlst.append(q)
if len(xlst)==0:
return {'return':1, 'error':'no suitable target devices found (use "ck add machine" to register new target device)'}
elif len(xlst)==1:
target=xlst[0]['data_uoa']
else:
# SELECTOR *************************************
ck.out('')
ck.out('Please select target device to run your workloads on:')
ck.out('')
r=ck.select_uoa({'choices':xlst})
if r['return']>0: return r
target=r['choice']
if target=='':
return {'return':1, 'error':'--target machine is not specified (see "ck list machine")'}
ck.out('')
ck.out('Selected target machine: '+target)
ck.out('')
# Load target machine description
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['machine'],
'data_uoa':target})
if r['return']>0: return r
target_uoa=r['data_uoa']
target_uid=r['data_uid']
features=r['dict']['features']
device_id=r['dict'].get('device_id','')
fplat=features.get('platform',{})
fos=features.get('os',{})
fcpu=features.get('cpu',{})
fgpu=features.get('gpu',{})
plat_name=fplat.get('name','')
os_name=fos.get('name','')
cpu_name=fcpu.get('name','')
if cpu_name=='': cpu_name='unknown-'+fcpu.get('cpu_abi','')
gpu_name=fgpu.get('name','')
sn=fos.get('serial_number','')
# Iterate over workloads
rrr={}
cparams=copy.deepcopy(params)
for wa in lst:
# Reset dir
os.chdir(cur_dir)
# Reset params
params=copy.deepcopy(cparams)
duoa=wa['data_uoa']
duid=wa['data_uid']
dw=wa['meta']
dp=wa['path']
apk_name=dw.get('apk',{}).get('name','')
ww=dw['wa_alias']
# If cache, check if params already exist
if cache=='yes':
# Check extra
cruoa=i.get('cache_repo_uoa','')
# Attempt to load
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['wa-params'],
'data_uoa':duoa,
'repo_uoa':cruoa})
if r['return']>0 and r['return']!=16:
return r
if r['return']==0:
cruoa=r['repo_uid']
rx=ck.merge_dicts({'dict1':params, 'dict2':r['dict'].get('params',{})})
if rx['return']>0: return rx
# Check params here (there is another place in pre-processing scripts
# to be able to run WA via program pipeline directly)
dparams=dw.get('params',{})
if len(dparams)>0:
ck.out('Parameters needed for this workload:')
ck.out('')
for k in sorted(dparams):
x=dparams[k]
ds=x.get('desc','')
dv=params.get(k,None)
if dv==None:
dv=x.get('default',None)
if dv!=None:
ck.out(k+': '+str(dv))
elif x.get('mandatory',False):
r=ck.inp({'text':k+' ('+ds+'): '})
if r['return']>0: return r
dv=r['string'].strip()
if dv=='':
dv=None
if dv!=None:
params[k]=dv
# Cache params if required
if cache=='yes':
r=ck.access({'action':'update',
'module_uoa':cfg['module_deps']['wa-params'],
'data_uoa':duoa,
'repo_uoa':cruoa,
'dict':{'params':params},
'sort_keys':'yes',
'substitute':'yes',
'ignore_update':'yes'})
if r['return']>0:
return r
if o=='con':
ck.out('')
ck.out('Parameters were cached in '+r['path']+' ...')
# Prepare high-level experiment meta
meta={'program_uoa':duoa,
'program_uid':duid,
'workload_name':ww,
'cpu_name':cpu_name,
'os_name':os_name,
'plat_name':plat_name,
'gpu_name':gpu_name,
'scenario':scenario,
'serial_number':sn}
mmeta=copy.deepcopy(meta)
mmeta['local_target_uoa']=target_uoa
mmeta['local_target_uid']=target_uid
if o=='con':
ck.out(line)
ck.out('Running workload '+ww+' (CK UOA='+duoa+') ...')
time.sleep(1)
aggregated_stats={} # Pre-load statistics ...
result_path=''
result_path0=''
if skip_record_raw!='yes':
if o=='con':
ck.out(' Preparing wa_result entry to store raw results ...')
ddd={'meta':mmeta}
ii={'action':'search',
'module_uoa':cfg['module_deps']['wa-result'],
'search_dict':{'meta':meta}}
rx=ck.access(ii)
if rx['return']>0: return rx
lst=rx['lst']
if len(lst)==0:
rx=ck.access({'action':'add',
'module_uoa':cfg['module_deps']['wa-result'],
'dict':ddd,
'sort_keys':'yes'})
if rx['return']>0: return rx
result_uid=rx['data_uid']
result_path=rx['path']
else:
result_uid=lst[0]['data_uid']
result_path=lst[0]['path']
# Load entry
rx=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['wa-result'],
'data_uoa':result_uid})
if rx['return']>0: return rx
ddd=rx['dict']
# Possible directory extension (date-time)
result_path0=result_path
if overwrite!='yes':
rx=ck.get_current_date_time({})
if rx['return']>0: return rx
aa=rx['array']
ady=str(aa['date_year'])
adm=str(aa['date_month'])
adm=('0'*(2-len(adm)))+adm
add=str(aa['date_day'])
add=('0'*(2-len(add)))+add
ath=str(aa['time_hour'])
ath=('0'*(2-len(ath)))+ath
atm=str(aa['time_minute'])
atm=('0'*(2-len(atm)))+atm
ats=str(aa['time_second'])
ats=('0'*(2-len(ats)))+ats
pe=ady+adm+add+'-'+ath+atm+ats
result_path=os.path.join(result_path,pe)
if not os.path.isdir(result_path):
os.makedirs(result_path)
# Record input
finp=os.path.join(result_path,'ck-input.json')
r=ck.save_json_to_file({'json_file':finp, 'dict':i})
if r['return']>0: return r
ff=os.path.join(result_path,'ck-platform-features.json')
r=ck.save_json_to_file({'json_file':ff, 'dict':features})
if r['return']>0: return r
# Check stats ...
fstat=os.path.join(result_path0,ffstat)
if overwrite!='yes':
# Check if file already exists (no check for parallel runs)
if os.path.isfile(fstat):
r=ck.load_json_file({'json_file':fstat})
if r['return']==0:
aggregated_stats=r['dict']
# Prepare CK pipeline for a given workload
ii={'action':'pipeline',
'module_uoa':cfg['module_deps']['program'],
'data_uoa':duid,
'target':target,
'device_id':device_id,
'prepare':'yes',
'params':{'config':config,
'params':params},
'no_state_check':'yes',
'no_compiler_description':'yes',
'skip_info_collection':'yes',
'skip_calibration':'yes',
'cpu_freq':'',
'gpu_freq':'',
'env_speed':'yes',
'energy':'no',
'skip_print_timers':'yes',
'generate_rnd_tmp_dir':'yes',
'env':{'CK_WA_RAW_RESULT_PATH':result_path},
'out':oo}
rr=ck.access(ii)
if rr['return']>0: return rr
fail=rr.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+rr.get('fail_reason','')+')'}
ready=rr.get('ready','')
if ready!='yes':
return {'return':11, 'error':'couldn\'t prepare universal CK program workflow'}
state=rr['state']
tmp_dir=state['tmp_dir']
# Clean pipeline
if 'ready' in rr: del(rr['ready'])
if 'fail' in rr: del(rr['fail'])
if 'return' in rr: del(rr['return'])
pipeline=copy.deepcopy(rr)
# Save pipeline
if skip_record_raw!='yes':
fpip=os.path.join(result_path,'ck-pipeline-in.json')
r=ck.save_json_to_file({'json_file':fpip, 'dict':pipeline})
if r['return']>0: return r
# Run CK pipeline *****************************************************
ii={'action':'autotune',
'module_uoa':cfg['module_deps']['pipeline'],
'data_uoa':cfg['module_deps']['program'],
'device_id':device_id,
'iterations':1,
'repetitions':repetitions,
'collect_all':'yes',
'process_multi_keys':['##characteristics#*'],
'tmp_dir':tmp_dir,
'pipeline':pipeline,
'stat_flat_dict':aggregated_stats,
'record':record,
'meta':meta,
'tags':'wa',
"features_keys_to_process":["##choices#*"],
"record_params": {
"search_point_by_features":"yes"
},
"record_dict":{"subview_uoa":"3d9a4f4b03b1b257"},
'out':oo}
rrr=ck.access(ii)
if rrr['return']>0: return rrr
ls=rrr.get('last_iteration_output',{})
state=ls.get('state',{})
xchoices=copy.deepcopy(ls.get('choices',{}))
lsa=rrr.get('last_stat_analysis',{})
lsad=lsa.get('dict_flat',{})
# Not very clean - trying to remove passes ...
xparams=xchoices.get('params','').get('params',{})
to_be_deleted=[]
for k in xparams:
if k.find('pass')>=0:
to_be_deleted.append(k)
for k in to_be_deleted:
del(xparams[k])
ddd['choices']=xchoices
features=ls.get('features',{})
apk_ver=''
if apk_name!='':
apk_ver=features.get('apk',{}).get(apk_name,{}).get('versionName','')
deps=ls.get('dependencies',{})
wa_ver=deps.get('wa',{}).get('cus',{}).get('version','')
# Update meta
ddd['meta']['apk_name']=apk_name
ddd['meta']['apk_version']=apk_ver
ddd['meta']['wa_version']=wa_ver
# Clean tmp dir
tmp_dir=state.get('tmp_dir','')
if dp!='' and tmp_dir!='' and i.get('keep','')!='yes':
shutil.rmtree(os.path.join(dp,tmp_dir))
fail=ls.get('fail','')
fail_reason=ls.get('fail_reason','')
ch=ls.get('characteristics',{})
# tet=ch.get('run',{}).get('total_execution_time',0)
# Save pipeline
ddd['state']={'fail':fail, 'fail_reason':fail_reason}
ddd['characteristics']=ch
if skip_record_raw!='yes':
fpip=os.path.join(result_path,'ck-pipeline-out.json')
r=ck.save_json_to_file({'json_file':fpip, 'dict':rrr})
if r['return']>0: return r
# Write stats ...
r=ck.save_json_to_file({'json_file':fstat, 'dict':lsad})
if r['return']>0: return r
# Update meta
rx=ck.access({'action':'update',
'module_uoa':cfg['module_deps']['wa-result'],
'data_uoa':result_uid,
'dict':ddd,
'substitute':'yes',
'sort_keys':'yes'})
if rx['return']>0: return rx
# Share results if crowd-benchmarking
if share=='yes':
ddd['user']=user
if o=='con':
ck.out('')
ck.out('Saving results to the remote public repo ...')
ck.out('')
# Find remote entry
rduid=''
ii={'action':'search',
'module_uoa':smuoa,
'repo_uoa':er,
'remote_repo_uoa':esr,
'search_dict':{'meta':meta}}
rx=ck.access(ii)
lst=rx['lst']
if len(lst)==1:
rduid=lst[0]['data_uid']
else:
rx=ck.gen_uid({})
if rx['return']>0: return rx
rduid=rx['data_uid']
# Update meta
rx=ck.access({'action':'update',
'module_uoa':smuoa,
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'dict':ddd,
'substitute':'yes',
'sort_keys':'yes'})
if rx['return']>0: return rx
# Push statistical characteristics
if os.path.isfile(fstat):
rx=ck.access({'action':'push',
'module_uoa':smuoa,
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'filename':fstat,
'overwrite':'yes'})
if rx['return']>0: return rx
# Push latest results
fx=os.path.join(result_path,'wa-output','results.json')
if os.path.isfile(fx):
rx=ck.access({'action':'push',
'module_uoa':smuoa,
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'filename':fx,
'extra_path':'wa-output',
'overwrite':'yes'})
if rx['return']>0: return rx
# Info
if o=='con':
ck.out('Successfully recorded results in the remote repo (Entry UID='+rduid+')')
return rrr
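The per-workload cleanup above is guarded by a 'keep' flag, so temporary pipeline directories survive when the caller wants to inspect them. A minimal sketch of that conditional rmtree (names are illustrative):

import os
import shutil

def clean_tmp(base_dir, tmp_dir, keep=False):
    # Remove the run's tmp dir unless the caller asked to keep it.
    if base_dir and tmp_dir and not keep:
        shutil.rmtree(os.path.join(base_dir, tmp_dir))

clean_tmp('workload-dir', 'tmp-abc123', keep=True)  # keep artifacts for inspection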
0
Example 77
Project: tensorflow-char-rnn Source File: train.py
def main():
parser = argparse.ArgumentParser()
# Data and vocabulary file
parser.add_argument('--data_file', type=str,
default='data/tiny_shakespeare.txt',
help='data file')
parser.add_argument('--encoding', type=str,
default='utf-8',
help='the encoding of the data file.')
# Parameters for saving models.
parser.add_argument('--output_dir', type=str, default='output',
help=('directory to store final and'
' intermediate results and models.'))
# Parameters to configure the neural network.
parser.add_argument('--hidden_size', type=int, default=128,
help='size of RNN hidden state vector')
parser.add_argument('--embedding_size', type=int, default=0,
help='size of character embeddings')
parser.add_argument('--num_layers', type=int, default=2,
help='number of layers in the RNN')
parser.add_argument('--num_unrollings', type=int, default=10,
help='number of unrolling steps.')
parser.add_argument('--model', type=str, default='lstm',
help='which model to use (rnn, lstm or gru).')
# Parameters to control the training.
parser.add_argument('--num_epochs', type=int, default=50,
help='number of epochs')
parser.add_argument('--batch_size', type=int, default=20,
help='minibatch size')
parser.add_argument('--train_frac', type=float, default=0.9,
help='fraction of data used for training.')
parser.add_argument('--valid_frac', type=float, default=0.05,
help='fraction of data used for validation.')
# test_frac is computed as (1 - train_frac - valid_frac).
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout rate, default to 0 (no dropout).')
parser.add_argument('--input_dropout', type=float, default=0.0,
help=('dropout rate on input layer, default to 0 (no dropout),'
'and no dropout if using one-hot representation.'))
# Parameters for gradient descent.
parser.add_argument('--max_grad_norm', type=float, default=5.,
help='clip global grad norm')
parser.add_argument('--learning_rate', type=float, default=2e-3,
help='initial learning rate')
parser.add_argument('--decay_rate', type=float, default=0.95,
help='decay rate')
# Parameters for logging.
parser.add_argument('--log_to_file', dest='log_to_file', action='store_true',
help=('whether the experiment log is stored in a file under'
' output_dir or printed at stdout.'))
parser.set_defaults(log_to_file=False)
parser.add_argument('--progress_freq', type=int,
default=100,
help=('frequency for progress report in training'
' and evaluation.'))
parser.add_argument('--verbose', type=int,
default=0,
help=('whether to show progress report in training'
' and evaluation.'))
# Parameters to feed in the initial model and current best model.
parser.add_argument('--init_model', type=str,
default='',
help=('initial model'))
parser.add_argument('--best_model', type=str,
default='',
help=('current best model'))
parser.add_argument('--best_valid_ppl', type=float,
default=np.Inf,
help=('current valid perplexity'))
# Parameters for using saved best models.
parser.add_argument('--init_dir', type=str, default='',
help='continue from the outputs in the given directory')
# Parameters for debugging.
parser.add_argument('--debug', dest='debug', action='store_true',
help='show debug information')
parser.set_defaults(debug=False)
# Parameters for unittesting the implementation.
parser.add_argument('--test', dest='test', action='store_true',
help=('use the first 1000 characters as data'
' to test the implementation'))
parser.set_defaults(test=False)
args = parser.parse_args()
# Specifying location to store model, best model and tensorboard log.
args.save_model = os.path.join(args.output_dir, 'save_model/model')
args.save_best_model = os.path.join(args.output_dir, 'best_model/model')
args.tb_log_dir = os.path.join(args.output_dir, 'tensorboard_log/')
args.vocab_file = ''
# Create necessary directories.
if args.init_dir:
args.output_dir = args.init_dir
else:
if os.path.exists(args.output_dir):
shutil.rmtree(args.output_dir)
for paths in [args.save_model, args.save_best_model,
args.tb_log_dir]:
os.makedirs(os.path.dirname(paths))
# Specify logging config.
if args.log_to_file:
args.log_file = os.path.join(args.output_dir, 'experiment_log.txt')
else:
args.log_file = 'stdout'
# Set logging file.
if args.log_file == 'stdout':
logging.basicConfig(stream=sys.stdout,
format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO,
datefmt='%I:%M:%S')
else:
logging.basicConfig(filename=args.log_file,
format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO,
datefmt='%I:%M:%S')
print('=' * 60)
print('All final and intermediate outputs will be stored in %s/' % args.output_dir)
print('All information will be logged to %s' % args.log_file)
print('=' * 60 + '\n')
if args.debug:
logging.info('args are:\n%s', args)
# Prepare parameters.
if args.init_dir:
with open(os.path.join(args.init_dir, 'result.json'), 'r') as f:
result = json.load(f)
params = result['params']
args.init_model = result['latest_model']
best_model = result['best_model']
best_valid_ppl = result['best_valid_ppl']
if 'encoding' in result:
args.encoding = result['encoding']
else:
args.encoding = 'utf-8'
args.vocab_file = os.path.join(args.init_dir, 'vocab.json')
else:
params = {'batch_size': args.batch_size,
'num_unrollings': args.num_unrollings,
'hidden_size': args.hidden_size,
'max_grad_norm': args.max_grad_norm,
'embedding_size': args.embedding_size,
'num_layers': args.num_layers,
'learning_rate': args.learning_rate,
'model': args.model,
'dropout': args.dropout,
'input_dropout': args.input_dropout}
best_model = ''
logging.info('Parameters are:\n%s\n', json.dumps(params, sort_keys=True, indent=4))
# Read and split data.
logging.info('Reading data from: %s', args.data_file)
with codecs.open(args.data_file, 'r', encoding=args.encoding) as f:
text = f.read()
if args.test:
text = text[:1000]
logging.info('Number of characters: %s', len(text))
if args.debug:
n = 10
logging.info('First %d characters: %s', n, text[:n])
logging.info('Creating train, valid, test split')
train_size = int(args.train_frac * len(text))
valid_size = int(args.valid_frac * len(text))
test_size = len(text) - train_size - valid_size
train_text = text[:train_size]
valid_text = text[train_size:train_size + valid_size]
test_text = text[train_size + valid_size:]
if args.vocab_file:
vocab_index_dict, index_vocab_dict, vocab_size = load_vocab(args.vocab_file, args.encoding)
else:
logging.info('Creating vocabulary')
vocab_index_dict, index_vocab_dict, vocab_size = create_vocab(text)
vocab_file = os.path.join(args.output_dir, 'vocab.json')
save_vocab(vocab_index_dict, vocab_file, args.encoding)
logging.info('Vocabulary is saved in %s', vocab_file)
args.vocab_file = vocab_file
params['vocab_size'] = vocab_size
logging.info('Vocab size: %d', vocab_size)
# Create batch generators.
batch_size = params['batch_size']
num_unrollings = params['num_unrollings']
train_batches = BatchGenerator(train_text, batch_size, num_unrollings, vocab_size,
vocab_index_dict, index_vocab_dict)
# valid_batches = BatchGenerator(valid_text, 1, 1, vocab_size,
# vocab_index_dict, index_vocab_dict)
valid_batches = BatchGenerator(valid_text, batch_size, num_unrollings, vocab_size,
vocab_index_dict, index_vocab_dict)
test_batches = BatchGenerator(test_text, 1, 1, vocab_size,
vocab_index_dict, index_vocab_dict)
if args.debug:
logging.info('Test batch generators')
logging.info(batches2string(train_batches.next(), index_vocab_dict))
logging.info(batches2string(valid_batches.next(), index_vocab_dict))
logging.info('Show vocabulary')
logging.info(vocab_index_dict)
logging.info(index_vocab_dict)
# Create graphs
logging.info('Creating graph')
graph = tf.Graph()
with graph.as_default():
with tf.name_scope('training'):
train_model = CharRNN(is_training=True, use_batch=True, **params)
tf.get_variable_scope().reuse_variables()
with tf.name_scope('validation'):
valid_model = CharRNN(is_training=False, use_batch=True, **params)
with tf.name_scope('evaluation'):
test_model = CharRNN(is_training=False, use_batch=False, **params)
saver = tf.train.Saver(name='checkpoint_saver')
best_model_saver = tf.train.Saver(name='best_model_saver')
logging.info('Model size (number of parameters): %s\n', train_model.model_size)
logging.info('Start training\n')
result = {}
result['params'] = params
result['vocab_file'] = args.vocab_file
result['encoding'] = args.encoding
try:
# Use try and finally to make sure that intermediate
# results are saved correctly so that training can
# be continued later after interruption.
with tf.Session(graph=graph) as session:
# Version 8 changed the api of summary writer to use
# graph instead of graph_def.
if TF_VERSION >= 8:
graph_info = session.graph
else:
graph_info = session.graph_def
train_writer = tf.train.SummaryWriter(args.tb_log_dir + 'train/', graph_info)
valid_writer = tf.train.SummaryWriter(args.tb_log_dir + 'valid/', graph_info)
# load a saved model or start from random initialization.
if args.init_model:
saver.restore(session, args.init_model)
else:
tf.initialize_all_variables().run()
for i in range(args.num_epochs):
logging.info('=' * 19 + ' Epoch %d ' + '=' * 19 + '\n', i)
logging.info('Training on training set')
# training step
ppl, train_summary_str, global_step = train_model.run_epoch(
session,
train_size,
train_batches,
is_training=True,
verbose=args.verbose,
freq=args.progress_freq)
# record the summary
train_writer.add_summary(train_summary_str, global_step)
train_writer.flush()
# save model
saved_path = saver.save(session, args.save_model,
global_step=train_model.global_step)
logging.info('Latest model saved in %s\n', saved_path)
logging.info('Evaluate on validation set')
# valid_ppl, valid_summary_str, _ = valid_model.run_epoch(
valid_ppl, valid_summary_str, _ = valid_model.run_epoch(
session,
valid_size,
valid_batches,
is_training=False,
verbose=args.verbose,
freq=args.progress_freq)
# save and update best model
if (not best_model) or (valid_ppl < best_valid_ppl):
best_model = best_model_saver.save(
session,
args.save_best_model,
global_step=train_model.global_step)
best_valid_ppl = valid_ppl
valid_writer.add_summary(valid_summary_str, global_step)
valid_writer.flush()
logging.info('Best model is saved in %s', best_model)
logging.info('Best validation ppl is %f\n', best_valid_ppl)
result['latest_model'] = saved_path
result['best_model'] = best_model
# Convert to float because numpy.float is not json serializable.
result['best_valid_ppl'] = float(best_valid_ppl)
result_path = os.path.join(args.output_dir, 'result.json')
if os.path.exists(result_path):
os.remove(result_path)
with open(result_path, 'w') as f:
json.dump(result, f, indent=2, sort_keys=True)
logging.info('Latest model is saved in %s', saved_path)
logging.info('Best model is saved in %s', best_model)
logging.info('Best validation ppl is %f\n', best_valid_ppl)
logging.info('Evaluate the best model on test set')
saver.restore(session, best_model)
test_ppl, _, _ = test_model.run_epoch(session, test_size, test_batches,
is_training=False,
verbose=args.verbose,
freq=args.progress_freq)
result['test_ppl'] = float(test_ppl)
finally:
result_path = os.path.join(args.output_dir, 'result.json')
if os.path.exists(result_path):
os.remove(result_path)
with open(result_path, 'w') as f:
json.dump(result, f, indent=2, sort_keys=True)
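The setup near the top of this example wipes any previous output_dir and then rebuilds the nested save/log layout by calling os.makedirs on the dirname of each artifact path. A condensed sketch of the same setup (paths are illustrative):

import os
import shutil

output_dir = 'output'
if os.path.exists(output_dir):
    # Start from a clean slate; previous checkpoints and logs are discarded.
    shutil.rmtree(output_dir)
for path in [os.path.join(output_dir, 'save_model/model'),
             os.path.join(output_dir, 'best_model/model'),
             os.path.join(output_dir, 'tensorboard_log/')]:
    os.makedirs(os.path.dirname(path))  # create each parent directory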
0
Example 78
Project: wasm-e2e Source File: run-tests.py
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--executable', help='override executable.')
parser.add_argument('-v', '--verbose', help='print more diagnostic messages.',
action='store_true')
parser.add_argument('-l', '--list', help='list all tests.',
action='store_true')
parser.add_argument('--list-exes',
help='list all executables needed for the tests.',
action='store_true')
parser.add_argument('-r', '--rebase',
help='rebase a test to its current output.',
action='store_true')
parser.add_argument('-j', '--jobs', help='number of jobs to use to run tests',
type=int, default=multiprocessing.cpu_count())
parser.add_argument('-t', '--timeout', type=float, default=DEFAULT_TIMEOUT,
help='per test timeout in seconds')
parser.add_argument('patterns', metavar='pattern', nargs='*',
help='test patterns.')
options = parser.parse_args(args)
if options.patterns:
pattern_re = '|'.join(fnmatch.translate('*%s*' % p)
for p in options.patterns)
else:
pattern_re = '.*'
test_names = FindTestFiles(SCRIPT_DIR, '.c', pattern_re)
if options.list:
for test_name in test_names:
print test_name
return 0
if not test_names:
print 'no tests match that filter'
return 1
if options.executable:
if not os.path.exists(options.executable):
parser.error('executable %s does not exist' % options.executable)
options.executable = os.path.abspath(options.executable)
isatty = os.isatty(1)
status = Status(options.verbose)
infos = GetAllTestInfo(test_names, status)
if options.list_exes:
exes = set([info.exe for info in infos])
if None in exes:
exes.remove(None)
exes.add(os.path.relpath(DEFAULT_EXE, os.getcwd()))
print '\n'.join(exes)
return 0
inq = multiprocessing.Queue()
test_count = 0
for info in infos:
if info.skip:
status.Skipped(info)
continue
inq.put(info)
test_count += 1
outq = multiprocessing.Queue()
num_proc = options.jobs
processes = []
status.Start(test_count)
def Worker(i, options, inq, outq):
try:
while True:
try:
info = inq.get(False)
try:
out = info.Run(options.timeout, temp_dir, options.executable)
except Exception as e:
outq.put((info, e))
continue
outq.put((info, out))
except Queue.Empty:
# Seems this can be fired even when the queue isn't actually empty.
# Double-check, via inq.empty()
if inq.empty():
break
except KeyboardInterrupt:
pass
temp_dir = tempfile.mkdtemp(prefix='wasm-e2e-')
try:
for i, p in enumerate(range(num_proc)):
proc = multiprocessing.Process(target=Worker,
args=(i, options, inq, outq))
processes.append(proc)
proc.start()
finished_tests = 0
while finished_tests < test_count:
try:
info, result = outq.get(True, 0.01)
except Queue.Empty:
status.Timeout()
continue
finished_tests += 1
try:
if isinstance(result, Exception):
raise result
stdout, stderr, returncode, duration = result
if returncode != info.expected_error:
# This test has already failed, but diff it anyway.
msg = 'expected error code %d, got %d.' % (info.expected_error,
returncode)
try:
info.Diff(stdout, stderr)
except Error as e:
msg += '\n' + str(e)
raise Error(msg)
else:
if options.rebase:
info.Rebase(stdout, stderr)
else:
info.Diff(stdout, stderr)
status.Passed(info, duration)
except Exception as e:
status.Failed(info, str(e))
except KeyboardInterrupt:
for proc in processes:
proc.join()
finally:
for proc in processes:
proc.terminate()
proc.join()
shutil.rmtree(temp_dir)
status.Clear()
ret = 0
if status.failed:
sys.stderr.write('**** FAILED %s\n' % ('*' * (80 - 14)))
for info in status.failed_tests:
sys.stderr.write('- %s\n' % info.name)
ret = 1
status.Print()
return ret
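The try/finally above guarantees the mkdtemp scratch area is removed even when workers fail or the run is interrupted. A stripped-down sketch of that structure:

import shutil
import tempfile

temp_dir = tempfile.mkdtemp(prefix='wasm-e2e-')
try:
    pass  # spawn workers and collect results here
finally:
    # Runs on success, failure, or KeyboardInterrupt alike.
    shutil.rmtree(temp_dir)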
0
Example 79
@skip_doctest
@magic_arguments()
@argument(
'-i', '--input', action='append',
help='Names of input variable from shell.user_ns to be assigned to R variables of the same names after calling self.pyconverter. Multiple names can be passed separated only by commas with no whitespace.'
)
@argument(
'-o', '--output', action='append',
help='Names of variables to be pushed from rpy2 to shell.user_ns after executing cell body and applying self.Rconverter. Multiple names can be passed separated only by commas with no whitespace.'
)
@argument(
'-w', '--width', type=int,
help='Width of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-h', '--height', type=int,
help='Height of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-d', '--dataframe', action='append',
help='Convert these objects to data.frames and return as structured arrays.'
)
@argument(
'-u', '--units', type=int,
help='Units of png plotting device sent as an argument to *png* in R. One of ["px", "in", "cm", "mm"].'
)
@argument(
'-p', '--pointsize', type=int,
help='Pointsize of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-b', '--bg',
help='Background of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-n', '--noreturn',
help='Force the magic to not return anything.',
action='store_true',
default=False
)
@argument(
'code',
nargs='*',
)
@needs_local_scope
@line_cell_magic
def R(self, line, cell=None, local_ns=None):
'''
Execute code in R, and pull some of the results back into the Python namespace.
In line mode, this will evaluate an expression and convert the returned value to a Python object.
The return value is determined by rpy2's behaviour of returning the result of evaluating the
final line.
Multiple R lines can be executed by joining them with semicolons::
In [9]: %R X=c(1,4,5,7); sd(X); mean(X)
Out[9]: array([ 4.25])
As a cell, this will run a block of R code, without bringing anything back by default::
In [10]: %%R
....: Y = c(2,4,3,9)
....: print(summary(lm(Y~X)))
....:
Call:
lm(formula = Y ~ X)
Residuals:
1 2 3 4
0.88 -0.24 -2.28 1.64
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.0800 2.3000 0.035 0.975
X 1.0400 0.4822 2.157 0.164
Residual standard error: 2.088 on 2 degrees of freedom
Multiple R-squared: 0.6993, Adjusted R-squared: 0.549
F-statistic: 4.651 on 1 and 2 DF, p-value: 0.1638
In the notebook, plots are published as the output of the cell.
%R plot(X, Y)
will create a scatter plot of X vs Y.
If cell is not None and line has some R code, it is prepended to
the R code in cell.
Objects can be passed back and forth between rpy2 and python via the -i -o flags in line::
In [14]: Z = np.array([1,4,5,10])
In [15]: %R -i Z mean(Z)
Out[15]: array([ 5.])
In [16]: %R -o W W=Z*mean(Z)
Out[16]: array([ 5., 20., 25., 50.])
In [17]: W
Out[17]: array([ 5., 20., 25., 50.])
The return value is determined by these rules:
* If the cell is not None, the magic returns None.
* If the cell evaluates as False, the resulting value is returned
unless the final line prints something to the console, in
which case None is returned.
* If the final line results in a NULL value when evaluated
by rpy2, then None is returned.
* No attempt is made to convert the final value to a structured array.
Use the --dataframe flag or %Rget to push / return a structured array.
* If the -n flag is present, there is no return value.
* A trailing ';' will also result in no return value as the last
value in the line is an empty string.
The --dataframe argument will attempt to return structured arrays.
This is useful for dataframes with
mixed data types. Note also that for a data.frame,
if it is returned as an ndarray, it is transposed::
In [18]: dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')]
In [19]: datapy = np.array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5, 'e')], dtype=dtype)
In [20]: %%R -o datar
datar = datapy
....:
In [21]: datar
Out[21]:
array([['1', '2', '3', '4'],
['2', '3', '2', '5'],
['a', 'b', 'c', 'e']],
dtype='|S1')
In [22]: %%R -d datar
datar = datapy
....:
In [23]: datar
Out[23]:
array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5.0, 'e')],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')])
The --dataframe argument first tries colnames, then names.
If both are NULL, it returns an ndarray (i.e. unstructured)::
In [1]: %R mydata=c(4,6,8.3); NULL
In [2]: %R -d mydata
In [3]: mydata
Out[3]: array([ 4. , 6. , 8.3])
In [4]: %R names(mydata) = c('a','b','c'); NULL
In [5]: %R -d mydata
In [6]: mydata
Out[6]:
array((4.0, 6.0, 8.3),
dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
In [7]: %R -o mydata
In [8]: mydata
Out[8]: array([ 4. , 6. , 8.3])
'''
args = parse_argstring(self.R, line)
# arguments 'code' in line are prepended to
# the cell lines
if cell is None:
code = ''
return_output = True
line_mode = True
else:
code = cell
return_output = False
line_mode = False
code = ' '.join(args.code) + code
# if there is no local namespace then default to an empty dict
if local_ns is None:
local_ns = {}
if args.input:
for input in ','.join(args.input).split(','):
try:
val = local_ns[input]
except KeyError:
val = self.shell.user_ns[input]
self.r.assign(input, self.pyconverter(val))
png_argdict = dict([(n, getattr(args, n)) for n in ['units', 'height', 'width', 'bg', 'pointsize']])
png_args = ','.join(['%s=%s' % (o,v) for o, v in list(png_argdict.items()) if v is not None])
# execute the R code in a temporary directory
tmpd = tempfile.mkdtemp()
self.r('png("%s/Rplots%%03d.png",%s)' % (tmpd, png_args))
text_output = ''
if line_mode:
for line in code.split(';'):
text_result, result = self.eval(line)
text_output += text_result
if text_result:
# the last line printed something to the console so we won't return it
return_output = False
else:
text_result, result = self.eval(code)
text_output += text_result
self.r('dev.off()')
# read out all the saved .png files
images = [open(imgfile, 'rb').read() for imgfile in glob("%s/Rplots*png" % tmpd)]
# now publish the images
# mimicking IPython/zmq/pylab/backend_inline.py
fmt = 'png'
mimetypes = { 'png' : 'image/png', 'svg' : 'image/svg+xml' }
mime = mimetypes[fmt]
# publish the printed R objects, if any
display_data = []
if text_output:
display_data.append(('RMagic.R', {'text/plain':text_output}))
# flush text streams before sending figures, helps a little with output
for image in images:
# synchronization in the console (though it's a bandaid, not a real solution)
sys.stdout.flush(); sys.stderr.flush()
display_data.append(('RMagic.R', {mime: image}))
# kill the temporary directory
rmtree(tmpd)
# try to turn every output into a numpy array
# this means that outputs are assumed to be castable
# as numpy arrays
if args.output:
for output in ','.join(args.output).split(','):
self.shell.push({output:self.Rconverter(self.r(output), dataframe=False)})
if args.dataframe:
for output in ','.join(args.dataframe).split(','):
self.shell.push({output:self.Rconverter(self.r(output), dataframe=True)})
for tag, disp_d in display_data:
publish_display_data(tag, disp_d)
# this will keep a reference to the display_data
# which might be useful to other objects who happen to use
# this method
if self.cache_display_data:
self.display_cache = display_data
# if in line mode and return_output, return the result as an ndarray
if return_output and not args.noreturn:
if result != ri.NULL:
return self.Rconverter(result, dataframe=False)
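Note that the magic calls rmtree(tmpd) on the straight-line path only: if the evaluation raises, the temporary plotting directory is never removed. A try/finally closes that leak; a sketch with a hypothetical render callback standing in for the R evaluation:

import glob
import shutil
import tempfile

def collect_plots(render):
    # render(tmpd) is assumed to write Rplots*.png files into tmpd.
    tmpd = tempfile.mkdtemp()
    try:
        render(tmpd)
        return [open(path, 'rb').read()
                for path in glob.glob('%s/Rplots*png' % tmpd)]
    finally:
        # Unlike the code above, cleanup also happens when render() raises.
        shutil.rmtree(tmpd)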
0
Example 80
Project: plugin.video.streamondemand Source File: config.py
def verify_directories_created():
import logger
# xbmc.log("streamondemand.core.config.verify_directories_created")
# Force download path if empty
download_path = get_setting("downloadpath")
if download_path == "":
if is_xbmc():
download_path_special = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/downloads"
set_setting("downloadpath", download_path_special)
else:
download_path = os.path.join(get_data_path(), "downloads")
set_setting("downloadpath", download_path)
# Force download list path if empty
download_list_path = get_setting("downloadlistpath")
if download_list_path == "":
if is_xbmc():
download_list_path_special = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/downloads/list"
set_setting("downloadlistpath", download_list_path_special)
else:
download_list_path = os.path.join(get_data_path(), "downloads", "list")
set_setting("downloadlistpath", download_list_path)
# Force bookmark path if empty
bookmark_path = get_setting("bookmarkpath")
if bookmark_path == "":
if is_xbmc():
bookmark_path_special = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/downloads/list"
set_setting("bookmarkpath", bookmark_path_special)
else:
bookmark_path = os.path.join(get_data_path(), "bookmarks")
set_setting("bookmarkpath", bookmark_path)
# Create data_path if not exists
if not os.path.exists(get_data_path()):
logger.debug("Creating data_path " + get_data_path())
try:
os.mkdir(get_data_path())
except:
pass
if is_xbmc():
# xbmc.log("Es una plataforma XBMC")
if download_path.startswith("special://"):
# Translate from special and create download_path if not exists
download_path = xbmc.translatePath(download_path)
if not download_path.lower().startswith("smb") and not os.path.exists(download_path):
logger.debug("Creating download_path (from special): " + download_path)
try:
os.mkdir(download_path)
except:
pass
else:
if not download_path.lower().startswith("smb") and not os.path.exists(download_path):
logger.debug("Creating download_path: " + download_path)
try:
os.mkdir(download_path)
except:
pass
if download_list_path.startswith("special://"):
# Create download_list_path if not exists
download_list_path = xbmc.translatePath(download_list_path)
if not download_list_path.lower().startswith("smb") and not os.path.exists(download_list_path):
logger.debug("Creating download_list_path (from special): " + download_list_path)
try:
os.mkdir(download_list_path)
except:
pass
else:
if not download_list_path.lower().startswith("smb") and not os.path.exists(download_list_path):
logger.debug("Creating download_list_path: " + download_list_path)
try:
os.mkdir(download_list_path)
except:
pass
if bookmark_path.startswith("special://"):
# Create bookmark_path if not exists
bookmark_path = xbmc.translatePath(bookmark_path)
if not bookmark_path.lower().startswith("smb") and not os.path.exists(bookmark_path):
logger.debug("Creating bookmark_path (from special): " + bookmark_path)
try:
os.mkdir(bookmark_path)
except:
pass
else:
if not bookmark_path.lower().startswith("smb") and not os.path.exists(bookmark_path):
logger.debug("Creating bookmark_path: " + bookmark_path)
try:
os.mkdir(bookmark_path)
except:
pass
else:
# xbmc.log("No es una plataforma XBMC")
# Create download_path if not exists
if not download_path.lower().startswith("smb") and not os.path.exists(download_path):
logger.debug("Creating download_path " + download_path)
try:
os.mkdir(download_path)
except:
pass
# Create download_list_path if not exists
if not download_list_path.lower().startswith("smb") and not os.path.exists(download_list_path):
logger.debug("Creating download_list_path " + download_list_path)
try:
os.mkdir(download_list_path)
except:
pass
# Create bookmark_path if not exists
if not bookmark_path.lower().startswith("smb") and not os.path.exists(bookmark_path):
logger.debug("Creating bookmark_path " + bookmark_path)
try:
os.mkdir(bookmark_path)
except:
pass
# Create library_path if not exists
if not get_library_path().lower().startswith("smb") and not os.path.exists(get_library_path()):
logger.debug("Creating library_path " + get_library_path())
try:
os.mkdir(get_library_path())
except:
pass
# Create settings_path is not exists
settings_path = os.path.join(get_data_path(), "settings_channels")
if not os.path.exists(settings_path):
logger.debug("Creating settings_path " + settings_path)
try:
os.mkdir(settings_path)
except:
pass
# Checks that a directory "xbmc" is not present on platformcode
old_xbmc_directory = os.path.join(get_runtime_path(), "platformcode", "xbmc")
if os.path.exists(old_xbmc_directory):
logger.debug("Removing old platformcode.xbmc directory")
try:
import shutil
shutil.rmtree(old_xbmc_directory)
except:
pass
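The one rmtree call in this example is the final block: a platformcode/xbmc directory left behind by an older release is deleted wholesale, with a broad except so a failed removal never aborts startup. The same idiom with a narrower exception, as a sketch (remove_legacy_dir is illustrative, not from this add-on):

import os
import shutil

def remove_legacy_dir(path):
    # Delete a directory left over from an older release, if present.
    if os.path.exists(path):
        try:
            shutil.rmtree(path)
        except OSError:
            # A locked or read-only tree should not break startup.
            pass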
0
Example 81
Project: Bluto Source File: output.py
def action_output_vuln_zone_hunter(google_results, bing_results, linkedin_results, time_spent_email, time_spent_total, clean_dump, sub_intrest, domain, emailHunter_results, args, report_location, company, data_mine):
info('Output action_output_vuln_zone_hunter: Start')
linkedin_evidence_results = []
email_evidence_results = []
email_results = []
email_seen = []
url_seen = []
person_seen = []
final_emails = []
if emailHunter_results is not None:
for email in emailHunter_results:
email_results.append(email[0])
email_evidence_results.append((email[0],email[1]))
for email, url in google_results:
try:
e1, e2 = email.split(',')
if url not in email_seen:
email_seen.append(url)
email_evidence_results.append((str(e2).replace(' ',''),url))
email_evidence_results.append((str(e1).replace(' ',''),url))
email_results.append((str(e2).replace(' ','')))
email_results.append((str(e1).replace(' ','')))
except ValueError:
if url not in email_seen:
email_seen.append(url)
email_evidence_results.append((str(email).replace(' ',''),url))
email_results.append(str(email).replace(' ',''))
for e, u in bing_results:
email_results.append(e)
if u not in url_seen:
email_evidence_results.append((e, u))
for url, person, description in linkedin_results:
if person not in person_seen:
person_seen.append(person)
linkedin_evidence_results.append((url, person, description))
linkedin_evidence_results.sort(key=lambda tup: tup[1])
sorted_email = set(sorted(email_results))
for email in sorted_email:
if email == '[]':
pass
elif email == '@' + domain:
pass
else:
final_emails.append(email)
email_count = len(final_emails)
staff_count = len(person_seen)
f_emails = sorted(final_emails)
pwned_results = action_pwned(f_emails)
c_accounts = len(pwned_results)
print '\n\nEmail Addresses:\n'
write_html(email_evidence_results, linkedin_evidence_results, pwned_results, report_location, company, data_mine)
if f_emails:
for email in f_emails:
print str(email).replace("u'","").replace("'","").replace('[','').replace(']','')
else:
print '\tNo Data To Be Found'
print '\nCompromised Accounts:\n'
if pwned_results:
sorted_pwned = sorted(pwned_results)
for account in sorted_pwned:
print 'Account: \t{}'.format(account[0])
print 'Domain: \t{}'.format(account[1])
print 'Date: \t{}\n'.format(account[3])
else:
print '\tNo Data To Be Found'
print '\nLinkedIn Results:\n'
sorted_person = sorted(person_seen)
if sorted_person:
for person in sorted_person:
print person
else:
print '\tNo Data To Be Found'
if data_mine is not None:
user_names = data_mine[0]
software_list = data_mine[1]
download_count = data_mine[2]
download_list = data_mine[3]
username_count = len(user_names)
software_count = len(software_list)
print '\nData Found In Document MetaData'
print '\nPotential Usernames:\n'
if user_names:
for user in user_names:
print '\t' + colored(user, 'red')
else:
print '\tNo Data To Be Found'
print '\nSoftware And Versions Found:\n'
if software_list:
for software in software_list:
print '\t' + colored(software, 'red')
else:
print '\tNo Data To Be Found'
else:
user_names = []
software_list = []
download_count = 0
username_count = len(user_names)
software_count = len(software_list)
target_dict = dict((x.split(' ') for x in clean_dump))
clean_target = collections.OrderedDict(sorted(target_dict.items()))
print "\nProcessed Dump\n"
bruted_count = len(clean_target)
for item in clean_target:
if item in sub_intrest:
print colored(item, 'red'), colored("\t" + clean_target[item], 'red')
else:
print item, "\t" + target_dict[item]
time_spent_email_f = str(datetime.timedelta(seconds=(time_spent_email))).split('.')[0]
time_spent_total_f = str(datetime.timedelta(seconds=(time_spent_total))).split('.')[0]
print '\nHosts Identified: {}' .format(str(bruted_count))
print 'Potential Emails Found: {}' .format(str(email_count))
print 'Potential Staff Members Found: {}' .format(str(staff_count))
print 'Compromised Accounts: {}' .format(str(c_accounts))
print 'Potential Usernames Found: {}'.format(username_count)
print 'Potential Software Found: {}'.format(software_count)
print 'Documents Downloaded: {}'.format(download_count)
print "Email Enumeration:", time_spent_email_f
print "Total Time:", time_spent_total_f
info('Hosts Identified: {}' .format(str(bruted_count)))
info("Total Time:" .format(str(time_spent_total_f)))
info("Email Enumeration: {}" .format(str(time_spent_email_f)))
info('Compromised Accounts: {}' .format(str(c_accounts)))
info('Potential Usernames Found: {}'.format(username_count))
info('Potential Software Found: {}'.format(software_count))
info('Documents Downloaded: {}'.format(download_count))
info('Potential Staff Members Found: {}' .format(str(staff_count)))
info('Potential Emails Found: {}' .format(str(email_count)))
info('DNS Vuln Run completed')
info('Output action_output_vuln_zone_hunter: Completed')
domain_r = domain.split('.')
docs = os.path.expanduser('~/Bluto/doc/{}/'.format(domain_r[0]))
answers = ['no','n','y','yes']
while True:
answer = raw_input("\nWould you like to keep all local data?\n(Local Logs, Downloded Docuements, HTML Evidence Report)\n\nYes|No:").lower()
if answer in answers:
if answer == 'y' or answer == 'yes':
print '\nThe documents are located here: {}'.format(docs)
print 'The logs are located here: {}.'.format(LOG_DIR)
print "\nAn evidence report has been written to {}\n".format(report_location)
while True:
answer = raw_input("Would you like to open this report now? ").lower()
if answer in answers:
if answer == 'y' or answer == 'yes':
print '\nOpening {}' .format(report_location)
webbrowser.open('file://' + str(report_location))
break
else:
break
else:
print 'Your answer needs to be either yes|y|no|n rather than {}'.format(answer)
break
else:
shutil.rmtree(docs)
shutil.rmtree(LOG_DIR)
os.remove(report_location)
break
else:
print '\tYour answer needs to be either yes|y|no|n rather than {}'.format(answer)
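When the user answers no, the cleanup branch removes two whole trees (the document store and LOG_DIR) with shutil.rmtree but must use os.remove for the single report file, since rmtree only accepts directories. A sketch of that split, with ignore_errors added so a half-removed tree cannot raise (the function name is illustrative):

import os
import shutil

def discard_run_outputs(doc_dir, log_dir, report_path):
    for tree in (doc_dir, log_dir):
        # Directories: remove recursively, tolerating missing paths.
        shutil.rmtree(tree, ignore_errors=True)
    if os.path.exists(report_path):
        # rmtree rejects plain files, so the report needs os.remove.
        os.remove(report_path)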
0
Example 82
Project: faf Source File: kerneloops.py
def retrace(self, db, task):
new_symbols = {}
new_symbolsources = {}
debug_paths = set(os.path.join(task.debuginfo.unpacked_path, fname[1:])
for fname in task.debuginfo.debug_files)
if task.debuginfo.debug_files is not None:
db_debug_pkg = task.debuginfo.db_package
if db_debug_pkg.has_lob("offset_map"):
with db_debug_pkg.get_lob_fd("offset_map") as fd:
offset_map = pickle.load(fd)
else:
offset_map = get_function_offset_map(debug_paths)
db_debug_pkg.save_lob("offset_map", pickle.dumps(offset_map))
else:
offset_map = {}
for bin_pkg, db_ssources in task.binary_packages.items():
i = 0
for db_ssource in db_ssources:
i += 1
module = db_ssource.path
self.log_info(u"[{0} / {1}] Processing '{2}' @ '{3}'"
.format(i, len(db_ssources),
db_ssource.symbol.name, module))
if db_ssource.path == "vmlinux":
address = db_ssource.offset
if address < 0:
address += (1 << 64)
else:
if module not in offset_map:
self.log_debug("Module '{0}' not found in package '{1}'"
.format(module, task.debuginfo.nvra))
db_ssource.retrace_fail_count += 1
continue
module_map = offset_map[module]
symbol_name = db_ssource.symbol.name
if symbol_name not in module_map:
symbol_name = symbol_name.lstrip("_")
if symbol_name not in module_map:
self.log_debug("Function '{0}' not found in module "
"'{1}'".format(db_ssource.symbol.name,
module))
db_ssource.retrace_fail_count += 1
continue
address = module_map[symbol_name] + db_ssource.func_offset
debug_dir = os.path.join(task.debuginfo.unpacked_path,
"usr", "lib", "debug")
debug_path = self._get_debug_path(db, module,
task.debuginfo.db_package)
if debug_path is None:
db_ssource.retrace_fail_count += 1
continue
try:
abspath = os.path.join(task.debuginfo.unpacked_path,
debug_path[1:])
results = addr2line(abspath, address, debug_dir)
results.reverse()
except FafError as ex:
self.log_debug("addr2line failed: {0}".format(str(ex)))
db_ssource.retrace_fail_count += 1
continue
inl_id = 0
while len(results) > 1:
inl_id += 1
funcname, srcfile, srcline = results.pop()
self.log_debug("Unwinding inlined function '{0}'"
.format(funcname))
# hack - we have no offset for inlined symbols
# let's use the negated source line to avoid collisions
offset = -srcline
db_ssource_inl = get_ssource_by_bpo(db, db_ssource.build_id,
db_ssource.path, offset)
if db_ssource_inl is None:
key = (db_ssource.build_id, db_ssource.path, offset)
if key in new_symbolsources:
db_ssource_inl = new_symbolsources[key]
else:
db_symbol_inl = get_symbol_by_name_path(db,
funcname,
module)
if db_symbol_inl is None:
sym_key = (funcname, module)
if sym_key in new_symbols:
db_symbol_inl = new_symbols[sym_key]
else:
db_symbol_inl = Symbol()
db_symbol_inl.name = funcname
db_symbol_inl.normalized_path = module
db.session.add(db_symbol_inl)
new_symbols[sym_key] = db_symbol_inl
db_ssource_inl = SymbolSource()
db_ssource_inl.symbol = db_symbol_inl
db_ssource_inl.build_id = db_ssource.build_id
db_ssource_inl.path = module
db_ssource_inl.offset = offset
db_ssource_inl.source_path = srcfile
db_ssource_inl.line_number = srcline
db.session.add(db_ssource_inl)
new_symbolsources[key] = db_ssource_inl
for db_frame in db_ssource.frames:
db_frames = sorted(db_frame.thread.frames,
key=lambda f: f.order)
idx = db_frames.index(db_frame)
if idx > 0:
prevframe = db_frame.thread.frames[idx - 1]
if (prevframe.inlined and
prevframe.symbolsource == db_ssource_inl):
continue
db_newframe = ReportBtFrame()
db_newframe.symbolsource = db_ssource_inl
db_newframe.thread = db_frame.thread
db_newframe.inlined = True
db_newframe.order = db_frame.order - inl_id
db.session.add(db_newframe)
funcname, srcfile, srcline = results.pop()
self.log_debug("Result: {0}".format(funcname))
db_symbol = get_symbol_by_name_path(db, funcname, module)
if db_symbol is None:
key = (funcname, module)
if key in new_symbols:
db_symbol = new_symbols[key]
else:
self.log_debug("Creating new symbol '{0}' @ '{1}'"
.format(funcname, module))
db_symbol = Symbol()
db_symbol.name = funcname
db_symbol.normalized_path = module
db.session.add(db_symbol)
new_symbols[key] = db_symbol
if db_symbol.nice_name is None:
db_symbol.nice_name = demangle(funcname)
db_ssource.symbol = db_symbol
db_ssource.source_path = srcfile
db_ssource.line_number = srcline
if task.debuginfo is not None:
self.log_debug("Removing {0}".format(task.debuginfo.unpacked_path))
shutil.rmtree(task.debuginfo.unpacked_path, ignore_errors=True)
if task.source is not None and task.source.unpacked_path is not None:
self.log_debug("Removing {0}".format(task.source.unpacked_path))
shutil.rmtree(task.source.unpacked_path, ignore_errors=True)
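Both cleanup calls pass ignore_errors=True, so removal of the unpacked debuginfo and source trees never raises, even if a path has already vanished. When silent swallowing is too blunt, shutil.rmtree's onerror callback lets you log each failure and keep going; a sketch (rmtree_logged and log are illustrative):

import shutil

def rmtree_logged(path, log):
    def onerror(func, failed_path, exc_info):
        # Called once per failing os call; exc_info is a sys.exc_info() triple.
        log("could not remove {0}: {1}".format(failed_path, exc_info[1]))
    shutil.rmtree(path, onerror=onerror)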
0
Example 83
Project: Bluto Source File: output.py
def action_output_wild_false_hunter(brute_results_dict, sub_intrest, google_results, bing_true_results, linkedin_results, check_count, domain, time_spent_email, time_spent_brute, time_spent_total, emailHunter_results, args, report_location, company, data_mine):
info('Output action_output_wild_false_hunter: Start')
linkedin_evidence_results = []
email_evidence_results = []
email_results = []
email_seen = []
url_seen = []
person_seen = []
final_emails = []
if emailHunter_results is not None:
for email in emailHunter_results:
email_results.append(email[0])
email_evidence_results.append((email[0],email[1]))
for email, url in google_results:
try:
e1, e2 = email.split(',')
if url not in email_seen:
email_seen.append(url)
email_evidence_results.append((str(e2).replace(' ',''),url))
email_evidence_results.append((str(e1).replace(' ',''),url))
email_results.append((str(e2).replace(' ','')))
email_results.append((str(e1).replace(' ','')))
except ValueError:
if url not in email_seen:
email_seen.append(url)
email_evidence_results.append((str(email).replace(' ',''),url))
email_results.append(str(email).replace(' ',''))
for e, u in bing_true_results:
email_results.append(e)
if u not in url_seen:
email_evidence_results.append((e, u))
for url, person, description in linkedin_results:
if person not in person_seen:
person_seen.append(person)
linkedin_evidence_results.append((url, person, description))
linkedin_evidence_results.sort(key=lambda tup: tup[1])
sorted_email = set(sorted(email_results))
for email in sorted_email:
if email == '[]':
pass
elif email == '@' + domain:
pass
else:
final_emails.append(email)
email_count = len(final_emails)
staff_count = len(person_seen)
f_emails = sorted(final_emails)
pwned_results = action_pwned(f_emails)
c_accounts = len(pwned_results)
print '\n\nEmail Addresses:\n'
write_html(email_evidence_results, linkedin_evidence_results, pwned_results, report_location, company, data_mine)
if f_emails:
for email in f_emails:
print '\t' + str(email).replace("u'","").replace("'","").replace('[','').replace(']','')
else:
print '\tNo Data To Be Found'
print '\nCompromised Accounts:\n'
if pwned_results:
sorted_pwned = sorted(pwned_results)
for account in sorted_pwned:
print 'Account: \t{}'.format(account[0])
print ' Domain: \t{}'.format(account[1])
print ' Date: \t{}\n'.format(account[3])
else:
print '\tNo Data To Be Found'
print '\nLinkedIn Results:\n'
sorted_person = sorted(person_seen)
if sorted_person:
for person in sorted_person:
print person
else:
print '\tNo Data To Be Found'
if data_mine is not None:
user_names = data_mine[0]
software_list = data_mine[1]
download_count = data_mine[2]
download_list = data_mine[3]
username_count = len(user_names)
software_count = len(software_list)
print '\nData Found In Document MetaData'
print '\nPotential Usernames:\n'
if user_names:
for user in user_names:
print '\t' + colored(user, 'red')
else:
print '\tNo Data To Be Found'
print '\nSoftware And Versions Found:\n'
if software_list:
for software in software_list:
print '\t' + colored(software, 'red')
else:
print '\tNo Data To Be Found'
else:
user_names = []
software_list = []
download_count = 0
username_count = len(user_names)
software_count = len(software_list)
sorted_dict = collections.OrderedDict(sorted(brute_results_dict.items()))
bruted_count = len(sorted_dict)
print "\nBluto Results: \n"
for item in sorted_dict:
if item != '*.' + domain:
if item != '@.' + domain:
if item in sub_intrest:
print colored(item + "\t", 'red'), colored(sorted_dict[item], 'red')
else:
print item + "\t",sorted_dict[item]
time_spent_email_f = str(datetime.timedelta(seconds=(time_spent_email))).split('.')[0]
time_spent_brute_f = str(datetime.timedelta(seconds=(time_spent_brute))).split('.')[0]
time_spent_total_f = str(datetime.timedelta(seconds=(time_spent_total))).split('.')[0]
print '\nHosts Identified: {}' .format(str(bruted_count))
print 'Potential Emails Found: {}' .format(str(email_count))
print 'Potential Staff Members Found: {}' .format(str(staff_count))
print 'Compromised Accounts: {}' .format(str(c_accounts))
print 'Potential Usernames Found: {}'.format(username_count)
print 'Potential Software Found: {}'.format(software_count)
print 'Documents Downloaded: {}'.format(download_count)
print "Email Enumeration:", time_spent_email_f
print "Requests executed:", str(check_count) + " in ", time_spent_brute_f
print "Total Time:", time_spent_total_f
info('Hosts Identified: {}' .format(str(bruted_count)))
info("Email Enumeration: {}" .format(str(time_spent_email_f)))
info('Compromised Accounts: {}' .format(str(c_accounts)))
info('Potential Staff Members Found: {}' .format(str(staff_count)))
info('Potential Emails Found: {}' .format(str(email_count)))
info("Total Time:" .format(str(time_spent_total_f)))
info('Documents Downloaded: {}'.format(download_count))
info('DNS No Wild Cards + Email Hunter Run completed')
info('Output action_output_wild_false_hunter: Completed')
domain_r = domain.split('.')
docs = os.path.expanduser('~/Bluto/doc/{}/'.format(domain_r[0]))
answers = ['no','n','y','yes']
while True:
print colored("\nWould you like to keep all local data?\n(Local Logs, Downloded Docuements, HTML Evidence Report)\n\nYes|No:", "red")
answer = raw_input("").lower()
if answer in answers:
if answer == 'y' or answer == 'yes':
print '\nThe documents are located here: {}'.format(docs)
print 'The logs are located here: {}.'.format(LOG_DIR)
print "\nAn evidence report has been written to {}\n".format(report_location)
while True:
answer = raw_input("Would you like to open this report now? ").lower()
if answer in answers:
if answer == 'y' or answer == 'yes':
print '\nOpening {}' .format(report_location)
webbrowser.open('file://' + str(report_location))
break
else:
break
else:
print 'Your answer needs to be either yes|y|no|n rather than {}'.format(answer)
break
else:
shutil.rmtree(docs)
shutil.rmtree(LOG_DIR)
os.remove(report_location)
break
else:
print '\tYour answer needs to be either yes|y|no|n rather than {}'.format(answer)
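This cleanup branch mirrors Example 81: rmtree for the document and log trees, os.remove for the report file. On Windows these trees often contain read-only files, which make the plain rmtree calls raise; the usual fix is an onerror handler that clears the read-only bit and retries, sketched below (force_rmtree is illustrative):

import os
import shutil
import stat

def force_rmtree(path):
    def onerror(func, failed_path, exc_info):
        # Clear the read-only bit and retry the failed operation once.
        os.chmod(failed_path, stat.S_IWRITE)
        func(failed_path)
    shutil.rmtree(path, onerror=onerror)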
0
Example 84
Project: SiCKRAGE Source File: __init__.py
def start(self):
self.started = True
# thread name
threading.currentThread().setName('CORE')
# Check if we need to perform a restore first
if os.path.exists(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
success = restoreSR(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), sickrage.DATA_DIR)
print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
if success:
shutil.rmtree(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), ignore_errors=True)
# migrate old database file names to new ones
if os.path.isfile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db'))):
if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
helpers.moveFile(os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
os.path.join(sickrage.DATA_DIR, '{}.bak-{}'
.format('sickrage.db',
datetime.datetime.now().strftime(
'%Y%m%d_%H%M%S'))))
helpers.moveFile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db')),
os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickrage.db')))
# perform database startup actions
for db in [MainDB, CacheDB, FailedDB]:
# check integrity of database
db().check_integrity()
# initialize database
db().initialize()
# migrate database
db().migrate()
# compact database
db().compact()
# load config
self.srConfig.load()
# set socket timeout
socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)
# setup logger settings
self.srLogger.logSize = self.srConfig.LOG_SIZE
self.srLogger.logNr = self.srConfig.LOG_NR
self.srLogger.debugLogging = sickrage.DEBUG
self.srLogger.consoleLogging = not sickrage.QUITE
self.srLogger.logFile = self.srConfig.LOG_FILE
# start logger
self.srLogger.start()
# Check available space
try:
total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
if available_space < 100:
self.srLogger.error(
'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
available_space)
sickrage.restart = False
return
except:
self.srLogger.error('Failed getting diskspace: %s', traceback.format_exc())
# load data for shows from database
self.load_shows()
# build name cache
self.NAMECACHE.build()
if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
self.srConfig.DEFAULT_PAGE = 'home'
# cleanup cache folder
for folder in ['mako', 'sessions', 'indexers']:
try:
shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, folder), ignore_errors=True)
except Exception:
continue
# init anidb connection
if self.srConfig.USE_ANIDB:
try:
self.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=lambda msg: self.srLogger.debug(
"AniDB: %s " % msg)).auth(self.srConfig.ANIDB_USERNAME, self.srConfig.ANIDB_PASSWORD)
except Exception as e:
self.srLogger.warning("AniDB exception msg: %r " % repr(e))
if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
self.srConfig.WEB_PORT = 8081
if not self.srConfig.WEB_COOKIE_SECRET:
self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()
# attempt to help prevent users from breaking links by using a bad url
if not self.srConfig.ANON_REDIRECT.endswith('?'):
self.srConfig.ANON_REDIRECT = ''
if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
self.srConfig.ROOT_DIRS = ''
self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
self.srConfig.NZB_METHOD = 'blackhole'
if self.srConfig.TORRENT_METHOD not in ('blackhole',
'utorrent',
'transmission',
'deluge',
'deluged',
'download_station',
'rtorrent',
'qbittorrent',
'mlnet',
'putio'): self.srConfig.TORRENT_METHOD = 'blackhole'
if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'
if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ
if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ
if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ
self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ
if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ
if self.srConfig.SHOWUPDATE_HOUR > 23:
self.srConfig.SHOWUPDATE_HOUR = 0
elif self.srConfig.SHOWUPDATE_HOUR < 0:
self.srConfig.SHOWUPDATE_HOUR = 0
if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ
self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ
if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
self.srConfig.SUBTITLES_LANGUAGES = []
# initialize metadata_providers
for cur_metadata_tuple in [(self.srConfig.METADATA_KODI, kodi),
(self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
(self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
(self.srConfig.METADATA_PS3, ps3),
(self.srConfig.METADATA_WDTV, wdtv),
(self.srConfig.METADATA_TIVO, tivo),
(self.srConfig.METADATA_MEDE8ER, mede8er)]:
(cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
tmp_provider = cur_metadata_class.metadata_class()
tmp_provider.set_config(cur_metadata_config)
self.metadataProviderDict[tmp_provider.name] = tmp_provider
# add version checker job
self.srScheduler.add_job(
self.VERSIONUPDATER.run,
srIntervalTrigger(
**{'hours': self.srConfig.VERSION_UPDATER_FREQ, 'min': self.srConfig.MIN_VERSION_UPDATER_FREQ}),
name="VERSIONUPDATER",
id="VERSIONUPDATER"
)
# add network timezones updater job
self.srScheduler.add_job(
update_network_dict,
srIntervalTrigger(**{'days': 1}),
name="TZUPDATER",
id="TZUPDATER"
)
# add namecache updater job
self.srScheduler.add_job(
self.NAMECACHE.run,
srIntervalTrigger(
**{'minutes': self.srConfig.NAMECACHE_FREQ, 'min': self.srConfig.MIN_NAMECACHE_FREQ}),
name="NAMECACHE",
id="NAMECACHE"
)
# add show updater job
self.srScheduler.add_job(
self.SHOWUPDATER.run,
srIntervalTrigger(
**{'hours': 1,
'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)}),
name="SHOWUPDATER",
id="SHOWUPDATER"
)
# add daily search job
self.srScheduler.add_job(
self.DAILYSEARCHER.run,
srIntervalTrigger(
**{'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ,
'start_date': datetime.datetime.now() + datetime.timedelta(minutes=4)}),
name="DAILYSEARCHER",
id="DAILYSEARCHER"
)
# add backlog search job
self.srScheduler.add_job(
self.BACKLOGSEARCHER.run,
srIntervalTrigger(
**{'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ,
'start_date': datetime.datetime.now() + datetime.timedelta(minutes=30)}),
name="BACKLOG",
id="BACKLOG"
)
# add auto-postprocessing job
self.srScheduler.add_job(
self.AUTOPOSTPROCESSOR.run,
srIntervalTrigger(**{'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
name="POSTPROCESSOR",
id="POSTPROCESSOR"
)
# add find proper job
self.srScheduler.add_job(
self.PROPERSEARCHER.run,
srIntervalTrigger(**{
'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[
self.srConfig.PROPER_SEARCHER_INTERVAL]}),
name="PROPERSEARCHER",
id="PROPERSEARCHER"
)
# add trakt.tv checker job
self.srScheduler.add_job(
self.TRAKTSEARCHER.run,
srIntervalTrigger(**{'hours': 1}),
name="TRAKTSEARCHER",
id="TRAKTSEARCHER"
)
# add subtitles finder job
self.srScheduler.add_job(
self.SUBTITLESEARCHER.run,
srIntervalTrigger(**{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
name="SUBTITLESEARCHER",
id="SUBTITLESEARCHER"
)
# start scheduler service
self.srScheduler.start()
# Pause/Resume PROPERSEARCHER job
(self.srScheduler.get_job('PROPERSEARCHER').pause,
self.srScheduler.get_job('PROPERSEARCHER').resume
)[self.srConfig.DOWNLOAD_PROPERS]()
# Pause/Resume TRAKTSEARCHER job
(self.srScheduler.get_job('TRAKTSEARCHER').pause,
self.srScheduler.get_job('TRAKTSEARCHER').resume
)[self.srConfig.USE_TRAKT]()
# Pause/Resume SUBTITLESEARCHER job
(self.srScheduler.get_job('SUBTITLESEARCHER').pause,
self.srScheduler.get_job('SUBTITLESEARCHER').resume
)[self.srConfig.USE_SUBTITLES]()
# Pause/Resume POSTPROCESS job
(self.srScheduler.get_job('POSTPROCESSOR').pause,
self.srScheduler.get_job('POSTPROCESSOR').resume
)[self.srConfig.PROCESS_AUTOMATICALLY]()
# start queue's
self.SEARCHQUEUE.start()
self.SHOWQUEUE.start()
# start webserver
self.srWebServer.start()
# start ioloop event handler
self.io_loop.start()
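In the cache-cleanup loop near the top, each subfolder is removed with ignore_errors=True, which already swallows every OSError, so the surrounding try/except can only catch something unrelated. The loop reduces to this sketch (clear_cache is illustrative):

import os
import shutil

def clear_cache(cache_dir, subfolders=('mako', 'sessions', 'indexers')):
    for folder in subfolders:
        # ignore_errors=True makes a try/except around this call redundant.
        shutil.rmtree(os.path.join(cache_dir, folder), ignore_errors=True)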
0
Example 85
Project: dcos Source File: __init__.py
def activate(self, packages):
# Ensure the new set is reasonable.
validate_compatible(packages, self.__roles)
# Build the absolute paths for the running config, new config location,
# and where to archive the config.
active_names = self.get_active_names()
active_dirs = list(map(self._make_abs, self.__well_known_dirs + ["active"]))
new_names = [name + ".new" for name in active_names]
new_dirs = [name + ".new" for name in active_dirs]
old_names = [name + ".old" for name in active_names]
# Remove all pre-existing new and old directories
for name in chain(new_names, old_names):
if os.path.exists(name):
if os.path.isdir(name):
shutil.rmtree(name)
else:
os.remove(name)
# Make the directories for the new config
for name in new_dirs:
os.makedirs(name)
def symlink_all(src, dest):
if not os.path.isdir(src):
return
symlink_tree(src, dest)
# Set the new LD_LIBRARY_PATH, PATH.
env_contents = env_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
env_export_contents = env_export_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
active_buildinfo_full = {}
dcos_service_configuration = self._get_dcos_configuration_template()
# Building up the set of users
sysusers = UserManagement(self.__manage_users, self.__add_users)
def _get_service_files(_dir):
service_files = []
for root, directories, filenames in os.walk(_dir):
for filename in filter(lambda name: name.endswith(".service"), filenames):
service_files.append(os.path.join(root, filename))
return service_files
def _get_service_names(_dir):
service_files = list(map(os.path.basename, _get_service_files(_dir)))
if not service_files:
return []
return list(map(lambda name: os.path.splitext(name)[0], service_files))
# Add the folders, config in each package.
for package in packages:
# Package folders
# NOTE: Since active is at the end of the folder list it will be
# removed by the zip. This is the desired behavior, since it will be
# populated later.
# Do the basename since some well known dirs are full paths (dcos.target.wants)
# while inside the packages they are always top level directories.
for new, dir_name in zip(new_dirs, self.__well_known_dirs):
dir_name = os.path.basename(dir_name)
pkg_dir = os.path.join(package.path, dir_name)
assert os.path.isabs(new)
assert os.path.isabs(pkg_dir)
try:
symlink_all(pkg_dir, new)
# Symlink all applicable role-based config
for role in self.__roles:
role_dir = os.path.join(package.path, "{0}_{1}".format(dir_name, role))
symlink_all(role_dir, new)
except ConflictingFile as ex:
raise ValidationError("Two packages are trying to install the same file {0} or "
"two roles in the set of roles {1} are causing a package "
"to try activating multiple versions of the same file. "
"One of the package files is {2}.".format(ex.dest,
self.__roles,
ex.src))
# Add to the active folder
os.symlink(package.path, os.path.join(self._make_abs("active.new"), package.name))
# Add to the environment and environment.export contents
env_contents += "# package: {0}\n".format(package.id)
env_export_contents += "# package: {0}\n".format(package.id)
for k, v in package.environment.items():
env_contents += "{0}={1}\n".format(k, v)
env_export_contents += "export {0}={1}\n".format(k, v)
env_contents += "\n"
env_export_contents += "\n"
# Add to the buildinfo
try:
active_buildinfo_full[package.name] = load_json(os.path.join(package.path, "buildinfo.full.json"))
except FileNotFoundError:
# TODO(cmaloney): These only come from setup-packages. Should update
# setup-packages to add a buildinfo.full for those packages
active_buildinfo_full[package.name] = None
# NOTE: It is critical the state dir, the package name and the user name are all the
# same. Otherwise on upgrades we might remove access to a files by changing their chown
# to something incompatible. We survive the first upgrade because everything goes from
# root to specific users, and root can access all user files.
if package.username is not None:
sysusers.add_user(package.username, package.group)
# Ensure the state directory in `/var/lib/dcos` exists
# TODO(cmaloney): On upgrade take a snapshot?
if self.__manage_state_dir:
state_dir_path = '/var/lib/dcos/{}'.format(package.name)
if package.state_directory:
check_call(['mkdir', '-p', state_dir_path])
if package.username:
uid = sysusers.get_uid(package.username)
check_call(['chown', '-R', str(uid), state_dir_path])
if package.sysctl:
service_names = _get_service_names(package.path)
if not service_names:
raise ValueError("service name required for sysctl could not be determined for {package}".format(
package=package.id))
for service in service_names:
if service in package.sysctl:
dcos_service_configuration["sysctl"][service] = package.sysctl[service]
dcos_service_configuration_file = os.path.join(self._make_abs("etc.new"), DCOS_SERVICE_CONFIGURATION_FILE)
write_json(dcos_service_configuration_file, dcos_service_configuration)
# Write out the new environment file.
new_env = self._make_abs("environment.new")
write_string(new_env, env_contents)
# Write out the new environment.export file
new_env_export = self._make_abs("environment.export.new")
write_string(new_env_export, env_export_contents)
# Write out the buildinfo of every active package
new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
write_json(new_buildinfo_meta, active_buildinfo_full)
self.swap_active(".new")
0
Example 86
Project: pth-toolkit Source File: backend.py
def provision(self):
from samba.provision import ProvisioningError, setup_path
if self.ldap_backend_extra_port is not None:
serverport = "ServerPort=%d" % self.ldap_backend_extra_port
else:
serverport = ""
setup_file(setup_path("fedorads.inf"), self.fedoradsinf,
{"ROOT": self.root,
"HOSTNAME": self.hostname,
"DNSDOMAIN": self.names.dnsdomain,
"LDAPDIR": self.ldapdir,
"DOMAINDN": self.names.domaindn,
"LDAP_INSTANCE": self.ldap_instance,
"LDAPMANAGERDN": self.names.ldapmanagerdn,
"LDAPMANAGERPASS": self.ldapadminpass,
"SERVERPORT": serverport})
setup_file(setup_path("fedorads-partitions.ldif"),
self.partitions_ldif,
{"CONFIGDN": self.names.configdn,
"SCHEMADN": self.names.schemadn,
"SAMBADN": self.sambadn,
})
setup_file(setup_path("fedorads-sasl.ldif"), self.sasl_ldif,
{"SAMBADN": self.sambadn,
})
setup_file(setup_path("fedorads-dna.ldif"), self.dna_ldif,
{"DOMAINDN": self.names.domaindn,
"SAMBADN": self.sambadn,
"DOMAINSID": str(self.domainsid),
})
setup_file(setup_path("fedorads-pam.ldif"), self.pam_ldif)
lnkattr = self.schema.linked_attributes()
f = open(setup_path("fedorads-refint-delete.ldif"), 'r')
try:
refint_config = f.read()
finally:
f.close()
memberof_config = ""
index_config = ""
argnum = 3
for attr in lnkattr.keys():
if lnkattr[attr] is not None:
refint_config += read_and_sub_file(
setup_path("fedorads-refint-add.ldif"),
{ "ARG_NUMBER" : str(argnum),
"LINK_ATTR" : attr })
memberof_config += read_and_sub_file(
setup_path("fedorads-linked-attributes.ldif"),
{ "MEMBER_ATTR" : attr,
"MEMBEROF_ATTR" : lnkattr[attr] })
index_config += read_and_sub_file(
setup_path("fedorads-index.ldif"), { "ATTR" : attr })
argnum += 1
f = open(self.refint_ldif, 'w')
try:
f.write(refint_config)
finally:
f.close()
f = open(self.linked_attrs_ldif, 'w')
try:
f.write(memberof_config)
finally:
f.close()
attrs = ["lDAPDisplayName"]
res = self.schema.ldb.search(expression="(&(objectclass=attributeSchema)(searchFlags:1.2.840.113556.1.4.803:=1))", base=self.names.schemadn, scope=SCOPE_ONELEVEL, attrs=attrs)
for i in range (0, len(res)):
attr = res[i]["lDAPDisplayName"][0]
if attr == "objectGUID":
attr = "nsUniqueId"
index_config += read_and_sub_file(
setup_path("fedorads-index.ldif"), { "ATTR" : attr })
f = open(self.index_ldif, 'w')
try:
f.write(index_config)
finally:
f.close()
setup_file(setup_path("fedorads-samba.ldif"), self.samba_ldif, {
"SAMBADN": self.sambadn,
"LDAPADMINPASS": self.ldapadminpass
})
mapping = "schema-map-fedora-ds-1.0"
backend_schema = "99_ad.ldif"
# Build a schema file in Fedora DS format
f = open(setup_path(mapping), 'r')
try:
backend_schema_data = self.schema.convert_to_openldap("fedora-ds",
f.read())
finally:
f.close()
assert backend_schema_data is not None
f = open(os.path.join(self.ldapdir, backend_schema), 'w')
try:
f.write(backend_schema_data)
finally:
f.close()
self.credentials.set_bind_dn(self.names.ldapmanagerdn)
# Destroy the target directory, or else setup-ds.pl will complain
fedora_ds_dir = os.path.join(self.ldapdir,
"slapd-" + self.ldap_instance)
shutil.rmtree(fedora_ds_dir, True)
self.slapd_provision_command = [self.slapd_path, "-D", fedora_ds_dir,
"-i", self.slapd_pid]
# In the 'provision' command line, stay in the foreground so we can
# easily kill it
self.slapd_provision_command.append("-d0")
# the command for the final run is the normal script
self.slapd_command = [os.path.join(self.ldapdir,
"slapd-" + self.ldap_instance, "start-slapd")]
# If we were just looking for crashes up to this point, it's a
# good time to exit before we realise we don't have Fedora DS on this system
if self.ldap_dryrun_mode:
sys.exit(0)
# Try to print helpful messages when the user has not specified the
# path to the setup-ds tool
if self.setup_ds_path is None:
raise ProvisioningError("Fedora DS LDAP-Backend must be setup with path to setup-ds, e.g. --setup-ds-path=\"/usr/sbin/setup-ds.pl\"!")
if not os.path.exists(self.setup_ds_path):
self.logger.warning("Path (%s) to slapd does not exist!",
self.setup_ds_path)
# Run the Fedora DS setup utility
retcode = subprocess.call([self.setup_ds_path, "--silent", "--file",
self.fedoradsinf], close_fds=True, shell=False)
if retcode != 0:
raise ProvisioningError("setup-ds failed")
# Load samba-admin
retcode = subprocess.call([
os.path.join(self.ldapdir, "slapd-" + self.ldap_instance, "ldif2db"), "-s", self.sambadn, "-i", self.samba_ldif],
close_fds=True, shell=False)
if retcode != 0:
raise ProvisioningError("ldif2db failed")
0
Example 87
Project: gitian-builder Source File: gitian_updater.py
def run():
full_prog = sys.argv[0]
prog = os.path.basename(full_prog)
parser = argparse.ArgumentParser(description='Download and verify a gitian package')
parser.add_argument('-u', '--url', metavar='URL', type=str, nargs='+', required=False,
help='one or more URLs where the package can be found')
parser.add_argument('-c', '--config', metavar='CONF', type=str, required=not have_injected_config,
help='a configuration file')
parser.add_argument('-d', '--dest', metavar='DEST', type=str, required=False,
help='the destination directory for unpacking')
parser.add_argument('-q', '--quiet', action='append_const', const=1, default=[], help='be quiet')
parser.add_argument('-f', '--force', action='store_true', help='force downgrades and such')
parser.add_argument('-n', '--dryrun', action='store_true', help='do not actually copy to destination')
parser.add_argument('-m', '--customize', metavar='OUTPUT', type=str, help='generate a customized version of the script with the given config')
parser.add_argument('-w', '--wait', type=float, metavar='HOURS', help='observe a waiting period or use zero for no waiting')
parser.add_argument('-g', '--gpg', metavar='GPG', type=str, help='path to GnuPG')
parser.add_argument('-p', '--post', metavar='COMMAND', type=str, help='Run after a successful install')
args = parser.parse_args()
quiet = len(args.quiet)
if args.config:
f = file(args.config, 'r')
if args.customize:
s = file(full_prog, 'r')
script = s.read()
s.close()
config = f.read()
script = script.replace(inject_config_string, config)
s = file(args.customize, 'w')
s.write(script)
s.close()
os.chmod(args.customize, 0750)
sys.exit(0)
config = yaml.safe_load(f)
f.close()
else:
config = yaml.safe_load(injected_config)
dest_path = args.dest
if not dest_path:
parser.error('argument -d/--dest is required unless -m is specified')
if args.wait is not None:
config['waiting_period'] = args.wait
gpg_path = args.gpg
if not gpg_path:
gpg_path = 'gpg'
rsses = []
if args.url:
urls = [{ 'url' : url, 'version_url' : None} for url in args.url]
else:
urls = config.get('urls')
if not urls:
parser.error('argument -u/--url is required since config does not specify it')
if config.has_key('rss'):
rsses = config['rss']
# TODO: rss, atom, etc.
old_manifest = None
if path.exists(dest_path):
files = os.listdir(dest_path)
if path.dirname(full_prog) == dest_path:
files.remove(prog)
if not files.count('.gitian-manifest') and len(files) > 0:
print>>sys.stderr, "destination already exists, no .gitian-manifest and directory not empty. Please empty destination."
sys.exit(1)
f = file(os.path.join(dest_path,'.gitian-manifest'), 'r')
old_manifest = yaml.load(f, OrderedDictYAMLLoader)
f.close()
if config.get('waiting_period', 0) > 0:
waiting_file = path.join(dest_path, '.gitian-waiting')
if path.exists(waiting_file):
f = file(waiting_file, 'r')
waiting = yaml.load(f)
f.close()
wait_start = waiting['time']
out_manifest = waiting['out_manifest']
waiting_path = waiting['waiting_path']
wait_time = wait_start + config['waiting_period'] * 3600 - time.time()
if wait_time > 0:
print>>sys.stderr, "Waiting another %.2f hours before applying update in %s"%(wait_time / 3600, waiting_path)
sys.exit(100)
os.remove(waiting_file)
if args.dryrun:
print>>sys.stderr, "Dry run, not copying"
else:
copy_to_destination(path.join(waiting_path, 'unpack'), dest_path, out_manifest, old_manifest)
if args.post:
os.system(args.post)
if quiet == 0:
print>>sys.stderr, "Copied from waiting area to destination"
shutil.rmtree(waiting_path)
sys.exit(0)
temp_dir = tempfile.mkdtemp('', prog)
atexit.register(remove_temp, temp_dir)
package_file = path.join(temp_dir, 'package')
downloaded = False
checked = False
if rsses:
import libxml2
for rss in rsses:
try:
feed = libxml2.parseDoc(urllib2.urlopen(rss['url']).read())
url = None
release = None
# Find the first matching node
for node in feed.xpathEval(rss['xpath']):
m = re.search(rss['pattern'], str(node))
if m:
if len(m.groups()) > 0:
release = m.group(1)
url = str(node)
break
# Make sure it's a new release
if old_manifest and release == old_manifest['release'] and not args.force:
checked = True
else:
try:
download(url, package_file)
downloaded = True
break
except:
print>>sys.stderr, "could not download from %s, trying next rss"%(url)
pass
except:
print>>sys.stderr, "could read not from rss %s"%(rss)
pass
if not downloaded:
for url in urls:
try:
release = None
if url['version_url']:
f = urllib2.urlopen(url['version_url'])
release = f.read(100).strip()
f.close()
if old_manifest and release == old_manifest['release'] and not args.force:
checked = True
else:
download(url['url'], package_file)
downloaded = True
except:
print>>sys.stderr, "could not download from %s, trying next url"%(url)
raise
if not downloaded:
if checked:
if quiet == 0:
print>>sys.stderr, "same release, not downloading"
else:
print>>sys.stderr, "out of places to try downloading from, try later"
sys.exit(2)
unpack_dir = path.join(temp_dir, 'unpack')
files = extract(unpack_dir, package_file)
import_keys(gpg_path, temp_dir, config)
(success, assertions, out_manifest) = get_assertions(gpg_path, temp_dir, unpack_dir, files)
if old_manifest:
check_name_and_version(out_manifest, old_manifest)
if not success and quiet <= 1:
print>>sys.stderr, "There were errors getting assertions"
total_weight = check_assertions(config, assertions)
if total_weight is None:
print>>sys.stderr, "There were errors checking assertions, build is untrusted, aborting"
sys.exit(5)
if quiet == 0:
print>>sys.stderr, "Successful with signature weight %d"%(total_weight)
if config.get('waiting_period', 0) > 0 and path.exists(dest_path):
waiting_path = tempfile.mkdtemp('', prog)
shutil.copytree(unpack_dir, path.join(waiting_path, 'unpack'))
f = file(path.join(dest_path, '.gitian-waiting'), 'w')
yaml.dump({'time': time.time(), 'out_manifest': out_manifest, 'waiting_path': waiting_path}, f)
f.close()
if quiet == 0:
print>>sys.stderr, "Started waiting period"
else:
if args.dryrun:
print>>sys.stderr, "Dry run, not copying"
else:
copy_to_destination(unpack_dir, dest_path, out_manifest, old_manifest)
if args.post:
os.system(args.post)
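Rather than wrapping its many sys.exit calls in try/finally, this updater registers cleanup with atexit right after creating the download area, so the scratch directory is removed on any normal interpreter exit. A minimal sketch of that registration, assuming remove_temp is a small rmtree wrapper (its body is not shown in this example):

import atexit
import shutil
import tempfile

def remove_temp(path):
    # Best-effort: the interpreter is shutting down, so never raise.
    shutil.rmtree(path, ignore_errors=True)

temp_dir = tempfile.mkdtemp('', 'updater')
atexit.register(remove_temp, temp_dir)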
0
Example 88
Project: deep_recommend_system Source File: local_cli_wrapper.py
def on_run_end(self, request):
"""Overrides on-run-end callback.
Actions taken:
1) Load the debug dump.
2) Bring up the Analyzer CLI.
Args:
request: An instance of OnRunEndRequest.
Returns:
An instance of OnRunEndResponse.
"""
if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
partition_graphs = None
if request.run_metadata and request.run_metadata.partition_graphs:
partition_graphs = request.run_metadata.partition_graphs
elif request.client_graph_def:
partition_graphs = [request.client_graph_def]
debug_dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=partition_graphs)
if request.tf_error:
op_name = request.tf_error.op.name
# Prepare help introduction for the TensorFlow error that occurred
# during the run.
help_intro = [
"--------------------------------------",
"!!! An error occurred during the run !!!",
"",
" * Use command \"ni %s\" to see the information about the "
"failing op." % op_name,
" * Use command \"li -r %s\" to see the inputs to the "
"failing op." % op_name,
" * Use command \"lt\" to view the dumped tensors.",
"",
"Op name: " + op_name,
"Error type: " + str(type(request.tf_error)),
"",
"Details:",
str(request.tf_error),
"",
"WARNING: Using client GraphDef due to the error, instead of "
"executor GraphDefs.",
"--------------------------------------",
"",
]
init_command = "help"
title_color = "red"
else:
help_intro = None
init_command = "lt"
title_color = "green"
if self._run_till_filter_pass:
if not debug_dump.find(
self._tensor_filters[self._run_till_filter_pass], first_n=1):
# No dumped tensor passes the filter in this run. Clean up the dump
# directory and move on.
shutil.rmtree(self._dump_root)
return framework.OnRunEndResponse()
else:
# Some dumped tensor(s) from this run passed the filter.
init_command = "lt -f %s" % self._run_till_filter_pass
title_color = "red"
self._run_till_filter_pass = None
analyzer = analyzer_cli.DebugAnalyzer(debug_dump)
# Supply all the available tensor filters.
for filter_name in self._tensor_filters:
analyzer.add_tensor_filter(filter_name,
self._tensor_filters[filter_name])
run_end_cli = curses_ui.CursesUI()
run_end_cli.register_command_handler(
"list_tensors",
analyzer.list_tensors,
analyzer.get_help("list_tensors"),
prefix_aliases=["lt"])
run_end_cli.register_command_handler(
"node_info",
analyzer.node_info,
analyzer.get_help("node_info"),
prefix_aliases=["ni"])
run_end_cli.register_command_handler(
"list_inputs",
analyzer.list_inputs,
analyzer.get_help("list_inputs"),
prefix_aliases=["li"])
run_end_cli.register_command_handler(
"list_outputs",
analyzer.list_outputs,
analyzer.get_help("list_outputs"),
prefix_aliases=["lo"])
run_end_cli.register_command_handler(
"print_tensor",
analyzer.print_tensor,
analyzer.get_help("print_tensor"),
prefix_aliases=["pt"])
run_end_cli.register_command_handler(
"run",
self._run_end_run_command_handler,
"Helper command for incorrectly entered run command at the run-end "
"prompt.",
prefix_aliases=["r"]
)
# Get names of all dumped tensors.
dumped_tensor_names = []
for datum in debug_dump.dumped_tensor_data:
dumped_tensor_names.append("%s:%d" %
(datum.node_name, datum.output_slot))
# Tab completions for command "print_tensors".
run_end_cli.register_tab_comp_context(["print_tensor", "pt"],
dumped_tensor_names)
# Tab completion for commands "node_info", "list_inputs" and
# "list_outputs". The list comprehension is used below because nodes()
# output can be unicodes and they need to be converted to strs.
run_end_cli.register_tab_comp_context(
["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
[str(node_name) for node_name in debug_dump.nodes()])
# TODO(cais): Reduce API surface area for aliases vis-a-vis tab
# completion contexts and registered command handlers.
title = "run-end: " + self._run_description
run_end_cli.set_help_intro(help_intro)
run_end_cli.run_ui(
init_command=init_command, title=title, title_color=title_color)
# Clean up the dump directory.
shutil.rmtree(self._dump_root)
else:
print("No debug information to show following a non-debug run() call.")
# Return placeholder response that currently holds no additional
# information.
return framework.OnRunEndResponse()
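Both rmtree calls in this example implement the same discard-after-inspection flow: the debug-dump directory is deleted once it has served its purpose, whether because no tensor passed the run-till filter or because the CLI session ended. A minimal sketch of that flow, assuming hypothetical produce_dump and passes_filter callables (not part of tfdbg):
import os
import shutil
import tempfile

def inspect_and_discard(produce_dump, passes_filter):
    """Create a dump dir, inspect it, and always remove it afterwards."""
    dump_root = tempfile.mkdtemp(prefix="debug_dump_")
    try:
        produce_dump(dump_root)           # hypothetical: writes dump files
        if not passes_filter(dump_root):  # hypothetical: like find(..., first_n=1)
            return None                   # nothing of interest; caller moves on
        return os.listdir(dump_root)      # stand-in for launching the analyzer UI
    finally:
        shutil.rmtree(dump_root)          # the dump directory is removed either way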
0
Example 89
Project: ges Source File: ges.py
def assisted_start(options):
_help = r'''
ges.py - Git Enablement Server v1.1
Note only the folder that contains folders and object that you normally see
in .git folder is considered a "repo folder." This means that either a
"bare" folder name or a working folder's ".git" folder will be a "repo" folder
discussed in the examples below.
This server automatically creates "bare" repo folders on push.
Note, the folder does NOT have to have ".git" in the name to be a "repo" folder.
You can name bare repo folders whatever you like. If the signature (right files
and folders are found inside) matches a typical git repo, it's a "repo."
Options:
--content_path (Defaults to random temp folder)
Serving contents of folder path passed in. Accepts relative paths,
including things like "./../", and resolves them against the current path.
(If you set this to actual .git folder, you don't need to specify the
folder's name on URI as the git repo will be served at the root level
of the URI.)
If not specified, a random, temp folder is created in the OS-specific
temporary storage path. This folder will NOT be deleted after
server exits unless the switch "--remove_temp" is used.
--remove_temp (Defaults to False)
When --content_path is not specified, this server will create a folder
in a temporary file storage location that is OS-specific and will NOT
remove it after the server shuts down.
This switch, if included on command line, enables automatic removal of
the created folder and all of its contents.
--uri_marker (Defaults to '')
Acts as a "virtual folder" - separator between decorative URI portion
and the actual (relative to path_prefix) path that will be appended
to path_prefix and used for pulling an actual file.
The URI does not have to start with the contents of repo_uri_marker. It can
be preceded by any number of "virtual" folders.
For --repo_uri_marker 'my' all of these will take you to the same repo:
http://localhost/my/HEAD
http://localhost/admysf/mylar/zxmy/my/HEAD
If you are using reverse proxy server, pick the virtual, decorative URI
prefix / path of your choice. This handler will cut and rebase the URI.
Default of '' means that no cutting marker is used, and whole URI after
FQDN is used to find file relative to path_prefix.
--port (Defaults to 8888)
--demo (Defaults to False)
You do not have to provide any arguments for this option. It's a switch.
If "--demo" is part of the command-line options, a sample tree of folders
with some repos will be extracted into the folder specified as content_path.
If --content_path was not specified (we use temp folder) and "--demo"
switch is present, we assume --remove_temp is on.
Examples:
ges.py
(no arguments)
A random temp folder is created on the file system and now behaves as the
root of the served git repos folder tree.
ges.py --demo
This server is shipped with a small demo tree of Git repositories. This
command deploys that tree into a temp folder and deletes that temp folder
after the server is shut down.
ges.py --content_path "~/somepath/repofolder" --uri_marker "myrepo"
Will serve chosen repo folder as http://localhost/myrepo/ or
http://localhost:8888/does/not/matter/what/you/type/here/myrepo/
This "repo uri marker" is useful for making a repo server appear as part of
a server applications structure while serving from behind a reverse proxy.
cd c:\myproject_workingfolder\.git
ges.py --port 80 --content_path '.'
This project's repo will be the one and only repo served, directly over
http://localhost/
'''
# options = dict([
# ['content_path',None],
# ['static_content_path', None],
# ['uri_marker',''],
# ['port', None],
# ['devel', False],
# ['demo',False],
# ['remove_temp',False]
# ])
# let's decide what port to serve on.
port = options['port']
if not port:
import socket
# let's see if we can reuse our preferred default of 8888
s = socket.socket()
try:
s.bind(('',8888))
ip, port = s.getsockname()
except:
pass
s.close()
del s
if not port:
# looks like our default of 8888 is already occupied.
# taking next available port.
s = socket.socket()
s.bind(('',0))
ip, port = s.getsockname()
s.close()
del s
options['port'] = port
# next we determine if the static server contents folder is visible to us.
if not options['static_content_path'] or not os.path.isfile(
os.path.join(
options['static_content_path'],
'static',
'favicon.ico'
)):
if sys.path[0] and os.path.isfile(os.path.join(sys.path[0],'static','favicon.ico')):
options['static_content_path'] = os.path.join(sys.path[0],'static')
else:
raise Exception('G.E.S.: Specified static content directory - "%s" - does not contain expected files. Please, provide correct "static_content_path" variable value.' % options['static_content_path'])
# now we pick a random temp folder for Git folders tree if none were specified.
if options['content_path']:
CONTENT_PATH_IS_TEMP = False
else:
import tempfile
import shutil
CONTENT_PATH_IS_TEMP = True
options['content_path'] = tempfile.mkdtemp()
if options['demo']:
import zipfile
demo_repos_zip = os.path.join(sys.path[0],'test','sample_tree_of_repos_v2.zip')
try:
zipfile.ZipFile(demo_repos_zip).extractall(options['content_path'])
except:
pass
if 'help' in options:
print _help
else:
app = assemble_ges_app(**options)
import wsgiserver
httpd = wsgiserver.CherryPyWSGIServer(('0.0.0.0',int(options['port'])),app)
if options['uri_marker']:
_s = '"/%s/".' % options['uri_marker']
example_URI = '''http://localhost:%s/whatever/you/want/here/%s/myrepo.git
(Note: "whatever/you/want/here" cannot include the "/%s/" segment)''' % (
options['port'],
options['uri_marker'],
options['uri_marker'])
else:
_s = 'not chosen.'
example_URI = 'http://localhost:%s/' % (options['port'])
print '''
===========================================================================
Run this command with "--help" option to see available command-line options
Chosen repo folders' base file system path:
%s
Starting GES server on port %s
URI segment indicating start of git repo folder name is %s
Application URI:
%s
Use Keyboard Interrupt key combination (usually CTRL+C) to stop the server
===========================================================================
''' % (os.path.abspath(options['content_path']),
options['port'],
_s,
example_URI)
# running with CherryPy's WSGI Server
try:
httpd.start()
except KeyboardInterrupt:
pass
finally:
httpd.stop()
if (CONTENT_PATH_IS_TEMP and options['remove_temp']) or (CONTENT_PATH_IS_TEMP and options['demo']):
shutil.rmtree(options['content_path'], True)
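The call above passes True as the second positional argument, i.e. shutil.rmtree(path, ignore_errors=True), so a missing or half-deleted temp tree cannot raise during shutdown. A self-contained sketch of the same create-serve-remove lifecycle, where serve_from is a hypothetical stand-in for the blocking server loop:
import shutil
import tempfile

def run_with_temp_content(serve_from, remove_temp=True):
    content_path = tempfile.mkdtemp()  # OS-specific temp storage, as above
    try:
        serve_from(content_path)       # hypothetical: blocks until the server stops
    finally:
        if remove_temp:
            # ignore_errors=True mirrors rmtree(path, True) above: cleanup
            # problems never mask the shutdown path.
            shutil.rmtree(content_path, ignore_errors=True)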
0
Example 90
Project: cgat Source File: psl2wiggle.py
def main(argv=sys.argv):
parser = E.OptionParser(
version="%prog version: $Id: psl2wiggle.py 2834 2009-11-24 16:11:23Z andreas $", usage=globals()["__doc__"])
parser.add_option("-g", "--genome-file", dest="genome_file", type="string",
help="filename with genome [default=%default].")
parser.add_option("-b", "--output-filename-pattern", dest="output_filename", type="string",
help="filename for output [default=%default]")
parser.add_option("-o", "--output-format", dest="output_format", type="choice",
choices=("bedgraph", "wiggle", "bigbed", "bigwig"),
help="output format [default=%default]")
parser.set_defaults(genome_file=None,
typecode=numpy.int16,
output_filename=None,
output_format="wiggle",
test=None)
(options, args) = E.Start(parser, add_pipe_options=True)
typecode = options.typecode
if options.genome_file:
fasta = IndexedFasta.IndexedFasta(options.genome_file)
counts = {}
contig_sizes = fasta.getContigSizes(with_synonyms=False)
E.info("allocating memory for %i contigs and %i bytes" %
(len(contig_sizes), sum(contig_sizes.values()) * typecode().itemsize))
for contig, size in list(contig_sizes.items()):
E.debug("allocating %s: %i bases" % (contig, size))
counts[contig] = numpy.zeros(size, typecode)
E.info("allocated memory for %i contigs" % len(fasta))
else:
fasta = None
contig_sizes = {}
if options.output_format in ("bigwig", "bigbed"):
if not options.genome_file:
raise ValueError(
"please supply genome file for bigwig/bigbed computation.")
if not options.output_filename:
raise ValueError(
"please output file for bigwig/bigbed computation.")
if options.output_format == "bigwig":
executable_name = "wigToBigWig"
elif options.output_format == "bigbed":
executable_name = "bedToBigBed"
else:
raise ValueError("unknown output format `%s`" %
options.output_format)
executable = IOTools.which(executable_name)
if not executable:
raise OSError("could not find %s in path." % executable_name)
tmpdir = tempfile.mkdtemp()
E.debug("temporary files are in %s" % tmpdir)
tmpfile_wig = os.path.join(tmpdir, "wig")
tmpfile_sizes = os.path.join(tmpdir, "sizes")
# write contig sizes
outfile_size = IOTools.openFile(tmpfile_sizes, "w")
for contig, size in list(contig_sizes.items()):
outfile_size.write("%s\t%s\n" % (contig, size))
outfile_size.close()
outfile = IOTools.openFile(tmpfile_wig, "w")
else:
outfile = options.stdout
iterator = Blat.BlatIterator(sys.stdin)
ninput, ncontigs, nskipped = 0, 0, 0
E.info("started counting")
while 1:
if options.test and ninput >= options.test:
break
match = next(iterator)
if match is None:
break
ninput += 1
contig = match.mSbjctId
for start, length in zip(match.mSbjctBlockStarts, match.mBlockSizes):
counts[contig][start:start + length] += 1
E.info("finished counting")
if options.output_format in ("wig", "bigwig"):
E.info("starting wig output")
for contig, vals in list(counts.items()):
E.debug("output for %s" % contig)
for val, iter in itertools.groupby(enumerate(vals), lambda x: x[1]):
l = list(iter)
start, end = l[0][0], l[-1][0]
val = vals[start]
if val > 0:
outfile.write("variableStep chrom=%s span=%i\n" %
(contig, end - start + 1))
outfile.write("%i\t%i\n" % (start, val))
ncontigs += 1
elif options.output_format in ("bedgraph", "bigbed"):
E.info("starting bedgraph output")
for contig, vals in list(counts.items()):
E.debug("output for %s" % contig)
for val, iter in itertools.groupby(enumerate(vals), lambda x: x[1]):
l = list(iter)
start, end = l[0][0], l[-1][0]
val = vals[start]
if val > 0:
outfile.write("%s\t%i\t%i\t%i\n" %
(contig, start, end + 1, val))
ncontigs += 1
E.info("finished output")
if options.output_format in ("bigwig", "bigbed"):
outfile.close()
E.info("starting bigwig conversion")
try:
retcode = subprocess.call(" ".join((executable,
tmpfile_wig,
tmpfile_sizes,
os.path.abspath(options.output_filename)), ),
shell=True)
if retcode < 0:
warn("wigToBigWig terminated with signal: %i" % -retcode)
return -retcode
except OSError as msg:
warn("Error while executing bigwig: %s" % msg)
return 1
shutil.rmtree(tmpdir)
E.info("finished bigwig conversion")
E.info("ninput=%i, ncontigs=%i, nskipped=%i\n" %
(ninput, ncontigs, nskipped))
E.Stop()
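Note that shutil.rmtree(tmpdir) above runs only after a successful conversion; an exception or early return between tempfile.mkdtemp() and the rmtree leaks the intermediate files. A hedged sketch of the same stage-convert-clean pattern with the cleanup made unconditional (write_inputs is hypothetical; wigToBigWig is just the illustrative command):
import os
import shutil
import subprocess
import tempfile

def convert_via_tmpdir(write_inputs, output_filename):
    tmpdir = tempfile.mkdtemp()
    try:
        wig = os.path.join(tmpdir, "wig")
        sizes = os.path.join(tmpdir, "sizes")
        write_inputs(wig, sizes)  # hypothetical: writes the intermediate files
        subprocess.check_call(["wigToBigWig", wig, sizes, output_filename])
    finally:
        shutil.rmtree(tmpdir)  # removed on success and on failure alike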
0
Example 91
Project: akvo-rsr Source File: test_iati_export.py
def test_complete_project_export(self):
"""
Test the export of a fully filled project.
"""
# Create project
project = Project.objects.create(
title="Test project for IATI export",
subtitle="Test project for IATI export (subtitle)",
iati_activity_id="NL-KVK-1234567890-1234",
language="en",
hierarchy=1,
humanitarian=True,
default_tied_status="1",
default_flow_type="1",
default_finance_type="1",
default_aid_type="1",
collaboration_type="1",
capital_spend_percentage=100,
iati_status="1",
project_scope="1",
project_plan_summary="Project summary",
goals_overview="Goals overview",
current_status="Current status",
project_plan="Project plan",
sustainability="Sustainability",
background="Background",
target_group="Target group",
date_start_planned=datetime.date.today(),
date_start_actual=datetime.date.today(),
date_end_planned=datetime.date.today() + datetime.timedelta(days=1),
date_end_actual=datetime.date.today() + datetime.timedelta(days=1),
country_budget_vocabulary="1",
current_image=SimpleUploadedFile(
name='test_image.jpg',
content=open(self.image_path, 'rb').read(),
content_type='image/jpeg'
),
current_image_caption="Caption",
current_image_credit="Credit",
)
# Create another project
related_project = Project.objects.create(
title="Test related project for IATI export",
iati_activity_id="NL-KVK-1234567890-12345",
)
# Create partnership
Partnership.objects.create(
organisation=self.reporting_org,
project=project,
iati_organisation_role=Partnership.IATI_REPORTING_ORGANISATION,
internal_id="123"
)
# Create another partnership
Partnership.objects.create(
organisation=self.reporting_org,
project=project,
iati_organisation_role=Partnership.IATI_ACCOUNTABLE_PARTNER,
iati_activity_id="NL-KVK-Test"
)
# Add project condition
ProjectCondition.objects.create(
project=project,
type="1",
text="Test condition"
)
# Add legacy data
LegacyData.objects.create(
project=project,
name="Name",
value="Value",
iati_equivalent="IATI equivalent"
)
# Add recipient country
RecipientCountry.objects.create(
project=project,
country="NL",
percentage=100,
text="The Netherlands"
)
# Add related projects
RelatedProject.objects.create(
project=project,
related_project=related_project,
relation='1'
)
RelatedProject.objects.create(
project=project,
related_iati_id="NL-KVK-related",
relation='1'
)
RelatedProject.objects.create(
project=related_project,
related_project=project,
relation='1'
)
# Add sector
Sector.objects.create(
project=project,
sector_code="140",
vocabulary="1",
vocabulary_uri="http://akvo.org",
percentage=100,
text="WASH",
)
# Add recipient region
RecipientRegion.objects.create(
project=project,
region="100",
percentage=100,
region_vocabulary="1",
region_vocabulary_uri="http://akvo.org",
text="Some region",
)
# Add policy marker
PolicyMarker.objects.create(
project=project,
policy_marker="1",
significance="1",
vocabulary="1",
vocabulary_uri="http://akvo.org",
description="Description",
)
# Add humanitarian scope
HumanitarianScope.objects.create(
project=project,
code="1",
type="1",
vocabulary="1",
vocabulary_uri="http://akvo.org",
text="Humanitarian scope",
)
# Add country budget item
CountryBudgetItem.objects.create(
project=project,
code="1",
description="Description",
)
# Add FSS and forecast
fss = Fss.objects.create(
project=project,
extraction_date=datetime.date.today(),
priority=True,
phaseout_year=2016,
)
FssForecast.objects.create(
fss=fss,
value=1,
year=2016,
value_date=datetime.date.today(),
currency="EUR",
)
# Add budget item and label
label = BudgetItemLabel.objects.create(
label="Test"
)
BudgetItem.objects.create(
project=project,
type="1",
status="1",
period_start=datetime.date.today(),
period_end=datetime.date.today() + datetime.timedelta(days=1),
amount=1,
value_date=datetime.date.today(),
currency="EUR",
label=label,
)
# Add project contact
ProjectContact.objects.create(
project=project,
type="1",
organisation="Org",
department="Dep",
person_name="Person",
job_title="Job title",
telephone="06123",
email="[email protected]",
website="http://akvo.org",
mailing_address="Mailing address",
)
# Add planned disbursement
PlannedDisbursement.objects.create(
project=project,
type="1",
period_start=datetime.date.today(),
period_end=datetime.date.today() + datetime.timedelta(days=1),
value=1,
value_date=datetime.date.today(),
currency="EUR",
provider_organisation_activity="NL-KVK-prov",
receiver_organisation_activity="NL-KVK-rec",
provider_organisation=self.reporting_org,
receiver_organisation=self.reporting_org,
)
# Add link
Link.objects.create(
project=project,
url="http://rsr.akvo.org/",
caption="RSR",
)
# Add project documents
doc = ProjectDocument.objects.create(
project=project,
url="http://rsr.akvo.org/",
format="application/http",
title="RSR",
title_language="en",
language="en",
document_date=datetime.date.today(),
)
ProjectDocumentCategory.objects.create(
document=doc,
category="A1"
)
ProjectDocument.objects.create(
project=project,
document=SimpleUploadedFile(
name='test_image.jpg',
content=open(self.image_path, 'rb').read(),
content_type='image/jpeg'
),
)
# Add project update
ProjectUpdate.objects.create(
project=project,
user=self.user,
title="Update title",
language="en"
)
# Add project location
loc = ProjectLocation.objects.create(
location_target=project,
reference="ref",
location_reach="1",
location_code="1",
vocabulary="1",
name="Name",
description="Description",
activity_description="Activity description",
latitude=10,
longitude=10,
exactness="1",
location_class="1",
feature_designation="1"
)
AdministrativeLocation.objects.create(
location=loc,
vocabulary="1",
code="1",
level=1,
)
# Add CRS++
crs = CrsAdd.objects.create(
project=project,
loan_terms_rate1=1,
loan_terms_rate2=2,
repayment_type="1",
repayment_plan="1",
commitment_date=datetime.date.today(),
repayment_first_date=datetime.date.today(),
repayment_final_date=datetime.date.today(),
loan_status_year=2016,
loan_status_currency="EUR",
loan_status_value_date=datetime.date.today(),
interest_received=1,
principal_outstanding=1,
principal_arrears=1,
interest_arrears=1,
channel_code="1",
)
CrsAddOtherFlag.objects.create(
crs=crs,
code="1",
significance=True,
)
# Add transaction
transaction = Transaction.objects.create(
project=project,
reference="ref",
humanitarian=True,
transaction_type="1",
transaction_date=datetime.date.today(),
value=1,
currency="EUR",
value_date=datetime.date.today(),
description="Description",
disbursement_channel="1",
provider_organisation_activity="NL-KVK-prov",
receiver_organisation_activity="NL-KVK-rec",
provider_organisation=self.reporting_org,
receiver_organisation=self.reporting_org,
recipient_country="NL",
recipient_region="110",
recipient_region_vocabulary="1",
recipient_region_vocabulary_uri="http://akvo.org",
flow_type="1",
finance_type="1",
aid_type="1",
tied_status="1",
)
TransactionSector.objects.create(
transaction=transaction,
code="140",
vocabulary="1",
vocabulary_uri="http://akvo.org",
text="WASH",
)
# Add results framework
result = Result.objects.create(
project=project,
type="1",
aggregation_status=True,
title="Title",
description="Description",
)
indicator = Indicator.objects.create(
result=result,
measure="1",
ascending=True,
title="Title",
description="Description",
baseline_year=2016,
baseline_value="1",
baseline_comment="Comment"
)
IndicatorReference.objects.create(
indicator=indicator,
vocabulary="1",
reference="ref",
vocabulary_uri="http://akvo.org/",
)
period = IndicatorPeriod.objects.create(
indicator=indicator,
period_start=datetime.date.today(),
period_end=datetime.date.today() + datetime.timedelta(days=1),
target_value="1",
target_comment="Comment",
actual_value="1",
actual_comment="Comment",
)
IndicatorPeriodTargetLocation.objects.create(
period=period,
location="loc",
)
IndicatorPeriodActualLocation.objects.create(
period=period,
location="loc",
)
IndicatorPeriodTargetDimension.objects.create(
period=period,
name="Name",
value="Value",
)
IndicatorPeriodActualDimension.objects.create(
period=period,
name="Name",
value="Value",
)
# Create IATI export
iati_export = IatiExport.objects.create(
reporting_organisation=self.reporting_org,
user=self.user
)
# Add a project to the IATI export
iati_export.projects.add(project)
# Remove folder
media_root = '/var/akvo/rsr/mediaroot/'
directory = 'db/org/%s/iati/' % str(self.reporting_org.pk)
if os.path.exists(media_root + directory):
shutil.rmtree(media_root + directory)
# Run IATI export
iati_export.create_iati_file()
# In order to easily access the XML file, generate the IATI file again
tmp_iati_xml = IatiXML(iati_export.projects.all(), iati_export.version, iati_export)
iati_xml = etree.tostring(tmp_iati_xml.iati_activities)
# Perform checks on IATI export
self.assertEqual(iati_export.status, 3)
self.assertNotEqual(iati_export.iati_file, '')
# Perform checks on IATI XML file
root_test = self.assertXmlDocument(iati_xml)
self.assertXmlNode(root_test, tag='iati-activities')
self.assertXmlHasAttribute(root_test, 'generated-datetime')
self.assertXmlHasAttribute(root_test, 'version')
self.assertXpathsExist(root_test, ('./iati-activity',
'./iati-activity/iati-identifier',
'./iati-activity/reporting-org',
'./iati-activity/title'))
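The test clears any leftover export directory with an explicit os.path.exists guard before shutil.rmtree, since rmtree raises if the path is absent. The same remove-if-present step can be written guard-first as above or attempt-first; a sketch of both, with clear_export_dir as a hypothetical helper name:
import errno
import os
import shutil

def clear_export_dir(path):
    # Guard first, as the test above does.
    if os.path.exists(path):
        shutil.rmtree(path)

def clear_export_dir_eafp(path):
    # Attempt the removal and ignore only "does not exist",
    # so genuine errors (e.g. permissions) still surface.
    try:
        shutil.rmtree(path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise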
0
Example 92
Project: pygifme Source File: pygifme.py
def main():
from os import system, mkdir, listdir, environ, path, chdir, getcwd, \
EX_USAGE, EX_DATAERR, EX_CONFIG, EX_CANTCREAT
from time import gmtime, strftime
from sys import exit, path as spath
from argparse import ArgumentParser, ONE_OR_MORE, ArgumentDefaultsHelpFormatter
from argcomplete import autocomplete
# Avoid problems with /usr/local/bin first in sys.path
if path.dirname(__file__) == spath[0]:
spath.append(spath[0])
del spath[0]
try:
from pygifme import __version__
except ImportError as e:
#from . import __version__
__version__ = '0.1' # @Fixme: Bad package import from /usr/local/bin/pygifme.py
description = ' FILES can be listed out, like `file1.jpg file2.jpg`, or it\n' \
' can be a normal shell glob, like `*.jpg`.'
parser = ArgumentParser(
prog = 'pygifme',
description = description,
epilog = None,
parents = [],
formatter_class = ArgumentDefaultsHelpFormatter,
prefix_chars = '-',
fromfile_prefix_chars = None,
argument_default = None,
conflict_handler = 'error',
add_help = True,
)
def valid_directory(directory):
if path.exists(directory):
if path.isdir(directory):
return directory
else:
parser.error('Path {0} is not a directory'.format(directory))
return directory
else:
parser.error('Directory path {0} does not exist'.format(directory))
parser.add_argument('-r', '--reverse',
action = 'store_true',
dest = 'reverse',
default = False,
help = 'Reverse the GIF to make it loopable')
parser.add_argument('-o', '--output',
action = 'store',
metavar = '/path/to/output',
type = lambda d:valid_directory(d),
dest = 'output',
choices = None,
help = 'Set the animation\'s output directory')
parser.add_argument('-d', '--delay',
action = 'store',
metavar = 'DELAY',
dest = 'delay',
default = 20,
type = int,
choices = None,
help = 'Set the delay between frames')
parser.add_argument('-w', '--width',
action = 'store',
metavar = 'PIXELS',
dest = 'width',
default = 500,
type = int,
choices = None,
help = 'Set the width of the image')
parser.add_argument('-q', '--quiet',
action = 'store_true',
dest = 'quiet',
default = False,
help = 'Don\'t print status messages to stdout')
parser.add_argument(option_strings = ['FILES'],
metavar = 'FILES',
nargs = ONE_OR_MORE,
type=str,
dest = 'FILES',
help = 'One or more files to process')
parser.add_argument('-v', '--version',
action ='version',
version ='%(prog)s {version}'.format(version = __version__),
help = 'Shows the program version')
autocomplete(parser)
args = parser.parse_args()
#print(vars(args)) # For debugging
if system("which convert 2>&1 > /dev/null") != 0:
parser.error('You need to install ImageMagick first.\n\n' \
'If you\'re on a GNU/Linux Debian system, this should be as easy as:\n'\
' sudo apt-get install imagemagick\n' \
'If you\'re on a Mac, this should be as easy as:\n' \
' brew install imagemagick')
exit(EX_CONFIG)
if not args.FILES: # no files given
parser.error('no FILES given to process')
exit(EX_USAGE)
else:
for pfile in args.FILES:
if pfile[0:4] != 'http': # skip remote files
if path.exists(pfile):
if not path.isfile(pfile):
parser.error('{0} is not a valid file'.format(pfile))
else:
import glob
result_glob = glob.glob(pfile)
if not result_glob:
parser.error('File {0} does not exist'.format(pfile))
else:
for gfile in result_glob:
if path.exists(gfile):
if not path.isfile(gfile):
parser.error('{0} is not a valid file'.format(gfile))
else:
parser.error('File {0} does not exist'.format(gfile))
# WORKING WITH REMOTE FILES
if args.FILES[0][0:4] == 'http':
from urllib import request
from shutil import rmtree
if path.exists('/tmp/pygifme'):
rmtree('/tmp/pygifme')
mkdir('/tmp/pygifme', 0o777)
local_path = '/tmp/downloaded-pygifme.gif'
with open(local_path, mode='wb') as wfile:
remote_file = request.urlopen(args.FILES[0]).read()
wfile.write(remote_file)
wd = getcwd()
chdir('/tmp/pygifme')
status = system('convert {0} -coalesce frame_%03d.gif'.format(local_path))
chdir(wd)
if status != 0:
parser.error('Could not process remote file {0}'.format(local_path))
exit(EX_DATAERR)
del args.FILES[0]
args.FILES += [path.join('/tmp/pygifme/', f) for f in listdir('/tmp/pygifme/')]
if args.reverse:
args.FILES += args.FILES[-2:0:-1]  # append the middle frames in reverse order so the animation loops
if not args.output:
import subprocess
desktop = str(subprocess.check_output(['xdg-user-dir', 'DESKTOP'], universal_newlines = True)).strip()
if not desktop:
home = environ.get('HOME')
if not home:
home = '/tmp/pygifme'
mkdir(home, 0o777)
else:
home = '{0}/Desktop'.format(home)
else:
home = desktop
args.output = home
args.output = str(args.output) + '/animated-{0}.gif'.format(strftime('%F_%Hh-%Mm-%Ss', gmtime()))
cmd = 'convert -delay {0} -loop 0 -resize {1} -layers OptimizeTransparency {2} {3}'.format(int(args.delay), int(args.width), ' '.join(args.FILES), str(args.output))
if system(cmd) == 0:
if not args.quiet:
print('You now have a handsome animation at {0}'.format(args.output))
else:
parser.error('Something broke when we were animating your gif. Shit.')
exit(EX_CANTCREAT)
if system('which cloudapp 2>&1 > /dev/null') == 0 and not environ.get('DISABLE_CLOUPAPP'):
if not args.quiet:
print('Now we\'re uploading it to CloudApp')
system('cloudapp {0}'.format(args.output))
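The remote-file branch resets its scratch directory by removing it when present and recreating it, so every run starts from an empty folder. A compact sketch of that reset-directory idiom (reset_dir is a hypothetical helper name):
import os
import shutil

def reset_dir(path, mode=0o777):
    """Ensure `path` exists and is empty, as the /tmp/pygifme handling does."""
    if os.path.exists(path):
        shutil.rmtree(path)  # drop frames left over from a previous run
    os.mkdir(path, mode)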
0
Example 93
Project: HealthStarter Source File: install.py
def run(self, options, args):
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
if options.download_dir:
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
install_options.append('--prefix=')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
if (options.download_dir or not wheel or not
options.cache_dir):
# on -d don't do complex things like building
# wheels, and don't try to build wheels when wheel is
# not installed.
requirement_set.prepare_files(finder)
else:
# build wheels before install.
wb = WheelBuilder(
requirement_set,
finder,
build_options=[],
global_options=[],
)
# Ignore the result: a failed wheel will be
# installed from the sdist/vcs whatever.
wb.build(autobuilding=True)
if not options.download_dir:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
if options.target_dir:
ensure_dir(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
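The --target handling above highlights that shutil.rmtree only removes directories: existing files need os.remove, and symlinks are deliberately skipped because rmtree refuses to follow them. A sketch of that dispatch, assuming the caller has already decided the path must go (remove_path is a hypothetical helper, not pip API):
import os
import shutil

def remove_path(path):
    """Remove `path` whether it is a directory, a file, or a symlink."""
    if os.path.isdir(path) and not os.path.islink(path):
        shutil.rmtree(path)  # real directory trees go through rmtree
    else:
        os.remove(path)      # files and symlinks go through os.remove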
0
Example 94
Project: wharf Source File: forms.py
@app.route('/forms', methods=['POST'])
def forms():
try:
filename = request.json['filename']
url = request.json['url']
services = request.json['services']
i = 0
j = 0
if filename:
file_ext1 = filename.rsplit('.', 1)[1]
file_ext2 = filename.rsplit('.', 2)[1]
if file_ext1 == "zip":
j = move_services(filename, j, 1)
elif file_ext1 == "gz":
j = move_services(filename, j, 2)
missing_files = request.json['missing_files']
if j == 0:
j = ""
service_path = app.config['SERVICES_FOLDER']+file_ext1+str(j)
service_path2 = app.config['SERVICES_FOLDER']+file_ext2+str(j)
if "description" in missing_files:
description = ""
try:
description = request.json['description']
except:
pass
if file_ext1 == "zip":
with open(service_path+"/"+app.config['SERVICE_DICT']['description'], 'w') as f:
f.write(description)
elif file_ext1 == "gz":
with open(service_path2+"/"+app.config['SERVICE_DICT']['description'], 'w') as f:
f.write(description)
if "client" in missing_files:
client = ""
clientLanguage = ""
clientFilename = "dummy.txt"
try:
client = request.json['client']
clientLanguage = request.json['clientLanguage']
clientFilename = request.json['clientFilename']
except:
pass
if file_ext1 == "zip":
if not path.exists(service_path+"/client"):
mkdir(service_path+"/client")
with open(service_path+"/"+app.config['SERVICE_DICT']['client'], 'w') as f:
f.write(clientLanguage+"\n")
f.write(clientFilename)
with open(service_path+"/client/"+clientFilename, 'w') as f:
f.write(client)
elif file_ext1 == "gz":
if not path.exists(service_path2+"/client"):
mkdir(service_path2+"/client")
with open(service_path2+"/"+app.config['SERVICE_DICT']['client'], 'w') as f:
f.write(clientLanguage+"\n")
f.write(clientFilename)
with open(service_path2+"/client/"+clientFilename, 'w') as f:
f.write(client)
if "about" in missing_files:
missing_metadata(j, filename, "about")
if "body" in missing_files:
missing_metadata(j, filename, "body")
if "link" in missing_files:
link = "#"
linkName = "None"
try:
link = request.json['link']
linkName = request.json['linkName']
except:
pass
if file_ext1 == "zip":
if not path.exists(service_path+"/html"):
mkdir(service_path+"/html")
with open(service_path+"/"+app.config['SERVICE_DICT']['link'], 'w') as f:
f.write(link+" "+linkName)
elif file_ext1 == "gz":
if not path.exists(service_path2+"/html"):
mkdir(service_path2+"/html")
with open(service_path2+"/"+app.config['SERVICE_DICT']['link'], 'w') as f:
f.write(link+" "+linkName)
elif url:
j_array = []
if not "." in url or not "git" in url:
# docker index
j = 0
j_array.append(j)
elif url.rsplit('.', 1)[1] == "git":
# move to services folder
i = 0
# keeps track of the number of the service (if there is more than one)
j = 0
try:
services = services.replace("&#39;", "'")  # undo HTML-escaped single quotes
services = [ item.encode('ascii') for item in literal_eval(services) ]
except:
pass
service_path = path.join(app.config['UPLOAD_FOLDER'], (url.rsplit('/', 1)[1]).rsplit('.', 1)[0])
if not services:
return render_template("failed.html")
elif len(services) == 1:
while i != -1:
try:
if i == 0:
mv(service_path, app.config['SERVICES_FOLDER'])
elif i == 1:
mv(service_path, service_path+str(i))
mv(service_path+str(i), app.config['SERVICES_FOLDER'])
else:
mv(service_path+str(i-1), service_path+str(i))
mv(service_path+str(i), app.config['SERVICES_FOLDER'])
j = i
i = -1
except:
i += 1
try:
# remove leftover files in tmp
rmdir(service_path)
except:
pass
else:
for service in services:
i = 0
while i != -1:
try:
if i == 0:
mv(path.join(service_path, service),
app.config['SERVICES_FOLDER'])
elif i == 1:
mv(path.join(service_path, service),
path.join(service_path, service+str(i)))
mv(path.join(service_path, service+str(i)),
app.config['SERVICES_FOLDER'])
else:
mv(path.join(service_path, service+str(i-1)),
path.join(service_path, service+str(i)))
mv(path.join(service_path, service+str(i)),
app.config['SERVICES_FOLDER'])
j = i
i = -1
except:
i += 1
j_array.append(j)
try:
# remove leftover files in tmp
rmtree(service_path)
except:
pass
# !! TODO
# array of services
# return array of missing files, empty slots for ones that don't need replacing
# eventually allow this for file upload as well
# something different is git repo versus docker index
# can all git repos be handled the same, or are there ones that might be different?
try:
services = services.replace("&#39;", "'")  # undo HTML-escaped single quotes
services = [item.encode('ascii') for item in literal_eval(services)]
except:
pass
if len(services) > 1:
counter = 0
for service in services:
# update missing_files for array of them,
# similarly with description, client, about, body, link, etc.
missing_files = request.json['missing_files']
if j_array[counter] == 0:
j_array[counter] = ""
index_service = service.replace("/", "-")
meta_path = app.config['SERVICES_FOLDER']+index_service+str(j_array[counter])
description_meta(missing_files, counter, url, meta_path)
if "client" in missing_files:
client = ""
clientLanguage = ""
clientFilename = "dummy.txt"
try:
client = request.json['client'+str(counter)]
clientLanguage = request.json['clientLanguage'+str(counter)]
clientFilename = request.json['clientFilename'+str(counter)]
except:
pass
# if url is docker index
if not "." in url or not "git" in url:
if not path.exists(meta_path):
mkdir(meta_path)
if not "." in url or not "git" in url or url.rsplit('.', 1)[1] == "git":
if not path.exists(meta_path+"/client"):
mkdir(meta_path+"/client")
with open(meta_path+"/"+app.config['SERVICE_DICT']['client'], 'w') as f:
f.write(clientLanguage+"\n")
f.write(clientFilename)
with open(meta_path+"/client/"+clientFilename, 'w') as f:
f.write(client)
if "about" in missing_files:
missing_metadata3(counter, j_array, url, index_service, service, "about")
if "body" in missing_files:
missing_metadata3(counter, j_array, url, index_service, service, "body")
if "link" in missing_files:
link = "#"
linkName = "None"
try:
link = request.json['link'+str(counter)]
linkName = request.json['linkName'+str(counter)]
except:
pass
# if url is docker index
if not "." in url or not "git" in url:
if not path.exists(meta_path):
mkdir(meta_path)
if not "." in url or not "git" in url or url.rsplit('.', 1)[1] == "git":
if not path.exists(meta_path+"/html"):
mkdir(meta_path+"/html")
with open(meta_path+"/"+app.config['SERVICE_DICT']['link'], 'w') as f:
f.write(link+" "+linkName)
counter += 1
else:
missing_files = request.json['missing_files']
if j == 0:
j = ""
index_service = services[0].replace("/", "-")
meta_path = app.config['SERVICES_FOLDER']+index_service+str(j)
meta_path2 = app.config['SERVICES_FOLDER']+(url.rsplit('/', 1)[1]).rsplit('.', 1)[0]+str(j)
description_meta(missing_files, "", url, meta_path)
if "client" in missing_files:
client = ""
clientLanguage = ""
clientFilename = "dummy.txt"
try:
client = request.json['client']
clientLanguage = request.json['clientLanguage']
clientFilename = request.json['clientFilename']
except:
pass
# if url is docker index
if not "." in url or not "git" in url:
if not path.exists(meta_path):
mkdir(meta_path)
if not path.exists(meta_path+"/client"):
mkdir(meta_path+"/client")
with open(meta_path+"/"+app.config['SERVICE_DICT']['client'], 'w') as f:
f.write(clientLanguage+"\n")
f.write(clientFilename)
with open(meta_path+"/client/"+clientFilename, 'w') as f:
f.write(client)
elif url.rsplit('.', 1)[1] == "git":
if not path.exists(meta_path2+"/client"):
mkdir(meta_path2+"/client")
with open(meta_path2+"/"+app.config['SERVICE_DICT']['client'], 'w') as f:
f.write(clientLanguage+"\n")
f.write(clientFilename)
with open(meta_path2+"/client/"+clientFilename, 'w') as f:
f.write(client)
if "about" in missing_files:
missing_metadata2(j, url, index_service, services, 'about')
if "body" in missing_files:
missing_metadata2(j, url, index_service, services, 'body')
if "link" in missing_files:
link = "#"
linkName = "None"
try:
link = request.json['link']
linkName = request.json['linkName']
except:
pass
# if url is docker index
if not "." in url or not "git" in url:
if not path.exists(meta_path):
mkdir(meta_path)
if not path.exists(meta_path+"/html"):
mkdir(meta_path+"/html")
with open(meta_path+"/"+app.config['SERVICE_DICT']['link'], 'w') as f:
f.write(link+" "+linkName)
elif url.rsplit('.', 1)[1] == "git":
if not path.exists(meta_path2+"/html"):
mkdir(meta_path2+"/html")
with open(meta_path2+"/"+app.config['SERVICE_DICT']['link'], 'w') as f:
f.write(link+" "+linkName)
except:
pass
return jsonify(url=app.config['DOMAIN'])
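Several cleanup sites above wrap rmtree (or rmdir) in a bare try/except so that leftover tmp files never abort the request. shutil.rmtree can express the same best-effort intent directly, without swallowing unrelated exceptions; a sketch, with both helper names hypothetical:
import shutil

def best_effort_cleanup(service_path):
    # Same spirit as `try: rmtree(...) except: pass`, but scoped: only
    # filesystem errors raised during the removal itself are suppressed.
    shutil.rmtree(service_path, ignore_errors=True)

def logged_cleanup(service_path):
    failures = []
    # onerror receives (function, path, excinfo); record what could not be removed.
    shutil.rmtree(service_path,
                  onerror=lambda fn, p, exc: failures.append((p, exc[1])))
    return failures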
0
Example 95
Project: TrustRouter Source File: gencache.py
def EnsureModule(typelibCLSID, lcid, major, minor, progressInstance = None, bValidateFile=not is_readonly, bForDemand = bForDemandDefault, bBuildHidden = 1):
"""Ensure Python support is loaded for a type library, generating if necessary.
Given the IID, LCID and version information for a type library, check and if
necessary (re)generate, then import the necessary support files. If we regenerate the file, there
is no way to totally snuff out all instances of the old module in Python, and thus we will regenerate the file more than necessary,
unless makepy/genpy is modified accordingly.
Returns the Python module. No exceptions are caught during the generate process.
Params
typelibCLSID -- IID of the type library.
major -- Integer major version.
minor -- Integer minor version
lcid -- Integer LCID for the library.
progressInstance -- Instance to use as progress indicator, or None to
use the GUI progress bar.
bValidateFile -- Whether or not to perform cache validation or not
bForDemand -- Should a complete generation happen now, or on demand?
bBuildHidden -- Should hidden members/attributes etc be generated?
"""
bReloadNeeded = 0
try:
try:
module = GetModuleForTypelib(typelibCLSID, lcid, major, minor)
except ImportError:
# If we get an ImportError
# We may still find a valid cache file under a different MinorVersion #
# (which windows will search out for us)
#print "Loading reg typelib", typelibCLSID, major, minor, lcid
module = None
try:
tlbAttr = pythoncom.LoadRegTypeLib(typelibCLSID, major, minor, lcid).GetLibAttr()
# if the above line doesn't throw a pythoncom.com_error, check if
# it is actually a different lib than we requested, and if so, suck it in
if tlbAttr[1] != lcid or tlbAttr[4]!=minor:
#print "Trying 2nd minor #", tlbAttr[1], tlbAttr[3], tlbAttr[4]
try:
module = GetModuleForTypelib(typelibCLSID, tlbAttr[1], tlbAttr[3], tlbAttr[4])
except ImportError:
# We don't have a module, but we do have a better minor
# version - remember that.
minor = tlbAttr[4]
# else module remains None
except pythoncom.com_error:
# couldn't load any typelib - mod remains None
pass
if module is not None and bValidateFile:
assert not is_readonly, "Can't validate in a read-only gencache"
try:
typLibPath = pythoncom.QueryPathOfRegTypeLib(typelibCLSID, major, minor, lcid)
# windows seems to add an extra \0 (via the underlying BSTR)
# The mainwin toolkit does not add this erroneous \0
if typLibPath[-1]=='\0':
typLibPath=typLibPath[:-1]
suf = getattr(os.path, "supports_unicode_filenames", 0)
if not suf:
# can't pass unicode filenames directly - convert
try:
typLibPath=typLibPath.encode(sys.getfilesystemencoding())
except AttributeError: # no sys.getfilesystemencoding
typLibPath=str(typLibPath)
tlbAttributes = pythoncom.LoadRegTypeLib(typelibCLSID, major, minor, lcid).GetLibAttr()
except pythoncom.com_error:
# We have a module, but no type lib - we should still
# run with what we have though - the typelib may not be
# deployed here.
bValidateFile = 0
if module is not None and bValidateFile:
assert not is_readonly, "Can't validate in a read-only gencache"
filePathPrefix = "%s\\%s" % (GetGeneratePath(), GetGeneratedFileName(typelibCLSID, lcid, major, minor))
filePath = filePathPrefix + ".py"
filePathPyc = filePathPrefix + ".py"
if __debug__:
filePathPyc = filePathPyc + "c"
else:
filePathPyc = filePathPyc + "o"
# Verify that type library is up to date.
# If we have a differing MinorVersion or genpy has bumped versions, update the file
from . import genpy
if module.MinorVersion != tlbAttributes[4] or genpy.makepy_version != module.makepy_version:
#print "Version skew: %d, %d" % (module.MinorVersion, tlbAttributes[4])
# try to erase the bad file from the cache
try:
os.unlink(filePath)
except os.error:
pass
try:
os.unlink(filePathPyc)
except os.error:
pass
if os.path.isdir(filePathPrefix):
import shutil
shutil.rmtree(filePathPrefix)
minor = tlbAttributes[4]
module = None
bReloadNeeded = 1
else:
minor = module.MinorVersion
filePathPrefix = "%s\\%s" % (GetGeneratePath(), GetGeneratedFileName(typelibCLSID, lcid, major, minor))
filePath = filePathPrefix + ".py"
filePathPyc = filePathPrefix + ".pyc"
#print "Trying py stat: ", filePath
fModTimeSet = 0
try:
pyModTime = os.stat(filePath)[8]
fModTimeSet = 1
except os.error as e:
# If .py file fails, try .pyc file
#print "Trying pyc stat", filePathPyc
try:
pyModTime = os.stat(filePathPyc)[8]
fModTimeSet = 1
except os.error as e:
pass
#print "Trying stat typelib", pyModTime
#print str(typLibPath)
typLibModTime = os.stat(typLibPath)[8]
if fModTimeSet and (typLibModTime > pyModTime):
bReloadNeeded = 1
module = None
except (ImportError, os.error):
module = None
if module is None:
# We need to build an item. If we are in a read-only cache, we
# can't/don't want to do this - so before giving up, check for
# a different minor version in our cache - according to COM, this is OK
if is_readonly:
key = str(typelibCLSID), lcid, major, minor
# If we have been asked before, get last result.
try:
return versionRedirectMap[key]
except KeyError:
pass
# Find other candidates.
items = []
for desc in GetGeneratedInfos():
if key[0]==desc[0] and key[1]==desc[1] and key[2]==desc[2]:
items.append(desc)
if items:
# Items are all identical, except for last tuple element
# We want the latest minor version we have - so just sort and grab last
items.sort()
new_minor = items[-1][3]
ret = GetModuleForTypelib(typelibCLSID, lcid, major, new_minor)
else:
ret = None
# remember and return
versionRedirectMap[key] = ret
return ret
#print "Rebuilding: ", major, minor
module = MakeModuleForTypelib(typelibCLSID, lcid, major, minor, progressInstance, bForDemand = bForDemand, bBuildHidden = bBuildHidden)
# If we replaced something, reload it
if bReloadNeeded:
module = reload(module)
AddModuleToCache(typelibCLSID, lcid, major, minor)
return module
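On version skew the code above invalidates the stale cache entry: it unlinks the generated .py/.pyc and, because makepy can emit a package directory rather than a single module, removes that directory with rmtree before regenerating. A sketch of the invalidate step, with cache_prefix as a hypothetical path prefix:
import os
import shutil

def invalidate_cache(cache_prefix):
    """Remove a generated module whether it exists as files or as a package dir."""
    for suffix in (".py", ".pyc", ".pyo"):
        try:
            os.unlink(cache_prefix + suffix)
        except OSError:
            pass  # already gone; mirrors the try/except around os.unlink above
    if os.path.isdir(cache_prefix):
        shutil.rmtree(cache_prefix)  # package-style cache: the whole dir must go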
0
Example 96
Project: sdaps Source File: generic.py
def create_stamp_pdf(survey, output_filename, questionnaire_ids):
sheets = 1 if questionnaire_ids is None else len(questionnaire_ids)
questionnaire_length = survey.questionnaire.page_count
have_pdftk = False
# Test if pdftk is present, if it is we can use it to be faster
try:
result = subprocess.Popen(['pdftk', '--version'], stdout=subprocess.PIPE)
# Just assume pdftk is there, if it was executed successfully
if result is not None:
have_pdftk = True
except OSError:
pass
if not have_pdftk:
try:
import pyPdf
except:
log.error(_(u'You need to have either pdftk or pyPdf installed. pdftk is the faster method.'))
sys.exit(1)
# Write the "stamp" out to tmp.pdf if are using pdftk.
if have_pdftk:
stampsfile = file(survey.path('tmp.pdf'), 'wb')
else:
stampsfile = StringIO.StringIO()
canvas = \
reportlab.pdfgen.canvas.Canvas(stampsfile,
bottomup=False,
pagesize=(survey.defs.paper_width * mm,
survey.defs.paper_height * mm))
# bottomup = False =>(0, 0) is the upper left corner
print ungettext(u'Creating stamp PDF for %i sheet', u'Creating stamp PDF for %i sheets', sheets) % sheets
log.progressbar.start(sheets)
for i in range(sheets):
if questionnaire_ids is not None:
id = questionnaire_ids.pop(0)
for j in range(questionnaire_length):
if survey.defs.style == "classic":
draw_corner_marks(survey, canvas)
draw_corner_boxes(survey, canvas, j)
if not survey.defs.duplex or j % 2:
if questionnaire_ids is not None:
draw_questionnaire_id(canvas, survey, id)
if survey.defs.print_survey_id:
draw_survey_id(canvas, survey)
elif survey.defs.style == "code128":
draw_corner_marks(survey, canvas)
if not survey.defs.duplex or j % 2:
if questionnaire_ids is not None:
draw_code128_questionnaire_id(canvas, survey, id)
# Survey ID has to be printed in CODE128 mode, because it
# contains the page number and rotation.
draw_code128_sdaps_info(canvas, survey, j + 1)
if survey.global_id is not None:
draw_code128_global_id(canvas, survey)
elif survey.defs.style == "qr":
draw_corner_marks(survey, canvas)
if not survey.defs.duplex or j % 2:
if questionnaire_ids is not None:
draw_qr_questionnaire_id(canvas, survey, id)
# Survey ID has to be printed in QR mode, because it
# contains the page number and rotation.
draw_qr_sdaps_info(canvas, survey, j + 1)
if survey.global_id is not None:
draw_qr_global_id(canvas, survey)
elif survey.defs.style == "custom":
# Only draw corner marker
draw_corner_marks(survey, canvas)
pass
else:
raise AssertionError()
canvas.showPage()
log.progressbar.update(i + 1)
canvas.save()
print ungettext(u'%i sheet; %f seconds per sheet', u'%i sheet; %f seconds per sheet', log.progressbar.max_value) % (
log.progressbar.max_value,
float(log.progressbar.elapsed_time) /
float(log.progressbar.max_value)
)
if have_pdftk:
stampsfile.close()
# Merge using pdftk
print _("Stamping using pdftk")
tmp_dir = tempfile.mkdtemp()
if sheets == 1:
# Shortcut if we only have one sheet.
# In this case form data in the PDF will *not* break, in
# the other code path it *will* break.
print _(u"pdftk: Overlaying the original PDF with the markings.")
subprocess.call(['pdftk',
survey.path('questionnaire.pdf'),
'multistamp',
survey.path('tmp.pdf'),
'output',
output_filename])
else:
for page in xrange(1, questionnaire_length + 1):
print ungettext(u"pdftk: Splitting out page %d of each sheet.", u"pdftk: Splitting out page %d of each sheet.", page) % page
args = []
args.append('pdftk')
args.append(survey.path('tmp.pdf'))
args.append('cat')
cur = page
for i in range(sheets):
args.append('%d' % cur)
cur += questionnaire_length
args.append('output')
args.append(os.path.join(tmp_dir, 'stamp-%d.pdf' % page))
subprocess.call(args)
print _(u"pdftk: Splitting the questionnaire for watermarking.")
subprocess.call(['pdftk', survey.path('questionnaire.pdf'),
'dump_data', 'output',
os.path.join(tmp_dir, 'doc_data.txt')])
for page in xrange(1, questionnaire_length + 1):
subprocess.call(['pdftk', survey.path('questionnaire.pdf'), 'cat',
'%d' % page, 'output',
os.path.join(tmp_dir, 'watermark-%d.pdf' % page)])
if sheets == 1:
for page in xrange(1, questionnaire_length + 1):
print ungettext(u"pdftk: Watermarking page %d of all sheets.", u"pdftk: Watermarking page %d of all sheets.", page) % page
subprocess.call(['pdftk',
os.path.join(tmp_dir, 'stamp-%d.pdf' % page),
'background',
os.path.join(tmp_dir, 'watermark-%d.pdf' % page),
'output',
os.path.join(tmp_dir, 'watermarked-%d.pdf' % page)])
else:
for page in xrange(1, questionnaire_length + 1):
print ungettext(u"pdftk: Watermarking page %d of all sheets.", u"pdftk: Watermarking page %d of all sheets.", page) % page
subprocess.call(['pdftk',
os.path.join(tmp_dir, 'stamp-%d.pdf' % page),
'background',
os.path.join(tmp_dir, 'watermark-%d.pdf' % page),
'output',
os.path.join(tmp_dir, 'watermarked-%d.pdf' % page)])
args = []
args.append('pdftk')
for page in xrange(1, questionnaire_length + 1):
char = chr(ord('A') + page - 1)
args.append('%s=' % char + os.path.join(tmp_dir, 'watermarked-%d.pdf' % page))
args.append('cat')
for i in range(sheets):
for page in xrange(1, questionnaire_length + 1):
char = chr(ord('A') + page - 1)
args.append('%s%d' % (char, i + 1))
args.append('output')
args.append(os.path.join(tmp_dir, 'final.pdf'))
print _(u"pdftk: Assembling everything into the final PDF.")
subprocess.call(args)
subprocess.call(['pdftk', os.path.join(tmp_dir, 'final.pdf'),
'update_info', os.path.join(tmp_dir, 'doc_data.txt'),
'output', output_filename])
# Remove tmp.pdf
os.unlink(survey.path('tmp.pdf'))
# Remove all the temporary files
shutil.rmtree(tmp_dir)
else:
# Merge using pyPdf
stamped = pyPdf.PdfFileWriter()
stamped._info.getObject().update({
pyPdf.generic.NameObject('/Producer'): pyPdf.generic.createStringObject(u'sdaps'),
pyPdf.generic.NameObject('/Title'): pyPdf.generic.createStringObject(survey.title),
})
subject = []
for key, value in survey.info.iteritems():
subject.append(u'%(key)s: %(value)s' % {'key': key, 'value': value})
subject = u'\n'.join(subject)
stamped._info.getObject().update({
pyPdf.generic.NameObject('/Subject'): pyPdf.generic.createStringObject(subject),
})
stamps = pyPdf.PdfFileReader(stampsfile)
del stampsfile
questionnaire = pyPdf.PdfFileReader(
file(survey.path('questionnaire.pdf'), 'rb')
)
print _(u'Stamping using pyPdf. For faster stamping, install pdftk.')
log.progressbar.start(sheets)
for i in range(sheets):
for j in range(questionnaire_length):
s = stamps.getPage(i * questionnaire_length + j)
if not have_pdftk:
q = questionnaire.getPage(j)
s.mergePage(q)
stamped.addPage(s)
log.progressbar.update(i + 1)
stamped.write(open(output_filename, 'wb'))
print ungettext(u'%i sheet; %f seconds per sheet', u'%i sheet; %f seconds per sheet',
log.progressbar.max_value) % (
log.progressbar.max_value,
float(log.progressbar.elapsed_time) /
float(log.progressbar.max_value))
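Here shutil.rmtree(tmp_dir) only runs at the end of the pdftk branch, so a failing subprocess call leaks the split and watermark files. On Python 3, tempfile.TemporaryDirectory performs the rmtree unconditionally on exit (the code above targets Python 2, so this is an alternative, not the author's method); a sketch with a single illustrative pdftk invocation:
import os
import subprocess
import tempfile

def stamp_with_pdftk(questionnaire_pdf, stamp_pdf, output_filename):
    with tempfile.TemporaryDirectory() as tmp_dir:  # rmtree happens on exit
        final = os.path.join(tmp_dir, "final.pdf")
        subprocess.check_call(["pdftk", questionnaire_pdf,
                               "multistamp", stamp_pdf,
                               "output", final])
        os.replace(final, output_filename)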
0
Example 97
Project: fuzzer Source File: fuzzer.py
def __init__(self, binary_path, work_dir, afl_count=1, library_path=None, time_limit=None,
target_opts=None, extra_opts=None, create_dictionary=False,
seeds=None, crash_mode=False, never_resume=False):
'''
:param binary_path: path to the binary to fuzz. List or tuple for multi-CB.
:param work_dir: the work directory which contains fuzzing jobs, our job directory will go here
:param afl_count: number of AFL jobs total to spin up for the binary
:param library_path: library path to use, if none is specified a default is chosen
:param time_limit: amount of time to fuzz for; has no effect other than making timed_out return True
:param seeds: list of inputs to seed fuzzing with
:param target_opts: extra options to pass to the target
:param extra_opts: extra options to pass to AFL when starting up
:param crash_mode: if set to True AFL is set to crash explorer mode, and seed will be expected to be a crashing input
:param never_resume: never resume an old fuzzing run, even if it's possible
'''
self.binary_path = binary_path
self.work_dir = work_dir
self.afl_count = afl_count
self.time_limit = time_limit
self.library_path = library_path
self.target_opts = [ ] if target_opts is None else target_opts
self.crash_mode = crash_mode
Fuzzer._perform_env_checks()
if isinstance(binary_path,basestring):
self.is_multicb = False
self.binary_id = os.path.basename(binary_path)
elif isinstance(binary_path,(list,tuple)):
self.is_multicb = True
self.binary_id = os.path.basename(binary_path[0])
else:
raise ValueError("Was expecting either a string or a list/tuple for binary_path! It's {} instead.".format(type(binary_path)))
# sanity check crash mode
if self.crash_mode:
if seeds is None:
raise ValueError("Seeds must be specified if using the fuzzer in crash mode")
l.info("AFL will be started in crash mode")
self.seeds = ["fuzz"] if seeds is None or len(seeds) == 0 else seeds
self.job_dir = os.path.join(self.work_dir, self.binary_id)
self.in_dir = os.path.join(self.job_dir, "input")
self.out_dir = os.path.join(self.job_dir, "sync")
# sanity check extra opts
self.extra_opts = extra_opts
if self.extra_opts is not None:
if not isinstance(self.extra_opts, list):
raise ValueError("extra_opts must be a list of command line arguments")
# base of the fuzzer package
self.base = Fuzzer._get_base()
self.start_time = int(time.time())
# create_dict script
self.create_dict_path = os.path.join(self.base, "bin", "create_dict.py")
# afl dictionary
self.dictionary = None
# processes spun up
self.procs = [ ]
# start the fuzzer ids at 0
self.fuzz_id = 0
# test if we're resuming an old run
self.resuming = bool(os.listdir(self.out_dir)) if os.path.isdir(self.out_dir) else False
# has the fuzzer been turned on?
self._on = False
if never_resume and self.resuming:
l.info("could resume, but starting over upon request")
shutil.rmtree(self.job_dir)
self.resuming = False
if self.is_multicb:
# Where cgc/setup's Dockerfile checks it out
# NOTE: 'afl/fakeforksrv' serves as 'qemu', as far as AFL is concerned
# Will actually invoke 'fakeforksrv/multicb-qemu'
# This QEMU cannot run standalone (always speaks the forkserver "protocol"),
# but 'fakeforksrv/run_via_fakeforksrv' allows it.
# XXX: There is no driller/angr support, and probably will never be.
self.afl_path = shellphish_afl.afl_bin('multi-cgc')
self.afl_path_var = shellphish_afl.afl_path_var('multi-cgc')
else:
p = angr.Project(binary_path)
self.os = p.loader.main_bin.os
self.afl_dir = shellphish_afl.afl_dir(self.os)
# the path to AFL capable of calling driller
self.afl_path = shellphish_afl.afl_bin(self.os)
if self.os == 'cgc':
self.afl_path_var = shellphish_afl.afl_path_var('cgc')
else:
self.afl_path_var = shellphish_afl.afl_path_var(p.arch.qemu_name)
# set up libraries
self._export_library_path(p)
self.qemu_dir = self.afl_path_var
l.debug("self.start_time: %r", self.start_time)
l.debug("self.afl_path: %s", self.afl_path)
l.debug("self.afl_path_var: %s", self.afl_path_var)
l.debug("self.qemu_dir: %s", self.qemu_dir)
l.debug("self.binary_id: %s", self.binary_id)
l.debug("self.work_dir: %s", self.work_dir)
l.debug("self.resuming: %s", self.resuming)
# if we're resuming an old run set the input_directory to a '-'
if self.resuming:
l.info("[%s] resuming old fuzzing run", self.binary_id)
self.in_dir = "-"
else:
# create the work directory and input directory
try:
os.makedirs(self.in_dir)
except OSError:
l.warning("unable to create in_dir \"%s\"", self.in_dir)
# populate the input directory
self._initialize_seeds()
# look for a dictionary
dictionary_file = os.path.join(self.job_dir, "%s.dict" % self.binary_id)
if os.path.isfile(dictionary_file):
self.dictionary = dictionary_file
# if a dictionary doesn't exist and we aren't resuming a run, create a dict
elif not self.resuming:
# call out to another process to create the dictionary so we can
# limit its memory
if create_dictionary:
if self._create_dict(dictionary_file):
self.dictionary = dictionary_file
else:
# no luck creating a dictionary
l.warning("[%s] unable to create dictionary", self.binary_id)
# set environment variable for the AFL_PATH
os.environ['AFL_PATH'] = self.afl_path_var
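The never_resume branch deletes the entire job directory so AFL starts from a clean sync dir instead of resuming. A sketch of that decide-then-wipe step under an assumed job_dir layout (prepare_job_dir and the "input" subdirectory are illustrative, not the fuzzer package's API):
import os
import shutil

def prepare_job_dir(job_dir, never_resume=False):
    resuming = os.path.isdir(job_dir) and bool(os.listdir(job_dir))
    if never_resume and resuming:
        shutil.rmtree(job_dir)  # start over: drop previous sync/input dirs
        resuming = False
    if not resuming:
        os.makedirs(os.path.join(job_dir, "input"))
    return resuming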
0
Example 98
Project: mirage Source File: export_commands.py
def export_stubs_to_commands_format(handler, scenario_name_key, scenario_name, session_id,
runnable, playback_session, static_dir, export_dir):
"""
:param handler:
:param scenario_name_key:
:param scenario_name:
:param session_id:
:param runnable:
:param playback_session:
:param static_dir:
:param export_dir:
:return: :raise exception_response:
"""
# cache = Cache(get_hostname(handler.request))
# scenario_name_key = cache.scenario_key_name(scenario_name)
# use user arg or epoch time
if not session_id:
session_id = int(time.time())
# session_id = handler.get_argument('session_id', int(time.time()))
session = u'{0}_{1}'.format(scenario_name, session_id)
cmds = [
'delete/stubs?scenario={0}'.format(scenario_name),
'begin/session?scenario={0}&session={1}&mode=record'.format(
scenario_name, session)
]
files = []
scenario = Scenario()
# get scenario pre stubs for specified scenario
stubs = list(scenario.get_pre_stubs(scenario_name_key))
if stubs:
for i in range(len(stubs)):
entry = stubs[i]
stub = Stub(entry['stub'], scenario_name_key)
# if the stub is REST, the matcher may be None; check for that
if stub.contains_matchers() is None:
cmds.append('# Stub skipped since no matchers were found. Consider using .yaml format for additional '
'capabilities')
# skipping to next stub, this stub is not compatible with .commands format
continue
matchers = [('{0}_{1}_{2}.textMatcher'.format(session, i, x), stub.contains_matchers()[x])
for x in range(len(stub.contains_matchers()))]
matchers_str = ",".join(x[0] for x in matchers)
url_args = stub.args()
url_args['session'] = session
module_info = stub.module()
if module_info:
# Note: not including put/module in the export, modules are shared
# by multiple scenarios.
url_args['ext_module'] = module_info['name']
url_args['stub_created_date'] = stub.recorded()
url_args['stubbedSystemDate'] = module_info.get('recorded_system_date')
url_args['system_date'] = module_info.get('system_date')
url_args = urlencode(url_args)
responses = stub.response_body()
assert(len(responses) == 1)
response = responses[0]
response = ('{0}_{1}.response'.format(session, i), response)
cmds.append('put/stub?{0},{1},{2}'.format(url_args, matchers_str,
response[0]))
files.append(response)
files.extend(matchers)
else:
cmds.append('put/stub?session={0},text=a_dummy_matcher,text=a_dummy_response'.format(session))
cmds.append('end/session?session={0}'.format(session))
runnable_info = dict()
# if this scenario is runnable
if runnable:
# playback_session = handler.get_argument('playback_session', None)
if not playback_session:
raise exception_response(400,
title="'playback_session' argument required with 'runnable")
runnable_info['playback_session'] = playback_session
tracker = Tracker()
last_used = tracker.session_last_used(scenario_name_key,
playback_session, 'playback')
if not last_used:
raise exception_response(400,
title="Unable to find playback session")
runnable_info['last_used'] = dict(remote_ip=last_used['remote_ip'],
start_time=str(last_used['start_time']))
playback = tracker.get_last_playback(scenario_name, playback_session,
last_used['start_time'])
playback = list(playback)
if not playback:
raise exception_response(400,
title="Unable to find a playback for scenario='{0}', playback_session='{1}'".format(scenario_name, playback_session))
cmds.append('begin/session?scenario={0}&session={1}&mode=playback'.format(
scenario_name, session))
number_of_requests = len(playback)
runnable_info['number_of_playback_requests'] = number_of_requests
for nrequest in range(number_of_requests):
track = playback[nrequest]
request_text = track.get('request_text')
if not request_text:
raise exception_response(400, title='Unable to obtain playback details, was full tracking enabled?')
request_file_name = '{0}_{1}.request'.format(session, nrequest)
files.append((request_file_name, request_text))
stubo_response_text = track['stubo_response']
if not isinstance(stubo_response_text, basestring):
stubo_response_text = unicode(stubo_response_text)
stubo_response_file_name = '{0}_{1}.stubo_response'.format(session, nrequest)
files.append((stubo_response_file_name, stubo_response_text))
url_args = track['request_params']
url_args['session'] = session
url_args = urlencode(url_args)
cmds.append(u'get/response?{0},{1}'.format(url_args,
request_file_name))
cmds.append('end/session?session={0}'.format(session))
files.append(('{0}.commands'.format(scenario_name),
b"\r\n".join(cmds)))
# checking whether export dir parameter is provided
if not export_dir:
export_dir = scenario_name_key.replace(':', '_')
export_dir_path = os.path.join(static_dir, 'exports', export_dir)
if os.path.exists(export_dir_path):
shutil.rmtree(export_dir_path)
os.makedirs(export_dir_path)
archive_name = os.path.join(export_dir_path, scenario_name)
zout = zipfile.ZipFile(archive_name+'.zip', "w")
tar = tarfile.open(archive_name+".tar.gz", "w:gz")
for finfo in files:
fname, contents = finfo
file_path = os.path.join(export_dir_path, fname)
with codecs.open(file_path, mode='wb', encoding='utf-8') as f:
f.write(contents)
tar.add(file_path, fname)
zout.write(file_path, fname)
tar.close()
zout.close()
shutil.copy(archive_name+'.zip', archive_name+'.jar')
files.extend([(scenario_name+'.zip',), (scenario_name+'.tar.gz',),
(scenario_name+'.jar',)])
# getting links
links = get_export_links(handler, scenario_name_key, files)
return links
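Worth isolating from the export code above is the reset-then-recreate idiom around shutil.rmtree: any stale export directory is removed wholesale before an empty one is created in its place. A minimal sketch (reset_dir is an illustrative name):

import os
import shutil

def reset_dir(path):
    # remove any previous contents in one call, then start empty
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)

The exists/rmtree/makedirs sequence is not atomic, which is fine for a single-process export step like this one; concurrent writers would need a lock or a uniquely named directory instead.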
0
Example 99
Project: galah Source File: zip_bulk_submissions.py
def _zip_bulk_submissions(archive_id, requester, assignment, email = ""):
archive_id = ObjectId(archive_id)
archive_file = temp_directory = ""
# Find any expired archives and remove them
deleted_files = []
for i in Archive.objects(expires__lt = datetime.datetime.today()):
deleted_files.append(i.file_location)
if i.file_location:
try:
os.remove(i.file_location)
except OSError as e:
logger.warning(
"Could not remove expired archive at %s: %s.",
i.file_location, str(e)
)
i.delete()
if deleted_files:
logger.info("Deleted archives %s.", str(deleted_files))
# This is the archive object we will eventually add to the database
new_archive = Archive(
id = archive_id,
requester = requester,
archive_type = "assignment_package"
)
temp_directory = archive_file = None
try:
# Form the query
query = {"assignment": ObjectId(assignment)}
# Only mention email in the query if it's not None or the empty
# string, otherwise mongo will look for submissions that list the
# user as None or the empty string (which should be exactly none of
# the submissions in the system).
if email:
query["user"] = email
else:
# Otherwise, we need to be careful not to get teacher/TA submissions.
assn = Assignment.objects.get(id = ObjectId(assignment))
students = User.objects(
account_type="student",
classes = assn.for_class
)
query["user__in"] = [i.id for i in students]
# Grab all the submissions
submissions = list(Submission.objects(**query))
if not submissions:
logger.info("No submissions found matching query.")
return
# Organize all the submissions by user name, as this will closely
# match the structure of the archive we will build.
submission_map = {}
for i in submissions:
if i.user in submission_map:
submission_map[i.user].append(i)
else:
submission_map[i.user] = [i]
# Create a temporary directory we will create our archive in.
temp_directory = tempfile.mkdtemp()
# Create our directory tree. Instead of making new folders for each
# submission and copying the user's files over however, we will
# create symlinks to save space and time.
for user, user_submissions in submission_map.items():
# Create a directory for the user
os.makedirs(os.path.join(temp_directory, user))
# Create symlinks for all his submissions. Each symlink is
# named after the submission date.
for i in user_submissions:
time_stamp = i.timestamp.strftime("%Y-%m-%d-%H-%M-%S")
symlink_path = \
os.path.join(temp_directory, user, time_stamp)
# In the highly unlikely event that two of the same user's
# submissions have the same exact time stamp, we'll need to
# add a marker to the end of the timestamp.
marker = 0
while os.path.exists(symlink_path +
("-%d" % marker if marker > 0 else "")):
marker += 1
if marker > 0:
symlink_path += "-%d" % marker
original_path = i.getFilePath()
# Detect if the submission's files are still on the filesystem
if os.path.isdir(original_path):
# Create a symlink pointing to the actual submission
# directory with the name we generated
os.symlink(original_path, symlink_path)
else:
# Create an empty text file marking the fact that a
# submissions existed but is no longer available.
open(symlink_path, "w").close()
# Create the actual archive file.
# TODO: Create it in galah's /var/ directory
file_descriptor, archive_file = tempfile.mkstemp(suffix = ".zip")
os.close(file_descriptor)
# Run zip and do the actual archiving. Will block until it's finished.
zipdir(temp_directory, archive_file)
new_archive.file_location = archive_file
new_archive.expires = \
datetime.datetime.today() + config["TEACHER_ARCHIVE_LIFETIME"]
new_archive.save(force_insert = True)
except Exception as e:
# If we created a temporary archive file we need to delete it.
new_archive.file_location = None
if archive_file:
os.remove(archive_file)
new_archive.error_string = str(e)
new_archive.save(force_insert = True)
raise
finally:
if temp_directory:
shutil.rmtree(temp_directory)
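The try/finally around shutil.rmtree is the key cleanup pattern here: the scratch directory from tempfile.mkdtemp is removed whether archiving succeeds or raises. Also note that rmtree removes the symlinks created above rather than following them, so the linked submission directories survive. A minimal sketch of the same shape (build_archive and make_archive are illustrative names):

import shutil
import tempfile

def build_archive(make_archive):
    temp_directory = tempfile.mkdtemp()
    try:
        # fill the scratch directory and produce the archive
        return make_archive(temp_directory)
    finally:
        # always discard the scratch tree; symlinks inside are removed,
        # not followed, so their targets are untouched
        shutil.rmtree(temp_directory)

On Python 3, tempfile.TemporaryDirectory packages exactly this mkdtemp/rmtree pairing as a context manager.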
0
Example 100
Project: crossbar Source File: fileupload.py
def render_POST(self, request):
headers = {x.decode('iso-8859-1'): y.decode('iso-8859-1')
for x, y in request.getAllHeaders().items()}
origin = headers['host']
postFields = cgi.FieldStorage(
fp=request.content,
headers=headers,
environ={"REQUEST_METHOD": "POST"})
f = self._form_fields
filename = postFields[f['file_name']].value
totalSize = int(postFields[f['total_size']].value)
totalChunks = int(postFields[f['total_chunks']].value)
chunkSize = int(postFields[f['chunk_size']].value)
chunkNumber = int(postFields[f['chunk_number']].value)
fileContent = postFields[f['content']].value
if 'chunk_extra' in f and f['chunk_extra'] in postFields:
chunk_extra = json.loads(postFields[f['chunk_extra']].value)
else:
chunk_extra = {}
if 'finish_extra' in f and f['finish_extra'] in postFields:
finish_extra = json.loads(postFields[f['finish_extra']].value)
else:
finish_extra = {}
fileId = filename
# # prepare user specific upload areas
# # NOT YET IMPLEMENTED
# #
# if 'auth_id' in f and f['auth_id'] in postFields:
# auth_id = postFields[f['auth_id']].value
# mydir = os.path.join(self._uploadRoot, auth_id)
# my_temp_dir = os.path.join(self._tempDirRoot, auth_id)
#
# # check if auth_id is a valid directory_name
# #
# if auth_id != auth_id.encode('ascii', 'ignore'):
# msg = "The requestor auth_id must be an ascii string."
# # 415 Unsupported Media Type
# request.setResponseCode(415, msg)
# return msg
# else:
# auth_id = 'anonymous'
# create user specific folder
# mydir = self._uploadRoot
# my_temp_dir = self._tempDirRoot
# if not os.path.exists(mydir):
# os.makedirs(mydir)
# if not os.path.exists(my_temp_dir):
# os.makedirs(my_temp_dir)
# prepare the on_progress publisher
if 'on_progress' in f and f['on_progress'] in postFields and self._fileupload_session != {}:
topic = postFields[f['on_progress']].value
if 'session' in f and f['session'] in postFields:
session = int(postFields[f['session']].value)
publish_options = PublishOptions(eligible=[session])
else:
publish_options = None
def fileupload_publish(payload):
self._fileupload_session.publish(topic, payload, options=publish_options)
else:
def fileupload_publish(payload):
pass
# Register upload right at the start to avoid overlapping upload conflicts
#
if fileId not in self._uploads:
self._uploads[fileId] = {'chunk_list': [], 'origin': origin}
chunk_is_first = True
self.log.debug('Started upload of file: file_name={file_name}, total_size={total_size}, total_chunks={total_chunks}, chunk_size={chunk_size}, chunk_number={chunk_number}',
file_name=fileId, total_size=totalSize, total_chunks=totalChunks, chunk_size=chunkSize, chunk_number=chunkNumber)
else:
chunk_is_first = False
# If the chunks are read at startup of crossbar, any client may claim and resume the pending upload!
#
upl = self._uploads[fileId]
if upl['origin'] == 'startup':
self.log.debug('Will try to resume upload of file: file_name={file_name}, total_size={total_size}, total_chunks={total_chunks}, chunk_size={chunk_size}, chunk_number={chunk_number}',
file_name=fileId, total_size=totalSize, total_chunks=totalChunks, chunk_size=chunkSize, chunk_number=chunkNumber)
upl['origin'] = origin
else:
# check if another session is uploading this file already
#
if upl['origin'] != origin:
msg = "File being uploaded is already uploaded in a different session."
self.log.debug(msg)
# 409 Conflict
request.setResponseCode(409, msg.encode('utf8'))
return msg.encode('utf8')
else:
# check if the chunk is being uploaded in this very session already
# this should never happen!
if chunkNumber in upl['chunk_list']:
msg = "Chunk being uploaded is already being uploaded."
self.log.debug(msg)
# Don't throw a conflict. This may be a wanted behaviour.
# Even if an upload would be resumable, you don't have to resume.
# 409 Conflict
# request.setResponseCode(409, msg.encode('utf8'))
# return msg.encode('utf8')
# check file size
#
if totalSize > self._max_file_size:
msg = "Size {} of file to be uploaded exceeds maximum {}".format(totalSize, self._max_file_size)
self.log.debug(msg)
# 413 Request Entity Too Large
request.setResponseCode(413, msg.encode('utf8'))
return msg.encode('utf8')
# check file extensions
#
extension = os.path.splitext(filename)[1]
if self._fileTypes and extension not in self._fileTypes:
msg = "Type '{}' of file to be uploaded is in allowed types {}".format(extension, self._fileTypes)
self.log.debug(msg)
# 415 Unsupported Media Type
request.setResponseCode(415, msg.encode('utf8'))
return msg.encode('utf8')
# TODO: check mime type
#
fileTempDir = os.path.join(self._tempDirRoot, fileId)
chunkName = os.path.join(fileTempDir, 'chunk_' + str(chunkNumber))
_chunkName = os.path.join(fileTempDir, '#kfhf3kz412uru578e38viokbjhfvz4w__' + 'chunk_' + str(chunkNumber))
def mergeFile():
# every chunk has to check if it is the last chunk written, except in a single chunk scenario
if totalChunks > 1 and len(self._uploads[fileId]['chunk_list']) >= totalChunks:
# last chunk
self.log.debug('Finished file upload after chunk {chunk_number} with chunk_list {chunk_list}', chunk_number=chunkNumber, chunk_list=self._uploads)
# Merge all files into one file and remove the temp files
# TODO: How to avoid the extra file I/O?
finalFileName = os.path.join(self._uploadRoot, fileId)
_finalFileName = os.path.join(fileTempDir, '#kfhf3kz412uru578e38viokbjhfvz4w__' + fileId)
with open(_finalFileName, 'wb') as _finalFile:
for cn in range(1, totalChunks + 1):
with open(os.path.join(fileTempDir, 'chunk_' + str(cn)), 'rb') as ff:
_finalFile.write(ff.read())
os.rename(_finalFileName, finalFileName)
if self._file_permissions:
perm = int(self._file_permissions, 8)
try:
os.chmod(finalFileName, perm)
except Exception as e:
msg = "file upload resource - could not change file permissions of uploaded file"
self.log.debug(msg)
self.log.debug(e)
self._uploads.pop(fileId, None)
request.setResponseCode(500, msg.encode('utf8'))
return msg.encode('utf8')
else:
self.log.debug("Changed permissions on {file_name} to {permissions}", file_name=finalFileName, permissions=self._file_permissions)
# remove the file temp folder
shutil.rmtree(fileTempDir)
self._uploads.pop(fileId, None)
# publish file upload progress to file_progress_URI
fileupload_publish(
{
u"id": fileId,
u"chunk": chunkNumber,
u"name": filename,
u"total": totalSize,
u"remaining": 0,
u"status": "finished",
u"progress": 1.,
u"finish_extra": finish_extra,
u"chunk_extra": chunk_extra,
}
)
if chunk_is_first:
# first chunk of file
# publish file upload start
#
fileupload_publish(
{
u"id": fileId,
u"chunk": chunkNumber,
u"name": filename,
u"total": totalSize,
u"remaining": totalSize,
u"status": "started",
u"progress": 0.,
u"chunk_extra": chunk_extra,
}
)
if totalChunks == 1:
# only one chunk overall -> write file directly
finalFileName = os.path.join(self._uploadRoot, fileId)
_finalFileName = os.path.join(self._tempDirRoot, '#kfhf3kz412uru578e38viokbjhfvz4w__' + fileId)
with open(_finalFileName, 'wb') as _finalFile:
_finalFile.write(fileContent)
if self._file_permissions:
perm = int(self._file_permissions, 8)
try:
os.chmod(_finalFileName, perm)
except Exception as e:
# finalFileName.remove()
msg = "Could not change file permissions of uploaded file"
self.log.debug(msg)
self.log.debug(e)
request.setResponseCode(500, msg.encode('utf8'))
return msg.encode('utf8')
else:
self.log.debug("Changed permissions on {file_name} to {permissions}", file_name=finalFileName, permissions=self._file_permissions)
os.rename(_finalFileName, finalFileName)
if chunkNumber not in self._uploads[fileId]['chunk_list']:
self._uploads[fileId]['chunk_list'].append(chunkNumber)
self._uploads.pop(fileId, None)
# publish file upload progress to file_progress_URI
fileupload_publish(
{
u"id": fileId,
u"chunk": chunkNumber,
u"name": filename,
u"total": totalSize,
u"remaining": 0,
u"status": "finished",
u"progress": 1.,
u"finish_extra": finish_extra,
u"chunk_extra": chunk_extra,
}
)
else:
# first of more chunks
# fileTempDir.remove() # any potential conflict should have been resolved above. This should not be necessary!
if not os.path.isdir(fileTempDir):
os.makedirs(fileTempDir)
with open(_chunkName, 'wb') as chunk:
chunk.write(fileContent)
os.rename(_chunkName, chunkName) # atomic file system operation
self.log.debug('chunk_' + str(chunkNumber) + ' written and moved to ' + chunkName)
# publish file upload progress
#
fileupload_publish(
{
u"id": fileId,
u"chunk": chunkNumber,
u"name": filename,
u"total": totalSize,
u"remaining": totalSize - chunkSize,
u"status": "progress",
u"progress": round(float(chunkSize) / float(totalSize), 3),
u"chunk_extra": chunk_extra,
}
)
if chunkNumber not in self._uploads[fileId]['chunk_list']:
self._uploads[fileId]['chunk_list'].append(chunkNumber)
mergeFile()
# clean the temp dir once per file upload
self._remove_stale_uploads()
else:
# intermediate chunk
if not os.path.isdir(fileTempDir):
os.makedirs(fileTempDir)
with open(_chunkName, 'wb') as chunk:
chunk.write(fileContent)
os.rename(_chunkName, chunkName)
self.log.debug('chunk_' + str(chunkNumber) + ' written and moved to ' + chunkName)
if chunkNumber not in self._uploads[fileId]['chunk_list']:
self._uploads[fileId]['chunk_list'].append(chunkNumber)
received = sum(os.path.getsize(os.path.join(fileTempDir, f)) for f in os.listdir(fileTempDir))
fileupload_publish(
{
u"id": fileId,
u"chunk": chunkNumber,
u"name": filename,
u"total": totalSize,
u"remaining": totalSize - received,
u"status": "progress",
u"progress": round(float(received) / float(totalSize), 3),
u"chunk_extra": chunk_extra,
}
)
mergeFile()
# no errors encountered -> respond success
request.setResponseCode(200)
return b''
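mergeFile() above compresses to three steps: concatenate the chunk files in order, publish the assembled file with an atomic os.rename, and let one shutil.rmtree discard the whole per-upload chunk directory. A minimal sketch under those assumptions (merge_chunks is an illustrative name, and the temporary file is placed next to the final path so the rename stays on one filesystem):

import os
import shutil

def merge_chunks(temp_dir, final_path, total_chunks):
    tmp_final = final_path + '.partial'
    with open(tmp_final, 'wb') as out:
        for n in range(1, total_chunks + 1):
            # chunks were written as chunk_1 .. chunk_N by the upload handler
            with open(os.path.join(temp_dir, 'chunk_%d' % n), 'rb') as chunk:
                out.write(chunk.read())
    os.rename(tmp_final, final_path)  # atomic within a single filesystem
    shutil.rmtree(temp_dir)           # removes every chunk in one call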