Here are examples of the Python API tempfile.mkdtemp, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
160 Examples
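For orientation, a minimal sketch of the call these examples revolve around (not taken from any of the projects below): tempfile.mkdtemp() creates a uniquely named directory, accessible only by the creating user, and returns its path; unlike tempfile.TemporaryDirectory() in Python 3, the caller is responsible for deleting it, usually with shutil.rmtree().

import shutil
import tempfile

tmp_dir = tempfile.mkdtemp()        # e.g. '/tmp/tmp3k9x2a1b'
try:
    pass                            # ... create files under tmp_dir ...
finally:
    shutil.rmtree(tmp_dir)          # mkdtemp never cleans up after itself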
Example 101
def run_compile():
"""
Do it all in one call!
"""
import tempfile
i = sys.argv.index('-c')
del sys.argv[i]
remove_build_dir = 0
try:
i = sys.argv.index('--build-dir')
except ValueError:
i = None
if i is not None:
build_dir = sys.argv[i + 1]
del sys.argv[i + 1]
del sys.argv[i]
else:
remove_build_dir = 1
build_dir = tempfile.mkdtemp()
_reg1 = re.compile(r'[-][-]link[-]')
sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags]
if sysinfo_flags:
sysinfo_flags = [f[7:] for f in sysinfo_flags]
_reg2 = re.compile(
r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include')
f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in f2py_flags]
f2py_flags2 = []
fl = 0
for a in sys.argv[1:]:
if a in ['only:', 'skip:']:
fl = 1
elif a == ':':
fl = 0
if fl or a == ':':
f2py_flags2.append(a)
if f2py_flags2 and f2py_flags2[-1] != ':':
f2py_flags2.append(':')
f2py_flags.extend(f2py_flags2)
sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2]
_reg3 = re.compile(
r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)')
flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in flib_flags]
_reg4 = re.compile(
r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))')
fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in fc_flags]
if 1:
del_list = []
for s in flib_flags:
v = '--fcompiler='
if s[:len(v)] == v:
from numpy.distutils import fcompiler
fcompiler.load_all_fcompiler_classes()
allowed_keys = list(fcompiler.fcompiler_class.keys())
nv = ov = s[len(v):].lower()
if ov not in allowed_keys:
vmap = {} # XXX
try:
nv = vmap[ov]
except KeyError:
if ov not in vmap.values():
print('Unknown vendor: "%s"' % (s[len(v):]))
nv = ov
i = flib_flags.index(s)
flib_flags[i] = '--fcompiler=' + nv
continue
for s in del_list:
i = flib_flags.index(s)
del flib_flags[i]
assert len(flib_flags) <= 2, repr(flib_flags)
_reg5 = re.compile(r'[-][-](verbose)')
setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in setup_flags]
if '--quiet' in f2py_flags:
setup_flags.append('--quiet')
modulename = 'untitled'
sources = sys.argv[1:]
for optname in ['--include_paths', '--include-paths']:
if optname in sys.argv:
i = sys.argv.index(optname)
f2py_flags.extend(sys.argv[i:i + 2])
del sys.argv[i + 1], sys.argv[i]
sources = sys.argv[1:]
if '-m' in sys.argv:
i = sys.argv.index('-m')
modulename = sys.argv[i + 1]
del sys.argv[i + 1], sys.argv[i]
sources = sys.argv[1:]
else:
from numpy.distutils.command.build_src import get_f2py_modulename
pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources)
sources = pyf_files + sources
for f in pyf_files:
modulename = get_f2py_modulename(f)
if modulename:
break
extra_objects, sources = filter_files('', '[.](o|a|so)', sources)
include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1)
library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1)
libraries, sources = filter_files('-l', '', sources, remove_prefix=1)
undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1)
define_macros, sources = filter_files('-D', '', sources, remove_prefix=1)
for i in range(len(define_macros)):
name_value = define_macros[i].split('=', 1)
if len(name_value) == 1:
name_value.append(None)
if len(name_value) == 2:
define_macros[i] = tuple(name_value)
else:
print('Invalid use of -D:', name_value)
from numpy.distutils.system_info import get_info
num_info = {}
if num_info:
include_dirs.extend(num_info.get('include_dirs', []))
from numpy.distutils.core import setup, Extension
ext_args = {'name': modulename, 'sources': sources,
'include_dirs': include_dirs,
'library_dirs': library_dirs,
'libraries': libraries,
'define_macros': define_macros,
'undef_macros': undef_macros,
'extra_objects': extra_objects,
'f2py_options': f2py_flags,
}
if sysinfo_flags:
from numpy.distutils.misc_util import dict_append
for n in sysinfo_flags:
i = get_info(n)
if not i:
outmess('No %s resources found in system'
' (try `f2py --help-link`)\n' % (repr(n)))
dict_append(ext_args, **i)
ext = Extension(**ext_args)
sys.argv = [sys.argv[0]] + setup_flags
sys.argv.extend(['build',
'--build-temp', build_dir,
'--build-base', build_dir,
'--build-platlib', '.'])
if fc_flags:
sys.argv.extend(['config_fc'] + fc_flags)
if flib_flags:
sys.argv.extend(['build_ext'] + flib_flags)
setup(ext_modules=[ext])
if remove_build_dir and os.path.exists(build_dir):
import shutil
outmess('Removing build directory %s\n' % (build_dir))
shutil.rmtree(build_dir)
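The --build-dir handling above reduces to a common pattern: honour a caller-supplied directory if one was given, otherwise create a throwaway one with mkdtemp() and remove it afterwards. A minimal sketch of that pattern (the build() wrapper and its body are illustrative, not part of f2py):

import shutil
import tempfile

def build(build_dir=None):
    remove_build_dir = build_dir is None
    if build_dir is None:
        build_dir = tempfile.mkdtemp()
    try:
        pass                        # ... compile sources into build_dir ...
    finally:
        if remove_build_dir:
            shutil.rmtree(build_dir, ignore_errors=True)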
Example 102
Project: aminator Source File: cli.py
def plugin_manager():
import subprocess
import requests
import argparse
import tempfile
import tarfile
import shutil
import yaml
import re
import os
from cStringIO import StringIO
parser = argparse.ArgumentParser(description='Aminator plugin install utility')
parser.add_argument('--branch', help='Which branch to pull the plugin list from. Valid options: production, testing, alpha. Default value: production', default='production', choices=['production', 'testing', 'alpha'], dest='branch', metavar='branch')
parser.add_argument('--type', help='The type of plugin to search for. Valid options: cloud, volume, blockdevice, provision, distro, finalizer, metrics', choices=['cloud', 'volume', 'blockdevice', 'provision', 'distro', 'finalizer', 'metrics'], dest='type', metavar='plugin-type')
parser.add_argument('command', help='Command to run. Valid commands: search install list', choices=['search', 'install', 'list'], metavar='command')
parser.add_argument('name', help='Name of the plugin', metavar='name', nargs='?')
args = parser.parse_args()
req = requests.get('https://raw.github.com/aminator-plugins/metadata/%s/plugins.yml' % (args.branch))
plugins = yaml.load(req.text)
if args.command == 'search':
if not args.name:
print "ERROR: You must supply a keyword to search for"
sys.exit()
results = []
rgx = re.compile(args.name, re.I)
for name, data in plugins.items():
m = rgx.search(name)
if not m:
for alias in data['aliases']:
m = rgx.search(alias)
if m:
break
if m:
if args.type and args.type != data['type']:
continue
results.append("Name: %s\nAliases: %s\nType: %s\nDescription: %s" % (name, ", ".join(data['aliases']), data['type'], data['description']))
if len(results) == 0:
print "No plugins found for keyword %s" % args.name
else:
print "\n----------\n".join(results)
elif args.command == 'list':
results = []
for name, data in plugins.items():
if args.type and args.type != data['type']:
continue
results.append("Name: %s\nAliases: %s\nType: %s\nDescription: %s" % (name, ", ".join(data['aliases']), data['type'], data['description']))
if len(results) == 0:
print "No plugins found"
else:
print "\n----------\n".join(results)
elif args.command == 'install':
if not args.name:
print "ERROR: You must supply a plugin name to install"
sys.exit()
if os.geteuid() != 0:
print "ERROR: You must run installs as root (or through sudo)"
sys.exit()
rgx = re.compile('^%s$' % args.name, re.I)
plugin = None
for name, data in plugins.items():
m = rgx.match(name)
if not m:
for alias in data['aliases']:
m = rgx.match(alias)
if m:
plugin = data
break
else:
plugin = data
if not plugin:
print "Unable to find a plugin named %s. You should use the search to find the correct name or alias for the plugin you want to install" % args.name
sys.exit()
else:
url = 'https://github.com/aminator-plugins/%s/archive/%s.tar.gz' % (plugin['repo_name'], plugin['branch'])
print "Downloading latest version of %s from %s" % (args.name, url)
req = requests.get(url, stream=True)
tar = tarfile.open(mode="r:*", fileobj=StringIO(req.raw.read()))
tmpdir = tempfile.mkdtemp()
tar.extractall(path=tmpdir)
install_path = os.path.join(tmpdir, "%s-%s" % (plugin['repo_name'], plugin['branch']))
exe = subprocess.Popen([sys.executable, 'setup.py', 'install'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=install_path)
out, err = exe.communicate()
if exe.returncode > 0:
outf = open(os.path.join(tmpdir, "install.log"), 'w')
outf.write(out)
outf.close()
errf = open(os.path.join(tmpdir, "install.err"), 'w')
errf.write(err)
errf.close()
print "Plugin installation failed. You should look at install.log and install.err in the installation folder, %s, for the cause of the failure" % tmpdir
else:
print "%s plugin installed successfully, removing temp dir %s" % (args.name, tmpdir)
shutil.rmtree(tmpdir)
Example 103
Project: hydroshare Source File: test_scimeta_swat.py
def test_put_scimeta_swat_model_instance(self):
# Update science metadata XML
title_1 = 'Flat River SWAT Instance'
title_2 = 'Cannon river'
abstract_text_1 = 'This model is created for Flat River.'
abstract_text_2 = ('This is a test to the SWAT Model Instance resource. '
'All the data had been obtained from real share SWAT '
'model from SWATShare https://mygeohub.org/groups/water-hub/swatshare. '
'Some of the metadata entries are assumed just used '
'to test the resource implementation')
kwords_1 = ('SWAT2009', 'FlatRIver')
kwords_2 = ('Cannon River', 'SWAT', 'SWATShare')
model_output_1 = 'No'
model_output_2 = 'Yes'
model_prog_name_1 = 'Unspecified'
model_prog_name_2 = self.title_prog
model_prog_id_1 = 'None'
model_prog_id_2 = self.pid_prog
tmp_dir = tempfile.mkdtemp()
res = resource.create_resource('SWATModelInstanceResource',
self.user,
'Test SWAT Model Instance Resource')
pid = res.short_id
self.resources_to_delete.append(pid)
try:
# Apply metadata from saved file
# First update the resource ID so that it matches the ID of the
# newly created resource.
scimeta = etree.parse('hs_core/tests/data/swat-resourcemetadata-1.xml')
self.updateScimetaResourceID(scimeta, pid)
# Write out to a file
out = etree.tostring(scimeta, pretty_print=True)
sci_meta_new = os.path.join(tmp_dir, self.RESOURCE_METADATA)
with open(sci_meta_new, 'w') as f:
f.writelines(out)
# Send updated metadata to REST API
self.updateScimeta(pid, sci_meta_new)
# Get science metadata
response = self.getScienceMetadata(pid, exhaust_stream=False)
sci_meta_updated = os.path.join(tmp_dir, self.RESOURCE_METADATA_UPDATED)
with open(sci_meta_updated, 'w') as f:
for l in response.streaming_content:
f.write(l)
scimeta = etree.parse(sci_meta_updated)
abstract = self.getAbstract(scimeta)
self.assertEqual(abstract, abstract_text_1)
title = self.getTitle(scimeta)
self.assertEqual(title, title_1)
keywords = self.getKeywords(scimeta)
kw_comp = zip(kwords_1, keywords)
for k in kw_comp:
self.assertEqual(k[0], k[1])
model_output = scimeta.xpath(self.MOD_OUT_PATH,
namespaces=self.NS)
self.assertEqual(len(model_output), 1)
self.assertEqual(model_output_1, model_output[0].text)
prog_name = scimeta.xpath(self.EXECUTED_BY_NAME_PATH,
namespaces=self.NS)
self.assertEqual(len(prog_name), 1)
self.assertEqual(model_prog_name_1, prog_name[0].text)
prog_id = scimeta.xpath(self.EXECUTED_BY_ID_PATH,
namespaces=self.NS)
self.assertEqual(len(prog_id), 1)
self.assertEqual(model_prog_id_1, prog_id[0].text)
# Make sure metadata update is idempotent
self.updateScimeta(pid, sci_meta_new)
# Get science metadata
response = self.getScienceMetadata(pid, exhaust_stream=False)
sci_meta_updated = os.path.join(tmp_dir, self.RESOURCE_METADATA_UPDATED)
with open(sci_meta_updated, 'w') as f:
for l in response.streaming_content:
f.write(l)
scimeta = etree.parse(sci_meta_updated)
abstract = self.getAbstract(scimeta)
self.assertEqual(abstract, abstract_text_1)
title = self.getTitle(scimeta)
self.assertEqual(title, title_1)
keywords = self.getKeywords(scimeta)
kw_comp = zip(kwords_1, keywords)
for k in kw_comp:
self.assertEqual(k[0], k[1])
model_output = scimeta.xpath(self.MOD_OUT_PATH,
namespaces=self.NS)
self.assertEqual(len(model_output), 1)
self.assertEqual(model_output_1, model_output[0].text)
prog_name = scimeta.xpath(self.EXECUTED_BY_NAME_PATH,
namespaces=self.NS)
self.assertEqual(len(prog_name), 1)
self.assertEqual(model_prog_name_1, prog_name[0].text)
prog_id = scimeta.xpath(self.EXECUTED_BY_ID_PATH,
namespaces=self.NS)
self.assertEqual(len(prog_id), 1)
self.assertEqual(model_prog_id_1, prog_id[0].text)
# Overwrite metadata with other resource metadata
# First update the resource ID so that it matches the ID of the
# newly created resource.
scimeta = etree.parse('hs_core/tests/data/swat-resourcemetadata-2.xml')
self.updateScimetaResourceID(scimeta, pid)
self.updateExecutedBy(scimeta, model_prog_name_2, model_prog_id_2)
# Write out to a file
out = etree.tostring(scimeta, pretty_print=True)
sci_meta_new = os.path.join(tmp_dir, self.RESOURCE_METADATA)
with open(sci_meta_new, 'w') as f:
f.writelines(out)
# Send updated metadata to REST API
self.updateScimeta(pid, sci_meta_new)
# Get science metadata
response = self.getScienceMetadata(pid, exhaust_stream=False)
sci_meta_updated = os.path.join(tmp_dir, self.RESOURCE_METADATA_UPDATED)
with open(sci_meta_updated, 'w') as f:
for l in response.streaming_content:
f.write(l)
scimeta = etree.parse(sci_meta_updated)
abstract = self.getAbstract(scimeta)
self.assertEqual(abstract, abstract_text_2)
title = self.getTitle(scimeta)
self.assertEqual(title, title_2)
keywords = self.getKeywords(scimeta)
kw_comp = zip(kwords_2, keywords)
for k in kw_comp:
self.assertEqual(k[0], k[1])
model_output = scimeta.xpath(self.MOD_OUT_PATH,
namespaces=self.NS)
self.assertEqual(len(model_output), 1)
self.assertEqual(model_output_2, model_output[0].text)
prog_name = scimeta.xpath(self.EXECUTED_BY_NAME_PATH,
namespaces=self.NS)
self.assertEqual(len(prog_name), 1)
self.assertEqual(model_prog_name_2, prog_name[0].text)
prog_id = scimeta.xpath(self.EXECUTED_BY_ID_PATH,
namespaces=self.NS)
self.assertEqual(len(prog_id), 1)
prog_id_2 = prog_id[0].text.strip('/').rpartition('/')[-1]
self.assertEqual(model_prog_id_2, prog_id_2)
finally:
shutil.rmtree(tmp_dir)
Example 104
Project: luci-py Source File: bot_main.py
def run_manifest(botobj, manifest, start):
"""Defers to task_runner.py.
Return True if the task succeeded.
"""
# Ensure the manifest is valid. This can throw a json decoding error. Also
# raise if it is empty.
if not manifest:
raise ValueError('Empty manifest')
# Necessary to signal an internal_failure. This occurs when task_runner fails
# to execute the command. It is important to note that this data is extracted
# before any I/O is done, like writing the manifest to disk.
task_id = manifest['task_id']
hard_timeout = manifest['hard_timeout'] or None
# Default the grace period to 30s here, this doesn't affect the grace period
# for the actual task.
grace_period = manifest['grace_period'] or 30
if manifest['hard_timeout']:
# One for the child process, one for run_isolated, one for task_runner.
hard_timeout += 3 * manifest['grace_period']
# For isolated task, download time is not counted for hard timeout so add
# more time.
if not manifest['command']:
hard_timeout += manifest['io_timeout'] or 600
# Get the server info to pass to the task runner so it can provide updates.
url = botobj.server
is_grpc = botobj.remote.is_grpc()
if not is_grpc and 'host' in manifest:
# The URL in the manifest includes the version - eg not https://chromium-
# swarm-dev.appspot.com, but https://<some-version>-dot-chromium-swarm-
# dev.appspot.com. That way, if a new server version becomes the default,
# old bots will continue to work with a server version that can manipulate
# the old data (the new server will only ever have to read it, which is
# much simpler) while new bots won't accidentally contact an old server
# which the GAE engine hasn't gotten around to updating yet.
#
# With a gRPC proxy, we could theoretically run into the same problem
# if we change the meaning of some data without changing the protos.
# However, if we *do* change the protos, we already need to make the
# change in a few steps:
# 1. Modify the Swarming server to accept the new data
# 2. Modify the protos and the proxy to accept the new data
# in gRPC calls and translate it to "native" Swarming calls.
# 3. Update the bots to transmit the new protos.
# Throughout all this, the proto format itself irons out minor differences
# and additions. But because we deploy in three steps, the odds of a
# newer bot contacting an older server is very low.
#
# None of this applies if we don't actually update the protos but just
# change the semantics. If this becomes a significant problem, we could
# start transmitting the expected server version using gRPC metadata.
# - aludwin, Nov 2016
url = manifest['host']
task_dimensions = manifest['dimensions']
task_result = {}
failure = False
internal_failure = False
msg = None
auth_params_dumper = None
# Use 'w' instead of 'work' because path length is precious on Windows.
work_dir = os.path.join(botobj.base_dir, 'w')
try:
try:
if os.path.isdir(work_dir):
file_path.rmtree(work_dir)
except OSError:
# If a previous task created an undeleteable file/directory inside 'w',
# make sure that following tasks are not affected. This is done by working
# around the undeleteable directory by creating a temporary directory
# instead. This is not normal behavior. The bot will report a failure on
# start.
work_dir = tempfile.mkdtemp(dir=botobj.base_dir, prefix='w')
else:
os.makedirs(work_dir)
env = os.environ.copy()
# Windows in particular does not tolerate unicode strings in environment
# variables.
env['SWARMING_TASK_ID'] = task_id.encode('ascii')
env['SWARMING_SERVER'] = botobj.server.encode('ascii')
task_in_file = os.path.join(work_dir, 'task_runner_in.json')
with open(task_in_file, 'wb') as f:
f.write(json.dumps(manifest))
handle, bot_file = tempfile.mkstemp(
prefix='bot_file', suffix='.json', dir=work_dir)
os.close(handle)
call_hook(botobj, 'on_before_task', bot_file)
task_result_file = os.path.join(work_dir, 'task_runner_out.json')
if os.path.exists(task_result_file):
os.remove(task_result_file)
# Start a thread that periodically puts authentication headers and other
# authentication related information to a file on disk. task_runner reads it
# from there before making authenticated HTTP calls.
auth_params_file = os.path.join(work_dir, 'bot_auth_params.json')
if botobj.remote.uses_auth:
auth_params_dumper = file_refresher.FileRefresherThread(
auth_params_file,
lambda: bot_auth.prepare_auth_params_json(botobj, manifest))
auth_params_dumper.start()
command = [
sys.executable, THIS_FILE, 'task_runner',
'--swarming-server', url,
'--in-file', task_in_file,
'--out-file', task_result_file,
'--cost-usd-hour', str(botobj.state.get('cost_usd_hour') or 0.),
# Include the time taken to poll the task in the cost.
'--start', str(start),
'--min-free-space', str(get_min_free_space(botobj)),
'--bot-file', bot_file,
]
if botobj.remote.uses_auth:
command.extend(['--auth-params-file', auth_params_file])
if is_grpc:
command.append('--is-grpc')
logging.debug('Running command: %s', command)
# Put the output file into the current working directory, which should be
# the one containing swarming_bot.zip.
log_path = os.path.join(botobj.base_dir, 'logs', 'task_runner_stdout.log')
os_utilities.roll_log(log_path)
os_utilities.trim_rolled_log(log_path)
with open(log_path, 'a+b') as f:
proc = subprocess42.Popen(
command,
detached=True,
cwd=botobj.base_dir,
env=env,
stdin=subprocess42.PIPE,
stdout=f,
stderr=subprocess42.STDOUT,
close_fds=sys.platform != 'win32')
try:
proc.wait(hard_timeout)
except subprocess42.TimeoutExpired:
# That's the last ditch effort; as task_runner should have completed a
# while ago and had enforced the timeout itself (or run_isolated for
# hard_timeout for isolated task).
logging.error('Sending SIGTERM to task_runner')
proc.terminate()
internal_failure = True
msg = 'task_runner hung'
try:
proc.wait(grace_period)
except subprocess42.TimeoutExpired:
logging.error('Sending SIGKILL to task_runner')
proc.kill()
proc.wait()
return False
logging.info('task_runner exit: %d', proc.returncode)
if os.path.exists(task_result_file):
with open(task_result_file, 'rb') as fd:
task_result = json.load(fd)
if proc.returncode:
msg = 'Execution failed: internal error (%d).' % proc.returncode
internal_failure = True
elif not task_result:
logging.warning('task_runner failed to write metadata')
msg = 'Execution failed: internal error (no metadata).'
internal_failure = True
elif task_result[u'must_signal_internal_failure']:
msg = (
'Execution failed: %s' % task_result[u'must_signal_internal_failure'])
internal_failure = True
failure = bool(task_result.get('exit_code')) if task_result else False
return not internal_failure and not failure
except Exception as e:
# Failures include IOError when writing if the disk is full, OSError if
# swarming_bot.zip doesn't exist anymore, etc.
logging.exception('run_manifest failed')
msg = 'Internal exception occurred: %s\n%s' % (
e, traceback.format_exc()[-2048:])
internal_failure = True
finally:
if auth_params_dumper:
auth_params_dumper.stop()
if internal_failure:
post_error_task(botobj, msg, task_id)
call_hook(
botobj, 'on_after_task', failure, internal_failure, task_dimensions,
task_result)
if os.path.isdir(work_dir):
try:
file_path.rmtree(work_dir)
except Exception as e:
botobj.post_error(
'Failed to delete work directory %s: %s' % (work_dir, e))
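The fallback above uses mkdtemp's optional keyword arguments to place the new directory inside an existing one and to give it a recognisable name. A minimal, self-contained sketch of that call shape (base_dir here is only a stand-in for botobj.base_dir):

import os
import tempfile

base_dir = tempfile.mkdtemp()                          # stand-in for an existing directory
work_dir = tempfile.mkdtemp(dir=base_dir, prefix='w')  # created inside base_dir, name starts with 'w'
print(os.path.basename(work_dir).startswith('w'))      # True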
Example 105
Project: anchore Source File: analyzer.py
def run_analyzers(self, image):
success = True
analyzers = self.list_analyzers()
imagename = image.meta['imagename']
outputdir = image.anchore_imagedir
shortid = image.meta['shortId']
imagedir = None
analyzer_status = self.anchoreDB.load_analyzer_manifest(image.meta['imageId'])
results = {}
outputdirs = {}
torun = list()
skip = False
for atype in ['user_scripts_dir', 'extra_scripts_dir', 'base']:
for script in analyzers[atype]:
try:
with open(script, 'r') as FH:
csum = hashlib.md5(FH.read()).hexdigest()
except:
csum = "N/A"
# decide whether or not to run the analyzer
dorun = True
if self.force:
dorun = True
elif script in analyzer_status:
if csum == analyzer_status[script]['csum'] and analyzer_status[script]['returncode'] == 0:
dorun = False
outputdir = cmdstr = outstr = ""
if dorun:
if not skip:
if not imagedir:
self._logger.info(image.meta['shortId'] + ": analyzing ...")
imagedir = image.unpack()
outputdir = tempfile.mkdtemp(dir=imagedir)
cmdline = ' '.join([imagename, self.config['image_data_store'], outputdir, imagedir])
cmdstr = script + " " + cmdline
cmd = cmdstr.split()
try:
self._logger.debug("running analyzer: " + cmdstr)
timer = time.time()
outstr = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
self._logger.debug("analyzer time (seconds): " + str(time.time() - timer))
rc = 0
self._logger.debug("analyzer status: success")
self._logger.debug("analyzer exitcode: " + str(rc))
self._logger.debug("analyzer output: " + outstr)
except subprocess.CalledProcessError as err:
rc = err.returncode
outstr = err.output
outstr = outstr.decode('utf8')
if rc:
status = 'FAILED'
skip = True
success = False
self._logger.error("analyzer status: failed")
self._logger.error("analyzer exitcode: " + str(rc))
self._logger.error("analyzer output: " + outstr)
else:
status = 'SUCCESS'
else:
# this means that a prior analyzer failed, so we skip the rest
self._logger.debug("skipping analyzer (due to prior analyzer failure): " + script)
outstr = ""
rc = 1
status = 'SKIPPED'
mtype = "base"
if atype == 'user_scripts_dir':
mtype = 'user'
elif atype == 'extra_scripts_dir':
mtype = 'extra'
results[script] = {}
results[script]['command'] = cmdstr
results[script]['returncode'] = rc
results[script]['output'] = outstr
results[script]['outputdir'] = outputdir
results[script]['atype'] = atype
results[script]['csum'] = csum
results[script]['timestamp'] = time.time()
results[script]['status'] = status
if os.path.exists(os.path.join(outputdir, 'analyzer_output')):
for d in os.listdir(os.path.join(outputdir, 'analyzer_output')):
if os.path.exists(os.path.join(outputdir, 'analyzer_output', d)):
for dd in os.listdir(os.path.join(outputdir, 'analyzer_output', d)):
module_name = d
module_value = dd
if 'analyzer_outputs' not in results[script]:
#results[script]['analyzer_outputs'] = {}
results[script]['analyzer_outputs'] = list()
aoutput = {'module_name':module_name, 'module_value':module_value, 'module_type':mtype}
if os.path.isdir(os.path.join(outputdir, 'analyzer_output', d, dd)):
aoutput['data_type'] = 'dir'
else:
aoutput['data_type'] = 'file'
results[script]['analyzer_outputs'].append(aoutput)
analyzer_status[script] = {}
analyzer_status[script].update(results[script])
else:
self._logger.debug("skipping analyzer (no change in analyzer/config and prior run succeeded): " + script)
# process and store analyzer outputs
didsave = False
for script in results.keys():
result = results[script]
if result['status'] == 'SUCCESS':
mtype = None
if result['atype'] == 'user_scripts_dir':
mtype = 'user'
elif result['atype'] == 'extra_scripts_dir':
mtype = 'extra'
if os.path.exists(os.path.join(result['outputdir'], 'analyzer_output')):
for d in os.listdir(os.path.join(result['outputdir'], 'analyzer_output')):
if os.path.exists(os.path.join(result['outputdir'], 'analyzer_output', d)):
for dd in os.listdir(os.path.join(result['outputdir'], 'analyzer_output', d)):
dfile = os.path.join(result['outputdir'], 'analyzer_output', d, dd)
module_name = d
module_value = dd
if os.path.isfile(dfile):
adata = anchore_utils.read_kvfile_todict(dfile)
self.anchoreDB.save_analysis_output(image.meta['imageId'], module_name, module_value, adata, module_type=mtype)
didsave = True
elif os.path.isdir(dfile):
self.anchoreDB.save_analysis_output(image.meta['imageId'], module_name, module_value, dfile, module_type=mtype, directory_data=True)
didsave = True
self.anchoreDB.save_analyzer_manifest(image.meta['imageId'], analyzer_status)
if success:
self._logger.debug("analyzer commands all finished with successful exit codes")
if didsave:
self._logger.debug("generating analysis report from analyzer outputs and saving")
report = self.generate_analysis_report(image)
self.anchoreDB.save_analysis_report(image.meta['imageId'], report)
self._logger.debug("saving image information with updated analysis data")
image.save_image()
self._logger.info(image.meta['shortId'] + ": analyzed.")
self._logger.debug("running analyzers on image: " + str(image.meta['imagename']) + ": end")
return(success)
Example 106
Project: pyxer Source File: command.py
def command(engine = None):
parser = OptParser(
# "usage: %prog [options] command",
"Usage: pyxer [options] command",
description = _description,
version = VERSION_STR,
# epilog="Neu\n\r\n" + 20*"hallo ",
)
parser.add_option(
"-q",
"--quiet",
action = "store_false",
dest = "verbose",
default = True,
help = "Do not print status messages to stdout")
#parser.add_option(
# "-f",
# "--force",
# action="store_false",
# dest="force",
# default=True,
# help="don't print status messages to stdout")
parser.add_option(
"-d",
"--debug",
action = "store_true",
dest = "debug",
default = False,
help = "Activate debug logging")
if not engine:
parser.add_option(
"--engine",
dest = "engine",
default = "",
help = "Engine that will be used: gae (default), wsgi, paster")
parser.add_option(
"--port",
dest = "port",
default = "8080",
help = "serving on port")
parser.add_option(
"--host",
dest = "host",
default = "0.0.0.0",
help = "Serving on host")
parser.add_option(
"-r",
"--reload",
dest = "reload",
action = "store_true",
help = "Reload on changing files")
#parser.add_option(
# "-u",
# "--update",
# dest = "update",
# action = "store_true",
# help = "update suplementary data and files")
parser.add_option(
"-U",
"--develop",
dest = "develop",
action = "store_true",
help = "Update projects Pyxer version")
parser.add_option(
"-c",
"--clear",
dest = "clear",
action = "store_true",
help = "Empty local GAE datastore")
parser.add_option(
"-f",
"--force",
dest = "force",
action = "store_true",
help = "Force updates; overwrites pyxer-app.py")
(opt, args) = parser.parse_args()
showlog(opt.debug)
#config_default = {
# "pyxer.debug": (cBOOL, False),
# "pyxer.sessions": (cBOOL, False),
# "pyxer.engine": (cSTRING, ""),
# "pyxer.templating": (cSTRING, ""),
# "pyxer.host": (cSTRING, "127.0.0.1"),
# "pyxer.port": (cINT, 8080, 0, 65536),
# }
if (len(args) < 1) or (len(args) > 2):
log.debug("Minimum 1 argument, maximum 2")
parser.print_help()
# parser.error("incorrect number of arguments")
sys.exit(1)
command = args[0].lower()
# Directory argument
if len(args) == 2:
here = os.path.abspath(args[1])
else:
here = os.getcwd()
# Get engine
if engine:
opt.engine = engine
log.debug("Command %r for engine %r in directory %r", command, engine, here)
if opt.engine in ("paster", "paste", "p"):
print "Paster"
opt.engine = "paster"
import pyxer.paster as engine
elif opt.engine in ("wsgi", "w"):
print "Python WSGI"
engine = None
else:
print "Google AppEngine"
opt.engine = "gae"
import pyxer.gae as engine
# Update version
if opt.develop and command not in ("setup", "create", "init", "pyxer"):
import pyxer.create
pyxer.create.self_setup(opt)
# Serve
if command == "serve":
if engine:
engine.serve(opt)
else:
if opt.debug:
logging.basicConfig(level = logging.DEBUG)
import pyxer.app
pyxer.app.serve(opt)
# Setup
elif (command in ("setup", "create", "init")):
import pyxer.create
pyxer.create.create(opt, here)
# Install
elif (command in ("install")):
if len(args)==2:
install_package(os.getcwd(), args[1])
# ZIP Install
elif (command in ("zipinstall")):
if len(args)==2:
install_package(os.getcwd(), args[1], zip=True)
# Activate
# elif (command in ("open", "activate", "vm")):
#
# root = find_root()
# if not root:
# print "No project found"
# elif iswin:
# # call_subprocess([os.path.join(root, "scripts", "activate.bat")])
# system("start " + os.path.join(root, "scripts", "activate.bat"))
# else:
# print "IMPORTANT! Leave VM with command 'exit'."
# call_subprocess(["bash", "--init-file", os.path.join(root, "bin", "activate")], raise_on_returncode = False)
# Deactivate
# elif (command == "close" or command == "deactivate"):
#
# root = find_root()
# if not root:
# print "No project found"
# elif iswin:
# system(os.path.join(root, "scripts", "deactivate.bat"))
# else:
# pass
# Daemon
elif command == "start" and opt.engine == "paster":
engine.serve(opt, daemon = "start")
elif command == "stop" and opt.engine == "paster":
engine.serve(opt, daemon = "stop")
elif command == "status" and opt.engine == "paster":
engine.serve(opt, daemon = "status")
elif (command in ("reload", "restart")) and opt.engine == "paster":
engine.serve(opt, daemon = "restart")
# GAE Upload
elif (command in ("upload", "deploy", "push")) and opt.engine == "gae":
engine.upload(opt)
# GAE empty
elif (command in ("push_empty")) and opt.engine == "gae":
if len(args)==2:
name = args[1]
import tempfile
tmpdir = tempfile.mkdtemp()
print "Empty project", name , "created at", tmpdir
tmpfle = os.path.join(tmpdir, 'app.yaml')
open(tmpfle, 'w').write("""
application: %s
version: 0
runtime: python
api_version: 1
handlers:
- url: /
static_dir: empty
""".strip() % name)
engine.upload(opt, root=tmpdir)
os.remove(tmpfle)
os.rmdir(tmpdir)
print
print "ATTENTION: Go to GAE dasboard/versions and switch to version '0' to turn off your project"
else:
print '*** Project name needed as last argument'
# GAE fix
#elif (command == "fix" or command == "fixup") and opt.engine == "gae":
# engine.fix()
# Setup Pyxer
elif command in ("pyxer", "update", "up"):
import pyxer.create
pyxer.create.self_setup(opt)
else:
parser.print_help()
sys.exit(1)
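A note on the cleanup in the push_empty branch above: os.rmdir() only removes an empty directory, which is why the example deletes tmpfle first. When the temporary directory may still contain files, shutil.rmtree() is the usual one-call alternative, as in this sketch:

import os
import shutil
import tempfile

tmpdir = tempfile.mkdtemp()
open(os.path.join(tmpdir, 'app.yaml'), 'w').close()   # leave a file behind
shutil.rmtree(tmpdir)                                  # removes the directory and everything in it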
Example 107
Project: mysql-utilities Source File: serverclone.py
def clone_server(conn_val, options):
"""Clone an existing server
This method creates a new instance of a running server using a datadir
set to the new_data parametr, with a port set to new_port, server_id
set to new_id and a root password of root_pass. You can also specify
additional parameters for the mysqld command line as well as turn on
verbosity mode to display more diagnostic information during the clone
process.
The method will build a new base database installation from the .sql
files used to construct a new installation. Once the database is
created, the server will be started.
dest_val[in] a dictionary containing connection information
including:
(user, password, host, port, socket)
options[in] dictionary of options:
new_data[in] An existing path to create the new database and use
as datadir for new instance
(default = None)
new_port[in] Port number for new instance
(default = 3307)
new_id[in] Server_id for new instance
(default = 2)
root_pass[in] Password for root user on new instance (optional)
mysqld_options[in] Additional command line options for mysqld
verbosity[in] Print additional information during operation
(default is 0)
quiet[in] If True, do not print messages.
(default is False)
cmd_file[in] file name to write startup command
start_timeout[in] Number of seconds to wait for server to start
"""
new_data = os.path.abspath(options.get('new_data', None))
new_port = options.get('new_port', '3307')
root_pass = options.get('root_pass', None)
verbosity = options.get('verbosity', 0)
user = options.get('user', 'root')
quiet = options.get('quiet', False)
cmd_file = options.get('cmd_file', None)
start_timeout = int(options.get('start_timeout', 10))
mysqld_options = options.get('mysqld_options', '')
force = options.get('force', False)
quote_char = "'" if os.name == "posix" else '"'
if not check_port_in_use('localhost', int(new_port)):
raise UtilError("Port {0} in use. Please choose an "
"available port.".format(new_port))
# Check if path to database files is greater than MAX_DIR_SIZE char,
if len(new_data) > MAX_DATADIR_SIZE and not force:
raise UtilError("The --new-data path '{0}' is too long "
"(> {1} characters). Please use a smaller one. "
"You can use the --force option to skip this "
"check".format(new_data, MAX_DATADIR_SIZE))
# Clone running server
if conn_val is not None:
# Try to connect to the MySQL database server.
server1_options = {
'conn_info': conn_val,
'role': "source",
}
server1 = Server(server1_options)
server1.connect()
if not quiet:
print "# Cloning the MySQL server running on %s." % \
conn_val["host"]
# Get basedir
rows = server1.exec_query("SHOW VARIABLES LIKE 'basedir'")
if not rows:
raise UtilError("Unable to determine basedir of running server.")
basedir = os.path.normpath(rows[0][1])
# Cloning downed or offline server
else:
basedir = os.path.abspath(options.get("basedir", None))
if not quiet:
print "# Cloning the MySQL server located at %s." % basedir
new_data_deleted = False
# If datadir exists, has data, and user said it was Ok, delete it
if os.path.exists(new_data) and options.get("delete", False) and \
os.listdir(new_data):
new_data_deleted = True
shutil.rmtree(new_data, True)
# Create new data directory if it does not exist
if not os.path.exists(new_data):
if not quiet:
print "# Creating new data directory..."
try:
os.mkdir(new_data)
except OSError as err:
raise UtilError("Unable to create directory '{0}', reason: {1}"
"".format(new_data, err.strerror))
# After creating the new data directory, check for free space; errors
# regarding an invalid or inaccessible path have already been ruled out.
# If --force is not specified, verify and stop if there is not enough free space
if not force and os.path.exists(new_data) and \
estimate_free_space(new_data) < REQ_FREE_SPACE:
# Don't leave empty folders, delete new_data if was previously deleted
if os.path.exists(new_data) and new_data_deleted:
shutil.rmtree(new_data, True)
raise UtilError(LOW_SPACE_ERRR_MSG.format(directory=new_data,
megabytes=REQ_FREE_SPACE))
# Check for warning of using --skip-innodb
mysqld_path = get_tool_path(basedir, "mysqld")
version_str = get_mysqld_version(mysqld_path)
# convert version_str from str tuple to integer tuple if possible
if version_str is not None:
version = tuple([int(digit) for digit in version_str])
else:
version = None
if mysqld_options is not None and ("--skip-innodb" in mysqld_options or
"--innodb" in mysqld_options) and version is not None and \
version >= (5, 7, 5):
print("# WARNING: {0}".format(WARN_OPT_SKIP_INNODB))
if not quiet:
print "# Configuring new instance..."
print "# Locating mysql tools..."
mysqladmin_path = get_tool_path(basedir, "mysqladmin")
mysql_basedir = basedir
if os.path.exists(os.path.join(basedir, "local/mysql/share/")):
mysql_basedir = os.path.join(mysql_basedir, "local/mysql/")
# for source trees
elif os.path.exists(os.path.join(basedir, "/sql/share/english/")):
mysql_basedir = os.path.join(mysql_basedir, "/sql/")
locations = [
("mysqld", mysqld_path),
("mysqladmin", mysqladmin_path),
]
# From version 5.7.6 onwards, bootstrap is done via mysqld with the
# --initialize-insecure option, so no need to get information about the
# sql system tables that need to be loaded.
if version < (5, 7, 6):
system_tables = get_tool_path(basedir, "mysql_system_tables.sql",
False)
system_tables_data = get_tool_path(basedir,
"mysql_system_tables_data.sql",
False)
test_data_timezone = get_tool_path(basedir,
"mysql_test_data_timezone.sql",
False)
help_data = get_tool_path(basedir, "fill_help_tables.sql", False)
locations.extend([("mysql_system_tables.sql", system_tables),
("mysql_system_tables_data.sql", system_tables_data),
("mysql_test_data_timezone.sql", test_data_timezone),
("fill_help_tables.sql", help_data),
])
if verbosity >= 3 and not quiet:
print "# Location of files:"
if cmd_file is not None:
locations.append(("write startup command to", cmd_file))
for location in locations:
print "# % 28s: %s" % location
# Create the new mysql data with mysql_import_db-like process
if not quiet:
print "# Setting up empty database and mysql tables..."
fnull = open(os.devnull, 'w')
# For MySQL versions before 5.7.6, use regular bootstrap procedure.
if version < (5, 7, 6):
# Get bootstrap SQL statements
sql = list()
sql.append("CREATE DATABASE mysql;")
sql.append("USE mysql;")
innodb_disabled = False
if mysqld_options:
innodb_disabled = '--innodb=OFF' in mysqld_options
for sqlfile in [system_tables, system_tables_data, test_data_timezone,
help_data]:
lines = open(sqlfile, 'r').readlines()
# On MySQL 5.7.5, the root@localhost account creation was
# moved from the system_tables_data sql file into the
# mysql_install_db binary. Since we don't use mysql_install_db
# directly we need to create the root user account ourselves.
if (version is not None and version == (5, 7, 5) and
sqlfile == system_tables_data):
lines.extend(_CREATE_ROOT_USER)
for line in lines:
line = line.strip()
# Don't fail when InnoDB is turned off (Bug#16369955)
# (Ugly hack)
if (sqlfile == system_tables and
"SET @sql_mode_orig==@@SES" in line and innodb_disabled):
for line in lines:
if 'SET SESSION sql_mode=@@sql' in line:
break
sql.append(line)
# Bootstrap to set up mysql tables
cmd = [
mysqld_path,
"--no-defaults",
"--bootstrap",
"--datadir={0}".format(new_data),
"--basedir={0}".format(os.path.abspath(mysql_basedir)),
]
if verbosity >= 1 and not quiet:
proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,
stdout=fnull, stderr=fnull)
proc.communicate('\n'.join(sql))
# From 5.7.6 onwards, mysql_install_db has been replaced by mysqld and
# the --initialize option
else:
cmd = [
mysqld_path,
"--no-defaults",
"--initialize-insecure=on",
"--datadir={0}".format(new_data),
"--basedir={0}".format(os.path.abspath(mysql_basedir))
]
if verbosity >= 1 and not quiet:
proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,
stdout=fnull, stderr=fnull)
# Wait for subprocess to finish
res = proc.wait()
# Kill subprocess just in case it didn't finish - Ok if proc doesn't exist
if int(res) != 0:
if os.name == "posix":
try:
os.kill(proc.pid, subprocess.signal.SIGTERM)
except OSError as error:
if not error.strerror.startswith("No such process"):
raise UtilError("Failed to kill process with pid '{0}'"
"".format(proc.pid))
else:
ret_code = subprocess.call("taskkill /F /T /PID "
"{0}".format(proc.pid), shell=True)
# return code 0 means it was successful and 128 means it tried
# to kill a process that doesn't exist
if ret_code not in (0, 128):
raise UtilError("Failed to kill process with pid '{0}'. "
"Return code {1}".format(proc.pid,
ret_code))
# Drop the bootstrap file
if os.path.isfile("bootstrap.sql"):
os.unlink("bootstrap.sql")
# Start the instance
if not quiet:
print "# Starting new instance of the server..."
# If the user is not the same as the user running the script...
# and this is a Posix system... and we are running as root
if user_change_as_root(options):
subprocess.call(['chown', '-R', user, new_data])
subprocess.call(['chgrp', '-R', user, new_data])
socket_path = os.path.join(new_data, 'mysql.sock')
# If socket path is too long, use mkdtemp to create a tmp dir and
# use it instead to store the socket
if os.name == 'posix' and len(socket_path) > MAX_SOCKET_PATH_SIZE:
socket_path = os.path.join(tempfile.mkdtemp(), 'mysql.sock')
if not quiet:
print("# WARNING: The socket file path '{0}' is too long (>{1}), "
"using '{2}' instead".format(
os.path.join(new_data, 'mysql.sock'),
MAX_SOCKET_PATH_SIZE, socket_path))
cmd = {
'datadir': '--datadir={0}'.format(new_data),
'tmpdir': '--tmpdir={0}'.format(new_data),
'pid-file': '--pid-file={0}'.format(
os.path.join(new_data, "clone.pid")),
'port': '--port={0}'.format(new_port),
'server': '--server-id={0}'.format(options.get('new_id', 2)),
'basedir': '--basedir={0}'.format(mysql_basedir),
'socket': '--socket={0}'.format(socket_path),
}
if user:
cmd.update({'user': '--user={0}'.format(user)})
if mysqld_options:
if isinstance(mysqld_options, (list, tuple)):
cmd.update(dict(zip(mysqld_options, mysqld_options)))
else:
new_opts = mysqld_options.strip(" ")
# Drop the --mysqld=
if new_opts.startswith("--mysqld="):
new_opts = new_opts[9:]
if new_opts.startswith('"') and new_opts.endswith('"'):
list_ = shlex.split(new_opts.strip('"'))
cmd.update(dict(zip(list_, list_)))
elif new_opts.startswith("'") and new_opts.endswith("'"):
list_ = shlex.split(new_opts.strip("'"))
cmd.update(dict(zip(list_, list_)))
# Special case where there is only 1 option
elif len(new_opts.split("--")) == 1:
cmd.update({mysqld_options: mysqld_options})
else:
list_ = shlex.split(new_opts)
cmd.update(dict(zip(list_, list_)))
# set of options that must be surrounded with quotes
options_to_quote = set(["datadir", "tmpdir", "basedir", "socket",
"pid-file"])
# Strip spaces from each option
for key in cmd:
cmd[key] = cmd[key].strip(' ')
# Write startup command if specified
if cmd_file is not None:
if verbosity >= 0 and not quiet:
print "# Writing startup command to file."
cfile = open(cmd_file, 'w')
comment = " Startup command generated by mysqlserverclone.\n"
if os.name == 'posix' and cmd_file.endswith('.sh'):
cfile.write("#!/bin/sh\n")
cfile.write("#{0}".format(comment))
elif os.name == 'nt' and cmd_file.endswith('.bat'):
cfile.write("REM{0}".format(comment))
else:
cfile.write("#{0}".format(comment))
start_cmd_lst = ["{0}{1}{0} --no-defaults".format(quote_char,
mysqld_path)]
# build start command
for key, val in cmd.iteritems():
if key in options_to_quote:
val = "{0}{1}{0}".format(quote_char, val)
start_cmd_lst.append(val)
cfile.write("{0}\n".format(" ".join(start_cmd_lst)))
cfile.close()
if os.name == "nt" and verbosity >= 1:
cmd.update({"console": "--console"})
start_cmd_lst = [mysqld_path, "--no-defaults"]
sorted_keys = sorted(cmd.keys())
start_cmd_lst.extend([cmd[val] for val in sorted_keys])
if verbosity >= 1 and not quiet:
if verbosity >= 2:
print("# Startup command for new server:\n"
"{0}".format(" ".join(start_cmd_lst)))
proc = subprocess.Popen(start_cmd_lst, shell=False)
else:
proc = subprocess.Popen(start_cmd_lst, shell=False, stdout=fnull,
stderr=fnull)
# Try to connect to the new MySQL instance
if not quiet:
print "# Testing connection to new instance..."
new_sock = None
if os.name == "posix":
new_sock = socket_path
port_int = int(new_port)
conn = {
"user": "root",
"passwd": "",
"host": conn_val["host"] if conn_val is not None else "localhost",
"port": port_int,
"unix_socket": new_sock
}
server2_options = {
'conn_info': conn,
'role': "clone",
}
server2 = Server(server2_options)
i = 0
while i < start_timeout:
i += 1
time.sleep(1)
try:
server2.connect()
i = start_timeout + 1
except:
pass
finally:
if verbosity >= 1 and not quiet:
print "# trying again..."
if i == start_timeout:
raise UtilError("Unable to communicate with new instance. "
"Process id = {0}.".format(proc.pid))
elif not quiet:
print "# Success!"
# Set the root password
if root_pass:
if not quiet:
print "# Setting the root password..."
cmd = [mysqladmin_path, '--no-defaults', '-v', '-uroot']
if os.name == "posix":
cmd.append("--socket={0}".format(new_sock))
else:
cmd.append("--port={0}".format(int(new_port)))
cmd.extend(["password", root_pass])
if verbosity > 0 and not quiet:
proc = subprocess.Popen(cmd, shell=False)
else:
proc = subprocess.Popen(cmd, shell=False,
stdout=fnull, stderr=fnull)
# Wait for subprocess to finish
res = proc.wait()
if not quiet:
conn_str = "# Connection Information:\n"
conn_str += "# -uroot"
if root_pass:
conn_str += " -p%s" % root_pass
if os.name == "posix":
conn_str += " --socket=%s" % new_sock
else:
conn_str += " --port=%s" % new_port
print conn_str
print "#...done."
fnull.close()
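The docstring at the top of this example spells out the conn_val/options calling convention. A hedged, illustrative sketch of a call follows; the dictionary keys mirror the docstring, the values are placeholders, and clone_server is assumed to be imported from this module (none of this is verified against mysql-utilities itself):

# illustrative only: keys follow the docstring above, values are placeholders
conn_val = {'user': 'root', 'password': '', 'host': 'localhost',
            'port': 3306, 'socket': None}
options = {'new_data': '/tmp/new_datadir',   # datadir for the new instance
           'new_port': '3307',
           'new_id': 2,
           'root_pass': None,
           'quiet': False}
clone_server(conn_val, options)              # assumes clone_server is in scope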
Example 108
Project: ganga Source File: feedback_report.py
def report(job=None):
""" Upload error reports (snapshot of configuration,job parameters, input/output files, command history etc.). Job argument is optional. """
import mimetypes
import urllib
import urllib2
import httplib
import string
import random
import sys
import os
import platform
import Ganga.GPIDev.Lib.Config.config as config
from Ganga.GPIDev.Base.VPrinter import full_print
import Ganga
# global variables that will print a summary report to the user along with
# the download link
global JOB_REPORT, GANGA_VERSION, BACKEND_NAME, APPLICATION_NAME, PYTHON_PATH
JOB_REPORT = False
GANGA_VERSION = ''
BACKEND_NAME = ''
APPLICATION_NAME = ''
PYTHON_PATH = ''
def random_string(length):
return ''.join([random.choice(string.letters) for ii in range(length + 1)])
def encode_multipart_formdata(files):
boundary = random_string(30)
retnl = '\r\n'
lines = []
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
fields = {'title': 'Ganga Error Report'}
for (key, value) in fields.iteritems():
lines.append('--' + boundary)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for field_name, file in files.iteritems():
lines.append('--' + boundary)
lines.append(
'Content-Disposition: form-data; name="file"; filename="%s"' % (file))
lines.append('Content-Type: %s' % get_content_type(file))
lines.append('')
lines.append(open(file, 'rb').read())
lines.append('--' + boundary + '--')
lines.append('')
body = retnl.join(lines)
headers = {'content-type': 'multipart/form-data; boundary=%s' %
boundary, 'content-length': str(len(body))}
return body, headers
def make_upload_file(server):
def upload_file(path):
# print 'Uploading %r to %r' % (path, server)
data = {'MAX_FILE_SIZE': '3145728',
'sub': '',
'mode': 'regist'}
files = {'file': path}
send_post(server, files)
return upload_file
def send_post(url, files):
logger.debug("Sending Post to %s , containing %s" % (url, files))
encoded_data = encode_multipart_formdata(files)
data = urllib.urlencode(encoded_data[1])
req = urllib2.Request(url, data=data)
if req.has_data():
logger.debug("urllib2: Success!")
else:
logger.debug("urllib2: Fail!!!")
connection = httplib.HTTPConnection(req.get_host())
# connection.set_debuglevel(1)
logger.debug("Requesting: 'POST', %s, %s " % (url, encoded_data[1]))
# connection.request( method='POST', url=req.get_selector(), body=encoded_data[0], headers=encoded_data[1] )
connection.request(
method='POST', url=url, body=encoded_data[0], headers=encoded_data[1])
response = connection.getresponse()
logger.debug("httplib POST request response was: %s , because: %s" % (
response.status, response.reason))
responseResult = response.read()
#logger.debug("Responce.read(): --%s--" % responseResult )
responseResult = responseResult[
responseResult.find("<span id=\"download_path\""):]
startIndex = responseResult.find("path:") + 5
endIndex = responseResult.find("</span>")
logger.debug("Responce.read(): --%s--" %
responseResult[startIndex:endIndex])
logger.info(
'Your error report was uploaded to ganga developers with the following URL. ')
logger.info(
'You may include this URL and the following summary information in your bug report or in the support email to the developers.')
logger.info('')
logger.info('***' + str(responseResult[startIndex:endIndex]) + '***')
logger.info('')
global GANGA_VERSION, JOB_REPORT, APPLICATION_NAME, BACKEND_NAME, PYTHON_PATH
logger.info('Ganga Version : ' + GANGA_VERSION)
logger.info('Python Version : ' + "%s.%s.%s" %
(sys.version_info[0], sys.version_info[1], sys.version_info[2]))
logger.info('Operating System Version : ' + platform.platform())
if JOB_REPORT:
logger.info('Application Name : ' + APPLICATION_NAME)
logger.info('Backend Name : ' + BACKEND_NAME)
logger.info('Python Path : ' + PYTHON_PATH)
logger.info('')
JOB_REPORT = False
GANGA_VERSION = ''
BACKEND_NAME = ''
APPLICATION_NAME = ''
PYTHON_PATH = ''
def run_upload(server, path):
upload_file = make_upload_file(server)
upload_file(path)
def report_inner(job=None, isJob=False, isTask=False):
userInfoDirName = "userreport"
tempDirName = "reportsRepository"
# job relevant info
jobSummaryFileName = "jobsummary.txt"
jobFullPrintFileName = "jobfullprint.txt"
repositoryPath = "repository/$usr/LocalXML/6.0/jobs/$thousandsNumxxx"
# task relevant info
taskSummaryFileName = "tasksummary.txt"
taskFullPrintFileName = "taskfullprint.txt"
tasksRepositoryPath = "repository/$usr/LocalXML/6.0/tasks/$thousandsNumxxx"
# user's info
environFileName = "environ.txt"
userConfigFileName = "userconfig.txt"
defaultConfigFileName = "gangarc.txt"
ipythonHistoryFileName = "ipythonhistory.txt"
gangaLogFileName = "gangalog.txt"
jobsListFileName = "jobslist.txt"
tasksListFileName = "taskslist.txt"
thread_trace_file_name = 'thread_trace.html'
from Ganga.Utility import Config
uploadFileServer = Config.getConfig('Feedback')['uploadServer']
#uploadFileServer= "http://gangamon.cern.ch/django/errorreports/"
#uploadFileServer= "http://ganga-ai-02.cern.ch/django/errorreports/"
#uploadFileServer= "http://127.0.0.1:8000/errorreports"
def printDictionary(dictionary, file=sys.stdout):
for k, v in dictionary.iteritems():
print('%s: %s' % (k, v), file=file)
if k == 'PYTHONPATH':
global PYTHON_PATH
PYTHON_PATH = v
def extractFileObjects(fileName, targetDirectoryName):
try:
fileToRead = open(fileName, 'r')
try:
fileText = fileToRead.read()
import re
pattern = "File\(name=\'(.+?)\'"
matches = re.findall(pattern, fileText)
for fileName in matches:
fileName = os.path.expanduser(fileName)
targetFileName = os.path.join(
targetDirectoryName, os.path.basename(fileName))
shutil.copyfile(fileName, targetFileName)
finally:
fileToRead.close()
# except IOError, OSError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
def writeErrorLog(errorMessage):
try:
fileToWrite = open(errorLogPath, 'a')
try:
fileToWrite.write(errorMessage)
fileToWrite.write("\n")
except Exception as err:
logger.debug("Err: %s" % err)
raise
finally:
fileToWrite.close()
except Exception as err2:
logger.debug("Err: %s" % err2)
pass
def writeStringToFile(fileName, stringToWrite):
try:
# uncomment this to try the error logger
#fileName = '~/' + fileName
fileToWrite = open(fileName, 'w')
try:
fileToWrite.write(stringToWrite)
except Exception as err:
logger.debug("Err: %s" % err)
raise err
finally:
fileToWrite.close()
# except IOError:
except Exception as err:
logger.debug("Err2: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
def renameDataFiles(directory):
for fileName in os.listdir(directory):
fullFileName = os.path.join(directory, fileName)
if os.path.isfile(fullFileName):
if fileName == 'data':
os.rename(fullFileName, fullFileName + '.txt')
else:
renameDataFiles(fullFileName)
import shutil
import tarfile
import tempfile
import os
userHomeDir = os.getenv("HOME")
tempDir = tempfile.mkdtemp()
errorLogPath = os.path.join(tempDir, 'reportErrorLog.txt')
fullPathTempDir = os.path.join(tempDir, tempDirName)
fullLogDirName = ''
# create temp dir and specific dir for the job/user
try:
if not os.path.exists(fullPathTempDir):
os.mkdir(fullPathTempDir)
import datetime
now = datetime.datetime.now()
userInfoDirName = userInfoDirName + \
now.strftime("%Y-%m-%d-%H:%M:%S")
fullLogDirName = os.path.join(fullPathTempDir, userInfoDirName)
# if report directory exists -> delete it's content(we would like
# last version of the report)
if os.path.exists(fullLogDirName):
shutil.rmtree(fullLogDirName)
os.mkdir(fullLogDirName)
# except OSError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import os.environ in a file
fullEnvironFileName = os.path.join(fullLogDirName, environFileName)
try:
inputFile = open(fullEnvironFileName, 'w')
try:
printDictionary(os.environ, file=inputFile)
print('OS VERSION : ' + platform.platform(), file=inputFile)
finally:
inputFile.close()
# except IOError
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import user config in a file
userConfigFullFileName = os.path.join(
fullLogDirName, userConfigFileName)
try:
inputFile = open(userConfigFullFileName, 'w')
try:
print("#GANGA_VERSION = %s" %
config.System.GANGA_VERSION, file=inputFile)
global GANGA_VERSION
GANGA_VERSION = config.System.GANGA_VERSION
# this gets the default values
# Ganga.GPIDev.Lib.Config.Config.print_config_file()
# this should get the changed values
for c in config:
print(config[c], file=inputFile)
finally:
inputFile.close()
# except IOError does not catch the exception ???
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# write gangarc - default configuration
defaultConfigFullFileName = os.path.join(
fullLogDirName, defaultConfigFileName)
try:
outputFile = open(os.path.join(userHomeDir, '.gangarc'), 'r')
try:
writeStringToFile(defaultConfigFullFileName, outputFile.read())
finally:
outputFile.close()
# except IOError does not catch the exception ???
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import ipython history in a file
try:
ipythonFile = open(
os.path.join(os.environ['IPYTHONDIR'], 'history'), 'r')
try:
lastIPythonCommands = ipythonFile.readlines()[-20:]
writeStringToFile(os.path.join(
fullLogDirName, ipythonHistoryFileName), '\n'.join(lastIPythonCommands))
#writeStringToFile(os.path.join(fullLogDirName, ipythonHistoryFileName), ipythonFile.read())
finally:
ipythonFile.close()
# except IOError does not catch the exception ???
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import gangalog in a file
userLogFileLocation = config["Logging"]._logfile
userLogFileLocation = os.path.expanduser(userLogFileLocation)
try:
gangaLogFile = open(userLogFileLocation, 'r')
try:
writeStringToFile(
os.path.join(fullLogDirName, gangaLogFileName), gangaLogFile.read())
finally:
gangaLogFile.close()
# except IOError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import the result of jobs command in the report
jobsListFullFileName = os.path.join(fullLogDirName, jobsListFileName)
try:
outputFile = open(jobsListFullFileName, 'w')
try:
from Ganga.Core.GangaRegistry import getRegistryProxy
print(getRegistryProxy('jobs'), file=outputFile)
finally:
outputFile.close()
# except IOError does not catch the exception ???
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import the result of tasks command in the report
tasksListFullFileName = os.path.join(fullLogDirName, tasksListFileName)
try:
outputFile = open(tasksListFullFileName, 'w')
try:
from Ganga.Core.GangaRegistry import getRegistryProxy
print(getRegistryProxy('tasks'), file=outputFile)
finally:
outputFile.close()
# except IOError does not catch the exception ???
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# save it here because we will change fullLogDirName, but we want this
# to be the archive and to be deleted
folderToArchive = fullLogDirName
# import job relevant info
if (job is not None and isJob):
global JOB_REPORT, APPLICATION_NAME, BACKEND_NAME
JOB_REPORT = True
APPLICATION_NAME = getName(job.application)
BACKEND_NAME = getName(job.backend)
# create job folder
jobFolder = 'job_%s' % job.fqid
fullLogDirName = os.path.join(fullLogDirName, jobFolder)
os.mkdir(fullLogDirName)
# import job summary in a file
fullJobSummaryFileName = os.path.join(
fullLogDirName, jobSummaryFileName)
writeStringToFile(fullJobSummaryFileName, job)
# import job full print in a file
fullJobPrintFileName = os.path.join(
fullLogDirName, jobFullPrintFileName)
try:
inputFile = open(fullJobPrintFileName, 'w')
try:
full_print(job, inputFile)
finally:
inputFile.close()
# except IOError, OSError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# extract file objects
try:
fileObjectsPath = os.path.join(fullLogDirName, 'fileobjects')
os.mkdir(fileObjectsPath)
extractFileObjects(fullJobSummaryFileName, fileObjectsPath)
# except OSError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# copy dir of the job ->input/output and subjobs
try:
parentDir, currentDir = os.path.split(job.inputdir[:-1])
workspaceDir = os.path.join(fullLogDirName, 'workspace')
shutil.copytree(parentDir, workspaceDir)
# except IOError, OSError
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# copy shared area of the job
try:
if hasattr(job.application, 'is_prepared'):
if job.application.is_prepared is not None and job.application.is_prepared is not True:
import os
from Ganga.Utility.Config import getConfig
from Ganga.Utility.files import expandfilename
shared_path = os.path.join(expandfilename(getConfig(
'Configuration')['gangadir']), 'shared', getConfig('Configuration')['user'])
shareddir = os.path.join(
shared_path, job.application.is_prepared.name)
if os.path.isdir(shareddir):
sharedAreaDir = os.path.join(
fullLogDirName, 'sharedarea')
shutil.copytree(shareddir, sharedAreaDir)
# except IOError, OSError
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# copy repository job file
try:
indexFileName = str(job.id) + '.index'
repositoryPath = repositoryPath.replace(
'$usr', os.getenv("USER"))
# check if the job is subjob -> different way of forming the
# path to the repository
is_subjob = job.fqid.find('.') > -1
if is_subjob:
jobid, subjobid = job.fqid.split(
'.')[0], job.fqid.split('.')[1]
repositoryPath = repositoryPath.replace(
'$thousandsNum', str(int(jobid) / 1000))
repositoryPath = os.path.join(repositoryPath, jobid)
else:
repositoryPath = repositoryPath.replace(
'$thousandsNum', str(job.id / 1000))
repositoryFullPath = os.path.join(
config.Configuration.gangadir, repositoryPath)
indexFileSourcePath = os.path.join(
repositoryFullPath, indexFileName)
repositoryFullPath = os.path.join(
repositoryFullPath, str(job.id))
repositoryTargetPath = os.path.join(
fullLogDirName, 'repository', str(job.id))
os.mkdir(os.path.join(fullLogDirName, 'repository'))
shutil.copytree(repositoryFullPath, repositoryTargetPath)
# data files are copied but can not be opened -> add .txt to
# their file names
renameDataFiles(repositoryTargetPath)
if not is_subjob:
# copy .index file
indexFileTargetPath = os.path.join(
fullLogDirName, 'repository', indexFileName)
shutil.copyfile(indexFileSourcePath, indexFileTargetPath)
# except OSError, IOError:
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# import task relevant info
if (job is not None and isTask):
# job is actually a task object
task = job
# create task folder
taskFolder = 'task_%s' % task.id
fullLogDirName = os.path.join(fullLogDirName, taskFolder)
os.mkdir(fullLogDirName)
# import task summary in a file
fullTaskSummaryFileName = os.path.join(
fullLogDirName, taskSummaryFileName)
writeStringToFile(fullTaskSummaryFileName, str(task))
# import task full print in a file
fullTaskPrintFileName = os.path.join(
fullLogDirName, taskFullPrintFileName)
try:
inputFile = open(fullTaskPrintFileName, 'w')
try:
full_print(task, inputFile)
except Exception as err:
logger.debug("Err: %s" % err)
raise err
finally:
inputFile.close()
# except IOError, OSError:
except Exception as err:
logger.debug("Err2: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# copy shared area of the task
try:
if len(task.transforms) > 0:
if hasattr(task.transforms[0], 'application') and hasattr(task.transforms[0].application, 'is_prepared'):
if task.transforms[0].application.is_prepared is not None and task.transforms[0].application.is_prepared is not True:
import os
from Ganga.Utility.Config import getConfig
from Ganga.Utility.files import expandfilename
shared_path = os.path.join(expandfilename(getConfig(
'Configuration')['gangadir']), 'shared', getConfig('Configuration')['user'])
shareddir = os.path.join(
shared_path, task.transforms[0].application.is_prepared.name)
if os.path.isdir(shareddir):
sharedAreaDir = os.path.join(
fullLogDirName, 'sharedarea')
shutil.copytree(shareddir, sharedAreaDir)
# except IOError, OSError
except Exception as err:
logger.debug("Err: %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# copy repository task file
try:
indexFileName = str(task.id) + '.index'
tasksRepositoryPath = tasksRepositoryPath.replace(
'$usr', os.getenv("USER"))
tasksRepositoryPath = tasksRepositoryPath.replace(
'$thousandsNum', str(task.id / 1000))
repositoryFullPath = os.path.join(
config.Configuration.gangadir, tasksRepositoryPath)
indexFileSourcePath = os.path.join(
repositoryFullPath, indexFileName)
repositoryFullPath = os.path.join(
repositoryFullPath, str(task.id))
repositoryTargetPath = os.path.join(
fullLogDirName, 'repository', str(task.id))
os.mkdir(os.path.join(fullLogDirName, 'repository'))
shutil.copytree(repositoryFullPath, repositoryTargetPath)
# data files are copied but can not be opened -> add .txt to
# their file names
renameDataFiles(repositoryTargetPath)
# copy .index file
indexFileTargetPath = os.path.join(
fullLogDirName, 'repository', indexFileName)
shutil.copyfile(indexFileSourcePath, indexFileTargetPath)
# except OSError, IOError:
except Exception as err:
logger.debug("Err %s" % err)
writeErrorLog(str(sys.exc_info()[1]))
# Copy thread stack trace file
try:
thread_trace_source_path = os.path.join(getConfig('Configuration')['gangadir'], thread_trace_file_name)
thread_trace_target_path = os.path.join(fullLogDirName, thread_trace_file_name)
shutil.copyfile(thread_trace_source_path, thread_trace_target_path)
except (OSError, IOError) as err:
logger.debug('Err %s', err)
writeErrorLog(str(sys.exc_info()[1]))
resultArchive = '%s.tar.gz' % folderToArchive
try:
resultFile = tarfile.TarFile.open(resultArchive, 'w:gz')
try:
resultFile.add(
folderToArchive, arcname=os.path.basename(folderToArchive))
# put the error log in the archive
if(os.path.exists(errorLogPath)):
resultFile.add(
errorLogPath, arcname=os.path.basename(errorLogPath))
except Exception as err:
logger.debug("Err: %s" % err)
raise
finally:
resultFile.close()
except Exception as err:
logger.debug("Err2: %s" % err)
raise # pass
# remove temp dir
if(os.path.exists(folderToArchive)):
shutil.rmtree(folderToArchive)
# print the error if there is something
if os.path.exists(errorLogPath):
logger.error('')
logger.error('An error occurred while collecting report information: ' + open(errorLogPath, 'r').read())
logger.error('')
# delete the errorfile from user's pc
if(os.path.exists(errorLogPath)):
os.remove(errorLogPath)
# return the path to the archive and the path to the upload server
return (resultArchive, uploadFileServer, tempDir)
def removeTempFiles(tempDir):
import shutil
# remove temp dir
if os.path.exists(tempDir):
shutil.rmtree(tempDir)
# remove temp files from the Django upload -> if the file is bigger than 2.5
# MB, Django internally stores it in a tmp file during the upload
userTempDir = '/tmp/'
for fileName in os.listdir(userTempDir):
if fileName.find('.upload') > -1:
os.remove(os.path.join(userTempDir, fileName))
tempDir = ''
# call the report function
try:
isJob = isTask = False
# make typecheck of the param passed
if job is not None:
from Ganga.GPIDev.Lib.Job.Job import Job
from Ganga.GPIDev.Base.Proxy import stripProxy
isJob = isinstance(stripProxy(job), Job)
if hasattr(stripProxy(job), '_category') and (stripProxy(job)._category == 'tasks'):
isTask = True
if not (isJob or isTask):
logger.error("report() function argument should be reference to a job or task object")
return
resultArchive, uploadFileServer, tempDir = report_inner(
job, isJob, isTask)
report_bytes = os.path.getsize(resultArchive)
if report_bytes > 1024 * 1024 * 100: # if bigger than 100MB
logger.error(
'The report is bigger than 100MB and can not be uploaded')
else:
run_upload(server=uploadFileServer, path=resultArchive)
except Exception as err:
logger.debug("Err: %s" % err)
removeTempFiles(tempDir)
raise # pass
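The example above builds its diagnostic report inside a directory returned by tempfile.mkdtemp(), packs it into a .tar.gz with tarfile, and removes the scratch directory afterwards. A minimal, self-contained sketch of that collect-then-archive pattern using only the standard library; collect_report() and the section names are hypothetical:

import os
import shutil
import tarfile
import tempfile

def collect_report(sections):
    """Write each (name, text) pair into a fresh temp dir, archive it, then clean up."""
    temp_dir = tempfile.mkdtemp(prefix='report_')
    try:
        for name, text in sections:
            with open(os.path.join(temp_dir, name), 'w') as out:
                out.write(text)
        archive = temp_dir + '.tar.gz'
        with tarfile.open(archive, 'w:gz') as tar:
            tar.add(temp_dir, arcname=os.path.basename(temp_dir))
        return archive
    finally:
        # the archive survives, the scratch directory does not
        shutil.rmtree(temp_dir, ignore_errors=True)

# archive_path = collect_report([('environment.txt', 'PATH=...'), ('jobs.txt', 'no jobs')])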
0
Example 109
def __init__(self, streamDef, bookmark=None, saveOutput=False,
isBlocking=True, maxTimeout=0, eofOnTimeout=False):
""" Base class constructor, performs common initialization
Parameters:
----------------------------------------------------------------
streamDef: The stream definition, potentially containing multiple sources
(not supported yet). See
/nupic/frameworks/opf/jsonschema/stream_def.json for the format
of this dict
bookmark: Bookmark to start reading from. This overrides the first_record
field of the streamDef if provided.
saveOutput: If true, save the output to a csv file in a temp directory.
The path to the generated file can be found in the log
output.
isBlocking: should the read operation block *forever* if the next row of data
is not available but the stream is not yet marked as 'completed'?
maxTimeout: if isBlocking is False, max seconds to wait for more data before
timing out; ignored when isBlocking is True.
eofOnTimeout: If True and we get a read timeout (isBlocking must be False
to get read timeouts), assume we've reached the end of the
input and produce the last aggregated record, if one can be
completed.
"""
# Call superclass constructor
super(StreamReader, self).__init__()
loggerPrefix = 'com.numenta.nupic.data.StreamReader'
self._logger = logging.getLogger(loggerPrefix)
jsonhelpers.validate(streamDef,
schemaPath=pkg_resources.resource_filename(
jsonschema.__name__, "stream_def.json"))
assert len(streamDef['streams']) == 1, "Only 1 source stream is supported"
# Save constructor args
sourceDict = streamDef['streams'][0]
self._recordCount = 0
self._eofOnTimeout = eofOnTimeout
self._logger.debug('Reading stream with the def: %s', sourceDict)
# Dictionary to store record statistics (min and max of scalars for now)
self._stats = None
# ---------------------------------------------------------------------
# Get the stream definition params
# Limiting window of the stream. It will not return any records until the
# record with the 'first_record' ID is read (or the very first record with an
# ID above that). The stream will return EOS once it reads a record with ID
# 'last_record' or above (NOTE: the name 'lastRecord' is misleading because it
# is NOT inclusive).
firstRecordIdx = sourceDict.get('first_record', None)
self._sourceLastRecordIdx = sourceDict.get('last_record', None)
# If a bookmark was given, then override first_record from the stream
# definition.
if bookmark is not None:
firstRecordIdx = None
# Column names must be provided in the streamdef json
# Special case is ['*'], meaning all available names from the record stream
self._streamFieldNames = sourceDict.get('columns', None)
if self._streamFieldNames != None and self._streamFieldNames[0] == '*':
self._needFieldsFiltering = False
else:
self._needFieldsFiltering = True
# Types must be specified in the streamdef json, or in the case of the
# file_record_stream they can be implicit from the file
streamFieldTypes = sourceDict.get('types', None)
self._logger.debug('Types from the def: %s', streamFieldTypes)
# Validate that all types are valid
if streamFieldTypes is not None:
for dataType in streamFieldTypes:
assert FieldMetaType.isValid(dataType)
# Reset, sequence and time fields might be provided by streamdef json
streamResetFieldName = streamDef.get('resetField', None)
streamTimeFieldName = streamDef.get('timeField', None)
streamSequenceFieldName = streamDef.get('sequenceIdField', None)
self._logger.debug('r, t, s fields: %s, %s, %s', streamResetFieldName,
streamTimeFieldName,
streamSequenceFieldName)
# =======================================================================
# Open up the underlying record store
dataUrl = sourceDict.get('source', None)
assert dataUrl is not None
self._recordStore = self._openStream(dataUrl, isBlocking, maxTimeout,
bookmark, firstRecordIdx)
assert self._recordStore is not None
# =======================================================================
# Prepare the data structures we need for returning just the fields
# the caller wants from each record
recordStoreFields = self._recordStore.getFields()
self._recordStoreFieldNames = self._recordStore.getFieldNames()
if not self._needFieldsFiltering:
self._streamFieldNames = self._recordStoreFieldNames
# Build up the field definitions for each field. This is a list of tuples
# of (name, type, special)
self._streamFields = []
for dstIdx, name in enumerate(self._streamFieldNames):
if name not in self._recordStoreFieldNames:
raise RuntimeError("The column '%s' from the stream definition "
"is not present in the underlying stream which has the following "
"columns: %s" % (name, self._recordStoreFieldNames))
fieldIdx = self._recordStoreFieldNames.index(name)
fieldType = recordStoreFields[fieldIdx].type
fieldSpecial = recordStoreFields[fieldIdx].special
# If the types or specials were defined in the stream definition,
# then override what was found in the record store
if streamFieldTypes is not None:
fieldType = streamFieldTypes[dstIdx]
if streamResetFieldName is not None and streamResetFieldName == name:
fieldSpecial = FieldMetaSpecial.reset
if streamTimeFieldName is not None and streamTimeFieldName == name:
fieldSpecial = FieldMetaSpecial.timestamp
if (streamSequenceFieldName is not None and
streamSequenceFieldName == name):
fieldSpecial = FieldMetaSpecial.sequence
self._streamFields.append(FieldMetaInfo(name, fieldType, fieldSpecial))
# ========================================================================
# Create the aggregator which will handle aggregation of records before
# returning them.
self._aggregator = Aggregator(
aggregationInfo=streamDef.get('aggregation', None),
inputFields=recordStoreFields,
timeFieldName=streamDef.get('timeField', None),
sequenceIdFieldName=streamDef.get('sequenceIdField', None),
resetFieldName=streamDef.get('resetField', None))
# We rely on the aggregator to tell us the bookmark of the last raw input
# that contributed to the aggregated record
self._aggBookmark = None
# Compute the aggregation period in terms of months and seconds
if 'aggregation' in streamDef:
self._aggMonthsAndSeconds = nupic.support.aggregationToMonthsSeconds(
streamDef.get('aggregation'))
else:
self._aggMonthsAndSeconds = None
# ========================================================================
# Are we saving the generated output to a csv?
if saveOutput:
tmpDir = tempfile.mkdtemp()
outFilename = os.path.join(tmpDir, "generated_output.csv")
self._logger.info("StreamReader: Saving generated records to: '%s'" %
outFilename)
self._writer = FileRecordStream(streamID=outFilename,
write=True,
fields=self._streamFields)
else:
self._writer = None
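The saveOutput branch above writes the generated records to a CSV file placed inside a fresh mkdtemp() directory. A minimal sketch of that idea with the standard library only; the csv module stands in for FileRecordStream and the field names are illustrative:

import csv
import os
import tempfile

tmp_dir = tempfile.mkdtemp()
out_filename = os.path.join(tmp_dir, 'generated_output.csv')
with open(out_filename, 'w', newline='') as handle:
    writer = csv.writer(handle)
    writer.writerow(['timestamp', 'value'])       # field names
    writer.writerow(['2015-01-01 00:00:00', 1])   # one example record
print('generated records saved to %s' % out_filename)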
0
Example 110
Project: gnowsys-studio Source File: dump_all_rdf.py
def rdf_all(notation='xml'):
"""
Function takes the title of a node and the rdf notation.
"""
valid_formats = ["xml", "n3", "ntriples", "trix"]
default_graph_uri = "http://gstudio.gnowledge.org/rdfstore"
configString = "/var/tmp/rdfstore"
# Get the IOMemory plugin.
store = plugin.get('IOMemory', Store)('rdfstore')
# Open previously created store, or create it if it doesn't exist yet
graph = Graph(store="IOMemory",
identifier = URIRef(default_graph_uri))
path = mkdtemp()
rt = graph.open(path, create=False)
if rt == NO_STORE:
graph.open(path, create=True)
else:
assert rt == VALID_STORE, "The underlying store is corrupt"
# Now we'll add some triples to the graph & commit the changes
graph.bind("gstudio", "http://gnowledge.org/")
exclusion_fields = ["id", "rght", "node_ptr_id", "image", "lft", "_state", "_altnames_cache", "_tags_cache", "nid_ptr_id", "_mptt_cached_fields"]
for node in NID.objects.all():
node_dict=node.ref.__dict__
node_type = node.reftype
try:
if (node_type=='Gbobject'):
node=Gbobject.objects.get(title=node)
rdflib=link(node)
elif (node_type=='None'):
node=Gbobject.objects.get(title=node)
rdflib=link(node)
elif (node_type=='Process'):
node=Gbobject.objects.get(title=node)
rdflib=link(node)
elif (node_type=='System'):
node=Gbobject.objects.get(title=node)
rdflib=link(node)
elif (node_type=='Objecttype'):
node=Objecttype.objects.get(title=node)
rdflib=link(node)
elif (node_type=='Attributetype'):
node=Attributetype.objects.get(title=node)
rdflib=link(node)
elif (node_type=='Complement'):
node=Complement.objects.get(title=node)
rdflib=link(node)
elif (node_type=='Union'):
node=Union.objects.get(title=node)
rdflib=link(node)
elif (node_type=='Intersection'):
node=Intersection.objects.get(title=node)
rdflib=link(node)
elif (node_type=='Expression'):
node=Expression.objects.get(title=node)
rdflib=link(node)
elif (node_type=='Processtype'):
node=Processtype.objects.get(title=node)
rdflib=link(node)
elif (node_type=='Systemtype'):
node=Systemtype.objects.get(title=node)
rdflib=link(node)
elif (node_type=='AttributeSpecification'):
node=AttributeSpecification.objects.get(title=node)
rdflib=link(node)
elif (node_type=='RelationSpecification'):
node=RelationSpecification.objects.get(title=node)
rdflib=link(node)
elif(node_type=='Attribute'):
node=Attribute.objects.get(title=node)
rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
elif(node_type=='Relationtype' ):
node=Relationtype.objects.get(title=node)
rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
elif(node_type=='Metatype'):
node=Metatype.objects.get(title=node)
rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
except:
if(node_type=='Attribute'):
node=Attribute.objects.get(title=node)
rdflib= Namespace('http://sbox.gnowledge.org/gstudio/')
if(node_type=='Relationtype' ):
node=Attribute.objects.get(title=node)
rdflib= Namespace('http://sbox.gnowledge.org/gstudio/')
if(node_type=='Metatype'):
node=Attribute.objects.get(title=node)
rdflib= Namespace('http://sbox.gnowledge.org/gstudio/')
subject=str(node_dict['id'])
for key in node_dict:
if key not in exclusion_fields:
predicate=str(key)
pobject=str(node_dict[predicate])
graph.add((rdflib[subject], rdflib[predicate], Literal(pobject)))
rdf_code=graph.serialize(format=notation)
#path to store the rdf in a file
x = os.path.join(os.path.dirname(__file__), 'rdffiles.rdf')
temp_path=str(x)
file = open(temp_path, 'w')
file.write(rdf_code)
file.close()
graph.commit()
print rdf_code
graph.close()
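Here mkdtemp() simply supplies a scratch path that is handed to graph.open() as the store location. A minimal standard-library sketch of giving a component a disposable on-disk working area; the triples.nt file is only illustrative:

import os
import shutil
import tempfile

store_path = tempfile.mkdtemp(prefix='rdfstore_')
try:
    # stand-in for graph.open(store_path, create=True): the component gets
    # its own throwaway directory to keep its files in
    with open(os.path.join(store_path, 'triples.nt'), 'w') as store:
        store.write('<s> <p> "o" .\n')
finally:
    shutil.rmtree(store_path)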
0
Example 111
Project: medpy Source File: metadata.py
def test_MetadataConsistency(self):
"""
This test checks the ability of different image formats to consistently save
meta-data information. Especially if a conversion between formats that involves
different 3rd-party modules is required, this is not always guaranteed.
The images are saved in one format, loaded and then saved in another format.
Subsequently the differences in the meta-data is checked.
Currently this test can only check:
- voxel spacing
- image offset
Note that some other tests are inherently performed by the
loadsave.TestIOFacilities class:
- data type
- shape
- content
With the verbose switches, a comprehensive list of the results can be obtained.
"""
####
# VERBOSE SETTINGS
# The following variables can be used to print some nicely formatted
# additional output. When one of them is set to True, this unittest
# should be run stand-alone.
####
# Print a list of format to format conversion which preserve meta-data
consistent = True
# Print a list of format to format conversion which do not preserve meta-data
inconsistent = True
# Print a list of formats that failed conversion in general
unsupported = False
####
# OTHER SETTINGS
####
# debug settings
logger = Logger.getInstance()
#logger.setLevel(logging.DEBUG)
# run test either for most important formats or for all (see loadsave.TestIOFacilities)
#__suffixes = self.__important # (choice 1)
__suffixes = self.__pydicom + self.__nifti + self.__itk + self.__itk_more # (choice 2)
# dimensions and dtypes to check
__suffixes = list(set(__suffixes))
__ndims = [1, 2, 3, 4, 5]
__dtypes = [scipy.bool_,
scipy.int8, scipy.int16, scipy.int32, scipy.int64,
scipy.uint8, scipy.uint16, scipy.uint32, scipy.uint64,
scipy.float32, scipy.float64, #scipy.float128, # last one removed, as not present on every machine
scipy.complex64, scipy.complex128, ] #scipy.complex256 ## removed, as not present on every machine
# prepare struct to save settings that passed the test
consistent_types = dict.fromkeys(__suffixes)
for k0 in consistent_types:
consistent_types[k0] = dict.fromkeys(__suffixes)
for k1 in consistent_types[k0]:
consistent_types[k0][k1] = dict.fromkeys(__ndims)
for k2 in consistent_types[k0][k1]:
consistent_types[k0][k1][k2] = []
# prepare struct to save settings that did not
inconsistent_types = dict.fromkeys(__suffixes)
for k0 in inconsistent_types:
inconsistent_types[k0] = dict.fromkeys(__suffixes)
for k1 in inconsistent_types[k0]:
inconsistent_types[k0][k1] = dict.fromkeys(__ndims)
for k2 in inconsistent_types[k0][k1]:
inconsistent_types[k0][k1][k2] = dict.fromkeys(__dtypes)
# prepare struct to save settings that did not pass the data integrity test
unsupported_types = dict.fromkeys(__suffixes)
for k0 in consistent_types:
unsupported_types[k0] = dict.fromkeys(__suffixes)
for k1 in unsupported_types[k0]:
unsupported_types[k0][k1] = dict.fromkeys(__ndims)
for k2 in unsupported_types[k0][k1]:
unsupported_types[k0][k1][k2] = dict.fromkeys(__dtypes)
# create artificial images, save them, load them again and compare them
path = tempfile.mkdtemp()
try:
for ndim in __ndims:
logger.debug('Testing for dimension {}...'.format(ndim))
arr_base = scipy.random.randint(0, 10, range(10, ndim + 10))
for dtype in __dtypes:
arr_save = arr_base.astype(dtype)
for suffix_from in __suffixes:
# do not run test, if in avoid array
if ndim in self.__avoid and suffix_from in self.__avoid[ndim]:
unsupported_types[suffix_from][suffix_from][ndim][dtype] = "Test skipped, as combination in the tests __avoid array."
continue
# save array as file, load again to obtain header and set the meta-data
image_from = '{}/img{}'.format(path, suffix_from)
try:
save(arr_save, image_from, None, True)
if not os.path.exists(image_from):
raise Exception('Image of type {} with shape={}/dtype={} has been saved without exception, but the file does not exist.'.format(suffix_from, arr_save.shape, dtype))
except Exception as e:
unsupported_types[suffix_from][suffix_from][ndim][dtype] = e.message
continue
try:
img_from, hdr_from = load(image_from)
img_from = img_from.astype(dtype) # change dtype of loaded image again, as sometimes the type is higher (e.g. int64 instead of int32) after loading!
except Exception as e:
unsupported_types[suffix_from][suffix_from][ndim][dtype] = 'Saved reference image of type {} with shape={}/dtype={} could not be loaded. Reason: {}'.format(suffix_from, arr_save.shape, dtype, e.message)
continue
header.set_pixel_spacing(hdr_from, [scipy.random.rand() * scipy.random.randint(1, 10) for _ in range(img_from.ndim)])
try:
header.set_pixel_spacing(hdr_from, [scipy.random.rand() * scipy.random.randint(1, 10) for _ in range(img_from.ndim)])
header.set_offset(hdr_from, [scipy.random.rand() * scipy.random.randint(1, 10) for _ in range(img_from.ndim)])
except Exception as e:
logger.error('Could not set the header meta-data for image of type {} with shape={}/dtype={}. This should not happen and hints to a bug in the code. Signaled reason is: {}'.format(suffix_from, arr_save.shape, dtype, e))
unsupported_types[suffix_from][suffix_from][ndim][dtype] = e.message
continue
for suffix_to in __suffixes:
# do not run test, if in avoid array
if ndim in self.__avoid and suffix_to in self.__avoid[ndim]:
unsupported_types[suffix_from][suffix_to][ndim][dtype] = "Test skipped, as combination in the tests __avoid array."
continue
# for each other format, try format to format conversion an check if the meta-data is consistent
image_to = '{}/img_to{}'.format(path, suffix_to)
try:
save(img_from, image_to, hdr_from, True)
if not os.path.exists(image_to):
raise Exception('Image of type {} with shape={}/dtype={} has been saved without exception, but the file does not exist.'.format(suffix_to, arr_save.shape, dtype))
except Exception as e:
unsupported_types[suffix_from][suffix_from][ndim][dtype] = e.message
continue
try:
_, hdr_to = load(image_to)
except Exception as e:
unsupported_types[suffix_from][suffix_to][ndim][dtype] = 'Saved testing image of type {} with shape={}/dtype={} could not be loaded. Reason: {}'.format(suffix_to, arr_save.shape, dtype, e.message)
continue
msg = self.__diff(hdr_from, hdr_to)
if msg:
inconsistent_types[suffix_from][suffix_to][ndim][dtype] = msg
else:
consistent_types[suffix_from][suffix_to][ndim].append(dtype)
# remove testing image
if os.path.exists(image_to): os.remove(image_to)
# remove reference image
if os.path.exists(image_to): os.remove(image_to)
except Exception:
if not os.listdir(path): os.rmdir(path)
else: logger.debug('Could not delete temporary directory {}. Is not empty.'.format(path))
raise
if consistent:
print '\nthe following format conversions are meta-data consistent:'
print 'from\tto\tndim\tdtypes'
for suffix_from in consistent_types:
for suffix_to in consistent_types[suffix_from]:
for ndim, dtypes in consistent_types[suffix_from][suffix_to].iteritems():
if list == type(dtypes) and not 0 == len(dtypes):
print '{}\t{}\t{}D\t{}'.format(suffix_from, suffix_to, ndim, map(lambda x: str(x).split('.')[-1][:-2], dtypes))
if inconsistent:
print '\nthe following format conversions are not meta-data consistent:'
print 'from\tto\tndim\tdtype\t\terror'
for suffix_from in inconsistent_types:
for suffix_to in inconsistent_types[suffix_from]:
for ndim in inconsistent_types[suffix_from][suffix_to]:
for dtype, msg in inconsistent_types[suffix_from][suffix_to][ndim].iteritems():
if msg:
print '{}\t{}\t{}D\t{}\t\t{}'.format(suffix_from, suffix_to, ndim, str(dtype).split('.')[-1][:-2], msg)
if unsupported:
print '\nthe following format conversions could not be tested due to errors:'
print 'from\tto\tndim\tdtype\t\terror'
for suffix_from in unsupported_types:
for suffix_to in unsupported_types[suffix_from]:
for ndim in unsupported_types[suffix_from][suffix_to]:
for dtype, msg in unsupported_types[suffix_from][suffix_to][ndim].iteritems():
if msg:
print '{}\t{}\t{}D\t{}\t\t{}'.format(suffix_from, suffix_to, ndim, str(dtype).split('.')[-1][:-2], msg)
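The test above round-trips images through files created inside a mkdtemp() directory and only removes the directory once it is empty again. A condensed sketch of that save/load/compare pattern, with a plain byte string standing in for the image data:

import os
import tempfile

path = tempfile.mkdtemp()
target = os.path.join(path, 'img.raw')
try:
    reference = b'\x00\x01\x02\x03'           # artificial image data
    with open(target, 'wb') as handle:
        handle.write(reference)                # "save"
    with open(target, 'rb') as handle:
        assert handle.read() == reference      # "load" and compare
finally:
    if os.path.exists(target):
        os.remove(target)
    if not os.listdir(path):
        os.rmdir(path)                         # only removed once it is empty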
0
Example 112
Project: apogee Source File: moog.py
def moogsynth(*args,**kwargs):
"""
NAME:
moogsynth
PURPOSE:
Run a MOOG synthesis (direct interface to the MOOG code; use 'synth' for a general routine that generates the non-continuum-normalized spectrum, convolves with the LSF and macroturbulence, and optionally continuum normalizes the output)
INPUT ARGUMENTS:
lists with abundances (they don't all have to have the same length, missing ones are filled in with zeros):
[Atomic number1,diff1_1,diff1_2,diff1_3,...,diff1_N]
[Atomic number2,diff2_1,diff2_2,diff2_3,...,diff2_N]
...
[Atomic numberM,diffM_1,diffM_2,diffM_3,...,diffM_N]
SYNTHESIS KEYWORDS:
isotopes= ('solar') use 'solar' or 'arcturus' isotope ratios; can also be a dictionary with isotope ratios (e.g., isotopes= {'108.00116':'1.001','606.01212':'1.01'})
wmin, wmax, dw, width= (15000.000, 17000.000, 0.10000000, 7.0000000) spectral synthesis limits, step, and width of calculation (see MOOG)
doflux= (False) if True, calculate the continuum flux instead
LINELIST KEYWORDS:
linelist= (None) linelist to use; if this is None, the code looks for a weed-out version of the linelist appropriate for the given model atmosphere; otherwise can be set to the path of a linelist file or to the name of an APOGEE linelist
ATMOSPHERE KEYWORDS:
Either:
(a) modelatm= (None) can be set to the filename of a model atmosphere (needs to end in .mod)
(b) specify the stellar parameters for a grid point in model atm by
- lib= ('kurucz_filled') spectral library
- teff= (4500) grid-point Teff
- logg= (2.5) grid-point logg
- metals= (0.) grid-point metallicity
- cm= (0.) grid-point carbon-enhancement
- am= (0.) grid-point alpha-enhancement
- dr= return the path corresponding to this data release
vmicro= (2.) microturbulence (km/s) (only used if the MOOG-formatted atmosphere file doesn't already exist)
OUTPUT:
(wavelengths,spectra (nspec,nwave)) for synth driver
(wavelengths,continuum spectrum (nwave)) for doflux driver
HISTORY:
2015-02-13 - Written - Bovy (IAS)
"""
doflux= kwargs.pop('doflux',False)
# Get the spectral synthesis limits
wmin= kwargs.pop('wmin',_WMIN_DEFAULT)
wmax= kwargs.pop('wmax',_WMAX_DEFAULT)
dw= kwargs.pop('dw',_DW_DEFAULT)
width= kwargs.pop('width',_WIDTH_DEFAULT)
linelist= kwargs.pop('linelist',None)
# Parse isotopes
isotopes= kwargs.pop('isotopes','solar')
if isinstance(isotopes,str) and isotopes.lower() == 'solar':
isotopes= {'108.00116':'1.001',
'606.01212':'1.01',
'606.01213':'90',
'606.01313':'180',
'607.01214':'1.01',
'607.01314':'90',
'607.01215':'273',
'608.01216':'1.01',
'608.01316':'90',
'608.01217':'1101',
'608.01218':'551',
'114.00128':'1.011',
'114.00129':'20',
'114.00130':'30',
'101.00101':'1.001',
'101.00102':'1000',
'126.00156':'1.00'}
elif isinstance(isotopes,str) and isotopes.lower() == 'arcturus':
isotopes= {'108.00116':'1.001',
'606.01212':'0.91',
'606.01213':'8',
'606.01313':'81',
'607.01214':'0.91',
'607.01314':'8',
'607.01215':'273',
'608.01216':'0.91',
'608.01316':'8',
'608.01217':'1101',
'608.01218':'551',
'114.00128':'1.011',
'114.00129':'20',
'114.00130':'30',
'101.00101':'1.001',
'101.00102':'1000',
'126.00156':'1.00'}
elif not isinstance(isotopes,dict):
raise ValueError("'isotopes=' input not understood, should be 'solar', 'arcturus', or a dictionary")
# Get the filename of the model atmosphere
modelatm= kwargs.pop('modelatm',None)
if not modelatm is None:
if isinstance(modelatm,str) and os.path.exists(modelatm):
modelfilename= modelatm
elif isinstance(modelatm,str):
raise ValueError('modelatm= input is a non-existing filename')
else:
raise ValueError('modelatm= in moogsynth should be set to the name of a file')
else:
modelfilename= appath.modelAtmospherePath(**kwargs)
# Check whether a MOOG version exists
if not os.path.exists(modelfilename.replace('.mod','.org')):
# Convert to MOOG format
convert_modelAtmosphere(modelatm=modelfilename,**kwargs)
modeldirname= os.path.dirname(modelfilename)
modelbasename= os.path.basename(modelfilename)
# Get the name of the linelist
if linelist is None:
linelistfilename= modelbasename.replace('.mod','.lines')
if not os.path.exists(os.path.join(modeldirname,linelistfilename)):
raise IOError('No linelist given and no weed-out version found for this atmosphere; either specify a linelist or run weedout first')
linelistfilename= os.path.join(modeldirname,linelistfilename)
elif os.path.exists(linelist):
linelistfilename= linelist
else:
linelistfilename= appath.linelistPath(linelist,
dr=kwargs.get('dr',None))
if not os.path.exists(linelistfilename):
raise RuntimeError("Linelist %s not found; download linelist w/ apogee.tools.download.linelist (if you have access)" % linelistfilename)
# We will run in a subdirectory of the relevant model atmosphere
tmpDir= tempfile.mkdtemp(dir=modeldirname)
shutil.copy(linelistfilename,tmpDir)
# Cut the linelist to the desired wavelength range
with open(os.path.join(tmpDir,'cutlines.awk'),'w') as awkfile:
awkfile.write('$1>%.3f && $1<%.3f\n' %(wmin-width,wmax+width))
keeplines= open(os.path.join(tmpDir,'lines.tmp'),'w')
stderr= open('/dev/null','w')
try:
subprocess.check_call(['awk','-f','cutlines.awk',
os.path.basename(linelistfilename)],
cwd=tmpDir,stdout=keeplines,stderr=stderr)
keeplines.close()
shutil.copy(os.path.join(tmpDir,'lines.tmp'),
os.path.join(tmpDir,os.path.basename(linelistfilename)))
except subprocess.CalledProcessError:
print("Removing unnecessary linelist entries failed ...")
finally:
os.remove(os.path.join(tmpDir,'cutlines.awk'))
os.remove(os.path.join(tmpDir,'lines.tmp'))
stderr.close()
# Also copy the strong lines
stronglinesfilename= appath.linelistPath('stronglines.vac',
dr=kwargs.get('dr',None))
if not os.path.exists(stronglinesfilename):
try:
download.linelist('stronglines.vac',dr=kwargs.get('dr',None))
except:
raise RuntimeError("Linelist stronglines.vac not found or downloading failed; download linelist w/ apogee.tools.download.linelist (if you have access)")
finally:
if os.path.exists(os.path.join(tmpDir,'synth.par')):
os.remove(os.path.join(tmpDir,'synth.par'))
if os.path.exists(os.path.join(tmpDir,'std.out')):
os.remove(os.path.join(tmpDir,'std.out'))
if os.path.exists(os.path.join(tmpDir,
os.path.basename(linelistfilename))):
os.remove(os.path.join(tmpDir,os.path.basename(linelistfilename)))
if os.path.exists(os.path.join(tmpDir,'stronglines.vac')):
os.remove(os.path.join(tmpDir,'stronglines.vac'))
os.rmdir(tmpDir)
shutil.copy(stronglinesfilename,tmpDir)
# Now write the script file
if len(args) == 0: #special case that there are *no* differences
args= ([26,0.],)
nsynths= numpy.array([len(args[ii])-1 for ii in range(len(args))])
nsynth= numpy.amax(nsynths) #Take the longest abundance list
if nsynth > 5:
raise ValueError("MOOG only allows five syntheses to be run at the same time; please reduce the number of abundance values in the apogee.modelspec.moog.moogsynth input")
nabu= len(args)
with open(os.path.join(tmpDir,'synth.par'),'w') as parfile:
if doflux:
parfile.write('doflux\n')
else:
parfile.write('synth\n')
parfile.write('terminal x11\n')
parfile.write('plot 1\n')
parfile.write("standard_out std.out\n")
parfile.write("summary_out '../synth.out'\n")
parfile.write("smoothed_out '/dev/null'\n")
parfile.write("strong 1\n")
parfile.write("damping 0\n")
parfile.write("stronglines_in stronglines.vac\n")
parfile.write("model_in '../%s'\n" % modelbasename.replace('.mod','.org'))
parfile.write("lines_in %s\n" % os.path.basename(linelistfilename))
parfile.write("atmosphere 1\n")
parfile.write("molecules 2\n")
parfile.write("lines 1\n")
parfile.write("flux/int 0\n")
# Write the isotopes
niso= len(isotopes)
parfile.write("isotopes %i %i\n" % (niso,nsynth))
for iso in isotopes:
isotopestr= iso
for ii in range(nsynth):
isotopestr+= ' '+isotopes[iso]
parfile.write(isotopestr+'\n')
# Abundances
parfile.write("abundances %i %i\n" % (nabu,nsynth))
for ii in range(nabu):
abustr= '%i' % args[ii][0]
for jj in range(nsynth):
try:
abustr+= ' %.3f' % args[ii][jj+1]
except IndexError:
abustr+= ' 0.0'
parfile.write(abustr+"\n")
# Synthesis limits
parfile.write("synlimits\n") # Add 0.001 to make sure wmax is included
parfile.write("%.3f %.3f %.3f %.3f\n" % (wmin,wmax+0.001,dw,width))
# Now run synth
sys.stdout.write('\r'+"Running MOOG synth ...\r")
sys.stdout.flush()
try:
p= subprocess.Popen(['moogsilent'],
cwd=tmpDir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.stdin.write(b'synth.par\n')
stdout, stderr= p.communicate()
except subprocess.CalledProcessError:
print("Running synth failed ...")
finally:
if os.path.exists(os.path.join(tmpDir,'synth.par')):
os.remove(os.path.join(tmpDir,'synth.par'))
if os.path.exists(os.path.join(tmpDir,'std.out')):
os.remove(os.path.join(tmpDir,'std.out'))
if os.path.exists(os.path.join(tmpDir,
os.path.basename(linelistfilename))):
os.remove(os.path.join(tmpDir,os.path.basename(linelistfilename)))
if os.path.exists(os.path.join(tmpDir,'stronglines.vac')):
os.remove(os.path.join(tmpDir,'stronglines.vac'))
os.rmdir(tmpDir)
sys.stdout.write('\r'+download._ERASESTR+'\r')
sys.stdout.flush()
# Now read the output
wavs= numpy.arange(wmin,wmax+dw,dw)
if wavs[-1] > wmax+dw/2.: wavs= wavs[:-1]
if doflux:
contdata= numpy.loadtxt(os.path.join(modeldirname,'synth.out'),
converters={0:lambda x: x.replace('D','E'),
1:lambda x: x.replace('D','E')},
usecols=[0,1])
# Wavelength in summary file appears to be wrong from comparing to
# the standard output file
out= contdata[:,1]
out/= numpy.nanmean(out) # Make the numbers more manageable
else:
with open(os.path.join(modeldirname,'synth.out')) as summfile:
out= numpy.empty((nsynth,len(wavs)))
for ii in range(nsynth):
# Skip to beginning of synthetic spectrum
while True:
line= summfile.readline()
if line[0] == 'M': break
summfile.readline()
tout= []
while True:
line= summfile.readline()
if not line or line[0] == 'A': break
tout.extend([float(s) for s in line.split()])
out[ii]= numpy.array(tout)
os.remove(os.path.join(modeldirname,'synth.out'))
if doflux:
return (wavs,out)
else:
return (wavs,1.-out)
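moogsynth() creates its working directory with tempfile.mkdtemp(dir=modeldirname) so the scratch area sits next to the model atmosphere, runs the external program there via cwd=, and removes it afterwards. A minimal sketch of that run-in-a-scratch-subdirectory pattern, assuming a POSIX environment; the ls command is a placeholder for moogsilent and job.par for synth.par:

import os
import shutil
import subprocess
import tempfile

work_parent = os.getcwd()                      # stands in for modeldirname
tmp_dir = tempfile.mkdtemp(dir=work_parent)    # scratch dir created next to the inputs
try:
    with open(os.path.join(tmp_dir, 'job.par'), 'w') as par:
        par.write('synth\n')
    subprocess.check_call(['ls', '-l'], cwd=tmp_dir)   # placeholder for ['moogsilent']
finally:
    shutil.rmtree(tmp_dir)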
0
Example 113
Project: dash-hack Source File: run.py
def runTest(self, path, workingDir, _stdout, _stderr, args = []):
global errorCode
if self.error:
return
args = args[:]
timestamp = datetime.datetime.now()
logfile = self.getLogName(path, timestamp)
exe = os.path.abspath(path)
userlog = [a for a in args if a.startswith("--gtest_output=")]
if len(userlog) == 0:
args.append("--gtest_output=xml:" + logfile)
else:
logfile = userlog[0][userlog[0].find(":")+1:]
if self.targetos == "android" and exe.endswith(".apk"):
print "Run java tests:", exe
try:
# get package info
output = Popen(self.aapt + ["dump", "xmltree", exe, "AndroidManifest.xml"], stdout=PIPE, stderr=_stderr).communicate()
if not output[0]:
print >> _stderr, "fail to dump manifest from", exe
return
tags = re.split(r"[ ]+E: ", output[0])
# get package name
manifest_tag = [t for t in tags if t.startswith("manifest ")]
if not manifest_tag:
print >> _stderr, "fail to read package name from", exe
return
pkg_name = re.search(r"^[ ]+A: package=\"(?P<pkg>.*?)\" \(Raw: \"(?P=pkg)\"\)\r?$", manifest_tag[0], flags=re.MULTILINE).group("pkg")
# get test instrumentation info
instrumentation_tag = [t for t in tags if t.startswith("instrumentation ")]
if not instrumentation_tag:
print >> _stderr, "can not find instrumentation detials in", exe
return
pkg_runner = re.search(r"^[ ]+A: android:name\(0x[0-9a-f]{8}\)=\"(?P<runner>.*?)\" \(Raw: \"(?P=runner)\"\)\r?$", instrumentation_tag[0], flags=re.MULTILINE).group("runner")
pkg_target = re.search(r"^[ ]+A: android:targetPackage\(0x[0-9a-f]{8}\)=\"(?P<pkg>.*?)\" \(Raw: \"(?P=pkg)\"\)\r?$", instrumentation_tag[0], flags=re.MULTILINE).group("pkg")
if not pkg_name or not pkg_runner or not pkg_target:
print >> _stderr, "can not find instrumentation detials in", exe
return
if self.options.junit_package:
if self.options.junit_package.startswith("."):
pkg_target += self.options.junit_package
else:
pkg_target = self.options.junit_package
# uninstall previously installed package
print >> _stderr, "Uninstalling old", pkg_name, "from device..."
Popen(self.adb + ["uninstall", pkg_name], stdout=PIPE, stderr=_stderr).communicate()
print >> _stderr, "Installing new", exe, "to device...",
output = Popen(self.adb + ["install", exe], stdout=PIPE, stderr=PIPE).communicate()
if output[0] and output[0].strip().endswith("Success"):
print >> _stderr, "Success"
else:
print >> _stderr, "Failure"
print >> _stderr, "Failed to install", exe, "to device"
return
print >> _stderr, "Running jUnit tests for ", pkg_target
if self.setUp:
self.setUp()
Popen(self.adb + ["shell", "am instrument -w -e package " + pkg_target + " " + pkg_name + "/" + pkg_runner], stdout=_stdout, stderr=_stderr).wait()
if self.tearDown:
self.tearDown()
except OSError:
pass
return
elif self.targetos == "android":
hostlogpath = ""
usercolor = [a for a in args if a.startswith("--gtest_color=")]
if len(usercolor) == 0 and _stdout.isatty() and hostos != "nt":
args.append("--gtest_color=yes")
try:
tempdir = "/data/local/tmp/"
andoidcwd = tempdir + getpass.getuser().replace(" ","") + "_" + self.options.mode +"/"
exename = os.path.basename(exe)
androidexe = andoidcwd + exename
# upload
_stderr.write("Uploading... ")
output = Popen(self.adb + ["push", exe, androidexe], stdout=_stdout, stderr=_stderr).wait()
if output != 0:
print >> _stderr, "adb finishes unexpectedly with error code", output
return
# chmod
output = Popen(self.adb + ["shell", "chmod 777 " + androidexe], stdout=_stdout, stderr=_stderr).wait()
if output != 0:
print >> _stderr, "adb finishes unexpectedly with error code", output
return
# run
if self.options.help:
command = exename + " --help"
else:
command = exename + " " + " ".join(args)
print >> _stderr, "Run command:", command
if self.setUp:
self.setUp()
Popen(self.adb + ["shell", "export OPENCV_TEST_DATA_PATH=" + self.options.test_data_path + "&& cd " + andoidcwd + "&& ./" + command], stdout=_stdout, stderr=_stderr).wait()
if self.tearDown:
self.tearDown()
# try get log
if not self.options.help:
#_stderr.write("Pull log... ")
hostlogpath = os.path.join(workingDir, logfile)
output = Popen(self.adb + ["pull", andoidcwd + logfile, hostlogpath], stdout=_stdout, stderr=PIPE).wait()
if output != 0:
print >> _stderr, "adb finishes unexpectedly with error code", output
return
#rm log
Popen(self.adb + ["shell", "rm " + andoidcwd + logfile], stdout=PIPE, stderr=PIPE).wait()
# clean temporary files
Popen(self.adb + ["shell", "rm " + tempdir + "__opencv_temp.*"], stdout=PIPE, stderr=PIPE).wait()
except OSError:
pass
if os.path.isfile(hostlogpath):
return hostlogpath
return None
elif path == "java":
cmd = [self.ant_executable,
"-Dopencv.build.type="
+ (self.options.configuration if self.options.configuration else self.build_type),
"buildAndTest"]
print >> _stderr, "Run command:", " ".join(cmd)
try:
errorCode = Popen(cmd, stdout=_stdout, stderr=_stderr, cwd = self.java_test_binary_dir + "/.build").wait()
except:
print "Unexpected error:", sys.exc_info()[0]
return None
else:
cmd = [exe]
if self.options.help:
cmd.append("--help")
else:
cmd.extend(args)
orig_temp_path = os.environ.get('OPENCV_TEMP_PATH')
temp_path = tempfile.mkdtemp(prefix="__opencv_temp.", dir=orig_temp_path or None)
os.environ['OPENCV_TEMP_PATH'] = temp_path
print >> _stderr, "Run command:", " ".join(cmd)
try:
errorCode = Popen(cmd, stdout=_stdout, stderr=_stderr, cwd = workingDir).wait()
except:
print "Unexpected error:", sys.exc_info()[0]
# clean temporary files
if orig_temp_path:
os.environ['OPENCV_TEMP_PATH'] = orig_temp_path
else:
del os.environ['OPENCV_TEMP_PATH']
try:
shutil.rmtree(temp_path)
pass
except:
pass
logpath = os.path.join(workingDir, logfile)
if os.path.isfile(logpath):
return logpath
return None
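The last branch above points OPENCV_TEMP_PATH at a fresh mkdtemp() directory for the duration of the test run, then restores the variable and deletes the directory. A minimal sketch of that temporary environment redirection; MY_TOOL_TEMP_PATH and the python -c command are placeholders for the real variable and test executable:

import os
import shutil
import subprocess
import sys
import tempfile

orig_temp_path = os.environ.get('MY_TOOL_TEMP_PATH')
temp_path = tempfile.mkdtemp(prefix='__mytool_temp.', dir=orig_temp_path or None)
os.environ['MY_TOOL_TEMP_PATH'] = temp_path
try:
    subprocess.call([sys.executable, '-c',
                     'import os; print(os.environ["MY_TOOL_TEMP_PATH"])'])
finally:
    # restore the original value (or drop the variable) and clean up
    if orig_temp_path:
        os.environ['MY_TOOL_TEMP_PATH'] = orig_temp_path
    else:
        del os.environ['MY_TOOL_TEMP_PATH']
    shutil.rmtree(temp_path, ignore_errors=True)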
0
Example 114
Project: mdp-toolkit Source File: facade.py
def show_training(flow, data_iterables, msg_iterables=None, stop_messages=None,
path=None, tracer=None,
debug=False, show_size=False, open_browser=True,
**kwargs):
"""Perform both the flow training and the training inspection.
The return value is the filename of the slideshow HTML file.
This function must be used with the untrained flow (no previous call
of Flow.train is required, the training happens here).
This function is more convenient than inspect_training since it includes
all required steps, but it is also less customizable. After everything
is complete the inspection slideshow is opened in the browser.
flow -- The untrained Flow or BiFlow. After this function has been called
the flow will be fully trained.
data_iterables, msg_iterables, stop_messages -- Same as for calling train
on a flow.
path -- Path where both the training snapshots and the inspection slides
will be stored. If None (default value) a temporary directory will be
used.
tracer -- Instance of InspectionHTMLTracer, can be None for
default class.
debug -- Ignore exception during training and try to complete the slideshow
(default value is False).
show_size -- Show the approximate memory footprint of all nodes.
open_browser -- If True (default value) then the slideshow file is
automatically opened in a webbrowser. One can also use a string value
with the browser name (for webbrowser.get) to request a specific
browser.
**kwargs -- Additional arguments for flow.train can be specified
as keyword arguments.
"""
if path is None:
path = tempfile.mkdtemp(prefix='MDP_')
# get first part of data iterators as sample data for inspection
# if data_iterables is an array, wrap it up in a list
if isinstance(data_iterables, numx.ndarray):
data_iterables = [[data_iterables]] * len(flow)
x_samples = []
for i, data_iterable in enumerate(data_iterables):
if data_iterable is None:
x_sample, new_data_iterable = None, None
else:
x_sample, new_data_iterable = first_iterable_elem(data_iterable)
x_samples.append(x_sample)
data_iterables[i] = new_data_iterable
del x_sample
if msg_iterables:
msg_samples = []
for i, msg_iterable in enumerate(msg_iterables):
if msg_iterable is None:
msg_sample, new_msg_iterable = None, None
else:
msg_sample, new_msg_iterable = first_iterable_elem(msg_iterable)
msg_samples.append(msg_sample)
msg_iterables[i] = new_msg_iterable
del msg_sample
else:
msg_samples = None
# store the data to disk to save memory and safeguard against
# any change made to the data during the training
robust_pickle(path, "training_data_samples.pckl",
(x_samples, msg_samples, stop_messages))
del x_samples
del msg_samples
# perform the training and gather snapshots
prepare_training_inspection(flow=flow, path=path)
try:
if isinstance(flow, BiFlow):
flow.train(data_iterables, msg_iterables, stop_messages, **kwargs)
else:
flow.train(data_iterables, **kwargs)
except Exception:
if debug:
traceback.print_exc()
print ("exception during training, " +
"inspecting up to failure point...")
# create the last snapshot manually
try:
# if a normal mdp.Flow instance was given then this fails
flow._bi_reset()
except Exception:
pass
filename = (flow._snapshot_name_ + "_%d" % flow._snapshot_counter_
+ PICKLE_EXT)
robust_pickle(flow._snapshot_path_, filename, flow)
else:
raise
remove_inspection_residues(flow)
# reload data samples
with open(os.path.join(path, "training_data_samples.pckl"), "rb") as sample_file:
x_samples, msg_samples, stop_messages = pickle.load(sample_file)
# create slideshow
slideshow = inspect_training(snapshot_path=path,
inspection_path=path,
x_samples=x_samples,
msg_samples=msg_samples,
stop_messages=stop_messages,
tracer=tracer,
debug=debug, show_size=show_size,
verbose=False)
filename = os.path.join(path, "training_inspection.html")
title = "Training Inspection"
with open(filename, 'w') as html_file:
html_file.write('<html>\n<head>\n<title>%s</title>\n' % title)
html_file.write('<style type="text/css" media="screen">')
html_file.write(standard_css())
html_file.write('</style>\n</head>\n<body>\n')
html_file.write('<h3>%s</h3>\n' % title)
html_file.write(slideshow)
html_file.write('</body>\n</html>')
if open_browser:
_open_custom_brower(open_browser, os.path.abspath(filename))
return filename
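show_training() defaults its path argument to tempfile.mkdtemp(prefix='MDP_') when the caller does not supply a directory, and then writes the slideshow HTML there. A minimal sketch of that default-to-a-temp-directory idiom; write_report() is a hypothetical stand-in for the slideshow generation:

import os
import tempfile

def write_report(html, path=None):
    if path is None:
        path = tempfile.mkdtemp(prefix='MDP_')   # caller did not pick a directory
    filename = os.path.join(path, 'report.html')
    with open(filename, 'w') as html_file:
        html_file.write('<html><body>%s</body></html>' % html)
    return filename

# print(write_report('<h3>Training Inspection</h3>'))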
0
Example 115
Project: freeipa Source File: ipa_restore.py
def run(self):
options = self.options
super(Restore, self).run()
self.backup_dir = self.args[0]
if not os.path.isabs(self.backup_dir):
self.backup_dir = os.path.join(paths.IPA_BACKUP_DIR, self.backup_dir)
self.log.info("Preparing restore from %s on %s",
self.backup_dir, FQDN)
self.header = os.path.join(self.backup_dir, 'header')
try:
self.read_header()
except IOError as e:
raise admintool.ScriptError("Cannot read backup metadata: %s" % e)
if options.data_only:
restore_type = 'DATA'
else:
restore_type = self.backup_type
# These checks would normally be in the validate method but
# we need to know the type of backup we're dealing with.
if restore_type == 'FULL':
if options.online:
raise admintool.ScriptError(
"File restoration cannot be done online")
if options.instance or options.backend:
raise admintool.ScriptError(
"Restore must be in data-only mode when restoring a "
"specific instance or backend")
else:
installutils.check_server_configuration()
self.init_api()
if options.instance:
instance_dir = (paths.VAR_LIB_SLAPD_INSTANCE_DIR_TEMPLATE %
options.instance)
if not os.path.exists(instance_dir):
raise admintool.ScriptError(
"Instance %s does not exist" % options.instance)
self.instances = [options.instance]
if options.backend:
for instance in self.instances:
db_dir = (paths.SLAPD_INSTANCE_DB_DIR_TEMPLATE %
(instance, options.backend))
if os.path.exists(db_dir):
break
else:
raise admintool.ScriptError(
"Backend %s does not exist" % options.backend)
self.backends = [options.backend]
for instance, backend in itertools.product(self.instances,
self.backends):
db_dir = (paths.SLAPD_INSTANCE_DB_DIR_TEMPLATE %
(instance, backend))
if os.path.exists(db_dir):
break
else:
raise admintool.ScriptError(
"Cannot restore a data backup into an empty system")
self.log.info("Performing %s restore from %s backup" %
(restore_type, self.backup_type))
if self.backup_host != FQDN:
raise admintool.ScriptError(
"Host name %s does not match backup name %s" %
(FQDN, self.backup_host))
if self.backup_ipa_version != str(version.VERSION):
self.log.warning(
"Restoring data from a different release of IPA.\n"
"Data is version %s.\n"
"Server is running %s." %
(self.backup_ipa_version, str(version.VERSION)))
if (not options.unattended and
not user_input("Continue to restore?", False)):
raise admintool.ScriptError("Aborted")
create_ds_user()
pent = pwd.getpwnam(constants.DS_USER)
# Temporary directory for decrypting files before restoring
self.top_dir = tempfile.mkdtemp("ipa")
os.chown(self.top_dir, pent.pw_uid, pent.pw_gid)
os.chmod(self.top_dir, 0o750)
self.dir = os.path.join(self.top_dir, "ipa")
os.mkdir(self.dir)
os.chmod(self.dir, 0o750)
os.chown(self.dir, pent.pw_uid, pent.pw_gid)
cwd = os.getcwd()
try:
dirsrv = services.knownservices.dirsrv
self.extract_backup(options.gpg_keyring)
if restore_type == 'FULL':
self.restore_default_conf()
self.init_api(confdir=self.dir + paths.ETC_IPA)
databases = []
for instance in self.instances:
for backend in self.backends:
database = (instance, backend)
ldiffile = os.path.join(self.dir, '%s-%s.ldif' % database)
if os.path.exists(ldiffile):
databases.append(database)
if options.instance:
for instance, backend in databases:
if instance == options.instance:
break
else:
raise admintool.ScriptError(
"Instance %s not found in backup" % options.instance)
if options.backend:
for instance, backend in databases:
if backend == options.backend:
break
else:
raise admintool.ScriptError(
"Backend %s not found in backup" % options.backend)
# Big fat warning
if (not options.unattended and
not user_input("Restoring data will overwrite existing live data. Continue to restore?", False)):
raise admintool.ScriptError("Aborted")
self.log.info(
"Each master will individually need to be re-initialized or")
self.log.info(
"re-created from this one. The replication agreements on")
self.log.info(
"masters running IPA 3.1 or earlier will need to be manually")
self.log.info(
"re-enabled. See the man page for details.")
self.log.info("Disabling all replication.")
self.disable_agreements()
if restore_type != 'FULL':
if not options.online:
self.log.info('Stopping Directory Server')
dirsrv.stop(capture_output=False)
else:
self.log.info('Starting Directory Server')
dirsrv.start(capture_output=False)
else:
self.log.info('Stopping IPA services')
result = run(['ipactl', 'stop'], raiseonerr=False)
if result.returncode not in [0, 6]:
self.log.warning('Stopping IPA failed: %s' % result.error_log)
self.restore_selinux_booleans()
http = httpinstance.HTTPInstance()
# We do either a full file restore or we restore data.
if restore_type == 'FULL':
self.remove_old_files()
if 'CA' in self.backup_services:
create_ca_user()
self.cert_restore_prepare()
self.file_restore(options.no_logs)
self.cert_restore()
if 'CA' in self.backup_services:
self.__create_dogtag_log_dirs()
if http.is_kdcproxy_configured():
httpinstance.create_kdcproxy_user()
# Always restore the data from ldif
# We need to restore both userRoot and ipaca.
for instance, backend in databases:
self.ldif2db(instance, backend, online=options.online)
if restore_type != 'FULL':
if not options.online:
self.log.info('Starting Directory Server')
dirsrv.start(capture_output=False)
else:
# restore access control configuration
auth_backup_path = os.path.join(paths.VAR_LIB_IPA, 'auth_backup')
if os.path.exists(auth_backup_path):
tasks.restore_auth_configuration(auth_backup_path)
# explicitly enable then disable the pki tomcatd service to
# re-register its instance. FIXME, this is really weird.
services.knownservices.pki_tomcatd.enable()
services.knownservices.pki_tomcatd.disable()
self.log.info('Starting IPA services')
run(['ipactl', 'start'])
self.log.info('Restarting SSSD')
sssd = services.service('sssd')
sssd.restart()
http.remove_httpd_ccache()
finally:
try:
os.chdir(cwd)
except Exception as e:
self.log.error('Cannot change directory to %s: %s' % (cwd, e))
shutil.rmtree(self.top_dir)
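The restore above makes a restricted scratch area with tempfile.mkdtemp("ipa") (suffix form), tightens its permissions, creates a working subdirectory and finally removes the whole tree. A minimal sketch of the same layout; the chown calls are omitted here because they normally require elevated privileges, so only the 0o750 modes are shown:

import os
import shutil
import tempfile

top_dir = tempfile.mkdtemp('ipa')       # suffix form, as in the example above
os.chmod(top_dir, 0o750)                # owner and group only
work_dir = os.path.join(top_dir, 'ipa')
os.mkdir(work_dir)
os.chmod(work_dir, 0o750)
try:
    with open(os.path.join(work_dir, 'header'), 'w') as handle:
        handle.write('backup metadata placeholder\n')
finally:
    shutil.rmtree(top_dir)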
0
Example 116
Project: be Source File: cli.py
@main.command(name="in")
@click.argument("topics", nargs=-1, required=True)
@click.option("-y", "--yes", is_flag=True,
help="Automatically accept any questions")
@click.option("-a", "--as", "as_", default=getpass.getuser(),
help="Enter project as a different user")
@click.option("-e", "--enter", is_flag=True,
help="Change the current working "
"directory to development directory")
@click.pass_context
def in_(ctx, topics, yes, as_, enter):
"""Set the current topics to `topics`
Environment:
BE_PROJECT: First topic
BE_CWD: Current `be` working directory
BE_TOPICS: Arguments to `in`
BE_DEVELOPMENTDIR: Absolute path to current development directory
BE_PROJECTROOT: Absolute path to current project
BE_PROJECTSROOT: Absolute path to where projects are located
BE_ACTIVE: 0 or 1, indicates an active be environment
BE_USER: Current user, overridden with `--as`
BE_SCRIPT: User-supplied shell script
BE_PYTHON: User-supplied python script
BE_ENTER: 0 or 1 depending on whether the topic was entered
BE_GITHUB_API_TOKEN: Optional GitHub API token
BE_ENVIRONMENT: Space-separated list of user-added
environment variables
BE_TEMPDIR: Directory in which temporary files are stored
BE_PRESETSDIR: Directory in which presets are searched
BE_ALIASDIR: Directory in which aliases are written
BE_BINDING: Binding between template and item in inventory
\b
Usage:
$ be in project topics
"""
topics = map(str, topics) # They enter as unicode
if self.isactive():
lib.echo("ERROR: Exit current project first")
sys.exit(lib.USER_ERROR)
# Determine topic syntax
if len(topics[0].split("/")) == 3:
topic_syntax = lib.FIXED
project = topics[0].split("/")[0]
else:
topic_syntax = lib.POSITIONAL
project = topics[0]
project_dir = lib.project_dir(_extern.cwd(), project)
if not os.path.exists(project_dir):
lib.echo("Project \"%s\" not found. " % project)
lib.echo("\nAvailable:")
ctx.invoke(ls)
sys.exit(lib.USER_ERROR)
# Boot up
context = lib.context(root=_extern.cwd(), project=project)
be = _extern.load_be(project)
templates = _extern.load_templates(project)
inventory = _extern.load_inventory(project)
context.update({
"BE_PROJECT": project,
"BE_USER": str(as_),
"BE_ENTER": "1" if enter else "",
"BE_TOPICS": " ".join(topics)
})
# Remap topic syntax, for backwards compatibility
# In cases where the topic is entered in a way that
# differs from the template, remap topic to template.
if any(re.findall("{\d+}", pattern) for pattern in templates.values()):
template_syntax = lib.POSITIONAL
else:
template_syntax = lib.FIXED
if topic_syntax & lib.POSITIONAL and not template_syntax & lib.POSITIONAL:
topics = ["/".join(topics)]
if topic_syntax & lib.FIXED and not template_syntax & lib.FIXED:
topics[:] = topics[0].split("/")
try:
key = be.get("templates", {}).get("key") or "{1}"
item = lib.item_from_topics(key, topics)
binding = lib.binding_from_item(inventory, item)
context["BE_BINDING"] = binding
except IndexError as exc:
lib.echo("At least %s topics are required" % str(exc))
sys.exit(lib.USER_ERROR)
except KeyError as exc:
lib.echo("\"%s\" not found" % item)
if exc.bindings:
lib.echo("\nAvailable:")
for item_ in sorted(exc.bindings,
key=lambda a: (exc.bindings[a], a)):
lib.echo("- %s (%s)" % (item_, exc.bindings[item_]))
sys.exit(lib.USER_ERROR)
# Finally, determine a development directory
# based on the template-, not topic-syntax.
if template_syntax & lib.POSITIONAL:
try:
development_dir = lib.pos_development_directory(
templates=templates,
inventory=inventory,
context=context,
topics=topics,
user=as_,
item=item)
except KeyError as exc:
lib.echo("\"%s\" not found" % item)
if exc.bindings:
lib.echo("\nAvailable:")
for item_ in sorted(exc.bindings,
key=lambda a: (exc.bindings[a], a)):
lib.echo("- %s (%s)" % (item_, exc.bindings[item_]))
sys.exit(lib.USER_ERROR)
else: # FIXED topic_syntax
development_dir = lib.fixed_development_directory(
templates,
inventory,
topics,
as_)
context["BE_DEVELOPMENTDIR"] = development_dir
tempdir = (tempfile.mkdtemp()
if not os.environ.get("BE_TEMPDIR")
else os.environ["BE_TEMPDIR"])
context["BE_TEMPDIR"] = tempdir
# Should it be entered?
if enter and not os.path.exists(development_dir):
create = False
if yes:
create = True
else:
sys.stdout.write("No development directory found. Create? [Y/n]: ")
sys.stdout.flush()
if raw_input().lower() in ("", "y", "yes"):
create = True
if create:
ctx.invoke(mkdir, dir=development_dir)
else:
sys.stdout.write("Cancelled")
sys.exit(lib.NORMAL)
# Parse be.yaml
if "script" in be:
context["BE_SCRIPT"] = _extern.write_script(
be["script"], tempdir).replace("\\", "/")
if "python" in be:
script = "\n".join(be["python"])
context["BE_PYTHON"] = script
try:
exec script in {"__name__": __name__}
except Exception as e:
lib.echo("ERROR: %s" % e)
invalids = [v for v in context.values() if not isinstance(v, str)]
assert all(isinstance(v, str) for v in context.values()), invalids
# Create aliases
aliases_dir = _extern.write_aliases(
be.get("alias", {}), tempdir)
context["PATH"] = (aliases_dir
+ os.pathsep
+ context.get("PATH", ""))
context["BE_ALIASDIR"] = aliases_dir
# Parse redirects
lib.parse_redirect(
be.get("redirect", {}), topics, context)
# Override inherited context
# with that coming from be.yaml.
if "environment" in be:
parsed = lib.parse_environment(
fields=be["environment"],
context=context,
topics=topics)
context["BE_ENVIRONMENT"] = " ".join(parsed.keys())
context.update(parsed)
if "BE_TESTING" in context:
os.chdir(development_dir)
os.environ.update(context)
else:
parent = lib.parent()
cmd = lib.cmd(parent)
# Store reference to calling shell
context["BE_SHELL"] = parent
try:
sys.exit(subprocess.call(cmd, env=context))
finally:
import shutil
shutil.rmtree(tempdir)
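Note: the core tempfile.mkdtemp pattern in this example is "reuse an externally supplied scratch directory if BE_TEMPDIR is already set, otherwise create one, and remove it after the launched shell exits". A minimal sketch of that reuse-or-create pattern follows; it is an illustration only, and the safer variant shown here removes the directory only when this process created it (the printenv command is just a placeholder child process):
import os
import shutil
import subprocess
import tempfile

tempdir = os.environ.get("BE_TEMPDIR") or tempfile.mkdtemp()
env = dict(os.environ, BE_TEMPDIR=tempdir)
try:
    # Launch the child with the scratch directory exposed in its environment.
    subprocess.call(["printenv", "BE_TEMPDIR"], env=env)
finally:
    # Only remove the directory if this process created it.
    if not os.environ.get("BE_TEMPDIR"):
        shutil.rmtree(tempdir)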
0
Example 117
Project: easybuild-easyblocks Source File: imkl.py
def post_install_step(self):
"""
Install group libraries and interfaces (if desired).
"""
super(EB_imkl, self).post_install_step()
shlib_ext = get_shared_lib_ext()
# reload the dependencies
self.load_dependency_modules()
if self.cfg['m32']:
extra = {
'libmkl.%s' % shlib_ext : 'GROUP (-lmkl_intel -lmkl_intel_thread -lmkl_core)',
'libmkl_em64t.a': 'GROUP (libmkl_intel.a libmkl_intel_thread.a libmkl_core.a)',
'libmkl_solver.a': 'GROUP (libmkl_solver.a)',
'libmkl_scalapack.a': 'GROUP (libmkl_scalapack_core.a)',
'libmkl_lapack.a': 'GROUP (libmkl_intel.a libmkl_intel_thread.a libmkl_core.a)',
'libmkl_cdft.a': 'GROUP (libmkl_cdft_core.a)'
}
else:
extra = {
'libmkl.%s' % shlib_ext: 'GROUP (-lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core)',
'libmkl_em64t.a': 'GROUP (libmkl_intel_lp64.a libmkl_intel_thread.a libmkl_core.a)',
'libmkl_solver.a': 'GROUP (libmkl_solver_lp64.a)',
'libmkl_scalapack.a': 'GROUP (libmkl_scalapack_lp64.a)',
'libmkl_lapack.a': 'GROUP (libmkl_intel_lp64.a libmkl_intel_thread.a libmkl_core.a)',
'libmkl_cdft.a': 'GROUP (libmkl_cdft_core.a)'
}
if LooseVersion(self.version) >= LooseVersion('10.3'):
libsubdir = os.path.join('mkl', 'lib', 'intel64')
else:
if self.cfg['m32']:
libsubdir = os.path.join('lib', '32')
else:
libsubdir = os.path.join('lib', 'em64t')
for fil, txt in extra.items():
dest = os.path.join(self.installdir, libsubdir, fil)
if not os.path.exists(dest):
try:
f = open(dest, 'w')
f.write(txt)
f.close()
self.log.info("File %s written" % dest)
except IOError, err:
raise EasyBuildError("Can't write file %s: %s", dest, err)
# build the mkl interfaces, if desired
if self.cfg['interfaces']:
if LooseVersion(self.version) >= LooseVersion('10.3'):
intsubdir = os.path.join('mkl', 'interfaces')
inttarget = 'libintel64'
else:
intsubdir = 'interfaces'
if self.cfg['m32']:
inttarget = 'lib32'
else:
inttarget = 'libem64t'
cmd = "make -f makefile %s" % inttarget
# blas95 and lapack95 need more work, ignore for now
# blas95 and lapack also need include/.mod to be processed
# (lapack95 above presumably also needs include/.mod to be processed)
fftw2libs = ['fftw2xc', 'fftw2xf']
fftw3libs = ['fftw3xc', 'fftw3xf']
cdftlibs = ['fftw2x_cdft']
if LooseVersion(self.version) >= LooseVersion('10.3'):
cdftlibs.append('fftw3x_cdft')
interfacedir = os.path.join(self.installdir, intsubdir)
try:
os.chdir(interfacedir)
self.log.info("Changed to interfaces directory %s" % interfacedir)
except OSError, err:
raise EasyBuildError("Can't change to interfaces directory %s", interfacedir)
compopt = None
# determine whether we're using a non-Intel GCC-based or PGI-based toolchain
# can't use toolchain.comp_family, because of dummy toolchain used when installing imkl
if get_software_root('icc') is None:
# check for PGI first, since there's a GCC underneath PGI too...
if get_software_root('PGI'):
compopt = 'compiler=pgi'
elif get_software_root('GCC'):
compopt = 'compiler=gnu'
else:
raise EasyBuildError("Not using Intel/GCC/PGI compilers, don't know how to build wrapper libs")
else:
compopt = 'compiler=intel'
# patch makefiles for cdft wrappers when PGI is used as compiler
if get_software_root('PGI'):
regex_subs = [
# pgi should be considered as a valid compiler
("intel gnu", "intel gnu pgi"),
# transform 'gnu' case to 'pgi' case
(r"ifeq \(\$\(compiler\),gnu\)", "ifeq ($(compiler),pgi)"),
('=gcc', '=pgcc'),
# correct flag to use C99 standard
('-std=c99', '-c99'),
# -Wall and -Werror are not valid options for pgcc, no close equivalent
('-Wall', ''),
('-Werror', ''),
]
for lib in cdftlibs:
apply_regex_substitutions(os.path.join(interfacedir, lib, 'makefile'), regex_subs)
for lib in fftw2libs + fftw3libs + cdftlibs:
buildopts = [compopt]
if lib in fftw3libs:
buildopts.append('install_to=$INSTALL_DIR')
elif lib in cdftlibs:
mpi_spec = None
# check whether MPI_FAMILY constant is defined, so mpi_family() can be used
if hasattr(self.toolchain, 'MPI_FAMILY') and self.toolchain.MPI_FAMILY is not None:
mpi_spec_by_fam = {
toolchain.MPICH: 'mpich2', # MPICH is MPICH v3.x, which is MPICH2 compatible
toolchain.MPICH2: 'mpich2',
toolchain.MVAPICH2: 'mpich2',
toolchain.OPENMPI: 'openmpi',
}
mpi_fam = self.toolchain.mpi_family()
mpi_spec = mpi_spec_by_fam.get(mpi_fam)
self.log.debug("Determined MPI specification based on MPI toolchain component: %s" % mpi_spec)
else:
# can't use toolchain.mpi_family, because of dummy toolchain
if get_software_root('MPICH2') or get_software_root('MVAPICH2'):
mpi_spec = 'mpich2'
elif get_software_root('OpenMPI'):
mpi_spec = 'openmpi'
self.log.debug("Determined MPI specification based on loaded MPI module: %s" % mpi_spec)
if mpi_spec is not None:
buildopts.append('mpi=%s' % mpi_spec)
precflags = ['']
if lib.startswith('fftw2x') and not self.cfg['m32']:
# build both single and double precision variants
precflags = ['PRECISION=MKL_DOUBLE', 'PRECISION=MKL_SINGLE']
intflags = ['']
if lib in cdftlibs and not self.cfg['m32']:
# build both 32-bit and 64-bit interfaces
intflags = ['interface=lp64', 'interface=ilp64']
allopts = [list(opts) for opts in itertools.product(intflags, precflags)]
for flags, extraopts in itertools.product(['', '-fPIC'], allopts):
tup = (lib, flags, buildopts, extraopts)
self.log.debug("Building lib %s with: flags %s, buildopts %s, extraopts %s" % tup)
tmpbuild = tempfile.mkdtemp(dir=self.builddir)
self.log.debug("Created temporary directory %s" % tmpbuild)
# always set INSTALL_DIR, SPEC_OPT, COPTS and CFLAGS
# fftw2x(c|f): use $INSTALL_DIR, $CFLAGS and $COPTS
# fftw3x(c|f): use $CFLAGS
# fftw*cdft: use $INSTALL_DIR and $SPEC_OPT
env.setvar('INSTALL_DIR', tmpbuild)
env.setvar('SPEC_OPT', flags)
env.setvar('COPTS', flags)
env.setvar('CFLAGS', flags)
try:
intdir = os.path.join(interfacedir, lib)
os.chdir(intdir)
self.log.info("Changed to interface %s directory %s" % (lib, intdir))
except OSError, err:
raise EasyBuildError("Can't change to interface %s directory %s: %s", lib, intdir, err)
fullcmd = "%s %s" % (cmd, ' '.join(buildopts + extraopts))
res = run_cmd(fullcmd, log_all=True, simple=True)
if not res:
raise EasyBuildError("Building %s (flags: %s, fullcmd: %s) failed", lib, flags, fullcmd)
for fn in os.listdir(tmpbuild):
src = os.path.join(tmpbuild, fn)
if flags == '-fPIC':
# add _pic to filename
ff = fn.split('.')
fn = '.'.join(ff[:-1]) + '_pic.' + ff[-1]
dest = os.path.join(self.installdir, libsubdir, fn)
try:
if os.path.isfile(src):
shutil.move(src, dest)
self.log.info("Moved %s to %s" % (src, dest))
except OSError, err:
raise EasyBuildError("Failed to move %s to %s: %s", src, dest, err)
rmtree2(tmpbuild)
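Note: here tempfile.mkdtemp(dir=self.builddir) gives every interface-library build iteration its own scratch directory under the build tree, so parallel iterations never collide, and the directory is removed once its products have been moved out. A minimal sketch of that per-iteration pattern, where build_one is a hypothetical stand-in for the make invocation:
import os
import shutil
import tempfile

def build_into_scratch(builddir, build_one):
    """Run one build step in a fresh scratch dir under builddir, then clean up."""
    tmpbuild = tempfile.mkdtemp(dir=builddir)      # unique dir per iteration
    try:
        build_one(install_dir=tmpbuild)            # hypothetical build callback
        for fn in os.listdir(tmpbuild):            # harvest the produced files
            shutil.move(os.path.join(tmpbuild, fn), builddir)
    finally:
        shutil.rmtree(tmpbuild, ignore_errors=True)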
0
Example 118
Project: rail Source File: rna_installer.py
def install(self):
""" Installs Rail-RNA and all its dependencies. """
if not self.no_dependencies and self.curl_exe is None:
self.curl_exe = which('curl')
if self.curl_exe is None:
print_to_screen('Rail-RNA\'s installer requires Curl if '
'dependencies are to be installed. '
'Download it at '
'http://curl.haxx.se/download.html and use '
'--curl to specify its path, or '
'disable installing dependencies with '
'--no-dependencies.')
sys.exit(1)
if self._yes_no_query(
'Rail-RNA can be installed for all users or just the '
'current user.\n * Install for all users?',
answer=(None if not self.yes else (self.yes and not self.me))
):
if os.getuid():
print_to_screen('Rerun with sudo privileges to install '
'for all users.')
sys.exit(0)
install_dir = '/usr/local'
self.local = False
else:
install_dir = os.path.abspath(os.path.expanduser('~/'))
self.local = True
bin_dir = os.path.join(install_dir, 'bin')
rail_exe = os.path.join(bin_dir, 'rail-rna')
if self.install_dir is None:
self.final_install_dir = os.path.join(install_dir, 'raildotbio')
else:
# User specified an installation directory
self.final_install_dir = self.install_dir
# Install in a temporary directory first, then move to final dest
temp_install_dir = tempfile.mkdtemp()
register_cleanup(remove_temporary_directories, [temp_install_dir])
if os.path.exists(self.final_install_dir):
if self._yes_no_query(
('The installation path {dir} already exists.\n '
'* Overwrite {dir}?').format(dir=self.final_install_dir)
):
try:
shutil.rmtree(self.final_install_dir)
except OSError:
# Handle this later if directory creation fails
pass
try:
os.remove(self.final_install_dir)
except OSError:
pass
else:
print_to_screen(
'Specify a different installation directory with '
'--install-dir.'
)
sys.exit(0)
self._print_to_screen_and_log('[Installing] Extracting Rail-RNA...',
newline=False,
carriage_return=True)
try:
os.makedirs(self.final_install_dir)
except OSError as e:
self._print_to_screen_and_log(
('Problem encountered trying to create '
'directory %s for installation. May need '
'sudo permissions.') % self.final_install_dir
)
self._bail()
else:
# So it's possible to move temp installation dir there
os.rmdir(self.final_install_dir)
pass
with cd(temp_install_dir):
with zipfile.ZipFile(self.zip_name) as zip_object:
zip_object.extractall('./rail-rna')
if not self.no_dependencies:
self._grab_and_explode(self.depends['pypy'], 'PyPy')
self._grab_and_explode(self.depends['sra_tools'], 'SRA Tools')
if not self.prep_dependencies:
self._grab_and_explode(self.depends['bowtie1'], 'Bowtie 1')
self._grab_and_explode(self.depends['bowtie2'], 'Bowtie 2')
self._grab_and_explode(self.depends['bedgraphtobigwig'],
'BedGraphToBigWig')
self._grab_and_explode(self.depends['samtools'],
'SAMTools')
if not self.prep_dependencies and not self.no_dependencies:
# Have to make SAMTools (annoying; maybe change this)
samtools_dir = os.path.join(temp_install_dir,
self.depends['samtools'][0].rpartition('/')[2][:-8]
)
with cd(samtools_dir):
'''Make sure unistd.h is #included in cram_io.c ... it's some
bug in some SAMTools that prevents compilation on
langmead-fs1, which may be a general problem with
portability. See https://github.com/samtools/htslib/commit/
0ec5202de5691b27917ce828a9d24c9c729a9b81'''
cram_io_file = os.path.join(glob.glob('./htslib-*')[0],
'cram', 'cram_io.c')
with open(cram_io_file) as cram_io_stream:
all_cram_io = cram_io_stream.read()
if '<unistd.h>' not in all_cram_io:
with open(cram_io_file, 'w') as cram_io_out_stream:
cram_io_out_stream.write(all_cram_io.replace(
'#include <string.h>',
'#include <string.h>\n#include <unistd.h>'
))
makefile = 'Makefile'
with open(makefile) as makefile_stream:
all_makefile = makefile_stream.read()
with open(makefile, 'w') as makefile_stream:
makefile_stream.write(
all_makefile.replace(
'-D_CURSES_LIB=1', '-D_CURSES_LIB=0'
).replace('LIBCURSES=','#LIBCURSES=')
)
# Make on all but one cylinder
thread_count = max(1, multiprocessing.cpu_count() - 1)
samtools_command = ['make', '-j%d' % thread_count]
self._print_to_screen_and_log(
'[Installing] Making SAMTools...',
newline=False,
carriage_return=True
)
try:
subprocess.check_output(samtools_command,
stderr=self.log_stream)
except subprocess.CalledProcessError as e:
self._print_to_screen_and_log(
('Error encountered making SAMTools; exit '
'code was %d; command invoked was "%s".') %
(e.returncode, ' '.join(samtools_command))
)
self._bail()
samtools = os.path.join(self.final_install_dir,
self.depends['samtools'][0].rpartition('/')[2][:-8],
'samtools')
bowtie1_base = '-'.join(
self.depends['bowtie1'][0].rpartition(
'/'
)[2].split('-')[:2]
)
bowtie1 = os.path.join(self.final_install_dir,
bowtie1_base, 'bowtie')
bowtie1_build = os.path.join(self.final_install_dir,
bowtie1_base, 'bowtie-build')
bowtie2_base = '-'.join(
self.depends['bowtie2'][0].rpartition(
'/'
)[2].split('-')[:2]
)
bowtie2 = os.path.join(self.final_install_dir,
bowtie2_base, 'bowtie2')
bowtie2_build = os.path.join(self.final_install_dir,
bowtie2_base, 'bowtie2-build')
bedgraphtobigwig = os.path.join(
self.final_install_dir,
'bedGraphToBigWig'
)
else:
bowtie1 = bowtie1_build = bowtie2 = bowtie2_build \
= bedgraphtobigwig = samtools = 'None'
if self.no_dependencies:
pypy = 'None'
fastq_dump = 'None'
vdb_config = 'None'
else:
pypy = os.path.join(self.final_install_dir,
self.depends['pypy'][0].rpartition(
'/'
)[2][:-8], 'bin', 'pypy'
)
fastq_dump = os.path.join(self.final_install_dir,
self.depends['sra_tools'][0].rpartition(
'/'
)[2][:-7], 'bin', 'fastq-dump'
)
vdb_config = os.path.join(self.final_install_dir,
self.depends['sra_tools'][0].rpartition(
'/'
)[2][:-7], 'bin', 'vdb-config'
)
# Write paths to exe_paths
with open(
os.path.join(temp_install_dir, 'rail-rna',
'exe_paths.py'), 'w'
) as exe_paths_stream:
print >>exe_paths_stream, (
"""\"""
exe_paths.py
Part of Rail-RNA
Defines default paths of Rail-RNA's executable dependencies. Set a given
variable equal to None if the default path should be in PATH.
\"""
pypy = {pypy}
aws = None
curl = None
sort = None
bowtie1 = {bowtie1}
bowtie1_build = {bowtie1_build}
bowtie2 = {bowtie2}
bowtie2_build = {bowtie2_build}
samtools = {samtools}
bedgraphtobigwig = {bedgraphtobigwig}
fastq_dump = {fastq_dump}
vdb_config = {vdb_config}
"""
).format(pypy=self._quote(pypy), bowtie1=self._quote(bowtie1),
bowtie1_build=self._quote(bowtie1_build),
bowtie2=self._quote(bowtie2),
bowtie2_build=self._quote(bowtie2_build),
samtools=self._quote(samtools),
bedgraphtobigwig=self._quote(bedgraphtobigwig),
fastq_dump=self._quote(fastq_dump),
vdb_config=self._quote(vdb_config))
# Move to final directory
try:
shutil.move(temp_install_dir, self.final_install_dir)
except Exception as e:
self._print_to_screen_and_log(('Problem "%s" encountered moving '
'temporary installation directory '
'%s to final destination %s.') % (
e,
temp_install_dir,
self.final_install_dir
))
self._bail()
# Create shell-script executable
try:
os.makedirs(bin_dir)
except Exception as e:
if not os.path.isdir(bin_dir):
self._print_to_screen_and_log(('Problem "%s" encountered '
'creating directory %s.') % (
e,
bin_dir
)
)
self._bail()
install_dir_replacement = os.path.join(
self.final_install_dir, 'rail-rna'
)
with open(rail_exe, 'w') as rail_exe_stream:
print >>rail_exe_stream, (
"""#!/usr/bin/env bash
{python_executable} {install_dir} $@
"""
).format(python_executable=sys.executable,
install_dir=install_dir_replacement)
if self.local:
'''Have to add Rail to PATH. Do this in bashrc and bash_profile
contingent on whether it's present already because of
inconsistent behavior across Mac OS and Linux distros.'''
to_print = (
"""
## Rail-RNA additions
if [ -d "{bin_dir}" ] && [[ ":$PATH:" != *":{bin_dir}:"* ]]; then
PATH="${{PATH:+"$PATH:"}}{bin_dir}"
fi
export RAILDOTBIO={install_dir}
## End Rail-RNA additions
"""
).format(bin_dir=bin_dir,
install_dir=install_dir_replacement)
else:
# Just define raildotbio directory
to_print = (
"""
## Rail-RNA addition
export RAILDOTBIO={install_dir}
## End Rail-RNA addition
"""
).format(bin_dir=bin_dir,
install_dir=install_dir_replacement)
import mmap
bashrc = os.path.expanduser('~/.bashrc')
bash_profile = os.path.expanduser('~/.bash_profile')
try:
with open(bashrc) as bashrc_stream:
mmapped = mmap.mmap(bashrc_stream.fileno(), 0,
access=mmap.ACCESS_READ)
if mmapped.find(to_print) == -1:
print_to_bashrc = True
else:
print_to_bashrc = False
except (IOError, ValueError):
# No file
print_to_bashrc = True
try:
with open(bash_profile) as bash_profile_stream:
mmapped = mmap.mmap(bash_profile_stream.fileno(), 0,
access=mmap.ACCESS_READ)
if mmapped.find(to_print) == -1:
print_to_bash_profile = True
else:
print_to_bash_profile = False
except (IOError, ValueError):
# No file
print_to_bash_profile = True
if print_to_bashrc:
with open(bashrc, 'a') as bashrc_stream:
print >>bashrc_stream, to_print
if print_to_bash_profile:
with open(bash_profile, 'a') as bash_profile_stream:
print >>bash_profile_stream, to_print
# Set 755 permissions across Rail's dirs and 644 across files
dir_command = ['find', self.final_install_dir, '-type', 'd',
'-exec', 'chmod', '755', '{}', ';']
file_command = ['find', self.final_install_dir, '-type', 'f',
'-exec', 'chmod', '644', '{}', ';']
try:
subprocess.check_output(dir_command,
stderr=self.log_stream)
except subprocess.CalledProcessError as e:
self._print_to_screen_and_log(
('Error encountered changing directory '
'permissions; exit code was %d; command invoked '
'was "%s".') %
(e.returncode, ' '.join(dir_command))
)
self._bail()
try:
subprocess.check_output(file_command,
stderr=self.log_stream)
except subprocess.CalledProcessError as e:
self._print_to_screen_and_log(
('Error encountered changing file '
'permissions; exit code was %d; command invoked '
'was "%s".') %
(e.returncode, ' '.join(file_command))
)
self._bail()
# Go back and set 755 permissions for executables
os.chmod(rail_exe, 0755)
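Note: the installer above stages everything inside a tempfile.mkdtemp() directory, registers a cleanup handler, and only moves the tree to its final location once every dependency has been fetched and built, so a failed install never leaves a half-populated target. A minimal sketch of that stage-then-move pattern, where populate is a hypothetical callback that fills the staging area:
import atexit
import shutil
import tempfile

def staged_install(final_dir, populate):
    staging = tempfile.mkdtemp()                    # private staging area
    atexit.register(shutil.rmtree, staging, True)   # best-effort cleanup on exit
    populate(staging)                               # hypothetical: download/extract into staging
    shutil.move(staging, final_dir)                 # hand the finished tree to its final path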
0
Example 119
Project: C-PAC Source File: config_window.py
def test_sublist(self, sublist):
'''
Instance method to test a subject list for errors
Parameters
----------
self : MainFrame (wx.Frame object)
the method is aware of the instance as self
sublist : list (dict)
a C-PAC-formatted subject list (yaml list of dictionaries)
Returns
-------
pass_flg : boolean
flag which indicates whether the subject list passed testing
'''
# Import packages
import os
import tempfile
import nibabel as nb
from CPAC.utils.datasource import check_for_s3
# Init variables
err_str = ''
err_msg = ''
not_found_flg = False
bad_dim_flg = False
pass_flg = False
checked_s3 = False
s3_str = 's3://'
# Check to ensure the user is providing an actual subject
# list and not some other kind of file
try:
subInfo = sublist[0]
except:
msg = 'ERROR: Subject list file not in proper format - ' \
'check if you loaded the correct file? \n\n'\
'Error name: config_window_0001'
errDlg4 = wx.MessageDialog(self, msg, 'Subject List Error',
wx.OK | wx.ICON_ERROR)
errDlg4.ShowModal()
errDlg4.Destroy()
# Raise Exception
raise Exception
# Another check to ensure the actual subject list was generated
# properly and that it will work
if 'subject_id' not in subInfo:
msg = 'ERROR: Subject list file not in proper format - '\
'check if you loaded the correct file? \n\n'\
'Error name: config_window_0002'
errDlg3 = wx.MessageDialog(self, msg , 'Subject List Error',
wx.OK | wx.ICON_ERROR)
errDlg3.ShowModal()
errDlg3.Destroy()
# Raise Exception
raise Exception
# Iterate and test each subject's files
for sub in sublist:
anat_file = sub['anat']
func_files = sub['rest']
if anat_file.lower().startswith(s3_str):
if checked_s3:
break
dl_dir = tempfile.mkdtemp()
creds_path = sub['creds_path']
anat_file = check_for_s3(anat_file, creds_path, dl_dir=dl_dir)
# Check if anatomical file exists
if os.path.exists(anat_file):
img = nb.load(anat_file)
hdr = img.get_header()
dims = hdr.get_data_shape()
# Check to make sure it has the proper dimensions
if len(dims) != 3:
bad_dim_flg = True
err_str_suffix = 'Anat file not 3-dimensional: %s\n' \
% anat_file
err_str = err_str + err_str_suffix
# Anat file doesn't exist
else:
not_found_flg = True
err_str_suffix = 'File not found: %s\n' % anat_file
err_str = err_str + err_str_suffix
# For each functional file
for func_file in func_files.values():
if func_file.lower().startswith(s3_str):
dl_dir = tempfile.mkdtemp()
creds_path = sub['creds_path']
func_file = check_for_s3(func_file, creds_path, dl_dir=dl_dir,img_type='func')
checked_s3 = True
# Check if functional file exists
if os.path.exists(func_file):
img = nb.load(func_file)
hdr = img.get_header()
dims = hdr.get_data_shape()
# Check to make sure it has the proper dimensions
if len(dims) != 4:
bad_dim_flg = True
err_str_suffix = 'Func file not 4-dimensional: %s\n' \
% func_file
err_str = err_str + err_str_suffix
# Functional file doesn't exist
else:
not_found_flg = True
err_str_suffix = 'File not found: %s\n' % func_file
err_str = err_str + err_str_suffix
# If we're just checking s3 files, remove the temporarily downloaded files
if checked_s3:
try:
os.remove(anat_file)
os.remove(func_file)
except:
pass
break
# Check flags for error message
if not_found_flg:
err_msg = 'One or more of your input files are missing.\n'
if bad_dim_flg:
err_msg = err_msg + 'One or more of your input images have '\
'improper dimensionality\n'
# If err_msg was populated, display in window
if err_msg:
err_msg = 'ERROR: ' + err_msg + \
'See terminal output for more details'
errDlgFileTest = wx.MessageDialog(self,
err_msg,
'Pipeline Not Ready',
wx.OK | wx.ICON_ERROR)
errDlgFileTest.ShowModal()
errDlgFileTest.Destroy()
raise Exception(err_str)
else:
pass_flg = True
# Return the flag
return pass_flg
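Note: in this example tempfile.mkdtemp() only provides a throwaway download location for subject files that live on S3, and the local copies are discarded as soon as the header check is done. A minimal sketch of that check-and-discard pattern, where fetch and check_header are hypothetical helpers:
import shutil
import tempfile

def check_remote_file(url, fetch, check_header):
    dl_dir = tempfile.mkdtemp()                     # scratch area for the download
    try:
        local_path = fetch(url, dl_dir)             # hypothetical: download into dl_dir
        return check_header(local_path)             # hypothetical: inspect dimensions etc.
    finally:
        shutil.rmtree(dl_dir, ignore_errors=True)   # discard the downloaded copy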
0
Example 120
Project: cmddocs Source File: conftest.py
@pytest.fixture(scope="module")
def demoenv():
"""
Initializes a test environment to make
sure all tests can be applied properly
"""
# init demo dir
d = tempfile.mkdtemp(dir="/tmp/", prefix="demodocs-")
doc = """
# Header 1
This is some test content
hopefully no one will ever read this
## Header 2
py.test rocks...
### Header 3
there will be some codeblock
```
foo=$(echo foo)
echo foo | sed -i 's#foo#bar#'
```
#### Header 4
Test Test test
##### Header 5
This is some test content
hopefully no one will ever read this
"""
# create test dirs
for x in range(1, 4):
x = str(x)
os.mkdir(d + "/dir" + x)
# create test files
for x in range(1, 6):
x = str(x)
f = open(d + "/testfile" + x + ".md", "ab+")
f.write("Test " + x)
f.close()
for x in range(1, 4):
x = str(x)
f = open(d + "/dir1/testfile" + x + ".md", "ab+")
f.write(doc)
f.close()
repo = git.Repo.init(d)
repo.git.config("user.email", "[email protected]")
repo.git.config("user.name", "Charlie Root")
repo.git.add(".")
repo.git.commit(m=" init")
# create new content
for x in range(1, 4):
x = str(x)
f = open(d + "/dir2/testfile" + x + ".md", "ab+")
f.write(doc)
f.close()
# create 2nd commit
repo.git.add(".")
repo.git.commit(m=" 2nd commit")
# create new content
for x in range(1, 4):
x = str(x)
f = open(d + "/dir3/testfile" + x + ".md", "ab+")
f.write(doc)
f.close()
# create 3rd commit
repo.git.add(".")
repo.git.commit(m=" 3rd commit")
# create test config
confpath = tempfile.mkdtemp(dir="/tmp/", prefix="demodocsconf-")
config = open(confpath + "/config", "ab+")
# initialize test config
content = """
[General]
Datadir = %s
Default_Commit_Message = small changes
Excludedir = .git/
Editor = /usr/local/bin/vim
Pager = /usr/bin/less
Prompt = cmddocs>
Promptcolor = 37
Intro_Message = cmddocs - press ? for help
Mail = [email protected]
Default_Extension = md
[Colors]
Header12 = 37
Header345 = 37
Codeblock = 92
""" % d
config.write(content)
config.close()
c = config.name
print(d)
return c, d
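Note: the fixture above uses the dir= and prefix= arguments of tempfile.mkdtemp so the test sandboxes are easy to spot under /tmp. A minimal sketch of the same idea written as a self-cleaning pytest fixture (the original returns the paths and leaves removal to the OS):
import shutil
import tempfile

import pytest

@pytest.fixture()
def workdir():
    # Recognisable name such as /tmp/demodocs-xxxxxx
    d = tempfile.mkdtemp(dir="/tmp", prefix="demodocs-")
    yield d
    shutil.rmtree(d, ignore_errors=True)    # remove the sandbox after the test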
0
Example 121
Project: ChocolateyPackages Source File: package_manager.py
def install_package(self, package_name):
"""
Downloads and installs (or upgrades) a package
Uses the self.list_available_packages() method to determine where to
retrieve the package file from.
The install process consists of:
1. Finding the package
2. Downloading the .sublime-package/.zip file
3. Extracting the package file
4. Showing install/upgrade messaging
5. Submitting usage info
6. Recording that the package is installed
:param package_name:
The package to download and install
:return: bool if the package was successfully installed
"""
packages = self.list_available_packages()
is_available = package_name in list(packages.keys())
is_unavailable = package_name in self.settings.get('unavailable_packages', [])
if is_unavailable and not is_available:
console_write(u'The package "%s" is not available on this platform.' % package_name, True)
return False
if not is_available:
show_error(u'The package specified, %s, is not available' % package_name)
return False
url = packages[package_name]['download']['url']
package_filename = package_name + '.sublime-package'
tmp_dir = tempfile.mkdtemp()
try:
# This refers to the zipfile later on, so we define it here so we can
# close the zip file if set during the finally clause
package_zip = None
tmp_package_path = os.path.join(tmp_dir, package_filename)
unpacked_package_dir = self.get_package_dir(package_name)
package_path = os.path.join(sublime.installed_packages_path(),
package_filename)
pristine_package_path = os.path.join(os.path.dirname(
sublime.packages_path()), 'Pristine Packages', package_filename)
if os.path.exists(os.path.join(unpacked_package_dir, '.git')):
if self.settings.get('ignore_vcs_packages'):
show_error(u'Skipping git package %s since the setting ignore_vcs_packages is set to true' % package_name)
return False
return GitUpgrader(self.settings['git_binary'],
self.settings['git_update_command'], unpacked_package_dir,
self.settings['cache_length'], self.settings['debug']).run()
elif os.path.exists(os.path.join(unpacked_package_dir, '.hg')):
if self.settings.get('ignore_vcs_packages'):
show_error(u'Skipping hg package %s since the setting ignore_vcs_packages is set to true' % package_name)
return False
return HgUpgrader(self.settings['hg_binary'],
self.settings['hg_update_command'], unpacked_package_dir,
self.settings['cache_length'], self.settings['debug']).run()
old_version = self.get_metadata(package_name).get('version')
is_upgrade = old_version != None
# Download the sublime-package or zip file
try:
with downloader(url, self.settings) as manager:
package_bytes = manager.fetch(url, 'Error downloading package.')
except (DownloaderException) as e:
console_write(e, True)
show_error(u'Unable to download %s. Please view the console for more details.' % package_name)
return False
with open_compat(tmp_package_path, "wb") as package_file:
package_file.write(package_bytes)
# Try to open it as a zip file
try:
package_zip = zipfile.ZipFile(tmp_package_path, 'r')
except (zipfile.BadZipfile):
show_error(u'An error occurred while trying to unzip the package file for %s. Please try installing the package again.' % package_name)
return False
# Scan through the root level of the zip file to gather some info
root_level_paths = []
last_path = None
for path in package_zip.namelist():
try:
if not isinstance(path, str_cls):
path = path.decode('utf-8', 'strict')
except (UnicodeDecodeError):
console_write(u'One or more of the zip file entries in %s is not encoded using UTF-8, aborting' % package_name, True)
return False
last_path = path
if path.find('/') in [len(path) - 1, -1]:
root_level_paths.append(path)
# Make sure there are no paths that look like security vulnerabilities
if path[0] == '/' or path.find('../') != -1 or path.find('..\\') != -1:
show_error(u'The package specified, %s, contains files outside of the package dir and cannot be safely installed.' % package_name)
return False
if last_path and len(root_level_paths) == 0:
root_level_paths.append(last_path[0:last_path.find('/') + 1])
# If there is only a single directory at the top level, the file
# is most likely a zip from BitBucket or GitHub and we need
# to skip the top-level dir when extracting
skip_root_dir = len(root_level_paths) == 1 and \
root_level_paths[0].endswith('/')
no_package_file_zip_path = '.no-sublime-package'
if skip_root_dir:
no_package_file_zip_path = root_level_paths[0] + no_package_file_zip_path
# If we should extract unpacked or as a .sublime-package file
unpack = True
# By default, ST3 prefers .sublime-package files since this allows
# overriding files in the Packages/{package_name}/ folder
if int(sublime.version()) >= 3000:
unpack = False
# If the package maintainer doesn't want a .sublime-package
try:
package_zip.getinfo(no_package_file_zip_path)
unpack = True
except (KeyError):
pass
# If we already have a package-metadata.json file in
# Packages/{package_name}/, the only way to successfully upgrade
# will be to unpack
unpacked_metadata_file = os.path.join(unpacked_package_dir,
'package-metadata.json')
if os.path.exists(unpacked_metadata_file):
unpack = True
# If we determined it should be unpacked, we extract directly
# into the Packages/{package_name}/ folder
if unpack:
self.backup_package_dir(package_name)
package_dir = unpacked_package_dir
# Otherwise we go into a temp dir since we will be creating a
# new .sublime-package file later
else:
tmp_working_dir = os.path.join(tmp_dir, 'working')
os.mkdir(tmp_working_dir)
package_dir = tmp_working_dir
package_metadata_file = os.path.join(package_dir,
'package-metadata.json')
if not os.path.exists(package_dir):
os.mkdir(package_dir)
os.chdir(package_dir)
# Here we don't use .extractall() since it was having issues on OS X
overwrite_failed = False
extracted_paths = []
for path in package_zip.namelist():
dest = path
try:
if not isinstance(dest, str_cls):
dest = dest.decode('utf-8', 'strict')
except (UnicodeDecodeError):
console_write(u'One or more of the zip file entries in %s is not encoded using UTF-8, aborting' % package_name, True)
return False
if os.name == 'nt':
regex = ':|\*|\?|"|<|>|\|'
if re.search(regex, dest) != None:
console_write(u'Skipping file from package named %s due to an invalid filename' % package_name, True)
continue
# If there was only a single directory in the package, we remove
# that folder name from the paths as we extract entries
if skip_root_dir:
dest = dest[len(root_level_paths[0]):]
if os.name == 'nt':
dest = dest.replace('/', '\\')
else:
dest = dest.replace('\\', '/')
dest = os.path.join(package_dir, dest)
def add_extracted_dirs(dir_):
while dir_ not in extracted_paths:
extracted_paths.append(dir_)
dir_ = os.path.dirname(dir_)
if dir_ == package_dir:
break
if path.endswith('/'):
if not os.path.exists(dest):
os.makedirs(dest)
add_extracted_dirs(dest)
else:
dest_dir = os.path.dirname(dest)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
add_extracted_dirs(dest_dir)
extracted_paths.append(dest)
try:
open_compat(dest, 'wb').write(package_zip.read(path))
except (IOError) as e:
message = unicode_from_os(e)
if re.search('[Ee]rrno 13', message):
overwrite_failed = True
break
console_write(u'Skipping file from package named %s due to an invalid filename' % package_name, True)
except (UnicodeDecodeError):
console_write(u'Skipping file from package named %s due to an invalid filename' % package_name, True)
package_zip.close()
package_zip = None
# If upgrading failed, queue the package to upgrade upon next start
if overwrite_failed:
reinstall_file = os.path.join(package_dir, 'package-control.reinstall')
open_compat(reinstall_file, 'w').close()
# Don't delete the metadata file, that way we have it
# when the reinstall happens, and the appropriate
# usage info can be sent back to the server
clear_directory(package_dir, [reinstall_file, package_metadata_file])
show_error(u'An error occurred while trying to upgrade %s. Please restart Sublime Text to finish the upgrade.' % package_name)
return False
# Here we clean out any files that were not just overwritten. It is ok
# if there is an error removing a file. The next time there is an
# upgrade, it should be cleaned out successfully then.
clear_directory(package_dir, extracted_paths)
self.print_messages(package_name, package_dir, is_upgrade, old_version)
with open_compat(package_metadata_file, 'w') as f:
metadata = {
"version": packages[package_name]['download']['version'],
"url": packages[package_name]['homepage'],
"description": packages[package_name]['description']
}
json.dump(metadata, f)
# Submit install and upgrade info
if is_upgrade:
params = {
'package': package_name,
'operation': 'upgrade',
'version': packages[package_name]['download']['version'],
'old_version': old_version
}
else:
params = {
'package': package_name,
'operation': 'install',
'version': packages[package_name]['download']['version']
}
self.record_usage(params)
# Record the install in the settings file so that you can move
# settings across computers and have the same packages installed
def save_package():
settings = sublime.load_settings('Package Control.sublime-settings')
installed_packages = settings.get('installed_packages', [])
if not installed_packages:
installed_packages = []
installed_packages.append(package_name)
installed_packages = list(set(installed_packages))
installed_packages = sorted(installed_packages,
key=lambda s: s.lower())
settings.set('installed_packages', installed_packages)
sublime.save_settings('Package Control.sublime-settings')
sublime.set_timeout(save_package, 1)
# If we didn't extract directly into the Packages/{package_name}/
# folder, we need to create a .sublime-package file and install it
if not unpack:
try:
# Remove the downloaded file since we are going to overwrite it
os.remove(tmp_package_path)
package_zip = zipfile.ZipFile(tmp_package_path, "w",
compression=zipfile.ZIP_DEFLATED)
except (OSError, IOError) as e:
show_error(u'An error occurred creating the package file %s in %s.\n\n%s' % (
package_filename, tmp_dir, unicode_from_os(e)))
return False
package_dir_regex = re.compile('^' + re.escape(package_dir))
for root, dirs, files in os.walk(package_dir):
paths = dirs
paths.extend(files)
for path in paths:
full_path = os.path.join(root, path)
relative_path = re.sub(package_dir_regex, '', full_path)
if os.path.isdir(full_path):
continue
package_zip.write(full_path, relative_path)
package_zip.close()
package_zip = None
if os.path.exists(package_path):
os.remove(package_path)
shutil.move(tmp_package_path, package_path)
# We have to remove the pristine package too or else Sublime Text 2
# will silently delete the package
if os.path.exists(pristine_package_path):
os.remove(pristine_package_path)
os.chdir(sublime.packages_path())
return True
finally:
# We need to make sure the zipfile is closed to
# help prevent permissions errors on Windows
if package_zip:
package_zip.close()
# Try to remove the tmp dir after a second, in case a virus
# scanner is still holding a reference to the zipfile
# after we close it.
def remove_tmp_dir():
try:
shutil.rmtree(tmp_dir)
except (PermissionError):
# If we can't remove the tmp dir, don't let an uncaught exception
# fall through and break the install process
pass
sublime.set_timeout(remove_tmp_dir, 1000)
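Note: this installer wraps all work on the tempfile.mkdtemp() directory in try/finally and defers the shutil.rmtree call, because on Windows an antivirus scanner or the just-closed zipfile can still hold a handle on the files. A minimal sketch of that deferred-cleanup pattern, where schedule_later is a hypothetical stand-in for sublime.set_timeout:
import shutil
import tempfile
import threading

def with_deferred_cleanup(work,
                          schedule_later=lambda fn, ms: threading.Timer(ms / 1000.0, fn).start()):
    tmp_dir = tempfile.mkdtemp()
    try:
        return work(tmp_dir)                    # hypothetical: download/extract into tmp_dir
    finally:
        # Delay removal so lingering handles can be released first.
        schedule_later(lambda: shutil.rmtree(tmp_dir, ignore_errors=True), 1000)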
0
Example 122
Project: LS-BSR Source File: ls_bsr.py
def main(directory,id,filter,processors,genes,cluster_method,blast,length,
max_plog,min_hlog,f_plog,keep,filter_peps,filter_scaffolds,prefix,temp_dir,debug):
start_dir = os.getcwd()
ap=os.path.abspath("%s" % start_dir)
dir_path=os.path.abspath("%s" % directory)
logging.logPrint("Testing paths of dependencies")
if blast=="blastn" or blast=="tblastn":
ab = subprocess.call(['which', 'blastn'])
if ab == 0:
print "citation: Altschul SF, Madden TL, Schaffer AA, Zhang J, Zhang Z, Miller W, and Lipman DJ. 1997. Gapped BLAST and PSI-BLAST: a new generation of protein database search programs. Nucleic Acids Res 25:3389-3402"
else:
print "blastn isn't in your path, but needs to be!"
sys.exit()
if "NULL" in temp_dir:
fastadir = tempfile.mkdtemp()
else:
fastadir = os.path.abspath("%s" % temp_dir)
if os.path.exists('%s' % temp_dir):
print "old run directory exists in your genomes directory (%s). Delete and run again" % temp_dir
sys.exit()
else:
os.makedirs('%s' % temp_dir)
for infile in glob.glob(os.path.join(dir_path, '*.fasta')):
name=get_seq_name(infile)
os.link("%s" % infile, "%s/%s.new" % (fastadir,name))
if "null" in genes:
rc = subprocess.call(['which', 'prodigal'])
if rc == 0:
pass
else:
print "prodigal is not in your path, but needs to be!"
sys.exit()
print "citation: Hyatt D, Chen GL, Locascio PF, Land ML, Larimer FW, and Hauser LJ. 2010. Prodigal: prokaryotic gene recognition and translation initiation site identification. BMC Bioinformatics 11:119"
if "usearch" in cluster_method:
print "citation: Edgar RC. 2010. Search and clustering orders of magnitude faster than BLAST. Bioinformatics 26:2460-2461"
elif "cd-hit" in cluster_method:
print "citation: Li, W., Godzik, A. 2006. Cd-hit: a fast program for clustering and comparing large sets of protein or nuceltodie sequences. Bioinformatics 22(13):1658-1659"
elif "vsearch" in cluster_method:
print "citation: Rognes, T., Flouri, T., Nichols, B., Qunice, C., Mahe, Frederic. 2016. VSEARCH: a versatile open source tool for metagenomics. PeerJ Preprints. DOI: https://doi.org/10.7287/peerj.preprints.2409v1"
if blast=="blat":
ac = subprocess.call(['which', 'blat'])
if ac == 0:
print "citation: W.James Kent. 2002. BLAT - The BLAST-Like Alignment Tool. Genome Research 12:656-664"
else:
print "You have requested blat, but it is not in your PATH"
sys.exit()
logging.logPrint("predicting genes with Prodigal")
predict_genes(fastadir, processors)
logging.logPrint("Prodigal done")
"""This function produces locus tags"""
genbank_hits = process_genbank_files(dir_path)
if genbank_hits == None or len(genbank_hits) == 0:
os.system("cat *genes.seqs > all_gene_seqs.out")
if filter_scaffolds == "T":
filter_scaffolds("all_gene_seqs.out")
os.system("mv tmp.out all_gene_seqs.out")
else:
pass
else:
logging.logPrint("Converting genbank files")
"""First combine all of the prodigal files into one file"""
os.system("cat *genes.seqs > all_gene_seqs.out")
if filter_scaffolds == "T":
filter_scaffolds("all_gene_seqs.out")
os.system("mv tmp.out all_gene_seqs.out")
else:
pass
"""This combines the locus tags with the Prodigal prediction"""
os.system("cat *locus_tags.fasta all_gene_seqs.out > tmp.out")
os.system("mv tmp.out all_gene_seqs.out")
"""I also need to convert the GenBank file to a FASTA file"""
for hit in genbank_hits:
reduced_hit = hit.replace(".gbk","")
SeqIO.convert("%s/%s" % (dir_path, hit), "genbank", "%s.fasta.new" % reduced_hit, "fasta")
if "NULL" in cluster_method:
print "Clustering chosen, but no method selected...exiting"
sys.exit()
elif "usearch" in cluster_method:
ac = subprocess.call(['which', 'usearch'])
if ac == 0:
os.system("mkdir split_files")
os.system("cp all_gene_seqs.out split_files/all_sorted.txt")
os.chdir("split_files/")
logging.logPrint("Splitting FASTA file for use with USEARCH")
split_files("all_sorted.txt")
logging.logPrint("clustering with USEARCH at an ID of %s" % id)
run_usearch(id)
os.system("cat *.usearch.out > all_sorted.txt")
os.system("mv all_sorted.txt %s" % fastadir)
os.chdir("%s" % fastadir)
uclust_cluster(id)
logging.logPrint("USEARCH clustering finished")
else:
print "usearch must be in your path as usearch...exiting"
sys.exit()
elif "vsearch" in cluster_method:
ac = subprocess.call(['which', 'vsearch'])
if ac == 0:
logging.logPrint("clustering with VSEARCH at an ID of %s, using %s processors" % (id,processors))
run_vsearch(id, processors)
os.system("mv vsearch.out consensus.fasta")
logging.logPrint("VSEARCH clustering finished")
else:
print "vsearch must be in your path as vsearch...exiting"
sys.exit()
elif "cd-hit" in cluster_method:
ac = subprocess.call(['which', 'cd-hit-est'])
if ac == 0:
logging.logPrint("clustering with cd-hit at an ID of %s, using %s processors" % (id,processors))
subprocess.check_call("cd-hit-est -i all_gene_seqs.out -o consensus.fasta -M 0 -T %s -c %s > /dev/null 2>&1" % (processors, id), shell=True)
else:
print "cd-hit must be in your path as cd-hit-est...exiting"
sys.exit()
"""need to check for dups here"""
dup_ids = test_duplicate_header_ids("consensus.fasta")
if dup_ids == "True":
pass
elif dup_ids == "False":
print "duplicate headers identified, renaming.."
rename_fasta_header("consensus.fasta", "tmp.txt")
os.system("mv tmp.txt consensus.fasta")
else:
pass
if "tblastn" == blast:
subprocess.check_call("makeblastdb -in consensus.fasta -dbtype nucl > /dev/null 2>&1", shell=True)
translate_consensus("consensus.fasta")
if filter_peps == "T":
filter_seqs("tmp.pep")
os.system("rm tmp.pep")
else:
os.system("mv tmp.pep consensus.pep")
clusters = get_cluster_ids("consensus.pep")
blast_against_self_tblastn("tblastn", "consensus.fasta", "consensus.pep", "tmp_blast.out", processors, filter)
elif "blastn" == blast:
subprocess.check_call("makeblastdb -in consensus.fasta -dbtype nucl > /dev/null 2>&1", shell=True)
blast_against_self_blastn("blastn", "consensus.fasta", "consensus.fasta", "tmp_blast.out", filter, processors)
clusters = get_cluster_ids("consensus.fasta")
elif "blat" == blast:
blat_against_self("consensus.fasta", "consensus.fasta", "tmp_blast.out", processors)
clusters = get_cluster_ids("consensus.fasta")
else:
pass
subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
ref_scores=parse_self_blast(open("self_blast.out", "U"))
subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
os.system("rm *new_genes.*")
if blast == "tblastn" or blast == "blastn":
logging.logPrint("starting BLAST")
else:
logging.logPrint("starting BLAT")
if "tblastn" == blast:
blast_against_each_genome_tblastn(dir_path, processors, "consensus.pep", filter)
elif "blastn" == blast:
blast_against_each_genome_blastn(dir_path, processors, filter, "consensus.fasta")
elif "blat" == blast:
blat_against_each_genome(dir_path, "consensus.fasta",processors)
else:
pass
else:
logging.logPrint("Using pre-compiled set of predicted genes")
files = glob.glob(os.path.join(dir_path, "*.fasta"))
if len(files)==0:
print "no usable reference genomes found!"
sys.exit()
else:
pass
gene_path=os.path.abspath("%s" % genes)
dup_ids = test_duplicate_header_ids(gene_path)
if dup_ids == "True":
pass
elif dup_ids == "False":
print "duplicate headers identified, exiting.."
sys.exit()
clusters = get_cluster_ids(gene_path)
os.system("cp %s %s" % (gene_path,fastadir))
os.chdir("%s" % fastadir)
if gene_path.endswith(".pep"):
logging.logPrint("using tblastn on peptides")
try:
subprocess.check_call("makeblastdb -in %s -dbtype prot > /dev/null 2>&1" % gene_path, shell=True)
except:
logging.logPrint("problem encountered with BLAST database")
sys.exit()
blast_against_self_tblastn("blastp", gene_path, gene_path, "tmp_blast.out", processors, filter)
subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
ref_scores=parse_self_blast(open("self_blast.out", "U"))
subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
logging.logPrint("starting BLAST")
blast_against_each_genome_tblastn(dir_path, processors, gene_path, filter)
elif gene_path.endswith(".fasta"):
if "tblastn" == blast:
logging.logPrint("using tblastn")
translate_genes(gene_path)
try:
subprocess.check_call("makeblastdb -in %s -dbtype nucl > /dev/null 2>&1" % gene_path, shell=True)
except:
logging.logPrint("problem encountered with BLAST database")
sys.exit()
blast_against_self_tblastn("tblastn", gene_path, "genes.pep", "tmp_blast.out", processors, filter)
subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
ref_scores=parse_self_blast(open("self_blast.out", "U"))
subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
logging.logPrint("starting BLAST")
blast_against_each_genome_tblastn(dir_path, processors, "genes.pep", filter)
os.system("cp genes.pep %s" % start_dir)
elif "blastn" == blast:
logging.logPrint("using blastn")
try:
subprocess.check_call("makeblastdb -in %s -dbtype nucl > /dev/null 2>&1" % gene_path, shell=True)
except:
logging.logPrint("Database not formatted correctly...exiting")
sys.exit()
try:
blast_against_self_blastn("blastn", gene_path, gene_path, "tmp_blast.out", filter, processors)
except:
print "problem with blastn, exiting"
sys.exit()
subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
os.system("cp self_blast.out tmp.out")
ref_scores=parse_self_blast(open("self_blast.out", "U"))
subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
logging.logPrint("starting BLAST")
try:
blast_against_each_genome_blastn(dir_path, processors, filter, gene_path)
except:
print "problem with blastn, exiting"
sys.exit()
elif "blat" == blast:
logging.logPrint("using blat")
blat_against_self(gene_path, gene_path, "tmp_blast.out", processors)
subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
ref_scores=parse_self_blast(open("self_blast.out", "U"))
subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
logging.logPrint("starting BLAT")
blat_against_each_genome(dir_path,gene_path,processors)
else:
pass
else:
print "input file format not supported"
sys.exit()
find_dups_dev(ref_scores, length, max_plog, min_hlog, clusters, processors)
if blast=="blat":
logging.logPrint("BLAT done")
else:
logging.logPrint("BLAST done")
parse_blast_report("false")
get_unique_lines()
curr_dir=os.getcwd()
table_files = glob.glob(os.path.join(curr_dir, "*.filtered.unique"))
files_and_temp_names = [(str(idx), os.path.join(curr_dir, f))
for idx, f in enumerate(table_files)]
names=[]
table_list = []
nr_sorted=sorted(clusters)
centroid_list = []
centroid_list.append(" ")
for x in nr_sorted:
centroid_list.append(x)
table_list.append(centroid_list)
logging.logPrint("starting matrix building")
new_names,new_table = new_loop(files_and_temp_names, processors, clusters, debug)
new_table_list = table_list+new_table
logging.logPrint("matrix built")
open("ref.list", "a").write("\n")
for x in nr_sorted:
open("ref.list", "a").write("%s\n" % x)
names_out = open("names.txt", "w")
names_redux = [val for subl in new_names for val in subl]
for x in names_redux: print >> names_out, "".join(x)
names_out.close()
create_bsr_matrix_dev(new_table_list)
divide_values("bsr_matrix", ref_scores)
subprocess.check_call("paste ref.list BSR_matrix_values.txt > %s/bsr_matrix_values.txt" % start_dir, shell=True)
if "T" in f_plog:
filter_paralogs("%s/bsr_matrix_values.txt" % start_dir, "paralog_ids.txt")
os.system("cp bsr_matrix_values_filtered.txt %s" % start_dir)
else:
pass
try:
subprocess.check_call("cp dup_matrix.txt names.txt consensus.pep consensus.fasta duplicate_ids.txt paralog_ids.txt %s" % ap, shell=True, stderr=open(os.devnull, 'w'))
except:
sys.exc_clear()
"""new code to rename files according to a prefix"""
import datetime
timestamp = datetime.datetime.now()
rename = str(timestamp.year), str(timestamp.month), str(timestamp.day), str(timestamp.hour), str(timestamp.minute), str(timestamp.second)
os.chdir("%s" % ap)
if "NULL" in prefix:
os.system("mv dup_matrix.txt %s_dup_matrix.txt" % "".join(rename))
os.system("mv names.txt %s_names.txt" % "".join(rename))
os.system("mv duplicate_ids.txt %s_duplicate_ids.txt" % "".join(rename))
os.system("mv paralog_ids.txt %s_paralog_ids.txt" % "".join(rename))
os.system("mv bsr_matrix_values.txt %s_bsr_matrix.txt" % "".join(rename))
if os.path.isfile("consensus.fasta"):
os.system("mv consensus.fasta %s_consensus.fasta" % "".join(rename))
if os.path.isfile("consensus.pep"):
os.system("mv consensus.pep %s_consensus.pep" % "".join(rename))
else:
os.system("mv dup_matrix.txt %s_dup_matrix.txt" % prefix)
os.system("mv names.txt %s_names.txt" % prefix)
os.system("mv duplicate_ids.txt %s_duplicate_ids.txt" % prefix)
os.system("mv paralog_ids.txt %s_paralog_ids.txt" % prefix)
os.system("mv bsr_matrix_values.txt %s_bsr_matrix.txt" % prefix)
if os.path.isfile("consensus.fasta"):
os.system("mv consensus.fasta %s_consensus.fasta" % prefix)
if os.path.isfile("consensus.pep"):
os.system("mv consensus.pep %s_consensus.pep" % prefix)
if "NULL" in prefix:
outfile = open("%s_run_parameters.txt" % "".join(rename), "w")
else:
outfile = open("%s_run_parameters.txt" % prefix, "w")
print >> outfile, "-d %s \\" % directory
print >> outfile, "-i %s \\" % id
print >> outfile, "-f %s \\" % filter
print >> outfile, "-p %s \\" % processors
print >> outfile, "-g %s \\" % genes
print >> outfile, "-c %s \\" % cluster_method
print >> outfile, "-b %s \\" % blast
print >> outfile, "-l %s \\" % length
print >> outfile, "-m %s \\" % max_plog
print >> outfile, "-n %s \\" % min_hlog
print >> outfile, "-t %s \\" % f_plog
print >> outfile, "-k %s \\" % keep
print >> outfile, "-s %s \\" % filter_peps
print >> outfile, "-e %s \\" % filter_scaffolds
print >> outfile, "-x %s \\" % prefix
print >> outfile, "-z %s" % debug
print >> outfile, "temp data stored here if kept: %s" % fastadir
outfile.close()
logging.logPrint("all Done")
if "T" == keep:
pass
else:
os.system("rm -rf %s" % fastadir)
os.chdir("%s" % ap)
0
Example 123
Project: the-new-hotness Source File: consumers.py
def _handle_anitya_update(self, upstream, package, msg):
url = msg['msg']['project']['homepage']
projectid = msg['msg']['project']['id']
# Is it something that we're being asked not to act on?
is_monitored = self.is_monitored(package)
# Is it new to us?
mdapi_url = '{0}/koji/srcpkg/{1}'.format(self.mdapi_url, package)
self.log.debug("Getting pkg info from %r" % mdapi_url)
r = requests.get(mdapi_url)
if r.status_code != 200:
# Unfortunately it's not in mdapi, we can't do much about it
self.log.warning("No koji version found for %r" % package)
if is_monitored:
self.publish("update.drop", msg=dict(
trigger=msg, reason="rawhide"))
return
js = r.json()
version = js["version"]
release = js["release"]
self.log.info("Comparing upstream %s against repo %s-%s" % (
upstream, version, release))
diff = hotness.helpers.cmp_upstream_repo(upstream, (version, release))
# If so, then poke bugzilla and start a scratch build
if diff == 1:
self.log.info("OK, %s is newer than %s-%s" % (
upstream, version, release))
if not is_monitored:
self.log.info("Pkgdb says not to monitor %r. Dropping." % package)
self.publish("update.drop", msg=dict(trigger=msg, reason="pkgdb"))
return
bz = self.bugzilla.handle(
projectid, package, upstream, version, release, url)
if not bz:
self.log.info("No RHBZ change detected (odd). Aborting.")
self.publish("update.drop", msg=dict(
trigger=msg, reason="bugzilla"))
return
self.publish("update.bug.file", msg=dict(
trigger=msg, bug=dict(bug_id=bz.bug_id)))
if is_monitored == 'nobuild':
self.log.info("Monitor flag set to 'nobuild'. "
"Skipping scratch build.")
return
cwd = os.getcwd()
result_rh = 0
rh_stuff = {}
tmp = tempfile.mkdtemp(prefix='thn-rh', dir='/var/tmp')
try:
result_rh, rh_stuff = self.buildsys.rebase_helper(package, upstream, tmp, bz)
self.log.info(rh_stuff)
if int(result_rh) == 0:
if 'build_logs' in rh_stuff and 'build_ref' in rh_stuff.get('build_logs'):
build_ref = rh_stuff['build_logs']['build_ref']
if build_ref:
for ver in ['old', 'new']:
dict_version = build_ref[ver]
task_id = dict_version['koji_task_id']
if 'old' == ver:
self.old_triggered_task_ids[task_id] = [bz, None, str(version), str(package)]
else:
self.new_triggered_task_ids[task_id] = [bz, None, str(upstream), str(package)]
else:
note = 'Patching or scratch build for %s-%s failed.\n' % (package, version)
self.bugzilla.follow_up(note, bz)
if 'logs' in rh_stuff['build_logs']:
for log in rh_stuff['build_logs']['logs']:
note = 'Build log %s.' % log
self.bugzilla.attach_patch(log, note, bz)
# Attach rebase-helper logs for another analysis
if 'logs' in rh_stuff:
for log in rh_stuff['logs']:
rebase_helper_url = 'https://github.com/phracek/rebase-helper/issues'
note_logs = 'Rebase-helper %s log file.\n' \
'See for details and report the eventual error to rebase-helper %s.' % \
(os.path.basename(log), rebase_helper_url)
self.bugzilla.attach_patch(log, note_logs, bz)
if 'patches' in rh_stuff:
for patch in rh_stuff['patches']:
self.bugzilla.follow_up(patch, bz)
os.chdir(cwd)
if os.path.exists(tmp):
shutil.rmtree(tmp)
except Exception as ex:
self.log.info('Customer.py: Rebase helper failed with an unknown reason. %s' % str(ex))
self.log.info(rh_stuff)
self.bugzilla.follow_up('Rebase helper failed.\n'
'See logs and attachments in this bugzilla %s' % ex.message, bz)
if 'patches' in rh_stuff:
for patch in rh_stuff['patches']:
self.bugzilla.follow_up(patch, bz)
if 'logs' in rh_stuff:
for log in rh_stuff['logs']:
rh_logs = "Log %s provided by rebase-helper." % log
self.bugzilla.attach_patch(log, rh_logs, bz)
os.chdir(cwd)
if os.path.exists(tmp):
shutil.rmtree(tmp)
self.log.info("Now with #%i, time to do koji stuff" % bz.bug_id)
try:
# Kick off a scratch build..
task_id, patch_filename, description = self.buildsys.handle(
package, upstream, version, bz)
# Map that koji task_id to the bz ticket we want to pursue.
self.new_triggered_task_ids[task_id] = [bz, None, str(upstream), str(package)]
# Attach the patch to the ticket
self.bugzilla.attach_patch(patch_filename, description, bz)
except Exception as e:
heading = "Failed to kick off scratch build."
note = heading + "\n\n" + str(e)
self.log.warning(heading)
self.log.warning(traceback.format_exc())
self.bugzilla.follow_up(note, bz)
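Note: this consumer places its scratch checkout on /var/tmp with a recognisable prefix via tempfile.mkdtemp(prefix='thn-rh', dir='/var/tmp') and removes it on both the success and the failure path. A minimal sketch of that pattern, where rebase is a hypothetical stand-in for the rebase-helper call:
import shutil
import tempfile

def run_in_scratch(rebase):
    # Large, identifiable scratch area outside the default (often small) /tmp.
    tmp = tempfile.mkdtemp(prefix='thn-rh', dir='/var/tmp')
    try:
        return rebase(tmp)                      # hypothetical: do the rebase work in tmp
    finally:
        shutil.rmtree(tmp, ignore_errors=True)  # cleaned up on success and on error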
0
Example 124
Project: apogee Source File: moog.py
def windows(*args,**kwargs):
"""
NAME:
windows
PURPOSE:
Generate model APOGEE spectra using MOOG in selected wavelength windows (but the whole APOGEE spectral range is returned): this is a general routine that generates the non-continuum-normalized spectrum, convolves with the LSF and macroturbulence, and optionally continuum normalizes the output; use 'moogsynth' for a direct interface to MOOG
INPUT ARGUMENTS:
Windows specification: Provide one of
(1) Element string: the APOGEE windows for this element will be loaded
(2) startindxs, endindxs= start and end indexes of the windows on the apStar wavelength grid
(3) startlams, endlams= start and end wavelengths in \AA
lists with abundance differences wrt the atmosphere (they don't all have to have the same length, missing ones are filled in with zeros):
[Atomic number1,diff1_1,diff1_2,diff1_3,...,diff1_N]
[Atomic number2,diff2_1,diff2_2,diff2_3,...,diff2_N]
...
[Atomic numberM,diffM_1,diffM_2,diffM_3,...,diffM_N]
INPUT KEYWORDS:
BASELINE: you can specify the baseline spectrum to not always re-compute it
baseline= baseline c-normalized spectrum on MOOG wavelength grid (obtained from moogsynth)
mwav= MOOG wavelength grid (obtained from moogsynth)
cflux= continuum flux from MOOG
Typically, you can obtain these three keywords by doing (kwargs are the keywords you provide to this function as well)
>>> baseline= moogsynth(**kwargs)[1]
>>> mwav, cflux= moogsynth(doflux=True,**kwargs)
LSF:
lsf= ('all') LSF to convolve with; output of apogee.spec.lsf.eval; sparsify for efficiency; if 'all' or 'combo' a pre-computed version will be downloaded from the web
Either:
xlsf= (None) pixel offset grid on which the LSF is computed (see apogee.spec.lsf.eval); unnecessary if lsf=='all' or 'combo'
dxlsf= (None) spacing of pixel offsets
vmacro= (6.) macroturbulence to apply
CONTINUUM:
cont= ('aspcap') continuum-normalization to apply:
None: no continuum normalization
'true': Use the true continuum
'aspcap': Use the continuum normalization method of ASPCAP DR12
'cannon': Normalize using continuum pixels derived from the Cannon
SYNTHESIS:
linelist= (None) linelist to use; if this is None, the code looks for a weed-out version of the linelist appropriate for the given model atmosphere
run_weedout= (False) if True, run MOOG weedout on the linelist first
wmin, wmax, dw, width= (15000.000, 17000.000, 0.10000000, 7.0000000) spectral synthesis limits *for the whole spectrum* (not just the windows), step, and width of calculation (see MOOG)
MODEL ATMOSPHERE PARAMETERS:
Specify one of the following:
(a) modelatm= (None) can be set to the filename of a model atmosphere or to a model-atmosphere instance (if filename, needs to end in .mod)
(b) parameters of a KURUCZ model atmosphere:
(1) teff= (4500) Teff
logg= (2.5) logg
metals= (0.) metallicity
cm= (0.) carbon-enhancement
am= (0.) alpha-enhancement
(2) fparam= standard ASPCAP output format (
lib= ('kurucz_filled') model atmosphere library
dr= (None) use model atmospheres from this data release
vmicro= (2.) microturbulence (only used if the MOOG-formatted atmosphere is not found) (can also be part of fparam)
MISCELLANEOUS:
dr= return the path corresponding to this data release
OUTPUT:
spectra (nspec,nwave)
HISTORY:
2015-03-18 - Written - Bovy (IAS)
"""
# Pop some kwargs
run_weedout= kwargs.pop('run_weedout',False)
baseline= kwargs.pop('baseline',None)
mwav= kwargs.pop('mwav',None)
cflux= kwargs.pop('cflux',None)
# Check that we have the LSF and store the relevant keywords
lsf= kwargs.pop('lsf','all')
if isinstance(lsf,str):
xlsf, lsf= aplsf._load_precomp(dr=kwargs.get('dr',None),fiber=lsf)
dxlsf= None
else:
xlsf= kwargs.pop('xlsf',None)
dxlsf= kwargs.pop('dxlsf',None)
if xlsf is None and dxlsf is None: raise ValueError('xlsf= or dxlsf= input needs to be given if the LSF is given as an array')
vmacro= kwargs.pop('vmacro',6.)
# Parse continuum-normalization keywords
cont= kwargs.pop('cont','aspcap')
# Parse the wavelength regions
apWave= apStarWavegrid()
if isinstance(args[0],str): #element string given
si,ei= apwindow.waveregions(args[0],pad=3,asIndex=True)
args= args[1:]
else:
if isinstance(args[0][0],int): # assume index
si,ei= args[0], args[1]
else: # assume wavelengths in \AA
sl,el= args[0], args[1]
# Convert to index
si, ei= [], []
for s,e in zip(sl,el):
# Find closest index into apWave
si.append(numpy.argmin(numpy.fabs(s-apWave)))
ei.append(numpy.argmin(numpy.fabs(e-apWave)))
args= args[2:]
# Setup the model atmosphere
modelatm= kwargs.pop('modelatm',None)
tmpModelAtmDir= False
# Parse fparam, if present
fparam= kwargs.pop('fparam',None)
if not fparam is None:
kwargs['teff']= fparam[0,paramIndx('TEFF')]
kwargs['logg']= fparam[0,paramIndx('LOGG')]
kwargs['metals']= fparam[0,paramIndx('METALS')]
kwargs['am']= fparam[0,paramIndx('ALPHA')]
kwargs['cm']= fparam[0,paramIndx('C')]
kwargs['vm']= 10.**fparam[0,paramIndx('LOG10VDOP')]
if modelatm is None: # Setup a model atmosphere
modelatm= atlas9.Atlas9Atmosphere(teff=kwargs.get('teff',4500.),
logg=kwargs.get('logg',2.5),
metals=kwargs.get('metals',0.),
am=kwargs.get('am',0.),
cm=kwargs.get('cm',0.),
dr=kwargs.get('dr',None))
if isinstance(modelatm,str) and os.path.exists(modelatm):
modelfilename= modelatm
elif isinstance(modelatm,str):
raise ValueError('modelatm= input is a non-existing filename')
else: # model atmosphere instance
# Need to write this instance to a file; we will run in a temp
# subdirectory of the current directory
tmpDir= tempfile.mkdtemp(dir=os.getcwd())
tmpModelAtmDir= True # need to remove this later
modelfilename= os.path.join(tmpDir,'modelatm.mod')
modelatm.writeto(modelfilename)
kwargs['modelatm']= modelfilename
try:
# Check whether a MOOG version of the model atmosphere exists
if not os.path.exists(modelfilename.replace('.mod','.org')):
# Convert to MOOG format
convert_modelAtmosphere(**kwargs)
# Run weedout on the linelist first if requested
if run_weedout:
linelistfilename= modelfilename.replace('.mod','.lines')
if not os.path.exists(linelistfilename):
weedout(**kwargs)
kwargs['linelist']= linelistfilename
# Run MOOG synth for the whole wavelength range as a baseline, also contin
if baseline is None:
baseline= moogsynth(**kwargs)[1]
elif isinstance(baseline,tuple): #probably accidentally gave wav as well
baseline= baseline[1]
if mwav is None or cflux is None:
mwav, cflux= moogsynth(doflux=True,**kwargs)
# Convert the apStarWavegrid windows to moogWavegrid regions
sm,em= [], []
for start,end in zip(si,ei):
sm.append(numpy.argmin(numpy.fabs(apWave[start]-mwav)))
em.append(numpy.argmin(numpy.fabs(apWave[end]-mwav)))
# Run MOOG synth for all abundances and all windows
if len(args) == 0: #special case that there are *no* differences
args= ([26,0.],)
nsynths= numpy.array([len(args[ii])-1 for ii in range(len(args))])
nsynth= numpy.amax(nsynths) #Take the longest abundance list
out= numpy.tile(baseline,(nsynth,1))
# Run all windows
for start, end in zip(sm,em):
kwargs['wmin']= mwav[start]
kwargs['wmax']= mwav[end]
# Check whether the number of syntheses is > 5 and run multiple
# MOOG instances if necessary, bc MOOG only does 5 at a time
ninstances= int(numpy.ceil(nsynth/5.))
for ii in range(ninstances):
newargs= ()
for jj in range(len(args)):
tab= [args[jj][0]]
if len(args[jj][5*ii+1:5*(ii+1)+1]) > 0:
tab.extend(args[jj][5*ii+1:5*(ii+1)+1])
newargs= newargs+(tab,)
out[5*ii:5*(ii+1),start:end+1]= moogsynth(*newargs,**kwargs)[1]
except: raise
finally:
if tmpModelAtmDir: # need to remove this temporary directory
os.remove(modelfilename)
moogmodelfilename= modelfilename.replace('.mod','.org')
if os.path.exists(moogmodelfilename):
os.remove(moogmodelfilename)
if run_weedout:
os.remove(modelfilename.replace('.mod','.lines'))
os.rmdir(tmpDir)
# Now multiply each continuum-normalized spectrum with the continuum
out*= numpy.tile(cflux,(nsynth,1))
# Now convolve with the LSF
out= aplsf.convolve(mwav,out,
lsf=lsf,xlsf=xlsf,dxlsf=dxlsf,vmacro=vmacro)
# Now continuum-normalize
if cont.lower() == 'true':
# Get the true continuum on the apStar wavelength grid
apWave= apStarWavegrid()
baseline= numpy.polynomial.Polynomial.fit(mwav,cflux,4)
ip= interpolate.InterpolatedUnivariateSpline(mwav,
cflux/baseline(mwav),
k=3)
cflux= baseline(apWave)*ip(apWave)
# Divide it out
out/= numpy.tile(cflux,(nsynth,1))
elif not cont is None:
cflux= apcont.fit(out,numpy.ones_like(out),type=cont)
out[cflux > 0.]/= cflux[cflux > 0.]
out[cflux <= 0.]= numpy.nan
return out
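The cleanup pattern in the finally block above (a scratch directory created with tempfile.mkdtemp(dir=os.getcwd()), known files removed one by one, then os.rmdir on the empty directory) can be reduced to a minimal sketch; write_model and run_synthesis are placeholder callables, not functions from the apogee package:

import os
import tempfile

def run_in_scratch_dir(write_model, run_synthesis):
    # Scratch directory as a subdirectory of the current directory, as in
    # tempfile.mkdtemp(dir=os.getcwd()) above.
    tmp_dir = tempfile.mkdtemp(dir=os.getcwd())
    model_file = os.path.join(tmp_dir, 'modelatm.mod')
    try:
        write_model(model_file)           # stands in for modelatm.writeto(...)
        return run_synthesis(model_file)  # stands in for the moogsynth(...) calls
    finally:
        # Remove the files we know about, then the (now empty) directory.
        if os.path.exists(model_file):
            os.remove(model_file)
        os.rmdir(tmp_dir)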
0
Example 125
Project: gnowsys-studio Source File: generate_all_rdf.py
def rdf_description(name,notation='xml' ):
"""
Function takes the title of a node and an RDF notation.
"""
valid_formats = ["xml", "n3", "ntriples", "trix"]
default_graph_uri = "http://gstudio.gnowledge.org/rdfstore"
# default_graph_uri = "http://example.com/"
configString = "/var/tmp/rdfstore"
# Get the IOMemory plugin.
store = plugin.get('IOMemory', Store)('rdfstore')
# Open previously created store, or create it if it doesn't exist yet
graph = Graph(store="IOMemory",
identifier = URIRef(default_graph_uri))
path = mkdtemp()
rt = graph.open(path, create=False)
if rt == NO_STORE:
graph.open(path, create=True)
else:
assert rt == VALID_STORE, "The underlying store is corrupt"
# Now we'll add some triples to the graph & commit the changes
#rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
graph.bind("gstudio", "http://gnowledge.org/")
exclusion_fields = ["id", "rght", "node_ptr_id", "image", "lft", "_state", "_altnames_cache", "_tags_cache", "nid_ptr_id", "_mptt_cached_fields"]
#verifies the type of node
node=NID.objects.get(title=name)
node_type=node.reftype
if (node_type=='Gbobject' ):
node=Gbobject.objects.get(title=name)
rdflib=link(node)
elif (node_type=='None'):
node=Gbobject.objects.get(title=name)
rdflib=link(node)
elif (node_type=='Processes'):
node=Gbobject.objects.get(title=name)
rdflib=link(node)
elif (node_type=='System'):
node=Gbobject.objects.get(title=name)
rdflib=link(node)
elif (node_type=='Objecttype'):
node=Objecttype.objects.get(title=name)
rdflib=link(node)
elif (node_type=='Attributetype'):
node=Attributetype.objects.get(title=name)
rdflib=link(node)
elif (node_type=='Complement'):
node=Complement.objects.get(title=name)
rdflib=link(node)
elif (node_type=='Union'):
node=Union.objects.get(title=name)
rdflib=link(node)
elif (node_type=='Intersection'):
node=Intersection.objects.get(title=name)
rdflib=link(node)
elif (node_type=='Expression'):
node=Expression.objects.get(title=name)
rdflib=link(node)
elif (node_type=='Processtype'):
node=Processtype.objects.get(title=name)
rdflib=link(node)
elif (node_type=='Systemtype'):
node=Systemtype.objects.get(title=name)
rdflib=link(node)
elif (node_type=='AttributeSpecification'):
node=AttributeSpecification.objects.get(title=name)
rdflib=link(node)
elif (node_type=='RelationSpecification'):
node=RelationSpecification.objects.get(title=name)
rdflib=link(node)
elif(node_type=='Attribute'):
node=Attribute.objects.get(title=name)
rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
elif(node_type=='Relationtype' ):
node=Relationtype.objects.get(title=name)
rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
elif(node_type=='Metatype'):
node=Metatype.objects.get(title=name)
rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
else:
rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
node_dict=node.__dict__
subject=str(node_dict['id'])
for key in node_dict:
if key not in exclusion_fields:
predicate=str(key)
pobject=str(node_dict[predicate])
graph.add((rdflib[subject], rdflib[predicate], Literal(pobject)))
rdf_code=graph.serialize(format=notation)
graph.commit()
print rdf_code
graph.close()
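Note that the example above never deletes the directory returned by mkdtemp(), so every call leaves one scratch directory behind. A minimal, library-agnostic sketch of the same idea with cleanup; populate is a placeholder for opening and filling the triple store:

import shutil
import tempfile

def with_scratch_store(populate):
    path = tempfile.mkdtemp()
    try:
        return populate(path)  # e.g. open the store at 'path' and add triples
    finally:
        # Remove the scratch directory and anything the store wrote into it.
        shutil.rmtree(path, ignore_errors=True)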
0
Example 126
def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
pkgs = []
res = {}
res['results'] = []
res['msg'] = ''
res['rc'] = 0
res['changed'] = False
tempdir = tempfile.mkdtemp()
for spec in items:
pkg = None
# check if pkgspec is installed (if possible for idempotence)
# localpkg
if spec.endswith('.rpm') and '://' not in spec:
# get the pkg name-v-r.arch
if not os.path.exists(spec):
res['msg'] += "No RPM file matching '%s' found on system" % spec
res['results'].append("No RPM file matching '%s' found on system" % spec)
res['rc'] = 127 # Ensure the task fails in with-loop
module.fail_json(**res)
nvra = local_nvra(module, spec)
# look for them in the rpmdb
if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos):
# if they are there, skip it
continue
pkg = spec
# URL
elif '://' in spec:
# download package so that we can check if it's already installed
package = fetch_rpm_from_url(spec, module=module)
nvra = local_nvra(module, package)
if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos):
# if it's there, skip it
continue
pkg = package
#groups :(
elif spec.startswith('@'):
# complete wild ass guess b/c it's a group
pkg = spec
# range requires or file-requires or pkgname :(
else:
# most common case is the pkg is already installed and done
# short circuit all the bs - and search for it as a pkg in is_installed
# if you find it then we're done
if not set(['*','?']).intersection(set(spec)):
installed_pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True)
if installed_pkgs:
res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
continue
# look up what pkgs provide this
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos)
if not pkglist:
res['msg'] += "No package matching '%s' found available, installed or updated" % spec
res['results'].append("No package matching '%s' found available, installed or updated" % spec)
res['rc'] = 126 # Ensure the task fails in with-loop
module.fail_json(**res)
# if any of the packages are involved in a transaction, fail now
# so that we don't hang on the yum operation later
conflicts = transaction_exists(pkglist)
if len(conflicts) > 0:
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
res['rc'] = 125 # Ensure the task fails in with-loop
module.fail_json(**res)
# if any of them are installed
# then nothing to do
found = False
for this in pkglist:
if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True):
found = True
res['results'].append('%s providing %s is already installed' % (this, spec))
break
# if the version of the pkg you have installed is not in ANY repo, but there are
# other versions in the repos (both higher and lower) then the previous checks won't work.
# so we check one more time. This really only works for pkgname - not for file provides or virt provides
# but virt provides should be all caught in what_provides on its own.
# highly irritating
if not found:
if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos):
found = True
res['results'].append('package providing %s is already installed' % (spec))
if found:
continue
# if not - then pass in the spec as what to install
# we could get here if nothing provides it but that's not
# the error we're catching here
pkg = spec
pkgs.append(pkg)
if pkgs:
cmd = yum_basecmd + ['install'] + pkgs
if module.check_mode:
# Remove rpms downloaded for EL5 via url
try:
shutil.rmtree(tempdir)
except Exception:
e = get_exception()
module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e))
module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))
changed = True
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = module.run_command(cmd, environ_update=lang_env)
if (rc == 1):
for spec in items:
# Fail on invalid urls:
if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
module.fail_json(msg='Package at %s could not be installed' % spec, rc=1, changed=False)
if (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out:
# avoid failing in the 'Nothing To Do' case
# this may happen with an URL spec.
# for an already installed group,
# we get rc = 0 and 'Nothing to do' in out, not in err.
rc = 0
err = ''
out = '%s: Nothing to do' % spec
changed = False
res['rc'] = rc
res['results'].append(out)
res['msg'] += err
# FIXME - if we did an install - go and check the rpmdb to see if it actually installed
# look for each pkg in rpmdb
# look for each pkg via obsoletes
# Record change
res['changed'] = changed
# Remove rpms downloaded for EL5 via url
try:
shutil.rmtree(tempdir)
except Exception:
e = get_exception()
module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e))
return res
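The module above creates one tempdir per install() call and removes it with shutil.rmtree on both exit paths (check mode and normal completion). A small sketch of that pattern with a single try/finally instead of duplicated cleanup; fetch and install are placeholders, not Ansible APIs:

import shutil
import tempfile

def download_and_install(fetch, install):
    tempdir = tempfile.mkdtemp()
    try:
        packages = fetch(tempdir)  # e.g. download remote RPMs into tempdir
        return install(packages)   # e.g. run the package-manager command
    finally:
        # Guaranteed cleanup on every exit path, including exceptions.
        shutil.rmtree(tempdir, ignore_errors=True)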
0
Example 127
Project: apogee Source File: turbospec.py
def turbosynth(*args,**kwargs):
"""
NAME:
turbosynth
PURPOSE:
Run a Turbospectrum synthesis (direct interface to the Turbospectrum code; use 'synth' for a general routine that generates the non-continuum-normalized spectrum, convolves with the LSF and macroturbulence, and optionally continuum-normalizes the output)
INPUT ARGUMENTS:
lists with abundances:
[Atomic number1,diff1]
[Atomic number2,diff2]
...
[Atomic numberM,diffM]
SYNTHESIS KEYWORDS:
isotopes= ('solar') use 'solar' or 'arcturus' isotope ratios; can also be a dictionary with isotope ratios (e.g., isotopes= {'6.012':'0.9375','6.013':'0.0625'})
wmin, wmax, dw= (15000.000, 17000.000, 0.10000000) spectral synthesis limits and step of calculation (see MOOG)
babsma_wmin, babsma_wmax= (wmin,wmax) allows opacity limits to be different (broader) than for the synthesis itself
costheta= (1.) cosine of the viewing angle
LINELIST KEYWORDS:
air= (True) if True, perform the synthesis in air wavelengths (affects the default Hlinelist, nothing else; output is in air if air, vacuum otherwise); set to False at your own risk, as Turbospectrum expects the linelist in air wavelengths!
Hlinelist= (None) Hydrogen linelists to use; can be set to the path of a linelist file or to the name of an APOGEE linelist; if None, then we first search for the Hlinedata.vac in the APOGEE linelist directory (if air=False) or we use the internal Turbospectrum Hlinelist (if air=True)
linelist= (None) molecular and atomic linelists to use; can be set to the path of a linelist file or to the name of an APOGEE linelist, or lists of such files; if a single filename is given, the code will first search for files with extensions '.atoms', '.molec' or that start with 'turboatoms.' and 'turbomolec.'
ATMOSPHERE KEYWORDS:
modelatm= (None) model-atmosphere instance
vmicro= (2.) microturbulence (km/s)
modelopac= (None)
(a) if set to an existing filename: assume babsma_lu has already been run and use this continuous opacity in bsyn_lu
(b) if set to a non-existing filename: store the continuous opacity in this file
MISCELLANEOUS KEYWORDS:
dr= data release
saveTurboInput= if set to a string, the input to and output from Turbospectrum will be saved as a tar.gz file with this name; can be a filename in the current directory or a full path
OUTPUT:
(wavelengths,cont-norm. spectrum, spectrum (nwave))
HISTORY:
2015-04-13 - Written - Bovy (IAS)
"""
# Get the spectral synthesis limits
wmin= kwargs.pop('wmin',_WMIN_DEFAULT)
wmax= kwargs.pop('wmax',_WMAX_DEFAULT)
dw= kwargs.pop('dw',_DW_DEFAULT)
babsma_wmin= kwargs.pop('babsma_wmin',wmin)
babsma_wmax= kwargs.pop('babsma_wmax',wmax)
if babsma_wmin > wmin or babsma_wmax < wmax:
raise ValueError("Opacity wavelength range must encompass the synthesis range")
if int(numpy.ceil((wmax-wmin)/dw)) > 150000:
raise ValueError('Too many wavelengths for Turbospectrum synthesis, reduce the wavelength step dw (to, e.g., 0.016)')
costheta= kwargs.pop('costheta',1.)
# Linelists
Hlinelist= kwargs.pop('Hlinelist',None)
linelist= kwargs.pop('linelist',None)
# Parse isotopes
isotopes= kwargs.pop('isotopes','solar')
if isinstance(isotopes,str) and isotopes.lower() == 'solar':
isotopes= {}
elif isinstance(isotopes,str) and isotopes.lower() == 'arcturus':
isotopes= {'6.012':'0.9375',
'6.013':'0.0625'}
elif not isinstance(isotopes,dict):
raise ValueError("'isotopes=' input not understood, should be 'solar', 'arcturus', or a dictionary")
# We will run in a subdirectory of the current directory
tmpDir= tempfile.mkdtemp(dir=os.getcwd())
# Get the model atmosphere
modelatm= kwargs.pop('modelatm',None)
if not modelatm is None:
if isinstance(modelatm,str) and os.path.exists(modelatm):
raise ValueError('modelatm= input is an existing filename, but you need to give an Atmosphere object instead')
elif isinstance(modelatm,str):
raise ValueError('modelatm= input needs to be an Atmosphere instance')
else:
# Check temperature
if modelatm._teff > 7000.:
warnings.warn('Turbospectrum does not include all necessary physics to model stars hotter than about 7000 K; proceed with caution',RuntimeWarning)
# Write atmosphere to file
modelfilename= os.path.join(tmpDir,'atm.mod')
modelatm.writeto(modelfilename,turbo=True)
modeldirname= os.path.dirname(modelfilename)
modelbasename= os.path.basename(modelfilename)
# Get the name of the linelists
if Hlinelist is None:
if kwargs.get('air',True):
Hlinelist= 'DATA/Hlinedata' # will be symlinked
else:
Hlinelist= appath.linelistPath('Hlinedata.vac',
dr=kwargs.get('dr',None))
if not os.path.exists(Hlinelist) and not Hlinelist == 'DATA/Hlinedata':
Hlinelist= appath.linelistPath(Hlinelist,
dr=kwargs.get('dr',None))
if not os.path.exists(Hlinelist) and not kwargs.get('air',True):
print("Hlinelist in vacuum linelist not found, using Turbospectrum's, which is in air...")
Hlinelist= 'DATA/Hlinedata' # will be symlinked
linelistfilenames= [Hlinelist]
if isinstance(linelist,str):
if os.path.exists(linelist):
linelistfilenames.append(linelist)
else:
# Try finding the linelist
atomlinelistfilename= appath.linelistPath(\
'%s.atoms' % linelist,
dr=kwargs.get('dr',None))
moleclinelistfilename= appath.linelistPath(\
'%s.molec' % linelist,
dr=kwargs.get('dr',None))
if os.path.exists(atomlinelistfilename) \
and os.path.exists(moleclinelistfilename):
linelistfilenames.append(atomlinelistfilename)
linelistfilenames.append(moleclinelistfilename)
else:
atomlinelistfilename= appath.linelistPath(\
'turboatoms.%s' % linelist,
dr=kwargs.get('dr',None))
moleclinelistfilename= appath.linelistPath(\
'turbomolec.%s' % linelist,
dr=kwargs.get('dr',None))
if not os.path.exists(atomlinelistfilename) \
and '201404080919' in atomlinelistfilename \
and kwargs.get('air',True):
download.linelist(os.path.basename(atomlinelistfilename),
dr=kwargs.get('dr',None))
if not os.path.exists(moleclinelistfilename) \
and '201404080919' in moleclinelistfilename \
and kwargs.get('air',True):
download.linelist(os.path.basename(moleclinelistfilename),
dr=kwargs.get('dr',None))
if os.path.exists(atomlinelistfilename) \
and os.path.exists(moleclinelistfilename):
linelistfilenames.append(atomlinelistfilename)
linelistfilenames.append(moleclinelistfilename)
if linelist is None or len(linelistfilenames) == 1:
os.remove(modelfilename)
os.rmdir(tmpDir)
raise ValueError('linelist= must be set (see documentation) and given linelist must exist (either as absolute path or in the linelist directory)')
# Link the Turbospectrum DATA directory
os.symlink(os.getenv('TURBODATA'),os.path.join(tmpDir,'DATA'))
# Cut the linelist to the desired wavelength range, if necessary,
# Skipped because it is unnecessary, but left in case we still want to
# use it
rmLinelists= False
for ll, linelistfilename in enumerate(linelistfilenames[1:]):
if not _CUTLINELIST: continue #SKIP
if wmin == _WMIN_DEFAULT and wmax == _WMAX_DEFAULT: continue
rmLinelists= True
with open(os.path.join(tmpDir,'cutlines.awk'),'w') as awkfile:
awkfile.write('($1>%.3f && $1<%.3f) || ( substr($1,1,1) == "'
%(wmin-7.,wmax+7.) +"'"+'")\n')
keeplines= open(os.path.join(tmpDir,'lines.tmp'),'w')
stderr= open('/dev/null','w')
try:
subprocess.check_call(['awk','-f','cutlines.awk',
linelistfilename],
cwd=tmpDir,stdout=keeplines,stderr=stderr)
keeplines.close()
except subprocess.CalledProcessError:
os.remove(os.path.join(tmpDir,'lines.tmp'))
os.remove(os.path.join(tmpDir,'DATA'))
raise RuntimeError("Removing unnecessary linelist entries failed ...")
finally:
os.remove(os.path.join(tmpDir,'cutlines.awk'))
stderr.close()
# Remove elements that aren't used altogether, adjust nlines
with open(os.path.join(tmpDir,'lines.tmp'),'r') as infile:
lines= infile.readlines()
nl_list= [l[0] == "'" for l in lines]
nl= numpy.array(nl_list,dtype='int')
nl_list.append(True)
nl_list.append(True)
nlines= [numpy.sum(1-nl[ii:nl_list[ii+2:].index(True)+ii+2])
for ii in range(len(nl))]
with open(os.path.join(tmpDir,os.path.basename(linelistfilename)),
'w') \
as outfile:
for ii, line in enumerate(lines):
if ii < len(lines)-2:
if not lines[ii][0] == "'":
outfile.write(lines[ii])
elif not (lines[ii+2][0] == "'" and lines[ii+1][0] == "'"):
if lines[ii+1][0] == "'":
# Adjust nlines
outfile.write(lines[ii].replace(lines[ii].split()[-1]+'\n',
'%i\n' % nlines[ii]))
else:
outfile.write(lines[ii])
else:
if not lines[ii][0] == "'": outfile.write(lines[ii])
os.remove(os.path.join(tmpDir,'lines.tmp'))
# cp the linelists to the temporary directory
shutil.copy(linelistfilename,tmpDir)
linelistfilenames[ll]= os.path.basename(linelistfilename)
# Parse the abundances
if len(args) == 0: #special case that there are *no* differences
args= ([26,0.],)
indiv_abu= {}
for arg in args:
indiv_abu[arg[0]]= arg[1]+solarabundances._ASPLUND05[arg[0]]\
+modelatm._metals
if arg[0] == 6: indiv_abu[arg[0]]+= modelatm._cm
if arg[0] in [8,10,12,14,16,18,20,22]: indiv_abu[arg[0]]+= modelatm._am
modelopac= kwargs.get('modelopac',None)
if modelopac is None or \
(isinstance(modelopac,str) and not os.path.exists(modelopac)):
# Now write the script file for babsma_lu
scriptfilename= os.path.join(tmpDir,'babsma.par')
modelopacname= os.path.join(tmpDir,'mopac')
_write_script(scriptfilename,
babsma_wmin,babsma_wmax,dw,
None,
modelfilename,
None,
modelopacname,
modelatm._metals,
modelatm._am,
indiv_abu,
kwargs.get('vmicro',2.),
None,None,None,bsyn=False)
# Run babsma
sys.stdout.write('\r'+"Running Turbospectrum babsma_lu ...\r")
sys.stdout.flush()
if kwargs.get('verbose',False):
stdout= None
stderr= None
else:
stdout= open('/dev/null', 'w')
stderr= subprocess.STDOUT
try:
p= subprocess.Popen(['babsma_lu'],
cwd=tmpDir,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr)
with open(os.path.join(tmpDir,'babsma.par'),'r') as parfile:
for line in parfile:
p.stdin.write(line.encode('utf-8'))
stdout, stderr= p.communicate()
except subprocess.CalledProcessError:
for linelistfilename in linelistfilenames:
# Remove any linelist copy placed in the temporary directory
llcopy= os.path.join(tmpDir,os.path.basename(linelistfilename))
if os.path.exists(llcopy): os.remove(llcopy)
if os.path.exists(os.path.join(tmpDir,'DATA')):
os.remove(os.path.join(tmpDir,'DATA'))
raise RuntimeError("Running babsma_lu failed ...")
finally:
if os.path.exists(os.path.join(tmpDir,'babsma.par')) \
and not 'saveTurboInput' in kwargs:
os.remove(os.path.join(tmpDir,'babsma.par'))
sys.stdout.write('\r'+download._ERASESTR+'\r')
sys.stdout.flush()
if isinstance(modelopac,str):
shutil.copy(modelopacname,modelopac)
else:
shutil.copy(modelopac,tmpDir)
modelopacname= os.path.join(tmpDir,os.path.basename(modelopac))
# Now write the script file for bsyn_lu
scriptfilename= os.path.join(tmpDir,'bsyn.par')
outfilename= os.path.join(tmpDir,'bsyn.out')
_write_script(scriptfilename,
wmin,wmax,dw,
costheta,
modelfilename,
None,
modelopacname,
modelatm._metals,
modelatm._am,
indiv_abu,
None,
outfilename,
isotopes,
linelistfilenames,
bsyn=True)
# Run bsyn
sys.stdout.write('\r'+"Running Turbospectrum bsyn_lu ...\r")
sys.stdout.flush()
if kwargs.get('verbose',False):
stdout= None
stderr= None
else:
stdout= open('/dev/null', 'w')
stderr= subprocess.STDOUT
try:
p= subprocess.Popen(['bsyn_lu'],
cwd=tmpDir,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr)
with open(os.path.join(tmpDir,'bsyn.par'),'r') as parfile:
for line in parfile:
p.stdin.write(line.encode('utf-8'))
stdout, stderr= p.communicate()
except subprocess.CalledProcessError:
raise RuntimeError("Running bsyn_lu failed ...")
finally:
if 'saveTurboInput' in kwargs:
turbosavefilename= kwargs['saveTurboInput']
if os.path.dirname(turbosavefilename) == '':
turbosavefilename= os.path.join(os.getcwd(),turbosavefilename)
try:
subprocess.check_call(['tar','cvzf',turbosavefilename,
os.path.basename(os.path.normpath(tmpDir))])
except subprocess.CalledProcessError:
raise RuntimeError("Tar-zipping the Turbospectrum input and output failed; you will have to manually delete the temporary directory ...")
# Need to remove babsma.par, bc not removed above
if os.path.exists(os.path.join(tmpDir,'babsma.par')):
os.remove(os.path.join(tmpDir,'babsma.par'))
if os.path.exists(os.path.join(tmpDir,'bsyn.par')):
os.remove(os.path.join(tmpDir,'bsyn.par'))
if os.path.exists(modelopacname):
os.remove(modelopacname)
if os.path.exists(modelopacname+'.mod'):
os.remove(modelopacname+'.mod')
if os.path.exists(os.path.join(tmpDir,'DATA')):
os.remove(os.path.join(tmpDir,'DATA'))
if os.path.exists(os.path.join(tmpDir,'dummy-output.dat')):
os.remove(os.path.join(tmpDir,'dummy-output.dat'))
if os.path.exists(modelfilename):
os.remove(modelfilename)
if rmLinelists:
for linelistfilename in linelistfilenames[1:]:
os.remove(linelistfilename)
sys.stdout.write('\r'+download._ERASESTR+'\r')
sys.stdout.flush()
# Now read the output
turboOut= numpy.loadtxt(outfilename)
# Clean up
os.remove(outfilename)
os.rmdir(tmpDir)
# Return wav, cont-norm, full spectrum
return (turboOut[:,0],turboOut[:,1],turboOut[:,2])
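A condensed sketch of the working-directory setup used above: a scratch directory created inside the current directory, a DATA symlink pointing at the Turbospectrum data directory, and removal of both on exit. data_dir and run are placeholders (data_dir stands in for os.getenv('TURBODATA')):

import os
import tempfile

def scratch_with_data_link(data_dir, run):
    tmp_dir = tempfile.mkdtemp(dir=os.getcwd())
    link = os.path.join(tmp_dir, 'DATA')
    os.symlink(data_dir, link)
    try:
        return run(tmp_dir)  # e.g. invoke babsma_lu/bsyn_lu with cwd=tmp_dir
    finally:
        os.remove(link)      # removes the symlink itself, not its target
        os.rmdir(tmp_dir)    # only succeeds once the directory is empty again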
0
Example 128
Project: DataflowPythonSDK Source File: dependency.py
def stage_job_resources(
options, file_copy=_dependency_file_copy, build_setup_args=None,
temp_dir=None, populate_requirements_cache=_populate_requirements_cache):
"""Creates (if needed) and stages job resources to options.staging_location.
Args:
options: Command line options. More specifically the function will expect
staging_location, requirements_file, setup_file, and save_main_session
options to be present.
file_copy: Callable for copying files. The default version will copy from
a local file to a GCS location using the gsutil tool available in the
Google Cloud SDK package.
build_setup_args: A list of command line arguments used to build a setup
package. Used only if options.setup_file is not None. Used only for
testing.
temp_dir: Temporary folder where the resource building can happen. If None
then a unique temp directory will be created. Used only for testing.
populate_requirements_cache: Callable for populating the requirements cache.
Used only for testing.
Returns:
A list of file names (no paths) for the resources staged. All the files
are assumed to be staged in options.staging_location.
Raises:
RuntimeError: If files specified are not found or error encountered while
trying to create the resources (e.g., build a setup package).
"""
temp_dir = temp_dir or tempfile.mkdtemp()
resources = []
google_cloud_options = options.view_as(GoogleCloudOptions)
setup_options = options.view_as(SetupOptions)
# Make sure that all required options are specified. There are a few that have
# defaults to support local running scenarios.
if google_cloud_options.staging_location is None:
raise RuntimeError(
'The --staging_location option must be specified.')
if google_cloud_options.temp_location is None:
raise RuntimeError(
'The --temp_location option must be specified.')
# Stage a requirements file if present.
if setup_options.requirements_file is not None:
if not os.path.isfile(setup_options.requirements_file):
raise RuntimeError('The file %s cannot be found. It was specified in the '
'--requirements_file command line option.' %
setup_options.requirements_file)
staged_path = utils.path.join(google_cloud_options.staging_location,
REQUIREMENTS_FILE)
file_copy(setup_options.requirements_file, staged_path)
resources.append(REQUIREMENTS_FILE)
requirements_cache_path = (
os.path.join(tempfile.gettempdir(), 'dataflow-requirements-cache')
if setup_options.requirements_cache is None
else setup_options.requirements_cache)
# Populate cache with packages from requirements and stage the files
# in the cache.
if not os.path.exists(requirements_cache_path):
os.makedirs(requirements_cache_path)
populate_requirements_cache(
setup_options.requirements_file, requirements_cache_path)
for pkg in glob.glob(os.path.join(requirements_cache_path, '*')):
file_copy(pkg, utils.path.join(google_cloud_options.staging_location,
os.path.basename(pkg)))
resources.append(os.path.basename(pkg))
# Handle a setup file if present.
# We will build the setup package locally and then copy it to the staging
# location because the staging location is a GCS path and the file cannot be
# created directly there.
if setup_options.setup_file is not None:
if not os.path.isfile(setup_options.setup_file):
raise RuntimeError('The file %s cannot be found. It was specified in the '
'--setup_file command line option.' %
setup_options.setup_file)
if os.path.basename(setup_options.setup_file) != 'setup.py':
raise RuntimeError(
'The --setup_file option expects the full path to a file named '
'setup.py instead of %s' % setup_options.setup_file)
tarball_file = _build_setup_package(setup_options.setup_file, temp_dir,
build_setup_args)
staged_path = utils.path.join(google_cloud_options.staging_location,
WORKFLOW_TARBALL_FILE)
file_copy(tarball_file, staged_path)
resources.append(WORKFLOW_TARBALL_FILE)
# Handle extra local packages that should be staged.
if setup_options.extra_packages is not None:
resources.extend(
_stage_extra_packages(setup_options.extra_packages,
google_cloud_options.staging_location,
file_copy=file_copy,
temp_dir=temp_dir))
# Pickle the main session if requested.
# We will create the pickled main session locally and then copy it to the
# staging location because the staging location is a GCS path and the file
# cannot be created directly there.
if setup_options.save_main_session:
pickled_session_file = os.path.join(temp_dir,
names.PICKLED_MAIN_SESSION_FILE)
pickler.dump_session(pickled_session_file)
staged_path = utils.path.join(google_cloud_options.staging_location,
names.PICKLED_MAIN_SESSION_FILE)
file_copy(pickled_session_file, staged_path)
resources.append(names.PICKLED_MAIN_SESSION_FILE)
if hasattr(setup_options, 'sdk_location') and setup_options.sdk_location:
if setup_options.sdk_location == 'default':
stage_tarball_from_remote_location = True
elif (setup_options.sdk_location.startswith('gs://') or
setup_options.sdk_location.startswith('http://') or
setup_options.sdk_location.startswith('https://')):
stage_tarball_from_remote_location = True
else:
stage_tarball_from_remote_location = False
staged_path = utils.path.join(google_cloud_options.staging_location,
names.DATAFLOW_SDK_TARBALL_FILE)
if stage_tarball_from_remote_location:
# If --sdk_location is not specified then the appropriate URL is built
# based on the version of the currently running SDK. If the option is
# present then no version matching is made and the exact URL or path
# is expected.
#
# Unit tests running in the 'python setup.py test' context will
# not have the sdk_location attribute present and therefore we
# will not stage a tarball.
if setup_options.sdk_location == 'default':
sdk_remote_location = '%s/v%s.tar.gz' % (
PACKAGES_URL_PREFIX, __version__)
else:
sdk_remote_location = setup_options.sdk_location
_stage_dataflow_sdk_tarball(sdk_remote_location, staged_path, temp_dir)
resources.append(names.DATAFLOW_SDK_TARBALL_FILE)
else:
# Check if we have a local Dataflow SDK tarball present. This branch is
# used by tests running with the SDK built at head.
if setup_options.sdk_location == 'default':
module_path = os.path.abspath(__file__)
sdk_path = os.path.join(
os.path.dirname(module_path), '..', names.DATAFLOW_SDK_TARBALL_FILE)
elif os.path.isdir(setup_options.sdk_location):
sdk_path = os.path.join(
setup_options.sdk_location, names.DATAFLOW_SDK_TARBALL_FILE)
else:
sdk_path = setup_options.sdk_location
if os.path.isfile(sdk_path):
logging.info('Copying dataflow SDK "%s" to staging location.', sdk_path)
file_copy(sdk_path, staged_path)
resources.append(names.DATAFLOW_SDK_TARBALL_FILE)
else:
if setup_options.sdk_location == 'default':
raise RuntimeError('Cannot find default Dataflow SDK tar file "%s"',
sdk_path)
else:
raise RuntimeError(
'The file "%s" cannot be found. Its location was specified by '
'the --sdk_location command-line option.' %
sdk_path)
# Delete all temp files created while staging job resources.
shutil.rmtree(temp_dir)
return resources
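The `temp_dir = temp_dir or tempfile.mkdtemp()` idiom above lets tests inject a directory while normal callers get a fresh one; either way the directory is deleted at the end. A minimal sketch, with stage as a placeholder for the resource-building steps:

import shutil
import tempfile

def stage_resources(stage, temp_dir=None):
    # Tests can pass their own directory; otherwise create a throwaway one.
    temp_dir = temp_dir or tempfile.mkdtemp()
    try:
        return stage(temp_dir)
    finally:
        shutil.rmtree(temp_dir)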
0
Example 129
Project: medpy Source File: loadsave.py
def test_SaveLoad(self):
"""
The basic essence of this test is to check whether any one image format in any one
dimension can be saved and read, as this is the only base requirement for using
medpy.
Additionally checks the basic expected behaviour of the load and save
functionality.
Since this usually does not make much sense, this implementation also allows
setting a switch (verbose) which causes the test to print a comprehensive overview
of which image formats, with how many dimensions and which pixel data types,
can be read and written.
"""
####
# VERBOSE SETTINGS
# The following are three variables that can be used to print some nicely
# formatted additional output. When one of them is set to True, this unittest
# should be run stand-alone.
####
# Print a list of supported image types, dimensions and pixel data types
supported = True
# Print a list of image types that were tested but are not supported
notsupported = False
# Print a list of image type, dimensions and pixel data types configurations,
# that seem to work but failed the consistency tests. These should be handled
# with special care, as they might be the source of errors.
inconsistent = True
####
# OTHER SETTINGS
####
# debug settings
logger = Logger.getInstance()
#logger.setLevel(logging.DEBUG)
# run test either for most important formats or for all
#__suffixes = self.__important # (choice 1)
__suffixes = self.__pydicom + self.__nifti + self.__itk + self.__itk_more # (choice 2)
# dimensions and dtypes to check
__suffixes = list(set(__suffixes))
__ndims = [1, 2, 3, 4, 5]
__dtypes = [scipy.bool_,
scipy.int8, scipy.int16, scipy.int32, scipy.int64,
scipy.uint8, scipy.uint16, scipy.uint32, scipy.uint64,
scipy.float32, scipy.float64,
scipy.complex64, scipy.complex128]
# prepare struct to save settings that passed the test
valid_types = dict.fromkeys(__suffixes)
for k1 in valid_types:
valid_types[k1] = dict.fromkeys(__ndims)
for k2 in valid_types[k1]:
valid_types[k1][k2] = []
# prepare struct to save settings that did not
unsupported_type = dict.fromkeys(__suffixes)
for k1 in unsupported_type:
unsupported_type[k1] = dict.fromkeys(__ndims)
for k2 in unsupported_type[k1]:
unsupported_type[k1][k2] = dict.fromkeys(__dtypes)
# prepare struct to save settings that did not pass the data integrity test
invalid_types = dict.fromkeys(__suffixes)
for k1 in invalid_types:
invalid_types[k1] = dict.fromkeys(__ndims)
for k2 in invalid_types[k1]:
invalid_types[k1][k2] = dict.fromkeys(__dtypes)
# create artifical images, save them, load them again and compare them
path = tempfile.mkdtemp()
try:
for ndim in __ndims:
logger.debug('Testing for dimension {}...'.format(ndim))
arr_base = scipy.random.randint(0, 10, range(10, ndim + 10))
for dtype in __dtypes:
arr_save = arr_base.astype(dtype)
for suffix in __suffixes:
# do not run test, if in avoid array
if ndim in self.__avoid and suffix in self.__avoid[ndim]:
unsupported_type[suffix][ndim][dtype] = "Test skipped, as combination in the tests __avoid array."
continue
image = '{}/img{}'.format(path, suffix)
try:
# attempt to save the image
save(arr_save, image)
self.assertTrue(os.path.exists(image), 'Image of type {} with shape={}/dtype={} has been saved without exception, but the file does not exist.'.format(suffix, arr_save.shape, dtype))
# attempt to load the image
arr_load, header = load(image)
self.assertTrue(header, 'Image of type {} with shape={}/dtype={} has been loaded without exception, but no header has been supplied (got: {})'.format(suffix, arr_save.shape, dtype, header))
# check for data consistency
msg = self.__diff(arr_save, arr_load)
if msg:
invalid_types[suffix][ndim][dtype] = msg
#elif list == type(valid_types[suffix][ndim]):
else:
valid_types[suffix][ndim].append(dtype)
# remove image
if os.path.exists(image): os.remove(image)
except Exception as e: # clean up
unsupported_type[suffix][ndim][dtype] = e.message
if os.path.exists(image): os.remove(image)
except Exception:
if not os.listdir(path): os.rmdir(path)
else: logger.debug('Could not delete temporary directory {}. It is not empty.'.format(path))
raise
if supported:
print '\nsave() and load() support (at least) the following image configurations:'
print 'type\tndim\tdtypes'
for suffix in valid_types:
for ndim, dtypes in valid_types[suffix].iteritems():
if list == type(dtypes) and not 0 == len(dtypes):
print '{}\t{}D\t{}'.format(suffix, ndim, map(lambda x: str(x).split('.')[-1][:-2], dtypes))
if notsupported:
print '\nthe following configurations are not supported:'
print 'type\tndim\tdtype\t\terror'
for suffix in unsupported_type:
for ndim in unsupported_type[suffix]:
for dtype, msg in unsupported_type[suffix][ndim].iteritems():
if msg:
print '{}\t{}D\t{}\t\t{}'.format(suffix, ndim, str(dtype).split('.')[-1][:-2], msg)
if inconsistent:
print '\nthe following configurations show inconsistent saving and loading behaviour:'
print 'type\tndim\tdtype\t\terror'
for suffix in invalid_types:
for ndim in invalid_types[suffix]:
for dtype, msg in invalid_types[suffix][ndim].iteritems():
if msg:
print '{}\t{}D\t{}\t\t{}'.format(suffix, ndim, str(dtype).split('.')[-1][:-2], msg)
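The core of the consistency check above is a save/load round trip through a mkdtemp() directory. A stripped-down sketch, where save, load and payload stand in for the save/load functions used above and the test array:

import os
import tempfile

def roundtrip_ok(save, load, payload, suffix='.nii'):
    path = tempfile.mkdtemp()
    target = os.path.join(path, 'img' + suffix)
    try:
        save(payload, target)
        loaded = load(target)
        return loaded == payload  # replace with an array comparison for ndarrays
    finally:
        # Remove only what was created, then the now-empty directory.
        if os.path.exists(target):
            os.remove(target)
        os.rmdir(path)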
0
Example 130
Project: datafari Source File: install.py
def run(self, options, args):
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
if options.download_dir:
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
if (options.download_dir or not wheel or not
options.cache_dir):
# on -d don't do complex things like building
# wheels, and don't try to build wheels when wheel is
# not installed.
requirement_set.prepare_files(finder)
else:
# build wheels before install.
wb = WheelBuilder(
requirement_set,
finder,
build_options=[],
global_options=[],
)
# Ignore the result: a failed wheel will be
# installed from the sdist/vcs whatever.
wb.build(autobuilding=True)
if not options.download_dir:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
if options.target_dir:
ensure_dir(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
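For the --target path, pip above installs into a temporary home created with mkdtemp, then moves each installed item into the real target directory and deletes the staging area. A minimal sketch of that relocation step, with build as a placeholder for the actual install:

import os
import shutil
import tempfile

def install_into_target(build, target_dir):
    staging = tempfile.mkdtemp()
    try:
        lib_dir = build(staging)  # install with --home=staging, return its purelib dir
        os.makedirs(target_dir, exist_ok=True)
        for item in os.listdir(lib_dir):
            # Move each installed package/file into the requested target directory.
            shutil.move(os.path.join(lib_dir, item),
                        os.path.join(target_dir, item))
    finally:
        shutil.rmtree(staging)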
0
Example 131
Project: multidrive Source File: multidrive.py
def main():
parser = argparse.ArgumentParser(description='MultiDrive version ' +
str(__version__) +
'\nMultiple Cloud Storage '
'Operations')
parser.add_argument('-s', '--source', nargs=1, required=True,
help='set primary service for this command. Valid '
'values are clouddrive, onedrive and googledrive')
parser.add_argument('-a', '--action', nargs=1, required=True,
help='action to perform, valid actions include '
'download, upload, list, copy, and quota')
parser.add_argument('-d', '--destination', nargs=1,
help='set secondary service for this command. Valid '
'values are clouddrive, onedrive and googledrive. '
'Only valid with copy command')
parser.add_argument('-l', '--local', nargs=1,
help='path of local file or folder')
parser.add_argument('-r', '--remote', nargs=1,
help='path of remote file or folder')
parser.add_argument('-c', '--createfolder',
help='enable creation of necessary remote folders',
action='store_true')
parser.add_argument('-e', '--secondaryremote', nargs=1,
help='path secondary remote file or folder (for copy '
'action)')
parser.add_argument('-o', '--overwrite',
help='enable overwriting of files',
action='store_true')
parser.add_argument('-b', '--debug', help="enable debug logging",
action='store_true')
args = parser.parse_args()
service = get_storage_service(args.source[0])
if service is None:
raise ValueError("Please specify a valid source service.")
service.authorize()
if args.debug is True:
logging.getLogger("multidrive").setLevel(logging.DEBUG)
logging.getLogger("multidrive").debug("Logging enabled.")
if args.action[0].lower() == "upload":
if args.local is None:
raise ValueError("Please specify a local file to upload.")
destination = None
if args.remote is not None:
destination = args.remote[0]
if os.path.isdir(args.local[0]):
if destination is not None and service.is_folder(destination) is False:
if args.createfolder is False:
raise ValueError("Non-existant folder necessary but create folder not set.")
service.create_folder(destination)
base_local_path = args.local[0]
last_part_of_local_path = os.path.basename(os.path.normpath(base_local_path))
if destination is None:
destination = last_part_of_local_path
else:
destination = destination + '/' + last_part_of_local_path
base_remote_path = destination
if service.is_folder(base_remote_path) is False:
if args.createfolder is False:
raise ValueError("Non-existant folder necessary but create folder not set.")
service.create_folder(base_remote_path)
for (root, dirs, files) in os.walk(base_local_path):
for cur_dir in dirs:
cur_remote_path = base_remote_path+root[len(base_local_path):]+"/"+cur_dir
if service.is_folder(destination) is False:
if args.createfolder is False:
raise ValueError("Non-existant folder necessary but create folder not set.")
service.create_folder(cur_remote_path)
for cur_file in files:
service.upload(os.path.join(root, cur_file),
destination=base_remote_path+root[len(base_local_path):],
create_folder=args.createfolder,
overwrite=args.overwrite)
else:
service.upload(args.local[0], destination=destination,
create_folder=args.createfolder,
overwrite=args.overwrite)
elif args.action[0].lower() == "download":
if args.remote is None:
raise ValueError("Please specify a remote file or folder to "
"download.")
local_path = None
if args.local is not None:
local_path = args.local[0]
if service.is_folder(args.remote[0]) is True:
# TODO: Give an error earlier if the
# destination folder doesn't exist
remote_files = service.list_folder(args.remote[0])
for (cur_file, path) in remote_files:
destination = None
if local_path is None:
if path is None or len(path) == 0:
destination = None
else:
# TODO: is this portable to windows? Should I be using a
# "/".join method?
destination = os.path.join(*path)
else:
destination = os.path.join(local_path, *path)
service.download_item(cur_file,
destination=destination,
overwrite=args.overwrite,
create_folder=True)
else:
service.download(args.remote[0],
local_path,
overwrite=args.overwrite)
elif args.action[0].lower() == "list":
if args.remote is None:
raise ValueError("Please specify a remote folder to list.")
if service.is_folder(args.remote[0]) is False:
raise ValueError("Remote path is either does not exist or is not "
"a folder")
for (cur_file, path) in service.list_folder(args.remote[0]):
new_path = list(path)
new_path.append(service.get_file_name(cur_file))
print("/".join(new_path))
elif args.action[0].lower() == "copy":
if args.destination is None:
raise ValueError("Please specify a destination for copy "
"operation.")
service2 = get_storage_service(args.destination[0])
if service2 is None:
raise ValueError("Please specify a valid secondary source "
"service.")
service2.authorize()
if args.source[0].lower() == args.destination[0].lower():
raise ValueError("Primary and secondary services must be "
"different")
if args.remote is None:
raise ValueError("Please specify a remote file or folder to copy "
"from.")
if args.secondaryremote is None:
raise ValueError("Please specify a secondary remote file or "
"folder to copy to.")
if args.createfolder is False and \
service2.is_folder(args.secondaryremote[0]) \
is False:
raise ValueError("Secondary remote folder does not exist. Use the "
"createfolder option to create it")
tmp_path = tempfile.mkdtemp()
try:
if service.is_folder(args.remote[0]) is True:
remote_files = service.list_folder(args.remote[0])
for (cur_file, path) in remote_files:
cur_dest = args.secondaryremote[0]
if len(path) > 0:
cur_dest = os.path.join(cur_dest, *path)
if service.is_folder_from_file_type(cur_file):
if not service2.is_folder(cur_dest):
service2.create_folder(cur_dest)
else:
(local_temp,
last_mod) = (service.download_item(
cur_file,
destination=tmp_path,
overwrite=args.overwrite))
service2.upload(local_temp, destination=cur_dest,
modified_time=last_mod,
create_folder=args.createfolder,
overwrite=args.overwrite)
os.remove(local_temp)
else:
(local_temp, last_mod) = (service.download(
args.remote[0], tmp_path,
overwrite=args.overwrite))
service2.upload(local_temp,
destination=args.secondaryremote[0],
modified_time=last_mod,
create_folder=args.createfolder,
overwrite=args.overwrite)
os.remove(local_temp)
finally:
shutil.rmtree(tmp_path)
elif args.action[0].lower() == "quota":
print(service.get_quota())
else:
raise ValueError("Please specify a valid action.")
0
Example 132
Project: GerbLook Source File: main.py
@mod.route('/', methods=['GET', 'POST'])
def index():
errors = []
form = UploadForm()
form.soldermask_color.choices = [(x, x) for x in app.config['SOLDERMASK_COLORS'].keys()]
form.silkscreen_color.choices = [(x, x) for x in app.config['SILKSCREEN_COLORS'].keys()]
form.copper_color.choices = [(x, x) for x in app.config['COPPER_COLORS'].keys()]
if form.validate_on_submit():
tempdir = tempfile.mkdtemp()
gerberdir = os.path.join(tempdir, 'gerbers')
os.mkdir(gerberdir)
gerbers = []
for f in request.files.getlist('gerbers'):
mimetype = magic.from_buffer(f.read(1024), mime=True)
f.seek(0)
if mimetype == 'application/zip':
archive = ZipFile(f)
for member in archive.namelist():
safe_filename = secure_filename(member)
extracted_filename = os.path.join(gerberdir, safe_filename)
open(extracted_filename, 'wb').write(archive.read(member))
gerbers.append(safe_filename)
elif mimetype == 'application/x-rar':
archive_name = os.path.join(tempdir, secure_filename(f.filename))
f.save(archive_name)
archive = RarFile(archive_name)
for member in archive.namelist():
safe_filename = secure_filename(member)
extracted_filename = os.path.join(gerberdir, safe_filename)
open(extracted_filename, 'wb').write(archive.read(member))
gerbers.append(safe_filename)
elif mimetype == 'text/plain':
safe_filename = secure_filename(f.filename)
f.save(os.path.join(gerberdir, safe_filename))
gerbers.append(safe_filename)
else:
shutil.rmtree(tempdir)
error = 'That was an unexpected file type. Please upload a zip, rar or selection of gerber files.'
error += ' Check the <a class="alert-link" href="/faq">FAQ</a> for the supported file names.'
flash(error, 'error')
return render_template('index.html', form=form)
layers = guess_layers(gerbers, gerberdir)
if 'outline' not in layers.keys():
errors.append("Couldn't find outline layer.")
if 'top_copper' not in layers.keys():
errors.append("Couldn't find top copper layer.")
if 'top_soldermask' not in layers.keys():
errors.append("Couldn't find top soldermask layer.")
if 'bottom_copper' not in layers.keys():
errors.append("Couldn't find bottom copper layer.")
if 'bottom_soldermask' not in layers.keys():
errors.append("Couldn't find bottom soldermask layer.")
if errors:
shutil.rmtree(tempdir)
else:
uid = str(shortuuid.uuid())
basedir = os.path.join(app.config['DATA_DIR'], uid)
shutil.move(tempdir, basedir)
gerberdir = os.path.join(basedir, 'gerbers')
imagedir = os.path.join(basedir, 'images')
os.mkdir(imagedir)
try:
color_silkscreen = app.config['SILKSCREEN_COLORS'][form.silkscreen_color.data]
except KeyError:
color_silkscreen = '#eeeeeeee'
try:
color_background = app.config['SOLDERMASK_COLORS'][form.soldermask_color.data]
except KeyError:
color_background = '#225533'
try:
color_copper = app.config['COPPER_COLORS'][form.copper_color.data]
except KeyError:
color_copper = '#a0a0a0ff'
# Calculate size of gerber and output images
try:
w, h = gerber_size(os.path.join(gerberdir, layers['outline'][0]))
except:
w, h = 0, 0
area = w * h
DPI = '200'
if area == 0:
DPI = '200'
elif area < 500:
DPI = '600'
elif area < 1000:
DPI = '500'
elif area < 5000:
DPI = '400'
elif area < 10000:
DPI = '300'
elif area < 20000:
DPI = '200'
details = {
'gerber_size': (w, h),
'dpi': DPI,
'color_silkscreen': color_silkscreen,
'color_background': color_background,
'color_copper': color_copper,
'layers': layers,
'rendered': False,
}
detail_file = os.path.join(basedir, 'details.json')
json.dump(details, open(detail_file, 'w'))
project = Project()
project.id = uid
project.layer_info = layers
project.width = w
project.height = h
project.color_silkscreen = color_silkscreen
project.color_background = color_background
project.color_copper = color_copper
if current_user.is_authenticated():
project.user = current_user
db.session.add(project)
db.session.commit()
app.r.lpush('gerblook/renderqueue', uid)
return redirect(url_for('.pcb', uid=uid))
return render_template('index.html', errors=errors, form=form)
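The upload handler above treats the mkdtemp() directory as a staging area: it is deleted with shutil.rmtree when the upload is rejected and promoted with shutil.move into permanent storage when it validates. A sketch of that accept/reject flow, with extract, validate and final_dir as placeholders:

import shutil
import tempfile

def stage_upload(extract, validate, final_dir):
    tempdir = tempfile.mkdtemp()
    try:
        extract(tempdir)            # e.g. unpack the uploaded archive
        errors = validate(tempdir)  # e.g. check that all layers are present
    except Exception:
        shutil.rmtree(tempdir)
        raise
    if errors:
        shutil.rmtree(tempdir)
        return errors
    # final_dir must not exist yet; shutil.move then renames the staging dir.
    shutil.move(tempdir, final_dir)
    return []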
0
Example 133
Project: webassets Source File: compass.py
def open(self, out, source_path, **kw):
"""Compass currently doesn't take data from stdin, and doesn't allow
us to access the result from stdout either.
Also, there's a bunch of other issues we need to work around:
- compass doesn't support being given an explicit output file, only a
"--css-dir" output directory.
We have to "guess" the filename that will be created in that
directory.
- The output filename used is based on the input filename, and
simply cutting off the length of the "sass_dir" (and changing
the file extension). That is, compass expects the input
filename to always be inside the "sass_dir" (which defaults to
./src), and if this is not the case, the output filename will
be gibberish (missing characters in front). See:
https://github.com/chriseppstein/compass/issues/304
We fix this by setting the proper --sass-dir option.
- Compass insists on creating a .sass-cache folder in the
current working directory, and unlike the sass executable,
there doesn't seem to be a way to disable it.
The workaround is to set the working directory to our temp
directory, so that the cache folder will be deleted at the end.
"""
# Create temp folder one dir below output_path so sources in
# sourcemap are correct. This will be in the project folder,
# and as such, while extremely unlikely, this could interfere
# with existing files and directories.
tempout_dir = path.normpath(
path.join(path.dirname(kw['output_path']), '../')
)
tempout = tempfile.mkdtemp(dir=tempout_dir)
# Temporarily move to "tempout", so .sass-cache will be created there
old_wd = os.getcwd()
os.chdir(tempout)
try:
# Make sure to use normpath() to not cause trouble with
# compass' simplistic path handling, where it just assumes
# source_path is within sassdir, and cuts off the length of
# sassdir from the input file.
sassdir = path.normpath(path.dirname(source_path))
source_path = path.normpath(source_path)
# Compass offers some helpers like image-url(), which need
# information about the urls under which media files will be
# available. This is hard for two reasons: First, the options in
# question aren't supported on the command line, so we need to write
# a temporary config file. Secondly, they assume defined and
# separate directories for "images", "stylesheets" etc., something
# webassets knows nothing of: we don't support the user defining
# such directories. Because we traditionally had this
# filter point all type-specific directories to the root media
# directory, we will define the paths to match this. In other
# words, in Compass, both inline-image("img/test.png) and
# image-url("img/test.png") will find the same file, and assume it
# to be {env.directory}/img/test.png.
# However, this partly negates the purpose of a utility like
# image-url() in the first place - you not having to hard code
# the location of your images. So we allow direct modification of
# the configuration file via the COMPASS_CONFIG setting (see
# tickets #36 and #125).
#
# Note that there is also the --relative-assets option, which we
# can't use because it calculates an actual relative path between
# the image and the css output file, the latter being in a
# temporary directory in our case.
config = CompassConfig(
project_path=self.ctx.directory,
http_path=self.ctx.url,
http_images_dir='',
http_stylesheets_dir='',
http_fonts_dir='',
http_javascripts_dir='',
images_dir='',
output_style=':expanded',
)
# Update with the custom config dictionary, if any.
if self.config:
config.update(self.config)
config_file = path.join(tempout, '.config.rb')
f = open(config_file, 'w')
try:
f.write(config.to_string())
f.flush()
finally:
f.close()
command = [self.compass or 'compass', 'compile']
for plugin in self.plugins or []:
command.extend(('--require', plugin))
command.extend(['--sass-dir', sassdir,
'--css-dir', tempout,
'--config', config_file,
'--quiet',
'--boring',
source_path])
proc = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# shell: necessary on windows to execute
# ruby files, but doesn't work on linux.
shell=(os.name == 'nt'))
stdout, stderr = proc.communicate()
# compass seems to always write a utf8 header? to stderr, so
# make sure to not fail just because there's something there.
if proc.returncode != 0:
raise FilterError(('compass: subprocess had error: stderr=%s, '+
'stdout=%s, returncode=%s') % (
stderr, stdout, proc.returncode))
guessed_outputfilename = path.splitext(path.basename(source_path))[0]
guessed_outputfilepath = path.join(tempout, guessed_outputfilename)
output_file = open("%s.css" % guessed_outputfilepath, encoding='utf-8')
if config.get('sourcemap'):
sourcemap_file = open("%s.css.map" % guessed_outputfilepath)
sourcemap_output_filepath = path.join(
path.dirname(kw['output_path']),
path.basename(sourcemap_file.name)
)
if not path.exists(path.dirname(sourcemap_output_filepath)):
os.mkdir(path.dirname(sourcemap_output_filepath))
sourcemap_output_file = open(sourcemap_output_filepath, 'w')
sourcemap_output_file.write(sourcemap_file.read())
sourcemap_file.close()
try:
contents = output_file.read()
out.write(contents)
finally:
output_file.close()
finally:
# Restore previous working dir
os.chdir(old_wd)
# Clean up the temp dir
shutil.rmtree(tempout)
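The workaround described in the docstring above amounts to a small reusable pattern: create a private temp directory with mkdtemp(), chdir into it so unwanted cache folders land there, then restore the previous working directory and remove the whole tree. A minimal, hypothetical sketch (not part of webassets):
import os
import shutil
import tempfile

def run_in_disposable_cwd(func):
    # Tools like compass insist on writing cache folders (.sass-cache) into
    # the current working directory; run them from a throwaway directory
    # so the cache disappears with it.
    tempout = tempfile.mkdtemp()
    old_wd = os.getcwd()
    os.chdir(tempout)
    try:
        return func(tempout)
    finally:
        os.chdir(old_wd)        # restore the previous working directory
        shutil.rmtree(tempout)  # remove the temp dir and anything written into it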
0
Example 134
Project: django-freeze Source File: writer.py
def write(data, include_media = settings.FREEZE_INCLUDE_MEDIA, include_static = settings.FREEZE_INCLUDE_STATIC, html_in_memory = False, zip_all = settings.FREEZE_ZIP_ALL, zip_in_memory = False):
if os.path.exists(settings.FREEZE_ROOT):
shutil.rmtree(settings.FREEZE_ROOT)
if not os.path.exists(settings.FREEZE_ROOT):
os.makedirs(settings.FREEZE_ROOT)
#create site tree
files_root = tempfile.mkdtemp() if html_in_memory else settings.FREEZE_ROOT
if html_in_memory:
print(u'\ncreate site tree and write it to a temporary directory...')
files_root = tempfile.mkdtemp()
else:
print(u'\ncreate site tree and write it to disk...')
files_root = settings.FREEZE_ROOT
if not os.path.exists(files_root):
os.makedirs(files_root)
#create directories tree and index(es).html files
for d in data:
file_dirs = os.path.join(os.path.normpath(files_root + d['file_dirs']))
file_path = os.path.join(os.path.normpath(files_root + d['file_path']))
file_data = d['file_data']
if not os.path.exists(file_dirs):
os.makedirs(file_dirs)
#print(u'create directory: %s' % (file_dirs, ))
print(u'create file: %s' % (file_path, ))
file_obj = open(file_path, 'wb')
file_obj.write(file_data)
file_obj.close()
if zip_all:
print(u'\nzip files...')
if zip_in_memory:
zip_file_stream = BytesIO()
zip_file = zipfile.ZipFile(zip_file_stream, 'w')
else:
zip_file = zipfile.ZipFile(settings.FREEZE_ZIP_PATH, 'w')
for d in data:
file_src_path = os.path.normpath(files_root + d['file_path'])
if zip_all:
file_rel_path = d['file_path']
print(u'zip file: %s' % (file_rel_path, ))
zip_file.write(file_src_path, file_rel_path)
if include_static:
if zip_all:
print(u'\nzip static files...')
else:
print(u'\ncopy static files...')
include_static_dirs = isinstance(include_static, (list, tuple, ))
for root, dirs, files in os.walk(settings.FREEZE_STATIC_ROOT):
include_dir = False
if include_static_dirs:
for static_dir in include_static:
static_dir_path = os.path.join(settings.FREEZE_STATIC_ROOT + static_dir)
if root.find(static_dir_path) == 0:
include_dir = True
break
else:
include_dir = True
if not include_dir:
continue
for file in files:
file_src_path = os.path.join(root, file)
file_dst_path = file_src_path[file_src_path.find(settings.FREEZE_STATIC_URL):]
if zip_all:
print(u'zip static file: %s' % (file_dst_path, ))
zip_file.write(file_src_path, file_dst_path)
else:
file_dst_path = os.path.normpath(settings.FREEZE_ROOT + '/' + file_dst_path)
file_dst_dirname = os.path.dirname(file_dst_path)
print(u'copy static file: %s - %s' % (file_src_path, file_dst_path, ))
if not os.path.exists(file_dst_dirname):
os.makedirs(file_dst_dirname)
shutil.copy2(file_src_path, file_dst_path)
if include_media:
if zip_all:
print(u'\nzip media files...')
else:
print(u'\ncopy media files...')
include_media_dirs = isinstance(include_media, (list, tuple, ))
for root, dirs, files in os.walk(settings.FREEZE_MEDIA_ROOT):
include_dir = False
if include_media_dirs:
for media_dir in include_media:
media_dir_path = os.path.join(settings.FREEZE_MEDIA_ROOT + media_dir)
if root.find(media_dir_path) == 0:
include_dir = True
break
else:
include_dir = True
if not include_dir:
continue
for file in files:
file_src_path = os.path.join(root, file)
file_dst_path = file_src_path[file_src_path.find(settings.FREEZE_MEDIA_URL):]
if zip_all:
print(u'zip media file: %s' % (file_dst_path, ))
zip_file.write(file_src_path, file_dst_path)
else:
file_dst_path = os.path.normpath(settings.FREEZE_ROOT + '/' + file_dst_path)
file_dst_dirname = os.path.dirname(file_dst_path)
print(u'copy media file: %s - %s' % (file_src_path, file_dst_path, ))
if not os.path.exists(file_dst_dirname):
os.makedirs(file_dst_dirname)
shutil.copy2(file_src_path, file_dst_path)
if zip_all:
zip_file.close()
if zip_in_memory:
zip_file_stream.seek(0)
zip_file_stream_value = zip_file_stream.getvalue()
zip_file_stream.close()
return zip_file_stream_value
else:
print(u'\nstatic site zipped ready at: %s' % (settings.FREEZE_ZIP_PATH, ))
else:
print(u'\nstatic site ready at: %s' % (settings.FREEZE_ROOT, ))
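The choice at the top of this function, between a throwaway mkdtemp() directory and the configured FREEZE_ROOT, reduces to a one-liner; a hypothetical sketch with an assumed fixed_root value:
import tempfile

def pick_files_root(html_in_memory, fixed_root='/var/www/freeze'):  # fixed_root is a made-up path
    # In-memory builds write the site tree to a fresh temporary directory;
    # otherwise they write straight into the configured root on disk.
    return tempfile.mkdtemp() if html_in_memory else fixed_root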
0
Example 135
Project: phylowgs Source File: evolve.py
def do_mcmc(state_manager, backup_manager, safe_to_exit, run_succeeded, config, state, tree_writer, codes, n_ssms, n_cnvs, NTPS, tmp_dir_parent):
start_iter = state['last_iteration'] + 1
unwritten_trees = []
mcmc_sample_times = []
last_mcmc_sample_time = time.time()
# If --tmp-dir is not specified on the command line, it will by default be
# None, which will cause mkdtemp() to place this directory under the system's
# temporary directory. This is the desired behaviour.
config['tmp_dir'] = tempfile.mkdtemp(prefix='pwgsdataexchange.', dir=tmp_dir_parent)
for iteration in range(start_iter, state['num_samples']):
safe_to_exit.set()
if iteration < 0:
logmsg(iteration)
# Referring to tssb as local variable instead of dictionary element is much
# faster.
tssb = state['tssb']
tssb.resample_assignments()
tssb.cull_tree()
# assign node ids
wts, nodes = tssb.get_mixture()
for i, node in enumerate(nodes):
node.id = i
##################################################
## some useful info about the tree,
## used by CNV related computations,
## to be called only after resampling assignments
set_node_height(tssb)
set_path_from_root_to_node(tssb)
map_datum_to_node(tssb)
##################################################
state['mh_acc'] = metropolis(
tssb,
state['mh_itr'],
state['mh_std'],
state['mh_burnin'],
n_ssms,
n_cnvs,
state['ssm_file'],
state['cnv_file'],
state['rand_seed'],
NTPS,
config['tmp_dir']
)
if float(state['mh_acc']) < 0.08 and state['mh_std'] < 10000:
state['mh_std'] = state['mh_std']*2.0
logmsg("Shrinking MH proposals. Now %f" % state['mh_std'])
if float(state['mh_acc']) > 0.5 and float(state['mh_acc']) < 0.99:
state['mh_std'] = state['mh_std']/2.0
logmsg("Growing MH proposals. Now %f" % state['mh_std'])
tssb.resample_sticks()
tssb.resample_stick_orders()
tssb.resample_hypers(dp_alpha=True, alpha_decay=True, dp_gamma=True)
last_llh = tssb.complete_data_log_likelihood()
if iteration >= 0:
state['cd_llh_traces'][iteration] = last_llh
if True or mod(iteration, 10) == 0:
weights, nodes = tssb.get_mixture()
logmsg(' '.join([str(v) for v in (iteration, len(nodes), state['cd_llh_traces'][iteration], state['mh_acc'], tssb.dp_alpha, tssb.dp_gamma, tssb.alpha_decay)]))
if argmax(state['cd_llh_traces'][:iteration+1]) == iteration:
logmsg("%f is best per-data complete data likelihood so far." % (state['cd_llh_traces'][iteration]))
else:
state['burnin_cd_llh_traces'][iteration + state['burnin']] = last_llh
# Can't just put tssb in unwritten_trees, as this object will be modified
# on subsequent iterations, meaning any stored references in
# unwritten_trees will all point to the same sample.
serialized = pickle.dumps(tssb, protocol=pickle.HIGHEST_PROTOCOL)
unwritten_trees.append((serialized, iteration, last_llh))
state['tssb'] = tssb
state['rand_state'] = get_state()
state['last_iteration'] = iteration
new_mcmc_sample_time = time.time()
mcmc_sample_times.append(new_mcmc_sample_time - last_mcmc_sample_time)
last_mcmc_sample_time = new_mcmc_sample_time
# It's not safe to exit while performing file IO, as we don't want
# trees.zip or the computation state file to become corrupted from an
# interrupted write.
safe_to_exit.clear()
should_write_backup = iteration % state['write_backups_every'] == 0 and iteration != start_iter
should_write_state = iteration % state['write_state_every'] == 0
is_last_iteration = (iteration == state['num_samples'] - 1)
# If backup is scheduled to be written, write both it and full program
# state regardless of whether we're scheduled to write state this
# iteration.
if should_write_backup or should_write_state or is_last_iteration:
with open('mcmc_samples.txt', 'a') as mcmcf:
llhs_and_times = [(itr, llh, itr_time) for (tssb, itr, llh), itr_time in zip(unwritten_trees, mcmc_sample_times)]
llhs_and_times = '\n'.join(['%s\t%s\t%s' % (itr, llh, itr_time) for itr, llh, itr_time in llhs_and_times])
mcmcf.write(llhs_and_times + '\n')
tree_writer.write_trees(unwritten_trees)
state_manager.write_state(state)
unwritten_trees = []
mcmc_sample_times = []
if should_write_backup:
backup_manager.save_backup()
safe_to_exit.clear()
#save the best tree
print_top_trees(TreeWriter.default_archive_fn, state['top_k_trees_file'], state['top_k'])
#save clonal frequencies
freq = dict([(g,[] )for g in state['glist']])
glist = array(freq.keys(),str)
glist.shape=(1,len(glist))
savetxt(state['clonal_freqs_file'] ,vstack((glist, array([freq[g] for g in freq.keys()]).T)), fmt='%s', delimiter=', ')
state_manager.delete_state_file()
safe_to_exit.set()
run_succeeded.set()
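As the comment near the top of this example notes, mkdtemp() only needs an explicit dir= when the caller wants to control the parent directory; a short sketch of the default behaviour:
import tempfile

# dir=None (the default) places the new directory under the system-wide
# temporary directory returned by tempfile.gettempdir(), e.g. /tmp on Linux.
d_default = tempfile.mkdtemp(prefix='pwgsdataexchange.')
d_explicit = tempfile.mkdtemp(prefix='pwgsdataexchange.', dir=tempfile.gettempdir())
# Both directories end up under the same parent here; passing a different
# dir= (the --tmp-dir option above) simply overrides that default.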
0
Example 136
Project: easybuild-easyblocks Source File: aladin.py
def configure_step(self):
"""Custom configuration procedure for ALADIN."""
# unset $LIBRARY_PATH set by modules of dependencies, because it may screw up linking
if 'LIBRARY_PATH' in os.environ:
self.log.debug("Unsetting $LIBRARY_PATH (was: %s)" % os.environ['LIBRARY_PATH'])
self.orig_library_path = os.environ.pop('LIBRARY_PATH')
# build auxiliary libraries
auxlibs_dir = None
my_gnu = None
if self.toolchain.comp_family() == toolchain.GCC:
my_gnu = 'y' # gfortran
for var in ['CFLAGS', 'CXXFLAGS', 'F90FLAGS', 'FFLAGS']:
flags = os.getenv(var)
env.setvar(var, "%s -fdefault-real-8 -fdefault-double-8" % flags)
self.log.info("Updated %s to '%s'" % (var, os.getenv(var)))
elif self.toolchain.comp_family() == toolchain.INTELCOMP:
my_gnu = 'i' # icc/ifort
else:
raise EasyBuildError("Don't know how to set 'my_gnu' variable in auxlibs build script.")
self.log.info("my_gnu set to '%s'" % my_gnu)
tmp_installroot = tempfile.mkdtemp(prefix='aladin_auxlibs_')
try:
cwd = os.getcwd()
os.chdir(self.builddir)
builddirs = os.listdir(self.builddir)
auxlibs_dir = [x for x in builddirs if x.startswith('auxlibs_installer')][0]
os.chdir(auxlibs_dir)
auto_driver = 'driver_automatic'
for line in fileinput.input(auto_driver, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(my_gnu\s*=\s*).*$", r"\1%s" % my_gnu, line)
line = re.sub(r"^(my_r32\s*=\s*).*$", r"\1n", line) # always 64-bit real precision
line = re.sub(r"^(my_readonly\s*=\s*).*$", r"\1y", line) # make libs read-only after build
line = re.sub(r"^(my_installroot\s*=\s*).*$", r"\1%s" % tmp_installroot, line)
sys.stdout.write(line)
run_cmd("./%s" % auto_driver)
os.chdir(cwd)
except OSError, err:
raise EasyBuildError("Failed to build ALADIN: %s", err)
# build gmkpack, update PATH and set GMKROOT
# we build gmkpack here because a config file is generated in the gmkpack install path
try:
gmkpack_dir = [x for x in builddirs if x.startswith('gmkpack')][0]
os.chdir(os.path.join(self.builddir, gmkpack_dir))
qa = {
'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'n',
}
run_cmd_qa("./build_gmkpack", qa)
os.chdir(cwd)
paths = os.getenv('PATH').split(':')
paths.append(os.path.join(self.builddir, gmkpack_dir, 'util'))
env.setvar('PATH', ':'.join(paths))
env.setvar('GMKROOT', os.path.join(self.builddir, gmkpack_dir))
except OSError, err:
raise EasyBuildError("Failed to build gmkpack: %s", err)
# generate gmkpack configuration file
self.conf_file = 'ALADIN_%s' % self.version
self.conf_filepath = os.path.join(self.builddir, 'gmkpack_support', 'arch', '%s.x' % self.conf_file)
try:
if os.path.exists(self.conf_filepath):
os.remove(self.conf_filepath)
self.log.info("Removed existing gmpack config file %s" % self.conf_filepath)
archdir = os.path.dirname(self.conf_filepath)
if not os.path.exists(archdir):
mkdir(archdir, parents=True)
except OSError, err:
raise EasyBuildError("Failed to remove existing file %s: %s", self.conf_filepath, err)
mpich = 'n'
known_mpi_libs = [toolchain.MPICH, toolchain.MPICH2, toolchain.INTELMPI]
if self.toolchain.options.get('usempi', None) and self.toolchain.mpi_family() in known_mpi_libs:
mpich = 'y'
qpref = 'Please type the ABSOLUTE name of '
qsuff = ', or ignore (environment variables allowed) :'
qsuff2 = ', or ignore : (environment variables allowed) :'
comp_fam = self.toolchain.comp_family()
if comp_fam == toolchain.GCC:
gribdir = 'GNU'
elif comp_fam == toolchain.INTELCOMP:
gribdir = 'INTEL'
else:
raise EasyBuildError("Don't know which grib lib dir to use for compiler %s", comp_fam)
aux_lib_gribex = os.path.join(tmp_installroot, gribdir, 'lib', 'libgribex.a')
aux_lib_ibm = os.path.join(tmp_installroot, gribdir, 'lib', 'libibmdummy.a')
grib_api_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api.a')
grib_api_f90_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api_f90.a')
grib_api_inc = os.path.join(get_software_root('grib_api'), 'include')
jasperlib = os.path.join(get_software_root('JasPer'), 'lib', 'libjasper.a')
mpilib = os.path.join(os.getenv('MPI_LIB_DIR'), os.getenv('MPI_LIB_SHARED'))
# netCDF
netcdf = get_software_root('netCDF')
netcdf_fortran = get_software_root('netCDF-Fortran')
if netcdf:
netcdfinc = os.path.join(netcdf, 'include')
if netcdf_fortran:
netcdflib = os.path.join(netcdf_fortran, get_software_libdir('netCDF-Fortran'), 'libnetcdff.a')
else:
netcdflib = os.path.join(netcdf, get_software_libdir('netCDF'), 'libnetcdff.a')
if not os.path.exists(netcdflib):
raise EasyBuildError("%s does not exist", netcdflib)
else:
raise EasyBuildError("netCDF(-Fortran) not available")
ldpaths = [ldflag[2:] for ldflag in os.getenv('LDFLAGS').split(' ')] # LDFLAGS have form '-L/path/to'
lapacklibs = []
for lib in os.getenv('LAPACK_STATIC_LIBS').split(','):
libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
lapacklibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
lapacklib = ' '.join(lapacklibs)
blaslibs = []
for lib in os.getenv('BLAS_STATIC_LIBS').split(','):
libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
blaslibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
blaslib = ' '.join(blaslibs)
qa = {
'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'y',
'Do you want to setup your configuration file for MPICH (y/n) [n] ?': mpich,
'Please type the directory name where to find a dummy file mpif.h or ignore :': os.getenv('MPI_INC_DIR'),
'%sthe library gribex or emos%s' % (qpref, qsuff2): aux_lib_gribex,
'%sthe library ibm%s' % (qpref, qsuff): aux_lib_ibm,
'%sthe library grib_api%s' % (qpref, qsuff): grib_api_lib,
'%sthe library grib_api_f90%s' % (qpref, qsuff): grib_api_f90_lib,
'%sthe JPEG auxilary library if enabled by Grib_api%s' % (qpref, qsuff2): jasperlib,
'%sthe library netcdf%s' % (qpref, qsuff): netcdflib,
'%sthe library lapack%s' % (qpref, qsuff): lapacklib,
'%sthe library blas%s' % (qpref, qsuff): blaslib,
'%sthe library mpi%s' % (qpref, qsuff): mpilib,
'%sa MPI dummy library for serial executions, or ignore :' % qpref: '',
'Please type the directory name where to find grib_api headers, or ignore :': grib_api_inc,
'Please type the directory name where to find fortint.h or ignore :': '',
'Please type the directory name where to find netcdf headers, or ignore :': netcdfinc,
'Do you want to define CANARI (y/n) [y] ?': 'y',
'Please type the name of the script file used to generate a preprocessed blacklist file, or ignore :': '',
'Please type the name of the script file used to recover local libraries (gget), or ignore :': '',
'Please type the options to tune the gnu compilers, or ignore :': os.getenv('F90FLAGS'),
}
f90_seq = os.getenv('F90_SEQ')
if not f90_seq:
# F90_SEQ is only defined when usempi is enabled
f90_seq = os.getenv('F90')
stdqa = OrderedDict([
(r'Confirm library .* is .*', 'y'), # this one needs to be tried first!
(r'.*fortran 90 compiler name .*\s*:\n\(suggestions\s*: .*\)', os.getenv('F90')),
(r'.*fortran 90 compiler interfaced with .*\s*:\n\(suggestions\s*: .*\)', f90_seq),
(r'Please type the ABSOLUTE name of .*library.*, or ignore\s*[:]*\s*[\n]*.*', ''),
(r'Please .* to save this draft configuration file :\n.*', '%s.x' % self.conf_file),
])
no_qa = [
".*ignored.",
]
env.setvar('GMKTMP', self.builddir)
env.setvar('GMKFILE', self.conf_file)
run_cmd_qa("gmkfilemaker", qa, std_qa=stdqa, no_qa=no_qa)
# set environment variables for installation dirs
env.setvar('ROOTPACK', os.path.join(self.installdir, 'rootpack'))
env.setvar('ROOTBIN', os.path.join(self.installdir, 'rootpack'))
env.setvar('HOMEPACK', os.path.join(self.installdir, 'pack'))
env.setvar('HOMEBIN', os.path.join(self.installdir, 'pack'))
# patch config file to include right Fortran compiler flags
regex_subs = [(r"^(FRTFLAGS\s*=.*)$", r"\1 %s" % os.getenv('FFLAGS'))]
apply_regex_substitutions(self.conf_filepath, regex_subs)
0
Example 137
Project: airmozilla Source File: videoinfo.py
def fetch_screencapture(
event, save=False, save_locally=False, verbose=False, use_https=True,
import_=True, import_if_possible=False, video_url=None,
set_first_available=False, import_immediately=False,
timestamps=None, callback=None,
):
"""return number of files that were successfully created or None"""
assert event.duration, "no duration"
# When you set `import_` to False, it creates the JPEGs and leaves
# them there in a predictable location (so they can be swept up
# by import_screencaptures later).
# However, if you want to continue doing that plus at least
# try to import the created pictures, then set
# `import_if_possible=True`.
# Then, if the import fails, the pictures are still there to be
# picked up by the import_screencaptures() later.
if import_if_possible:
import_ = False
if video_url:
assert not save_locally
else:
video_url, filepath = get_video_url(
event,
use_https,
save_locally,
verbose=verbose,
)
if import_:
save_dir = tempfile.mkdtemp('screencaptures-%s' % event.id)
else:
# Instead of importing we're going to put them in a directory
# that does NOT get deleted when it has created the screencaps.
save_dir = os.path.join(
tempfile.gettempdir(),
settings.SCREENCAPTURES_TEMP_DIRECTORY_NAME
)
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
directory_name = '%s_%s' % (event.id, event.slug)
save_dir = os.path.join(save_dir, directory_name)
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
def format_time(seconds):
m = seconds / 60
s = seconds % 60
h = m / 60
m = m % 60
return '%02d:%02d:%02d' % (h, m, s)
# First, assume we will delete the temporary save_dir.
# This is toggled if an exception happens in importing
# the pictures.
delete_save_dir = True
try:
if verbose: # pragma: no cover
print "Video duration:",
print show_duration(event.duration, include_seconds=True)
ffmpeg_location = getattr(
settings,
'FFMPEG_LOCATION',
'ffmpeg'
)
if verbose and not which(ffmpeg_location):
print ffmpeg_location, "is not an executable path"
incr = float(event.duration) / settings.SCREENCAPTURES_NO_PICTURES
seconds = 0
created = 0
t0 = time.time()
number = 0
output_template = os.path.join(save_dir, 'screencap-%02d.jpg')
all_out = []
all_err = []
def extract_frame(seconds, save_name):
command = [
ffmpeg_location,
'-ss',
format_time(seconds),
'-i',
video_url,
'-vframes',
'1',
save_name,
]
if verbose: # pragma: no cover
print ' '.join(command)
out, err = wrap_subprocess(command)
all_out.append(out)
all_err.append(err)
if timestamps is not None:
for timestamp in timestamps:
extract_frame(timestamp, output_template % timestamp)
if callback:
created = _callback_files(
callback,
_get_files(save_dir),
delete_opened_files=True,
)
# else:
# raise NotImplementedError
if import_immediately:
created += _import_files(
event,
_get_files(save_dir),
delete_opened_files=True,
timestamp=timestamp,
)
else:
while seconds < event.duration:
number += 1
extract_frame(seconds, output_template % number)
seconds += incr
if import_immediately:
created += _import_files(
event,
_get_files(save_dir),
set_first_available=set_first_available,
delete_opened_files=True,
)
# If 'set_first_available' *was* true, it should have at
# that point set the picture for that first one.
if created:
set_first_available = False
t1 = time.time()
files = _get_files(save_dir)
if verbose: # pragma: no cover
print (
"Took %.2f seconds to extract %d pictures" % (
t1 - t0,
len(files)
)
)
if import_ or import_if_possible:
if (
verbose and
not files and
not import_immediately
): # pragma: no cover
print "No output. Error:"
print '\n'.join(all_err)
try:
created += _import_files(
event,
files,
set_first_available=set_first_available
)
except Exception:
delete_save_dir = False
raise
if verbose: # pragma: no cover
print "Created", created, "pictures"
# end of this section, so add some margin
print "\n"
return created
else:
if verbose: # pragma: no cover
print "Created Temporary Directory", save_dir
print '\t' + '\n\t'.join(os.listdir(save_dir))
return len(files)
finally:
if save_locally:
if os.path.isfile(filepath):
shutil.rmtree(os.path.dirname(filepath))
if (
delete_save_dir and
os.path.isdir(save_dir) and
(import_ or import_if_possible)
):
if verbose: # pragma: no cover
print "Deleting temporary directory"
print save_dir
shutil.rmtree(save_dir)
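The directory selection near the top of this example follows a common split: disposable captures go to a unique mkdtemp() directory, while captures meant to be swept up later go to a predictable path under the system temp dir. A hedged sketch of that idea (names are invented):
import os
import tempfile

def capture_dir(ephemeral, event_id, subdir_name='screencaptures_temp'):
    if ephemeral:
        # Unique, disposable directory; the caller deletes it when done.
        return tempfile.mkdtemp('screencaptures-%s' % event_id)
    # Predictable location so a later sweep job can find leftover captures.
    path = os.path.join(tempfile.gettempdir(), subdir_name, str(event_id))
    if not os.path.isdir(path):
        os.makedirs(path)
    return path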
0
Example 138
Project: ganga Source File: Remote.py
def preparejob(self, jobconfig, master_input_sandbox):
"""Prepare the script to create the job on the remote host"""
import tempfile
workdir = tempfile.mkdtemp()
job = self.getJobObject()
script = """#!/usr/bin/env python
from __future__ import print_function
#-----------------------------------------------------
# This job wrapper script is automatically created by
# GANGA Remote backend handler.
#
# It controls:
# 1. unpack input sandbox
# 2. create the new job
# 3. submit it
#-----------------------------------------------------
import os,os.path,shutil,tempfile
import sys,popen2,time,traceback
import tarfile
############################################################################################
###INLINEMODULES###
############################################################################################
j = Job()
output_sandbox = ###OUTPUTSANDBOX###
input_sandbox = ###INPUTSANDBOX###
appexec = ###APPLICATIONEXEC###
appargs = ###APPLICATIONARGS###
back_end = ###BACKEND###
ganga_dir = ###GANGADIR###
code = ###CODE###
environment = ###ENVIRONMENT###
user_env = ###USERENV###
if user_env != None:
for env_var in user_env:
environment[env_var] = user_env[env_var]
j.outputsandbox = output_sandbox
j.backend = back_end
# Unpack the input sandboxes
shutil.move(os.path.expanduser(ganga_dir + "/__subjob_input_sbx__" + code), j.inputdir+"/__subjob_input_sbx__")
shutil.move(os.path.expanduser(ganga_dir + "/__master_input_sbx__" + code), j.inputdir+"/__master_input_sbx__")
# Add the files in the sandbox to the job
inputsbx = []
fullsbxlist = []
try:
tar = tarfile.open(j.inputdir+"/__master_input_sbx__")
filelist = tar.getnames()
print(filelist)
for f in filelist:
fullsbxlist.append( f )
inputsbx.append( j.inputdir + "/" + f )
except:
print("Unable to open master input sandbox")
try:
tar = tarfile.open(j.inputdir+"/__subjob_input_sbx__")
filelist = tar.getnames()
for f in filelist:
fullsbxlist.append( f )
inputsbx.append( j.inputdir + "/" + f )
except:
print("Unable to open subjob input sandbox")
# sort out the path of the exe
if appexec in fullsbxlist:
j.application = Executable ( exe = File(os.path.join(j.inputdir, appexec)), args = appargs, env = environment )
print("Script found: %s" % appexec)
else:
j.application = Executable ( exe = appexec, args = appargs, env = environment )
j.inputsandbox = inputsbx
getPackedInputSandbox(j.inputdir+"/__subjob_input_sbx__", j.inputdir + "/.")
getPackedInputSandbox(j.inputdir+"/__master_input_sbx__", j.inputdir + "/.")
# submit the job
j.submit()
# Start pickle token
print("***_START_PICKLE_***")
# pickle the job
import pickle
print(j.outputdir)
print(pickle.dumps(j._impl))
# print a finished token
print("***_END_PICKLE_***")
print("***_FINISHED_***")
"""
import inspect
import Ganga.Core.Sandbox as Sandbox
script = script.replace('###ENVIRONMENT###', repr(jobconfig.env))
script = script.replace('###USERENV###', repr(self.environment))
script = script.replace(
'###INLINEMODULES###', inspect.getsource(Sandbox.WNSandbox))
script = script.replace(
'###OUTPUTSANDBOX###', repr(jobconfig.outputbox))
script = script.replace(
'###APPLICATIONEXEC###', repr(os.path.basename(jobconfig.getExeString())))
script = script.replace(
'###APPLICATIONARGS###', repr(jobconfig.getArgStrings()))
# get a string describing the required backend
import cStringIO
be_out = cStringIO.StringIO()
job.backend.remote_backend.printTree(be_out, "copyable")
be_str = be_out.getvalue()
script = script.replace('###BACKEND###', be_str)
script = script.replace('###GANGADIR###', repr(self.ganga_dir))
script = script.replace('###CODE###', repr(self._code))
sandbox_list = jobconfig.getSandboxFiles()
str_list = "[ "
for fname in sandbox_list:
str_list += "j.inputdir + '/' + " + \
repr(os.path.basename(fname.name))
str_list += ", "
str_list += "j.inputdir + '/__master_input_sbx__' ]"
script = script.replace('###INPUTSANDBOX###', str_list)
return job.getInputWorkspace().writefile(FileBuffer('__jobscript__.py', script), executable=0)
0
Example 139
Project: onearth Source File: oe_create_mvt_mrf.py
def geojson_to_mrf(geojson_path, output_file_prefix, dest_path, proj_string, debug=False, max_zoom=3, del_src=False):
"""Takes an input geojson and outputs an MVT MRF in the given location.
Args:
geojson_path (str): A path to the .geojson file that will be processed.
output_file_prefix (str): The string that will be used as the prefix for the MRF files that will be created, i.e. output_file_prefix_.idx
dest_path (str): A path to the location where the output MRF files will be stored.
proj_string (str): The EPSG projection being used by the dataset.
(optional) debug (bool): Toggles verbose output and will leave behind artifact files.
(optional) max_zoom (int): Number of zoom levels that will be added to the MRF. Defaults to 3.
"""
if debug:
temp_dir = './'
else:
temp_dir = tempfile.mkdtemp()
# Use tippecanoe to generate MBTiles stack
if debug:
print 'Converting GeoJSON to vector tiles...'
output_mbtiles_path = os.path.join(temp_dir, output_file_prefix + '.mbtiles')
subprocess.call(('tippecanoe', '-o', output_mbtiles_path, '-s', proj_string, '-z', str(max_zoom), '-pk', geojson_path))
# Store MBTile stack contents into MRF
if debug:
print 'Storing vector tiles as MRF...'
conn = sqlite3.connect(output_mbtiles_path)
cur = conn.cursor()
notile = struct.pack('!QQ', 0, 0)
offset = 0
try:
fidx = open(os.path.join(dest_path, output_file_prefix + '_.idx'), 'w')
fout = open(os.path.join(dest_path, output_file_prefix + '_.pvt'), 'w')
fmrf = open(os.path.join(dest_path, output_file_prefix + '_.mrf'), 'w')
except OSError as e:
raise e
cur.execute('SELECT zoom_level, tile_row, tile_column FROM tiles')
tile_list = cur.fetchall()
for z in xrange(max_zoom, -1, -1):
max_tiles = int(math.pow(2, z))
for y in xrange(max_tiles):
for x in xrange(max_tiles):
if debug:
print 'Writing tile {0}/{1}/{2}'.format(z, y, x)
# MBTiles use Tile Map Service Specification for indices -- have to flip y-index
# http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
flipped_y = max_tiles - 1 - y
if (z, flipped_y, x) in tile_list:
cur.execute('SELECT tile_data FROM tiles WHERE zoom_level=? AND tile_row=? AND tile_column=?', (z, flipped_y, x))
tile_row = cur.fetchone()
tile_data = tile_row[0]
# Write tile data bytes to pvt
fout.write(tile_data)
tile_index = struct.pack('!QQ', offset, len(tile_data))
offset += len(tile_data)
else:
tile_index = notile
fidx.write(tile_index)
fidx.close()
fout.close()
if del_src:
os.remove(geojson_path)
if not debug:
os.remove(output_mbtiles_path)
# Now build the MRF XML
mrf_impl = xml.dom.minidom.getDOMImplementation()
mrf_dom = mrf_impl.createDocument(None, 'MRF_META', None)
mrf_meta = mrf_dom.documentElement
raster_node = mrf_dom.createElement('Raster')
# Create <Size> element
size_node = mrf_dom.createElement('Size')
size_node.setAttribute('x', str(int(math.pow(2, max_zoom) * 256)))
size_node.setAttribute('y', str(int(math.pow(2, max_zoom) * 256)))
size_node.setAttribute('c', str(1))
raster_node.appendChild(size_node)
# Create <PageSize> element
page_size_node = mrf_dom.createElement('PageSize')
page_size_node.setAttribute('x', str(256))
page_size_node.setAttribute('y', str(256))
page_size_node.setAttribute('c', str(1))
raster_node.appendChild(page_size_node)
# Create <Compression> element
compression_node = mrf_dom.createElement('Compression')
compression_value = mrf_dom.createTextNode('PBF')
compression_node.appendChild(compression_value)
raster_node.appendChild(compression_node)
# Add <DataValues> element
data_values_node = mrf_dom.createElement('DataValues')
data_values_node.setAttribute('NoData', '0')
raster_node.appendChild(data_values_node)
# Add <Quality> element
# quality_node = mrf_dom.createElement('Quality')
# quality_value = mrf_dom.createTextNode('80')
# quality_node.appendChild(quality_value)
# raster_node.appendChild(quality_node)
mrf_meta.appendChild(raster_node)
# Create <Rsets> element
rsets_node = mrf_dom.createElement('Rsets')
rsets_node.setAttribute('model', 'uniform')
mrf_meta.appendChild(rsets_node)
# Create <GeoTags> element
geotags_node = mrf_dom.createElement('GeoTags')
bbox_node = mrf_dom.createElement('BoundingBox')
bbox_node.setAttribute('minx', '-20037508.34000000')
bbox_node.setAttribute('miny', '-20037508.34000000')
bbox_node.setAttribute('maxx', '20037508.34000000')
bbox_node.setAttribute('maxy', '20037508.34000000')
geotags_node.appendChild(bbox_node)
projection_node = mrf_dom.createElement('Projection')
projection_text = mrf_dom.createTextNode('PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],AUTHORITY["EPSG","3857"]]')
projection_node.appendChild(projection_text)
geotags_node.appendChild(projection_node)
mrf_meta.appendChild(geotags_node)
fmrf.write(mrf_meta.toprettyxml())
fmrf.close()
return
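The debug switch at the start of this function is another small mkdtemp() idiom: keep intermediate artifacts next to the script when debugging, otherwise hide them in a throwaway directory. A minimal sketch:
import tempfile

def working_dir(debug):
    # Debug runs leave the intermediate .mbtiles where it is easy to inspect;
    # normal runs use a temporary directory that is removed afterwards.
    return './' if debug else tempfile.mkdtemp()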
0
Example 140
def configure_step(self):
"""Custom configuration procedure for NWChem."""
# check whether a (valid) symlink to a .nwchemrc config file exists (via a dummy file if necessary)
# fail early if the link is not what we expect, since running the test cases will likely fail in this case
try:
if os.path.exists(self.home_nwchemrc) or os.path.islink(self.home_nwchemrc):
# create a dummy file to check symlink
if not os.path.exists(self.local_nwchemrc):
write_file(self.local_nwchemrc, 'dummy')
self.log.debug("Contents of %s: %s", os.path.dirname(self.local_nwchemrc),
os.listdir(os.path.dirname(self.local_nwchemrc)))
if os.path.islink(self.home_nwchemrc) and not os.path.samefile(self.home_nwchemrc, self.local_nwchemrc):
raise EasyBuildError("Found %s, but it's not a symlink to %s. "
"Please (re)move %s while installing NWChem; it can be restored later",
self.home_nwchemrc, self.local_nwchemrc, self.home_nwchemrc)
# ok to remove, we'll recreate it anyway
os.remove(self.local_nwchemrc)
except (IOError, OSError), err:
raise EasyBuildError("Failed to validate %s symlink: %s", self.home_nwchemrc, err)
# building NWChem in a long path name is an issue, so let's try to make sure we have a short one
try:
# NWChem insists that version is in name of build dir
tmpdir = tempfile.mkdtemp(suffix='-%s-%s' % (self.name, self.version))
# remove created directory, since we're not going to use it as is
os.rmdir(tmpdir)
# avoid having '['/']' characters in build dir name, NWChem doesn't like that
start_dir = tmpdir.replace('[', '_').replace(']', '_')
mkdir(os.path.dirname(start_dir), parents=True)
os.symlink(self.cfg['start_dir'], start_dir)
os.chdir(start_dir)
self.cfg['start_dir'] = start_dir
except OSError, err:
raise EasyBuildError("Failed to symlink build dir to a shorter path name: %s", err)
# change to actual build dir
try:
os.chdir('src')
except OSError, err:
raise EasyBuildError("Failed to change to build dir: %s", err)
nwchem_modules = self.cfg['modules']
# set required NWChem environment variables
env.setvar('NWCHEM_TOP', self.cfg['start_dir'])
if len(self.cfg['start_dir']) > 64:
# workaround for:
# "The directory name chosen for NWCHEM_TOP is longer than the maximum allowed value of 64 characters"
# see also https://svn.pnl.gov/svn/nwchem/trunk/src/util/util_nwchem_srcdir.F
self.setvar_env_makeopt('NWCHEM_LONG_PATHS', 'Y')
env.setvar('NWCHEM_TARGET', self.cfg['target'])
env.setvar('MSG_COMMS', self.cfg['msg_comms'])
env.setvar('ARMCI_NETWORK', self.cfg['armci_network'])
if self.cfg['armci_network'] in ["OPENIB"]:
env.setvar('IB_INCLUDE', "/usr/include")
env.setvar('IB_LIB', "/usr/lib64")
env.setvar('IB_LIB_NAME', "-libumad -libverbs -lpthread")
if 'python' in self.cfg['modules']:
python_root = get_software_root('Python')
if not python_root:
raise EasyBuildError("Python module not loaded, you should add Python as a dependency.")
env.setvar('PYTHONHOME', python_root)
pyver = '.'.join(get_software_version('Python').split('.')[0:2])
env.setvar('PYTHONVERSION', pyver)
# if libreadline is loaded, assume it was a dependency for Python
# pass -lreadline to avoid linking issues (libpython2.7.a doesn't include readline symbols)
libreadline = get_software_root('libreadline')
if libreadline:
libreadline_libdir = os.path.join(libreadline, get_software_libdir('libreadline'))
ncurses = get_software_root('ncurses')
if not ncurses:
raise EasyBuildError("ncurses is not loaded, but required to link with libreadline")
ncurses_libdir = os.path.join(ncurses, get_software_libdir('ncurses'))
readline_libs = ' '.join([
os.path.join(libreadline_libdir, 'libreadline.a'),
os.path.join(ncurses_libdir, 'libcurses.a'),
])
extra_libs = os.environ.get('EXTRA_LIBS', '')
env.setvar('EXTRA_LIBS', ' '.join([extra_libs, readline_libs]))
env.setvar('LARGE_FILES', 'TRUE')
env.setvar('USE_NOFSCHECK', 'TRUE')
env.setvar('CCSDTLR', 'y') # enable CCSDTLR
env.setvar('CCSDTQ', 'y') # enable CCSDTQ (compilation is long, executable is big)
if LooseVersion(self.version) >= LooseVersion("6.2"):
env.setvar('MRCC_METHODS','y') # enable multireference coupled cluster capability
if LooseVersion(self.version) >= LooseVersion("6.5"):
env.setvar('EACCSD','y') # enable EOM electron-attachment coupled cluster capability
env.setvar('IPCCSD','y') # enable EOM ionization-potential coupled cluster capability
for var in ['USE_MPI', 'USE_MPIF', 'USE_MPIF4']:
env.setvar(var, 'y')
for var in ['CC', 'CXX', 'F90']:
env.setvar('MPI_%s' % var, os.getenv('MPI%s' % var))
env.setvar('MPI_LOC', os.path.dirname(os.getenv('MPI_INC_DIR')))
env.setvar('MPI_LIB', os.getenv('MPI_LIB_DIR'))
env.setvar('MPI_INCLUDE', os.getenv('MPI_INC_DIR'))
libmpi = None
mpi_family = self.toolchain.mpi_family()
if mpi_family in toolchain.OPENMPI:
libmpi = "-lmpi_f90 -lmpi_f77 -lmpi -ldl -Wl,--export-dynamic -lnsl -lutil"
elif mpi_family in [toolchain.INTELMPI]:
if self.cfg['armci_network'] in ["MPI-MT"]:
libmpi = "-lmpigf -lmpigi -lmpi_ilp64 -lmpi_mt"
else:
libmpi = "-lmpigf -lmpigi -lmpi_ilp64 -lmpi"
elif mpi_family in [toolchain.MPICH, toolchain.MPICH2]:
libmpi = "-lmpichf90 -lmpich -lopa -lmpl -lrt -lpthread"
else:
raise EasyBuildError("Don't know how to set LIBMPI for %s", mpi_family)
env.setvar('LIBMPI', libmpi)
# compiler optimization flags: set environment variables _and_ add them to list of make options
self.setvar_env_makeopt('COPTIMIZE', os.getenv('CFLAGS'))
self.setvar_env_makeopt('FOPTIMIZE', os.getenv('FFLAGS'))
# BLAS and ScaLAPACK
self.setvar_env_makeopt('BLASOPT', '%s -L%s %s %s' % (os.getenv('LDFLAGS'), os.getenv('MPI_LIB_DIR'),
os.getenv('LIBSCALAPACK_MT'), libmpi))
self.setvar_env_makeopt('SCALAPACK', '%s %s' % (os.getenv('LDFLAGS'), os.getenv('LIBSCALAPACK_MT')))
if self.toolchain.options['i8']:
size = 8
self.setvar_env_makeopt('USE_SCALAPACK_I8', 'y')
self.cfg.update('lib_defines', '-DSCALAPACK_I8')
else:
self.setvar_env_makeopt('HAS_BLAS', 'yes')
self.setvar_env_makeopt('USE_SCALAPACK', 'y')
size = 4
# set sizes
for lib in ['BLAS', 'LAPACK', 'SCALAPACK']:
self.setvar_env_makeopt('%s_SIZE' % lib, str(size))
env.setvar('NWCHEM_MODULES', nwchem_modules)
env.setvar('LIB_DEFINES', self.cfg['lib_defines'])
# clean first (why not)
run_cmd("make clean", simple=True, log_all=True, log_ok=True)
# configure build
cmd = "make %s nwchem_config" % self.cfg['buildopts']
run_cmd(cmd, simple=True, log_all=True, log_ok=True, log_output=True)
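The build-directory trick near the top of this example is worth calling out: mkdtemp() is used only to reserve a short, unique path name, after which the empty directory is removed and replaced with a symlink to the real (long) build directory. A hypothetical distillation:
import os
import tempfile

def short_build_alias(real_build_dir, name, version):
    # Reserve a short unique name under the system temp dir ...
    tmpdir = tempfile.mkdtemp(suffix='-%s-%s' % (name, version))
    os.rmdir(tmpdir)  # ... then drop the directory itself,
    alias = tmpdir.replace('[', '_').replace(']', '_')  # avoid characters NWChem dislikes
    os.symlink(real_build_dir, alias)  # and point the short name at the real build dir.
    return alias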
0
Example 141
Project: rez Source File: pip.py
def pip_install_package(source_name, pip_version=None, python_version=None,
mode=InstallMode.min_deps, release=False):
"""Install a pip-compatible python package as a rez package.
Args:
source_name (str): Name of package or archive/url containing the pip
package source. This is the same as the arg you would pass to
the 'pip install' command.
pip_version (str or `Version`): Version of pip to use to perform the
install, uses latest if None.
python_version (str or `Version`): Python version to use to perform the
install, and subsequently have the resulting rez package depend on.
mode (`InstallMode`): Installation mode, determines how dependencies are
managed.
release (bool): If True, install as a released package; otherwise, it
will be installed as a local package.
Returns:
2-tuple:
List of `Variant`: Installed variants;
List of `Variant`: Skipped variants (already installed).
"""
installed_variants = []
skipped_variants = []
pip_exe, context = find_pip(pip_version, python_version)
# TODO: should check if packages_path is writable before continuing with pip
#
packages_path = (config.release_packages_path if release
else config.local_packages_path)
tmpdir = mkdtemp(suffix="-rez", prefix="pip-")
stagingdir = os.path.join(tmpdir, "rez_staging")
stagingsep = "".join([os.path.sep, "rez_staging", os.path.sep])
destpath = os.path.join(stagingdir, "python")
binpath = os.path.join(stagingdir, "bin")
incpath = os.path.join(stagingdir, "include")
datapath = stagingdir
if context and config.debug("package_release"):
buf = StringIO()
print >> buf, "\n\npackage download environment:"
context.print_info(buf)
_log(buf.getvalue())
# Build pip commandline
cmd = [pip_exe, "install",
"--install-option=--install-lib=%s" % destpath,
"--install-option=--install-scripts=%s" % binpath,
"--install-option=--install-headers=%s" % incpath,
"--install-option=--install-data=%s" % datapath]
if mode == InstallMode.no_deps:
cmd.append("--no-deps")
cmd.append(source_name)
_cmd(context=context, command=cmd)
_system = System()
# Collect resulting python packages using distlib
distribution_path = DistributionPath([destpath], include_egg=True)
distributions = [d for d in distribution_path.get_distributions()]
for distribution in distribution_path.get_distributions():
requirements = []
if distribution.metadata.run_requires:
# Handle requirements. Currently handles conditional environment based
# requirements and normal requirements
# TODO: Handle optional requirements?
for requirement in distribution.metadata.run_requires:
if "environment" in requirement:
if interpret(requirement["environment"]):
requirements.extend(_get_dependencies(requirement, distributions))
elif "extra" in requirement:
# Currently ignoring optional requirements
pass
else:
requirements.extend(_get_dependencies(requirement, distributions))
tools = []
src_dst_lut = {}
for installed_file in distribution.list_installed_files(allow_fail=True):
source_file = os.path.normpath(os.path.join(destpath, installed_file[0]))
if os.path.exists(source_file):
destination_file = installed_file[0].split(stagingsep)[1]
exe = False
if is_exe(source_file) and \
destination_file.startswith("%s%s" % ("bin", os.path.sep)):
_, _file = os.path.split(destination_file)
tools.append(_file)
exe = True
data = [destination_file, exe]
src_dst_lut[source_file] = data
else:
_log("Source file does not exist: " + source_file + "!")
def make_root(variant, path):
"""Using distlib to iterate over all installed files of the current
distribution to copy files to the target directory of the rez package
variant
"""
for source_file, data in src_dst_lut.items():
destination_file, exe = data
destination_file = os.path.normpath(os.path.join(path, destination_file))
if not os.path.exists(os.path.dirname(destination_file)):
os.makedirs(os.path.dirname(destination_file))
shutil.copyfile(source_file, destination_file)
if exe:
shutil.copystat(source_file, destination_file)
# determine variant requirements
# TODO detect if platform/arch/os necessary, no if pure python
variant_reqs = []
variant_reqs.append("platform-%s" % _system.platform)
variant_reqs.append("arch-%s" % _system.arch)
variant_reqs.append("os-%s" % _system.os)
if context is None:
# since we had to use system pip, we have to assume system python version
py_ver = '.'.join(map(str, sys.version_info[:2]))
else:
python_variant = context.get_resolved_package("python")
py_ver = python_variant.version.trim(2)
variant_reqs.append("python-%s" % py_ver)
name, _ = parse_name_and_version(distribution.name_and_version)
name = distribution.name[0:len(name)].replace("-", "_")
with make_package(name, packages_path, make_root=make_root) as pkg:
pkg.version = distribution.version
if distribution.metadata.summary:
pkg.description = distribution.metadata.summary
pkg.variants = [variant_reqs]
if requirements:
pkg.requires = requirements
commands = []
commands.append("env.PYTHONPATH.append('{root}/python')")
if tools:
pkg.tools = tools
commands.append("env.PATH.append('{root}/bin')")
pkg.commands = '\n'.join(commands)
installed_variants.extend(pkg.installed_variants or [])
skipped_variants.extend(pkg.skipped_variants or [])
# cleanup
shutil.rmtree(tmpdir)
return installed_variants, skipped_variants
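The staging layout above starts from a mkdtemp() call that uses both prefix and suffix, which makes the directory easy to recognise in the system temp dir; a small sketch of just that naming step:
import os
import tempfile

tmpdir = tempfile.mkdtemp(suffix="-rez", prefix="pip-")
# e.g. /tmp/pip-XXXXXX-rez: the random part sits between prefix and suffix.
stagingdir = os.path.join(tmpdir, "rez_staging")
os.makedirs(os.path.join(stagingdir, "python"))
os.makedirs(os.path.join(stagingdir, "bin"))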
0
Example 142
Project: ops-server-config Source File: ValidateHostedServiceSources.py
def main():
exit_err_code = 1
# Print/get script arguments
results = print_args()
if not results:
sys.exit(exit_err_code)
portal_address, adminuser, password = results
total_success = True
title_break_count = 100
section_break_count = 75
search_query = None
print '=' * title_break_count
print 'Validate Hosted Service Sources'
print '=' * title_break_count
source_items = []
hosted_items = []
root_folder_path = None
root_folder_path = tempfile.mkdtemp()
print 'Temporary directory: {}'.format(root_folder_path)
orig_dir = os.getcwd()
try:
portal = Portal(portal_address, adminuser, password)
items = portal.search()
# ---------------------------------------------------------------------
# Get info about hosted service source items
# (currently service definitions)
# ---------------------------------------------------------------------
for item in items:
if item['type'] == 'Service Definition':
print '\nDownloading and extracting Service Definition item {}'.format(item['id'])
# Download .sd file
download_root_path = os.path.join(root_folder_path, item['id'])
os.mkdir(download_root_path)
download_path = portal.item_datad(item['id'], download_root_path)
# Extract serviceconfiguration.json file from downloaded .sd file
file_name = 'serviceconfiguration.json'
extract_path = download_path.replace('.sd', '')
#print extract_path
os.mkdir(extract_path)
err_stat = extractFromSDFile(download_path, extract_path, file_name)
print 'Extract status: {}'.format(err_stat)
# Open extract .json file
file_path = findFilePath(extract_path, file_name)
os.chdir(os.path.dirname(file_path))
service_config = json.load(open(file_name))
# [{id: val, owner: val, title: val, type: val
# service_config: {stuff from .json file}}]
d = {
'id': item['id'],
'owner': item['owner'],
'title': item['title'],
'type': item['type'],
'service_config': service_config
}
source_items.append(d)
# ---------------------------------------------------------------------
# Get info about hosted service items
# ---------------------------------------------------------------------
print '\nDetermine what hosted services exist...'
h_service_items = get_hosted_service_items(portal, items)
for item in h_service_items:
d = {
'id': item['id'],
'owner': item['owner'],
'title': item['title'],
'type': item['type'],
'url': item['url']
}
hosted_items.append(d)
# ---------------------------------------------------------------------
# For each hosted service find the associated source item
# ---------------------------------------------------------------------
print '=' * section_break_count
print '\nDetermine which source items are associated with each hosted service...'
print '=' * section_break_count
num_hosted_no_match = 0
num_hosted_match = 0
num_hosted_mismatch_owner = 0
write_str = "\tid: {:<34}owner: {:<20}type: {:<25}service: {:<50}\n"
for hosted_d in hosted_items:
found = False
found_num = 0
# Get last components of URL (i.e., SRTM_V2_56020/FeatureServer)
hosted_url = '/'.join(hosted_d['url'].split('/')[-2:])
print '\n{}'.format('-' * 100)
print 'Hosted Service Item: Title - "{}"\n'.format(hosted_d['title'])
hosted_str = write_str.format(
hosted_d['id'],
hosted_d['owner'],
hosted_d['type'],
hosted_url)
print hosted_str
# Look for match in source items
print '\tMatching Source Item:'
for source_d in source_items:
src_service_info = source_d['service_config']['service']
src_service_name = src_service_info['serviceName']
src_service_type = src_service_info['type']
src_service_url = '{}/{}'.format(src_service_name, src_service_type)
if hosted_url == src_service_url:
found = True
found_num += 1
match_str = write_str.format(
source_d['id'],
source_d['owner'],
source_d['type'],
src_service_url)
print '\n\tTitle: "{}"'.format(source_d['title'])
print match_str
if source_d['owner'] != hosted_d['owner']:
print '*** ERROR: owner does not match hosted service item owner.'
num_hosted_mismatch_owner += 1
if found_num == 0:
print '*** ERROR: no matching source item found.'
if found_num > 1:
print '*** ERROR: there is more than one matching source item found.'
if found:
num_hosted_match += 1
else:
num_hosted_no_match += 1
# ---------------------------------------------------------------------
# For each source item find the associated hosted service
# ---------------------------------------------------------------------
print '=' * section_break_count
print '\nDetermine which hosted services are associated with each source item...'
print '=' * section_break_count
num_source_no_match = 0
num_source_match = 0
num_source_mismatch_owner = 0
write_str = "\tid: {:<34}owner: {:<20}type: {:<25}service: {:<50}\n"
for source_d in source_items:
found = False
found_num = 0
src_service_info = source_d['service_config']['service']
src_service_name = src_service_info['serviceName']
src_service_type = src_service_info['type']
src_service_url = '{}/{}'.format(src_service_name, src_service_type)
print '\n{}'.format('-' * 100)
print 'Source Item: Title - "{}"\n'.format(source_d['title'])
source_str = write_str.format(
source_d['id'],
source_d['owner'],
source_d['type'],
src_service_url)
print source_str
# Look for match in source items
print '\tMatching Hosted Service:'
for hosted_d in hosted_items:
# Get last components of URL (i.e., SRTM_V2_56020/FeatureServer)
hosted_url = '/'.join(hosted_d['url'].split('/')[-2:])
if hosted_url == src_service_url:
found = True
found_num += 1
match_str = write_str.format(
hosted_d['id'],
hosted_d['owner'],
hosted_d['type'],
hosted_url)
print '\n\tTitle: "{}"'.format(hosted_d['title'])
print match_str
if hosted_d['owner'] != source_d['owner']:
print '*** ERROR: owner does not match associated source owner.'
num_source_mismatch_owner += 1
if found_num == 0:
print '*** ERROR: no matching hosted service found.'
if found_num > 1:
print '*** ERROR: there is more than one hosted service found.'
if found:
num_source_match += 1
else:
num_source_no_match += 1
print '\n{}'.format('=' * section_break_count)
print 'Summary:\n'
print 'Number of hosted services: {}'.format(len(hosted_items))
print 'With matching source item: {}'.format(num_hosted_match)
print 'With NO matching source item: {}'.format(num_hosted_no_match)
print 'With mis-matching owners: {}'.format(num_hosted_mismatch_owner)
print '\nNumber of source items: {}'.format(len(source_items))
print 'With matching hosted service: {}'.format(num_source_match)
print 'With NO matching hosted service: {}'.format(num_source_no_match)
print 'With mis-matching owners: {}'.format(num_source_mismatch_owner)
except:
total_success = False
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error
# into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + \
"\nError Info:\n" + str(sys.exc_info()[1])
# Print Python error messages for use in Python / Python Window
print
print "cuem* ERROR ENCOUNTERED *****"
print pymsg + "\n"
finally:
os.chdir(orig_dir)
if root_folder_path:
shutil.rmtree(root_folder_path)
print '\nDone.'
if total_success:
sys.exit(0)
else:
sys.exit(exit_err_code)
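This example shows another common shape: one mkdtemp() root, a per-item subdirectory for each download, and a finally block that restores the working directory and removes the whole root. A hedged, simplified sketch:
import os
import shutil
import tempfile

def process_items(item_ids, handle):
    root_folder_path = tempfile.mkdtemp()
    orig_dir = os.getcwd()
    try:
        for item_id in item_ids:
            download_root_path = os.path.join(root_folder_path, item_id)
            os.mkdir(download_root_path)      # one subdirectory per item
            handle(item_id, download_root_path)
    finally:
        os.chdir(orig_dir)                    # in case handle() changed directory
        shutil.rmtree(root_folder_path)       # all downloaded data goes away together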
0
Example 143
Project: dipy Source File: setup_helpers.py
def add_flag_checking(build_ext_class, flag_defines, top_package_dir=''):
""" Override input `build_ext_class` to check compiler `flag_defines`
Parameters
----------
build_ext_class : class
Class implementing ``distutils.command.build_ext.build_ext`` interface,
with a ``build_extensions`` method.
flag_defines : sequence
A sequence of elements, where the elements are sequences of length 4
consisting of (``compile_flags``, ``link_flags``, ``code``,
``defvar``). ``compile_flags`` is a sequence of compiler flags;
``link_flags`` is a sequence of linker flags. We
check ``compile_flags`` to see whether a C source string ``code`` will
compile, and ``link_flags`` to see whether the resulting object file
will link. If both compile and link works, we add ``compile_flags`` to
``extra_compile_args`` and ``link_flags`` to ``extra_link_args`` of
each extension when we build the extensions. If ``defvar`` is not
None, it is the name of C variable to be defined in ``build/config.h``
with 1 if the combination of (``compile_flags``, ``link_flags``,
``code``) will compile and link, 0 otherwise. If None, do not write
variable.
top_package_dir : str
String giving name of top-level package, for writing Python file
containing configuration variables. If empty, do not write this file.
Variables written are the same as the Cython variables generated via
the `flag_defines` setting.
Returns
-------
checker_class : class
A class with similar interface to
``distutils.command.build_ext.build_ext``, that adds all working
``compile_flags`` values to the ``extra_compile_args`` and working
``link_flags`` to ``extra_link_args`` attributes of extensions, before
compiling.
"""
class Checker(build_ext_class):
flag_defs = tuple(flag_defines)
def can_compile_link(self, compile_flags, link_flags, code):
cc = self.compiler
fname = 'test.c'
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
with open(fname, 'wt') as fobj:
fobj.write(code)
try:
objects = cc.compile([fname],
extra_postargs=compile_flags)
except CompileError:
return False
try:
# Link shared lib rather than executable to avoid
# http://bugs.python.org/issue4431 with MSVC 10+
cc.link_shared_lib(objects, "testlib",
extra_postargs=link_flags)
except (LinkError, TypeError):
return False
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
return True
def build_extensions(self):
""" Hook into extension building to check compiler flags """
def_vars = []
good_compile_flags = []
good_link_flags = []
config_dir = dirname(CONFIG_H)
for compile_flags, link_flags, code, def_var in self.flag_defs:
compile_flags = list(compile_flags)
link_flags = list(link_flags)
flags_good = self.can_compile_link(compile_flags,
link_flags,
code)
if def_var:
def_vars.append((def_var, flags_good))
if flags_good:
good_compile_flags += compile_flags
good_link_flags += link_flags
else:
log.warn("Flags {0} omitted because of compile or link "
"error".format(compile_flags + link_flags))
if def_vars: # write config.h file
if not exists(config_dir):
self.mkpath(config_dir)
with open(CONFIG_H, 'wt') as fobj:
fobj.write('/* Automatically generated; do not edit\n')
fobj.write(' C defines from build-time checks */\n')
for v_name, v_value in def_vars:
fobj.write('int {0} = {1};\n'.format(
v_name, 1 if v_value else 0))
if def_vars and top_package_dir: # write __config__.py file
config_py_dir = (top_package_dir if self.inplace else
pjoin(self.build_lib, top_package_dir))
if not exists(config_py_dir):
self.mkpath(config_py_dir)
config_py = pjoin(config_py_dir, CONFIG_PY)
with open(config_py, 'wt') as fobj:
fobj.write('# Automatically generated; do not edit\n')
fobj.write('# Variables from compile checks\n')
for v_name, v_value in def_vars:
fobj.write('{0} = {1}\n'.format(v_name, v_value))
if def_vars or good_compile_flags or good_link_flags:
for ext in self.extensions:
ext.extra_compile_args += good_compile_flags
ext.extra_link_args += good_link_flags
if def_vars:
ext.include_dirs.append(config_dir)
build_ext_class.build_extensions(self)
return Checker
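Based only on the docstring above, a hypothetical usage sketch: check whether -fopenmp compiles and links, and record the outcome in a C variable named HAVE_OPENMP (the flag, the variable name, and the package dir are assumptions, not taken from dipy):
from distutils.command.build_ext import build_ext

flag_defines = [
    # (compile_flags, link_flags, code, defvar)
    (['-fopenmp'], ['-fopenmp'], 'int main(void) { return 0; }\n', 'HAVE_OPENMP'),
]
CheckedBuildExt = add_flag_checking(build_ext, flag_defines, 'mypkg')
# Then pass cmdclass={'build_ext': CheckedBuildExt} to setup(); working flags
# are added to each extension, and build/config.h gets HAVE_OPENMP set to 0 or 1.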
0
Example 144
Project: inasafe Source File: default.py
def qgis_composer_renderer(impact_report, component):
"""
Render using qgis composer for a given impact_report data and component
context
:param impact_report: ImpactReport contains data about the report that is
going to be generated
:type impact_report: safe.reportv4.impact_report.ImpactReport
:param component: Contains the component metadata and context for
rendering the output
:type component:
safe.reportv4.report_metadata.QgisComposerComponentsMetadata
:return: whatever type of output the component should be
"""
context = component.context
""":type: safe.reportv4.extractors.composer.QGISComposerContext"""
qgis_composition_context = impact_report.qgis_composition_context
inasafe_context = impact_report.inasafe_context
# load composition object
composition = QgsComposition(qgis_composition_context.map_settings)
# load template
main_template_folder = impact_report.metadata.template_folder
template_path = os.path.join(main_template_folder, component.template)
with open(template_path) as template_file:
template_content = template_file.read()
document = QtXml.QDomDocument()
document.setContent(template_content)
load_status = composition.loadFromTemplate(
document, context.substitution_map)
if not load_status:
raise TemplateLoadingError(
tr('Error loading template: %s') % template_path)
# replace image path
for img in context.image_elements:
item_id = img.get('id')
path = img.get('path')
image = composition.getComposerItemById(item_id)
""":type: qgis.core.QgsComposerPicture"""
if image is not None and path is not None:
try:
image.setPictureFile(path)
except:
pass
# replace html frame
for html_el in context.html_frame_elements:
item_id = html_el.get('id')
mode = html_el.get('mode')
html_element = composition.getComposerItemById(item_id)
""":type: qgis.core.QgsComposerHtml"""
if html_element:
if mode == 'text':
text = html_el.get('text')
text = text if text else ''
html_element.setContentMode(QgsComposerHtml.ManualHtml)
html_element.setHtml(text)
html_element.loadHtml()
elif mode == 'url':
url = html_el.get('url')
html_element.setContentMode(QgsComposerHtml.Url)
qurl = QUrl.fromLocalFile(url)
html_element.setUrl(qurl)
# resize map extent
for map_el in context.map_elements:
item_id = map_el.get('id')
extent = map_el.get('extent')
split_count = map_el.get('grid_split_count')
composer_map = composition.getComposerItemById(item_id)
""":type: qgis.core.QgsComposerMap"""
if composer_map:
canvas_extent = extent
width = canvas_extent.width()
height = canvas_extent.height()
longest_width = width if width > height else height
half_length = longest_width / 2
margin = half_length / 5
center = canvas_extent.center()
min_x = center.x() - half_length - margin
max_x = center.x() + half_length + margin
min_y = center.y() - half_length - margin
max_y = center.y() + half_length + margin
# noinspection PyCallingNonCallable
square_extent = QgsRectangle(min_x, min_y, max_x, max_y)
composer_map.zoomToExtent(square_extent)
composer_map.renderModeUpdateCachedImage()
actual_extent = composer_map.extent()
# calculate intervals for grid
x_interval = actual_extent.width() / split_count
composer_map.setGridIntervalX(x_interval)
y_interval = actual_extent.height() / split_count
composer_map.setGridIntervalY(y_interval)
# calculate legend element
for leg_el in context.map_legends:
item_id = leg_el.get('id')
title = leg_el.get('title')
layers = leg_el.get('layers')
symbol_count = leg_el.get('symbol_count')
column_count = leg_el.get('column_count')
legend = composition.getComposerItemById(item_id)
if legend:
# set column count
if column_count:
legend.setColumnCount(column_count)
elif symbol_count <= 5:
legend.setColumnCount(1)
else:
legend.setColumnCount(symbol_count / 5 + 1)
# set legend title
if title is not None:
legend.setTitle(title)
# set legend
root_group = legend.modelV2().rootGroup()
for l in layers:
root_group.addLayer(l)
legend.synchronizeWithModel()
# process to output
# in case output folder not specified
if impact_report.output_folder is None:
impact_report.output_folder = mkdtemp(dir=temp_dir())
output_path = os.path.join(
impact_report.output_folder, component.output_path)
# make sure directory is created
dirname = os.path.dirname(output_path)
if not os.path.exists(dirname):
os.makedirs(dirname, exist_ok=True)
output_format = component.output_format
# for QGIS composer only pdf and png output are available
if output_format == 'pdf':
try:
composition.setPlotStyle(
impact_report.qgis_composition_context.plot_style)
composition.setPrintResolution(component.page_dpi)
composition.setPaperSize(
component.page_width, component.page_height)
composition.setPrintAsRaster(
impact_report.qgis_composition_context.save_as_raster)
composition.exportAsPDF(output_path)
component.output = output_path
except Exception as exc:
LOGGER.error(exc)
return component.output
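The only mkdtemp() use in this example is the output-folder fallback near the end: when no impact_report.output_folder is supplied, a fresh directory is created under InaSAFE's temp_dir(). A minimal sketch of that fallback, with a plain base_dir argument standing in for the temp_dir() helper (resolve_output_folder is an illustrative name, not an InaSAFE function):

import os
import tempfile

def resolve_output_folder(output_folder=None, base_dir=None):
    """Return the folder report output should go to, creating a fresh
    temporary directory when the caller did not supply one."""
    if output_folder is None:
        # dir=base_dir keeps the scratch folder under an app-specific
        # location; with base_dir=None mkdtemp() uses the system temp dir.
        output_folder = tempfile.mkdtemp(dir=base_dir)
    return output_folder

folder = resolve_output_folder()
print(os.path.isdir(folder))  # True; the directory already exists on disk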
0
Example 145
Project: portage Source File: AbstractEbuildProcess.py
def _start(self):
need_builddir = self.phase not in self._phases_without_builddir
# This can happen if the pre-clean phase triggers
# die_hooks for some reason, and PORTAGE_BUILDDIR
# doesn't exist yet.
if need_builddir and \
not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
msg = _("The ebuild phase '%s' has been aborted "
"since PORTAGE_BUILDDIR does not exist: '%s'") % \
(self.phase, self.settings['PORTAGE_BUILDDIR'])
self._eerror(textwrap.wrap(msg, 72))
self._set_returncode((self.pid, 1 << 8))
self._async_wait()
return
# Check if the cgroup hierarchy is in place. If it's not, mount it.
if (os.geteuid() == 0 and platform.system() == 'Linux'
and 'cgroup' in self.settings.features
and self.phase not in self._phases_without_cgroup):
cgroup_root = '/sys/fs/cgroup'
cgroup_portage = os.path.join(cgroup_root, 'portage')
try:
# cgroup tmpfs
if not os.path.ismount(cgroup_root):
# we expect /sys/fs to be there already
if not os.path.isdir(cgroup_root):
os.mkdir(cgroup_root, 0o755)
subprocess.check_call(['mount', '-t', 'tmpfs',
'-o', 'rw,nosuid,nodev,noexec,mode=0755',
'tmpfs', cgroup_root])
# portage subsystem
if not os.path.ismount(cgroup_portage):
if not os.path.isdir(cgroup_portage):
os.mkdir(cgroup_portage, 0o755)
subprocess.check_call(['mount', '-t', 'cgroup',
'-o', 'rw,nosuid,nodev,noexec,none,name=portage',
'tmpfs', cgroup_portage])
with open(os.path.join(
cgroup_portage, 'release_agent'), 'w') as f:
f.write(os.path.join(self.settings['PORTAGE_BIN_PATH'],
'cgroup-release-agent'))
with open(os.path.join(
cgroup_portage, 'notify_on_release'), 'w') as f:
f.write('1')
else:
# Update release_agent if it no longer exists, because
# it refers to a temporary path when portage is updating
# itself.
release_agent = os.path.join(
cgroup_portage, 'release_agent')
try:
with open(release_agent) as f:
release_agent_path = f.readline().rstrip('\n')
except EnvironmentError:
release_agent_path = None
if (release_agent_path is None or
not os.path.exists(release_agent_path)):
with open(release_agent, 'w') as f:
f.write(os.path.join(
self.settings['PORTAGE_BIN_PATH'],
'cgroup-release-agent'))
cgroup_path = tempfile.mkdtemp(dir=cgroup_portage,
prefix='%s:%s.' % (self.settings["CATEGORY"],
self.settings["PF"]))
except (subprocess.CalledProcessError, OSError):
pass
else:
self.cgroup = cgroup_path
if self.background:
# Automatically prevent color codes from showing up in logs,
# since we're not displaying to a terminal anyway.
self.settings['NOCOLOR'] = 'true'
if self._enable_ipc_daemon:
self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
if self.phase not in self._phases_without_builddir:
if 'PORTAGE_BUILDDIR_LOCKED' not in self.settings:
self._build_dir = EbuildBuildDir(
scheduler=self.scheduler, settings=self.settings)
self._build_dir.lock()
self.settings['PORTAGE_IPC_DAEMON'] = "1"
self._start_ipc_daemon()
else:
self.settings.pop('PORTAGE_IPC_DAEMON', None)
else:
# Since the IPC daemon is disabled, use a simple tempfile based
# approach to detect unexpected exit like in bug #190128.
self.settings.pop('PORTAGE_IPC_DAEMON', None)
if self.phase not in self._phases_without_builddir:
exit_file = os.path.join(
self.settings['PORTAGE_BUILDDIR'],
'.exit_status')
self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file
try:
os.unlink(exit_file)
except OSError:
if os.path.exists(exit_file):
# make sure it doesn't exist
raise
else:
self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
if self.fd_pipes is None:
self.fd_pipes = {}
null_fd = None
if 0 not in self.fd_pipes and \
self.phase not in self._phases_interactive_whitelist and \
"interactive" not in self.settings.get("PROPERTIES", "").split():
null_fd = os.open('/dev/null', os.O_RDONLY)
self.fd_pipes[0] = null_fd
try:
SpawnProcess._start(self)
finally:
if null_fd is not None:
os.close(null_fd)
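The mkdtemp() call in this example builds a uniquely named cgroup directory: prefix= embeds the package category and name, and dir= places it under the portage cgroup hierarchy. A minimal sketch of that naming pattern, assuming nothing portage-specific (category and pf are made-up values, and the parent is the current directory rather than /sys/fs/cgroup/portage):

import tempfile

category, pf = 'dev-lang', 'python-3.11.4'  # stand-ins for CATEGORY / PF

# mkdtemp() guarantees a unique, race-free directory; prefix= makes the
# name self-describing and dir= chooses the parent it is created under.
# The colon in the name is fine on Linux, where portage runs.
cgroup_path = tempfile.mkdtemp(dir='.', prefix='%s:%s.' % (category, pf))
print(cgroup_path)  # e.g. ./dev-lang:python-3.11.4.x7k2q9

Because mkdtemp() never reuses an existing name, two builds of the same package can run concurrently without their cgroup directories colliding.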
0
Example 146
Project: stalker Source File: test_review.py
def setUp(self):
"""set up the test
"""
db.setup()
db.init()
self.user1 = User(
name='Test User 1',
login='test_user1',
email='[email protected]',
password='secret'
)
DBSession.add(self.user1)
self.user2 = User(
name='Test User 2',
login='test_user2',
email='[email protected]',
password='secret'
)
DBSession.add(self.user2)
self.user3 = User(
name='Test User 3',
login='test_user3',
email='[email protected]',
password='secret'
)
DBSession.add(self.user3)
# Review Statuses
self.status_new = Status.query.filter_by(code='NEW').first()
self.status_rrev = Status.query.filter_by(code='RREV').first()
self.status_app = Status.query.filter_by(code='APP').first()
# Task Statuses
self.status_wfd = Status.query.filter_by(code='WFD').first()
self.status_rts = Status.query.filter_by(code='RTS').first()
self.status_wip = Status.query.filter_by(code='WIP').first()
self.status_prev = Status.query.filter_by(code='PREV').first()
self.status_hrev = Status.query.filter_by(code='HREV').first()
self.status_drev = Status.query.filter_by(code='DREV').first()
self.status_cmpl = Status.query.filter_by(code='CMPL').first()
self.project_status_list = StatusList(
target_entity_type='Project',
statuses=[
self.status_new, self.status_wip, self.status_cmpl
]
)
DBSession.add(self.project_status_list)
self.temp_path = tempfile.mkdtemp()
self.repo = Repository(
name='Test Repository',
linux_path=self.temp_path,
windows_path=self.temp_path,
osx_path=self.temp_path
)
DBSession.add(self.repo)
self.structure = Structure(
name='Test Project Structure'
)
DBSession.add(self.structure)
self.project = Project(
name='Test Project',
code='TP',
status_list=self.project_status_list,
repository=self.repo
)
DBSession.add(self.project)
self.task1 = Task(
name='Test Task 1',
project=self.project,
resources=[self.user1],
responsible=[self.user2]
)
DBSession.add(self.task1)
self.task2 = Task(
name='Test Task 2',
project=self.project,
responsible=[self.user1]
)
DBSession.add(self.task2)
self.task3 = Task(
name='Test Task 3',
parent=self.task2,
resources=[self.user1]
)
DBSession.add(self.task3)
self.task4 = Task(
name='Test Task 4',
project=self.project,
resources=[self.user1],
depends=[self.task3],
responsible=[self.user2],
schedule_timing=2,
schedule_unit='h'
)
DBSession.add(self.task4)
self.task5 = Task(
name='Test Task 5',
project=self.project,
resources=[self.user2],
depends=[self.task3],
responsible=[self.user2],
schedule_timing=2,
schedule_unit='h'
)
DBSession.add(self.task5)
self.task6 = Task(
name='Test Task 6',
project=self.project,
resources=[self.user3],
depends=[self.task3],
responsible=[self.user2],
schedule_timing=2,
schedule_unit='h'
)
DBSession.add(self.task6)
self.kwargs = {
'task': self.task1,
'reviewer': self.user1
}
#self.review = Review(**self.kwargs)
#DBSession.add(self.review)
# add everything to the db
DBSession.commit()
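setUp() above creates the repository path with tempfile.mkdtemp() but does not remove it; the matching cleanup normally lives in tearDown() or an addCleanup() hook (not shown in this listing). A minimal sketch of that pairing, assuming only unittest and shutil:

import os
import shutil
import tempfile
import unittest

class RepositoryPathTestCase(unittest.TestCase):
    def setUp(self):
        # one throwaway directory per test, used as a fake repository root
        self.temp_path = tempfile.mkdtemp()
        # addCleanup runs even when setUp or the test body fails part-way
        self.addCleanup(shutil.rmtree, self.temp_path, True)

    def test_repo_path_exists(self):
        self.assertTrue(os.path.isdir(self.temp_path))

if __name__ == '__main__':
    unittest.main()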
0
Example 147
Project: ete Source File: phylobuild.py
def _main(arguments, builtin_apps_path=None):
global BASEPATH, APPSPATH, args
if builtin_apps_path:
APPSPATH = builtin_apps_path
if not pexist(pjoin(APPSPATH, "bin")):
APPSPATH = os.path.expanduser("~/.etetoolkit/ext_apps-latest/")
ETEHOMEDIR = os.path.expanduser("~/.etetoolkit/")
if len(arguments) == 1:
if not pexist(APPSPATH):
print(colorify('\nWARNING: external applications not found', "yellow"), file=sys.stderr)
print(colorify('Install using conda (recommended):', "lgreen"), file=sys.stderr)
print(colorify(' conda install -c etetoolkit ete3_external_tools', "white"), file=sys.stderr)
print(colorify('or manually compile by running:', "lgreen"), file=sys.stderr)
print(colorify(' ete3 upgrade-external-tools', "white"), file=sys.stderr)
print()
if len(arguments) > 1:
_config_path = pjoin(BASEPATH, 'phylobuild.cfg')
if arguments[1] == "check":
if not pexist(APPSPATH):
print(colorify('\nWARNING: external applications not found', "yellow"), file=sys.stderr)
print(colorify('Install using conda (recommended):', "lgreen"), file=sys.stderr)
print(colorify(' conda install -c etetoolkit ete3_external_tools', "white"), file=sys.stderr)
print(colorify('or manually compile by running:', "lgreen"), file=sys.stderr)
print(colorify(' ete3 upgrade-external-tools', "white"), file=sys.stderr)
sys.exit(0)
try:
toolchain_version = open(pjoin(APPSPATH, "__version__")).readline()
except IOError:
toolchain_version = "unknown"
print("Current Toolchain path: %s " %APPSPATH)
print("Current Toolchain version: %s" %toolchain_version)
# setup portable apps
config = {}
for k in apps.builtin_apps:
cmd = apps.get_call(k, APPSPATH, "/tmp", "1")
config[k] = cmd
apps.test_apps(config)
sys.exit(0)
elif arguments[1] in ("workflows", "wl"):
if arguments[1] == "wl":
print(colorify("WARNING: 'wl' is obsolete and will be removed in the future, use 'workflows' instead", "orange"), file=sys.stderr)
base_config = check_config(_config_path)
try:
wf_type = arguments[2]
except IndexError:
wf_type = None
list_workflows(base_config, wf_type)
sys.exit(0)
elif arguments[1] == "apps":
base_config = check_config(_config_path)
list_apps(base_config, set(arguments[2:]))
sys.exit(0)
elif arguments[1] == "show":
base_config = check_config(_config_path)
try:
block = arguments[2]
except IndexError:
print("Expected a block name, found none")
sys.exit(1)
block_detail(block, base_config)
sys.exit(0)
elif arguments[1] == "dump":
if len(arguments) > 2:
base_config = check_config(_config_path)
block_detail(arguments[2], base_config, color=False)
else:
print(open(_config_path).read())
sys.exit(0)
elif arguments[1] == "validate":
print('Validating configuration file ', arguments[2])
if pexist(arguments[2]):
base_config = check_config(arguments[2])
print('Everything ok')
else:
print('File does not exist')
sys.exit(-1)
sys.exit(0)
elif arguments[1] == "version":
print(__version__)
sys.exit(0)
parser = argparse.ArgumentParser(description=__DESCRIPTION__ + __EXAMPLES__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Input data related flags
input_group = parser.add_argument_group('==== Input Options ====')
input_group.add_argument('[check | workflows | apps | show | dump | validate | version | install_tools]',
nargs='?',
help=("Utility commands:\n"
"check: check that external applications are executable.\n"
"wl: show a list of available workflows.\n"
"show [name]: show the configuration parameters of a given workflow or application config block.\n"
"dump [name]: dump the configuration parameters of the specified block (allows to modify predefined config).\n"
"validate [configfile]: Validate a custom configuration file.\n"
"version: Show current version.\n"
))
input_group.add_argument("-c", "--custom-config", dest="custom_config",
type=is_file,
help="Custom configuration file.")
input_group.add_argument("--base-config", dest="base_config",
type=is_file, default=BASEPATH+'/phylobuild.cfg',
help="Base configuration file.")
input_group.add_argument("--tools-dir", dest="tools_dir",
type=str,
help="Custom path where external software is avaiable.")
input_group.add_argument("-w", dest="workflow",
required=True,
nargs='+',
help="One or more gene-tree workflow names. All the specified workflows will be executed using the same input data.")
input_group.add_argument("-m", dest="supermatrix_workflow",
required=False,
nargs='+',
help="One or more super-matrix workflow names. All the specified workflows will be executed using the same input data.")
input_group.add_argument("-a", dest="aa_seed_file",
type=is_file,
help="Initial multi sequence file with"
" protein sequences.")
input_group.add_argument("-n", dest="nt_seed_file",
type=is_file,
help="Initial multi sequence file with"
" nucleotide sequences")
# input_group.add_argument("--seqformat", dest="seqformat",
# choices=["fasta", "phylip", "iphylip", "phylip_relaxed", "iphylip_relaxed"],
# default="fasta",
# help="")
input_group.add_argument("--dealign", dest="dealign",
action="store_true",
help="when used, gaps in the orginal fasta file will"
" be removed, thus allowing to use alignment files as input.")
input_group.add_argument("--seq-name-parser", dest="seq_name_parser",
type=str,
help=("A Perl regular expression containing a matching group, which is"
" used to parse sequence names from the input files. Use this option to"
" customize the names that should be shown in the output files."
" The matching group (the two parentheses) in the provided regular"
" expression will be assumed as sequence name. By default, all "
" characthers until the first blank space or tab delimiter are "
" used as the sequence names."),
default='^([^\s]+)')
input_group.add_argument("--no-seq-rename", dest="seq_rename",
action="store_false",
help="If used, sequence names will NOT be"
" internally translated to 10-character-"
"identifiers.")
input_group.add_argument("--no-seq-checks", dest="no_seq_checks",
action="store_true",
help="Skip consistency sequence checks for not allowed symbols, etc.")
input_group.add_argument("--no-seq-correct", dest="no_seq_correct",
action="store_true",
help="Skip sequence compatibility changes: i.e. U, J and O symbols are converted into X by default.")
dup_names_group = input_group.add_mutually_exclusive_group()
dup_names_group.add_argument("--ignore-dup-seqnames", dest="ignore_dup_seqnames",
action = "store_true",
help=("If duplicated sequence names exist in the input"
" fasta file, a single random instance will be used."))
dup_names_group.add_argument("--rename-dup-seqnames", dest="rename_dup_seqnames",
action = "store_true",
help=("If duplicated sequence names exist in the input"
" fasta file, duplicates will be renamed."))
input_group.add_argument("--seqdb", dest="seqdb",
type=str,
help="Uses a custom sequence database file")
# supermatrix workflow
input_group.add_argument("--cogs", dest="cogs_file",
type=is_file,
help="A file defining clusters of orthologous groups."
" One per line. Tab delimited sequence ids. ")
input_group.add_argument("--lineages", dest="lineages_file",
type=is_file,
help="EXPERIMENTAL:A file containing the (sorted) lineage "
"track of each species. It enables "
"NPR algorithm to fix what taxonomic "
"levels should be optimized."
"Note that linage tracks must consist in "
"a comma separated list of taxonomic levels "
"sorted from deeper to swallower clades "
"(i.e. 9606 [TAB] Eukaryotes,Mammals,Primates)"
)
input_group.add_argument("--spname-delimiter", dest="spname_delimiter",
type=str, default="_",
help="spname_delimiter is used to split"
" the name of sequences into species code and"
" sequence identifier (i.e. HUMAN_p53 = HUMAN, p53)."
" Note that species name must always precede seq.identifier.")
input_group.add_argument("--spfile", dest="spfile",
type=is_file,
help="If specified, only the sequences and ortholog"
" pairs matching the group of species in this file"
" (one species code per line) will be used. ")
npr_group = parser.add_argument_group('==== NPR options ====')
npr_group.add_argument("-r", "--recursive", dest="npr_workflows",
required=False,
nargs="*",
help="EXPERIMENTAL:Enables recursive NPR capabilities (Nested Phylogenetic Reconstruction)"
" and specifies custom workflows and filters for each NPR iteration.")
npr_group.add_argument("--nt-switch-threshold", dest="nt_switch_thr",
required=False,
type=float,
default = 0.95,
help="Sequence similarity at which nucleotide based alignments should be used"
" instead of amino-acids. ")
npr_group.add_argument("--max-iters", dest="max_iters",
required=False,
type=int,
default=99999999,
help="EXPERIMENTAL:Set a maximum number of NPR iterations allowed.")
npr_group.add_argument("--first-split-outgroup", dest="first_split",
type=str,
default='midpoint',
help=("EXPERIMENTAL:When used, it overrides first_split option"
" in any tree merger config block in the"
" config file. Default: 'midpoint' "))
# Output data related flags
output_group = parser.add_argument_group('==== Output Options ====')
output_group.add_argument("-o", "--outdir", dest="outdir",
type=str, required=True,
help="""Output directory for results.""")
output_group.add_argument("--scratch-dir", dest="scratch_dir",
type=is_dir,
help="""If provided, ete-build will run on the scratch folder and all files will be transferred to the output dir when finished. """)
output_group.add_argument("--db-dir", dest="db_dir",
type=is_dir,
help="""Alternative location of the database directory""")
output_group.add_argument("--tasks-dir", dest="tasks_dir",
type=is_dir,
help="""Output directory for the executed processes (intermediate files).""")
output_group.add_argument("--compress", action="store_true",
help="Compress all intermediate files when"
" a workflow is finished.")
output_group.add_argument("--logfile", action="store_true",
help="Log messages will be saved into a file named npr.log within the output directory.")
output_group.add_argument("--noimg", action="store_true",
help="Tree images will not be generated when a workflow is finished.")
output_group.add_argument("--email", dest="email",
type=str,
help="EXPERIMENTAL:Send an email when errors occur or a workflow is done.")
output_group.add_argument("--email-report-time", dest="email_report_time",
type=int, default = 0,
help="EXPERIMENTAL:How often (in minutes) an email reporting the status of the execution should be sent. 0=No reports")
# Task execution related flags
exec_group = parser.add_argument_group('==== Execution Mode Options ====')
exec_group.add_argument("-C", "--cpu", dest="maxcores", type=int,
default=1, help="Maximum number of CPU cores"
" available in the execution host. If higher"
" than 1, tasks with multi-threading"
" capabilities will enabled. Note that this"
" number will work as a hard limit for all applications,"
"regardless of their specific configuration.")
exec_group.add_argument("-t", "--schedule-time", dest="schedule_time",
type=float, default=2,
help="""How often (in secs) tasks should be checked for available results.""")
exec_group.add_argument("--launch-time", dest="launch_time",
type=float, default=3,
help="""How often (in secs) queued jobs should be checked for launching""")
exec_type_group = exec_group.add_mutually_exclusive_group()
exec_type_group.add_argument("--noexec", dest="no_execute",
action="store_true",
help=("Prevents launching any external application."
" Tasks will be processed and intermediate steps will"
" run, but no real computation will be performed."))
# exec_type_group.add_argument("--sge", dest="sge_execute",
# action="store_true", help="EXPERIMENTAL!: Jobs will be"
# " launched using the Sun Grid Engine"
# " queue system.")
exec_group.add_argument("--monitor", dest="monitor",
action="store_true",
help="Monitor mode: pipeline jobs will be"
" detached from the main process. This means that"
" when npr execution is interrupted, all currently"
" running jobs will keep running. Use this option if you"
" want to stop and recover an execution thread or"
" if jobs are expected to be executed remotely."
)
exec_group.add_argument("--resume", dest="resume",
action="store_true",
help="If output directory exists, reuse data from it if possible. ")
exec_group.add_argument("--clearall", dest="clearall",
action="store_true",
help="If output directory exists, erase all previous data and start a clean execution.")
exec_group.add_argument("--softclear", dest="softclear",
action="store_true",
help="Clear all precomputed data (data.db), but keeps task raw data in the directory, so they can be re-processed.")
exec_group.add_argument("--clear-seqdb", dest="clearseqs",
action="store_true",
help="Reload sequences deleting previous database if necessary.")
# exec_group.add_argument("--arch", dest="arch",
# choices=["auto", "32", "64"],
# default="auto", help="Set the architecture of"
# " execution hosts (needed only when using"
# " built-in applications.)")
exec_group.add_argument("--nochecks", dest="nochecks",
action="store_true",
help="Skip basic checks (i.e. tools available) everytime the application starts.")
# Interface related flags
ui_group = parser.add_argument_group("==== Program Interface Options ====")
# ui_group.add_argument("-u", dest="enable_ui",
# action="store_true", help="When used, a color"
# " based interface is launched to monitor NPR"
# " processes. This feature is EXPERIMENTAL and"
# " requires NCURSES libraries installed in your"
# " system.")
ui_group.add_argument("-v", dest="verbosity",
default=0,
type=int, choices=[0,1,2,3,4],
help="Verbosity level: 0=very quiet, 4=very "
" verbose.")
ui_group.add_argument("--debug", nargs="?",
const="all",
help="Start debugging"
" A taskid can be provided, so"
" debugging will start from such task on.")
args = parser.parse_args(arguments)
if args.tools_dir:
APPSPATH = args.tools_dir
try:
toolchain_version = open(pjoin(APPSPATH, "__version__")).readline()
except IOError:
toolchain_version = "unknown"
print("Toolchain path: %s " %APPSPATH)
print("Toolchain version: %s" %toolchain_version)
if not pexist(APPSPATH):
print(colorify('\nWARNING: external applications directory was not found at %s' %APPSPATH, "yellow"), file=sys.stderr)
print(colorify('Use "ete build install_tools" to install or upgrade tools', "orange"), file=sys.stderr)
args.enable_ui = False
if not args.noimg:
try:
from .. import Tree
Tree().render('/tmp/etenpr_img_test.png')
except:
print('X11 DISPLAY = %s' %colorify(os.environ.get('DISPLAY', 'not detected!'), 'yellow'))
print('(You can use --noimg to disable graphical capabilities)')
raise ConfigError('img generation not supported')
if not args.aa_seed_file and not args.nt_seed_file:
parser.error('At least one input file argument (-a, -n) is required')
outdir = os.path.abspath(args.outdir)
final_dir, runpath = os.path.split(outdir)
if not runpath:
raise ValueError("Invalid outdir")
GLOBALS["output_dir"] = os.path.abspath(args.outdir)
if args.scratch_dir:
# set paths for scratch folder for sqlite files
print("Creating temporary scratch dir...", file=sys.stderr)
base_scratch_dir = os.path.abspath(args.scratch_dir)
scratch_dir = tempfile.mkdtemp(prefix='npr_tmp', dir=base_scratch_dir)
GLOBALS["scratch_dir"] = scratch_dir
GLOBALS["basedir"] = scratch_dir
else:
GLOBALS["basedir"] = GLOBALS["output_dir"]
GLOBALS["first_split_outgroup"] = args.first_split
GLOBALS["email"] = args.email
GLOBALS["verbosity"] = args.verbosity
GLOBALS["email_report_time"] = args.email_report_time * 60
GLOBALS["launch_time"] = args.launch_time
GLOBALS["cmdline"] = ' '.join(arguments)
GLOBALS["threadinfo"] = defaultdict(dict)
GLOBALS["seqtypes"] = set()
GLOBALS["target_species"] = set()
GLOBALS["target_sequences"] = set()
GLOBALS["spname_delimiter"] = args.spname_delimiter
GLOBALS["color_shell"] = True
GLOBALS["citator"] = Citator()
GLOBALS["lineages"] = None
GLOBALS["cogs_file"] = None
GLOBALS["citator"].add("ETE")
if not pexist(GLOBALS["basedir"]):
os.makedirs(GLOBALS["basedir"])
# when killed, translate signal into exception so program can exit cleanly
def raise_control_c(_signal, _frame):
if GLOBALS.get('_background_scheduler', None):
GLOBALS['_background_scheduler'].terminate()
raise KeyboardInterrupt
signal.signal(signal.SIGTERM, raise_control_c)
# Start the application
app_wrapper(main, args)
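The --scratch-dir branch above is the classic run-on-scratch pattern: tempfile.mkdtemp(prefix='npr_tmp', dir=base_scratch_dir) creates the working directory on a fast or local filesystem, and results are transferred to the real output directory when the run finishes. A rough sketch of the two halves of that pattern (make_scratch_dir and transfer_results are hypothetical names, not ete functions):

import os
import shutil
import tempfile

def make_scratch_dir(scratch_root):
    # unique working directory on the scratch filesystem, labelled so that
    # stray leftovers are easy to recognise and delete later
    return tempfile.mkdtemp(prefix='npr_tmp', dir=os.path.abspath(scratch_root))

def transfer_results(scratch_dir, output_dir):
    # end-of-run step: move everything produced on scratch into the final
    # output directory, then drop the scratch directory itself
    for name in os.listdir(scratch_dir):
        shutil.move(os.path.join(scratch_dir, name), output_dir)
    shutil.rmtree(scratch_dir, ignore_errors=True)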
0
Example 148
Project: campies Source File: campies.py
def build(bootcamp_package):
"""Extracts a BootCamp package and builds a ZIP file containing drivers"""
# Verify that the Boot Camp volume is not already mounted
if os.path.exists('/Volumes/Boot Camp'):
raise CampiesError(
'The Boot Camp volume (/Volumes/Boot Camp) already appears to '
'be mounted; please eject this volume and try again'
)
# Verify that the BootCamp package location provided actually exists
if not os.path.isfile(bootcamp_package):
raise CampiesError(
'Unable to find file {bootcamp_package}'.format(
bootcamp_package=bootcamp_package
)
)
bootcamp_extract_dir = tempfile.mkdtemp(prefix='campies')
print(
GREEN +
'Using temporary directory {bootcamp_extract_dir}'.format(
bootcamp_extract_dir=bootcamp_extract_dir
) +
ENDC
)
print(BLUE + 'Extracting the BootCampESD package' + ENDC)
try:
run([
'pkgutil', '--expand', bootcamp_package,
'{bootcamp_extract_dir}/BootCampESD'.format(
bootcamp_extract_dir=bootcamp_extract_dir
)
])
except CampiesSubprocessError:
raise CampiesError('Unable to extract the BootCampESD package')
print(BLUE + 'Extracting the Payload from the BootCampESD package' + ENDC)
try:
run([
'tar', 'xfz', '{bootcamp_extract_dir}/BootCampESD/Payload'.format(
bootcamp_extract_dir=bootcamp_extract_dir
), '--strip', '3', '-C', bootcamp_extract_dir
])
except CampiesSubprocessError:
raise CampiesError(
'Unable to extract Payload from the BootCampESD package'
)
print(BLUE + 'Attaching the Windows Support DMG image' + ENDC)
try:
run([
'hdiutil', 'attach', '-quiet',
'{bootcamp_extract_dir}/BootCamp/WindowsSupport.dmg'.format(
bootcamp_extract_dir=bootcamp_extract_dir
)
])
except CampiesSubprocessError:
raise CampiesError('Unable to attach the Windows Support DMG image')
try:
bootcamp_etree = ElementTree.parse(
'/Volumes/Boot Camp/BootCamp/BootCamp.xml'
)
bootcamp = bootcamp_etree.getroot()
except xml.etree.ElementTree.ParseError:
raise CampiesError(
'Unable to parse BootCamp XML to obtain the software version'
)
try:
bootcamp_version = bootcamp.find('MsiInfo').find('ProductVersion').text
except AttributeError:
raise CampiesError('Unable to determine BootCamp version')
print(
GREEN +
'Determined your BootCamp version to be {bootcamp_version}'.format(
bootcamp_version=bootcamp_version
) +
ENDC
)
bootcamp_package_dir = os.path.dirname(bootcamp_package)
bootcamp_archive = (
'{bootcamp_package_dir}/BootCamp {bootcamp_version}'.format(
bootcamp_package_dir=bootcamp_package_dir,
bootcamp_version=bootcamp_version
)
)
print(
BLUE +
'Creating a ZIP archive of the BootCamp Windows installer' +
ENDC
)
try:
shutil.make_archive(bootcamp_archive, 'zip', '/Volumes/Boot Camp')
except OSError:
raise CampiesError(
'Unable to create ZIP archive of the BootCamp Windows installer'
)
print(BLUE + 'Detaching the Windows Support DMG image' + ENDC)
try:
run(['hdiutil', 'detach', '-quiet', '/Volumes/Boot Camp'])
except CampiesSubprocessError:
raise CampiesError('Unable to detach the Windows Support DMG image')
print(BLUE + 'Cleaning up temporary directory' + ENDC)
try:
shutil.rmtree(bootcamp_extract_dir)
except OSError:
print(YELLOW + 'Unable to clean temporary directory' + ENDC)
print(GREEN + 'All processing was completed successfully!' + ENDC)
print(
GREEN +
'Your BootCamp archive is available at '
'"{bootcamp_archive}.zip"'.format(bootcamp_archive=bootcamp_archive) +
ENDC
)
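build() above creates its working directory with tempfile.mkdtemp(prefix='campies') and only removes it on the success path, printing a warning when rmtree fails. A minimal alternative sketch that always cleans up, even when an intermediate step raises (with_build_dir is an illustrative helper, not part of campies):

import shutil
import tempfile

def with_build_dir(work):
    """Run work(extract_dir) inside a throwaway directory and remove the
    directory afterwards, whether or not work() raised."""
    extract_dir = tempfile.mkdtemp(prefix='campies')
    try:
        return work(extract_dir)
    finally:
        # ignore_errors=True mirrors the "warn but carry on" behaviour above
        shutil.rmtree(extract_dir, ignore_errors=True)

print(with_build_dir(lambda d: 'extracted into {0}'.format(d)))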
0
Example 149
def main(args):
rounds = args.rounds
user = args.user
hosts = []
if args.hosts is not None:
hosts.extend(args.hosts)
if args.hostfile is not None:
hosts.extend(paramgmt.parse_stream(args.hostfile))
args.hostfile.close()
if not hosts:
print('no hosts specified')
return 0
for round in range(0, rounds):
print('***********************************************')
print('*** Starting test {0} of {1}'.format(round+1, rounds))
print('***********************************************')
print('')
tmp = tempfile.mkdtemp()
ctl = paramgmt.Controller(hosts=hosts, user=user, parallel=True,
quiet=False, color=True, attempts=3)
ctl.attempts = 1
sts = ctl.local_command(['ping', '-c', '1', '?HOST'])
assert paramgmt.all_success(sts)
ctl.attempts = 1
sts = ctl.local_command(['mkdir', '-p', os.path.join(tmp, '?HOST')])
assert paramgmt.all_success(sts)
for host in hosts:
do('test -d {0}'.format(os.path.join(tmp, host)))
for num in range(1, 4):
filepath1 = os.path.join(tmp, 'test{0}.txt'.format(num))
do('echo "test {0}" > {1}'.format(num, filepath1))
do('test -f {0}'.format(filepath1))
for num in range(1, 4):
filepath1 = os.path.join(tmp, 'test{0}.txt'.format(num))
filepath2 = os.path.join(tmp, '?HOST', 'test{0}.txt'.format(num))
ctl.attempts = 1
sts = ctl.local_command(['cp', filepath1, filepath2])
assert paramgmt.all_success(sts)
for host in hosts:
filepath3 = os.path.join(tmp, host, 'test{0}.txt'.format(num))
do('test -f {0}'.format(filepath3))
ctl.attempts = 3
sts = ctl.remote_command(['rm', '-rf', tmp])
assert paramgmt.all_success(sts)
ctl.attempts = 3
filepath1 = os.path.join(tmp, '?HOST')
sts = ctl.remote_command(['mkdir', '-p', filepath1])
assert paramgmt.all_success(sts)
ctl.attempts = 3
filepath1 = os.path.join(tmp, '?HOST')
sts = ctl.remote_command(['test', '-d', filepath1])
assert paramgmt.all_success(sts)
ctl.attempts = 3
filepath1 = os.path.join(tmp, '?HOST', 'test1.txt')
filepath2 = os.path.join(tmp, '?HOST', 'test2.txt')
filepath3 = os.path.join(tmp, '?HOST', 'test3.txt')
filepath4 = os.path.join(tmp, '?HOST')
sts = ctl.remote_push([filepath1, filepath2], filepath4)
assert paramgmt.all_success(sts)
ctl.attempts = 3
sts = ctl.remote_command(['test', '-f', filepath1])
assert paramgmt.all_success(sts)
ctl.attempts = 3
sts = ctl.remote_command(['test', '-f', filepath2])
assert paramgmt.all_success(sts)
ctl.attempts = 3
sts = ctl.remote_push([filepath3], filepath4)
assert paramgmt.all_success(sts)
ctl.attempts = 3
sts = ctl.remote_command(['test', '-f', filepath3])
assert paramgmt.all_success(sts)
ctl.attempts = 1
filepath5 = os.path.join(tmp, '?HOST', 'pull')
sts = ctl.local_command(['mkdir', '-p', filepath5])
assert paramgmt.all_success(sts)
for host in hosts:
filepath6 = os.path.join(tmp, host, 'pull')
do('test -d {0}'.format(filepath6))
ctl.attempts = 3
sts = ctl.remote_pull([filepath1, filepath3], filepath5)
assert paramgmt.all_success(sts)
for host in hosts:
filepath6 = os.path.join(tmp, host, 'pull', 'test1.txt')
filepath7 = os.path.join(tmp, host, 'pull', 'test3.txt')
do('test -f {0}'.format(filepath6))
do('test -f {0}'.format(filepath7))
ctl.attempts = 1
filepath6 = os.path.join(tmp, '?HOST', 'pull', 'test1.txt')
filepath7 = os.path.join(tmp, '?HOST', 'pull', 'test2.txt')
filepath8 = os.path.join(tmp, '?HOST', 'pull', 'test3.txt')
sts = ctl.local_command(['test', '-f', filepath6])
assert paramgmt.all_success(sts)
ctl.attempts = 1
sts = ctl.local_command(['test', '-f', filepath8])
assert paramgmt.all_success(sts)
ctl.attempts = 3
sts = ctl.remote_pull([filepath2], filepath5)
assert paramgmt.all_success(sts)
for host in hosts:
filepath9 = os.path.join(tmp, host, 'pull', 'test2.txt')
do('test -f {0}'.format(filepath9))
ctl.attempts = 1
sts = ctl.local_command(['test', '-f', filepath7])
assert paramgmt.all_success(sts)
for num in range(1, 4):
scriptfileA = os.path.join(tmp, 'script{0}.sh'.format(num))
scriptfileB = os.path.join(tmp, '?HOST', 'script{0}.sh'.format(num))
with open(scriptfileA, 'w') as fd:
fd.write(get_script(num, tmp))
ctl.attempts = 1
sts = ctl.local_command(['cp', scriptfileA, scriptfileB])
assert paramgmt.all_success(sts)
for host in hosts:
scriptfileC = os.path.join(tmp, host, 'script{0}.sh'.format(num))
do('test -f {0}'.format(scriptfileC))
ctl.attempts = 3
script1 = os.path.join(tmp, '?HOST', 'script1.sh')
script2 = os.path.join(tmp, '?HOST', 'script2.sh')
script3 = os.path.join(tmp, '?HOST', 'script3.sh')
sts = ctl.remote_script([script1, script2, script3])
assert paramgmt.all_success(sts)
ctl.attempts = 3
test1 = os.path.join(tmp, '?HOST', 'test1.txt')
test2 = os.path.join(tmp, '?HOST', 'test2.txt')
test3 = os.path.join(tmp, '?HOST', 'test3.txt')
sts = ctl.remote_command(
['test ! -f {0} && test -f {1} && test ! -f {2}'
.format(test1, test2, test3)])
assert paramgmt.all_success(sts)
ctl.attempts = 1
filepath = os.path.join(tmp, '?HOST', 'pull', 'test3.txt')
sts = ctl.local_command(
['cat {0} | grep test && echo ?HOST is awesome'.format(filepath)])
assert paramgmt.all_success(sts)
for s in sts:
assert s.stdout == ('test 3\n' + s.host + ' is awesome')
ctl.attempts = 1
sts = ctl.local_command(
['echo "This is stderr text" 1>&2 && echo "This is stdout text"'])
assert paramgmt.all_success(sts)
for s in sts:
assert s.stdout == 'This is stdout text'
assert s.stderr == 'This is stderr text'
do('rm -rf {0}'.format(tmp))
return 0
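Each round above creates a fresh tempfile.mkdtemp() and finally deletes it by shelling out to rm -rf. On Python 3.2+ the same create-then-remove lifecycle can be written with tempfile.TemporaryDirectory; a small sketch, unrelated to paramgmt itself:

import os
import tempfile

for round_no in range(3):
    # the context manager removes the directory and its contents on exit,
    # replacing the explicit rm -rf at the end of every round
    with tempfile.TemporaryDirectory() as tmp:
        marker = os.path.join(tmp, 'test1.txt')
        with open(marker, 'w') as fd:
            fd.write('test {0}\n'.format(round_no + 1))
        assert os.path.isfile(marker)
    assert not os.path.exists(tmp)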
0
Example 150
Project: instaseis Source File: conftest.py
def repack_databases():
"""
Repack databases and create a couple of temporary test databases.
It will generate various repacked databases and use them in the test
suite - this for one tests the repacking but also that Instaseis can
work with a number of different database layouts.
"""
try:
import netCDF4 # NOQA
import click # NOQA
except ImportError:
print("\nSkipping database repacking tests which require `click` and "
"`netCDF4` to be installed.\n")
return {
"root_folder": None,
"databases": {}
}
import h5py
from instaseis.scripts.repack_db import merge_files, repack_file
root_folder = tempfile.mkdtemp()
# First create a transposed database - make it contiguous.
transposed_bw_db = os.path.join(
root_folder, "transposed_100s_db_bwd_displ_only")
os.makedirs(transposed_bw_db)
db = os.path.join(TEST_DATA, "100s_db_bwd_displ_only")
f = "ordered_output.nc4"
px = os.path.join(db, "PX", "Data", f)
pz = os.path.join(db, "PZ", "Data", f)
px_tr = os.path.join(transposed_bw_db, "PX", f)
pz_tr = os.path.join(transposed_bw_db, "PZ", f)
os.makedirs(os.path.dirname(px_tr))
os.makedirs(os.path.dirname(pz_tr))
print("Creating transposed test database ...")
repack_file(input_filename=px, output_filename=px_tr, contiguous=True,
compression_level=None, quiet=True, transpose=True)
repack_file(input_filename=pz, output_filename=pz_tr, contiguous=True,
compression_level=None, quiet=True, transpose=True)
# Now transpose it again which should result in the original layout.
transposed_and_back_bw_db = os.path.join(
root_folder, "transposed_and_back_100s_db_bwd_displ_only")
os.makedirs(transposed_and_back_bw_db)
px_tr_and_back = os.path.join(transposed_and_back_bw_db, "PX", f)
pz_tr_and_back = os.path.join(transposed_and_back_bw_db, "PZ", f)
os.makedirs(os.path.dirname(px_tr_and_back))
os.makedirs(os.path.dirname(pz_tr_and_back))
print("Creating compressed re-transposed test database ...")
repack_file(input_filename=px_tr, output_filename=px_tr_and_back,
contiguous=False, compression_level=4, quiet=True,
transpose=True)
repack_file(input_filename=pz_tr, output_filename=pz_tr_and_back,
contiguous=False, compression_level=4, quiet=True,
transpose=True)
# Now add another simple repacking test - repack the original one and
# repack the transposed one.
repacked_bw_db = os.path.join(
root_folder, "repacked_100s_db_bwd_displ_only")
os.makedirs(repacked_bw_db)
px_r = os.path.join(repacked_bw_db, "PX", f)
pz_r = os.path.join(repacked_bw_db, "PZ", f)
os.makedirs(os.path.dirname(px_r))
os.makedirs(os.path.dirname(pz_r))
print("Creating a simple repacked test database ...")
repack_file(input_filename=px, output_filename=px_r, contiguous=True,
compression_level=None, quiet=True, transpose=False)
repack_file(input_filename=pz, output_filename=pz_r, contiguous=True,
compression_level=None, quiet=True, transpose=False)
# Also repack the transposed database.
repacked_transposed_bw_db = os.path.join(
root_folder, "repacked_transposed_100s_db_bwd_displ_only")
os.makedirs(repacked_transposed_bw_db)
px_r_tr = os.path.join(repacked_transposed_bw_db, "PX", f)
pz_r_tr = os.path.join(repacked_transposed_bw_db, "PZ", f)
os.makedirs(os.path.dirname(px_r_tr))
os.makedirs(os.path.dirname(pz_r_tr))
print("Creating a simple transposed and repacked test database ...")
repack_file(input_filename=px_tr, output_filename=px_r_tr, contiguous=True,
compression_level=None, quiet=True, transpose=False)
repack_file(input_filename=pz_tr, output_filename=pz_r_tr, contiguous=True,
compression_level=None, quiet=True, transpose=False)
# Add a merged database.
merged_bw_db = os.path.join(
root_folder, "merged_100s_db_bwd_displ_only")
os.makedirs(merged_bw_db)
print("Creating a merged test database ...")
merge_files(filenames=[px, pz], output_folder=merged_bw_db,
contiguous=True, compression_level=None, quiet=True)
# Another merged database but this time originating from a transposed
# database.
merged_transposed_bw_db = os.path.join(
root_folder, "merged_transposed_100s_db_bwd_displ_only")
os.makedirs(merged_transposed_bw_db)
print("Creating a merged transposed test database ...")
merge_files(filenames=[px_tr, pz_tr],
output_folder=merged_transposed_bw_db,
contiguous=True, compression_level=None, quiet=True)
# Make a horizontal only merged database.
horizontal_only_merged_db = os.path.join(
root_folder, "horizontal_only_merged_db")
os.makedirs(horizontal_only_merged_db)
print("Creating a horizontal only merged test database ...")
merge_files(filenames=[px_tr],
output_folder=horizontal_only_merged_db,
contiguous=False, compression_level=2, quiet=True)
# Make a vertical only merged database.
vertical_only_merged_db = os.path.join(
root_folder, "vertical_only_merged_db")
os.makedirs(vertical_only_merged_db)
print("Creating a vertical only merged test database ...")
merge_files(filenames=[pz_tr],
output_folder=vertical_only_merged_db,
contiguous=False, compression_level=2, quiet=True)
# Create a merged version of the fwd database.
fwd_db = os.path.join(TEST_DATA, "100s_db_fwd")
merged_fwd_db = os.path.join(
root_folder, "merged_100s_db_fwd")
os.makedirs(merged_fwd_db)
f = "ordered_output.nc4"
d1 = os.path.join(fwd_db, "MZZ", "Data", f)
d2 = os.path.join(fwd_db, "MXX_P_MYY", "Data", f)
d3 = os.path.join(fwd_db, "MXZ_MYZ", "Data", f)
d4 = os.path.join(fwd_db, "MXY_MXX_M_MYY", "Data", f)
assert os.path.exists(d1), d1
assert os.path.exists(d2), d2
assert os.path.exists(d3), d3
assert os.path.exists(d4), d4
print("Creating a merged forward test database ...")
merge_files(filenames=[d1, d2, d3, d4],
output_folder=merged_fwd_db, contiguous=False,
compression_level=2, quiet=True)
# Actually test the shapes of the fields to see that something happened.
with h5py.File(pz, mode="r") as f:
original_shape = f["Snapshots"]["disp_z"].shape
with h5py.File(pz_tr, mode="r") as f:
transposed_shape = f["Snapshots"]["disp_z"].shape
with h5py.File(pz_tr_and_back, mode="r") as f:
transposed_and_back_shape = f["Snapshots"]["disp_z"].shape
with h5py.File(pz_r, mode="r") as f:
repacked_shape = f["Snapshots"]["disp_z"].shape
with h5py.File(pz_r_tr, mode="r") as f:
repacked_transposed_shape = f["Snapshots"]["disp_z"].shape
with h5py.File(os.path.join(merged_bw_db, "merged_output.nc4"), "r") as f:
merged_shape = f["MergedSnapshots"].shape
with h5py.File(os.path.join(merged_transposed_bw_db,
"merged_output.nc4"), "r") as f:
merged_tr_shape = f["MergedSnapshots"].shape
with h5py.File(os.path.join(horizontal_only_merged_db,
"merged_output.nc4"), "r") as f:
horizontal_only_merged_tr_shape = f["MergedSnapshots"].shape
with h5py.File(os.path.join(vertical_only_merged_db,
"merged_output.nc4"), "r") as f:
vertical_only_merged_tr_shape = f["MergedSnapshots"].shape
with h5py.File(os.path.join(merged_fwd_db,
"merged_output.nc4"), "r") as f:
merged_fwd_shape = f["MergedSnapshots"].shape
assert original_shape == tuple(reversed(transposed_shape))
assert original_shape == transposed_and_back_shape
assert original_shape == repacked_shape
assert original_shape == tuple(reversed(repacked_transposed_shape))
assert merged_shape == (192, 5, 5, 5, 73), str(merged_shape)
assert merged_tr_shape == (192, 5, 5, 5, 73), str(merged_tr_shape)
assert horizontal_only_merged_tr_shape == (192, 3, 5, 5, 73), \
str(horizontal_only_merged_tr_shape)
assert vertical_only_merged_tr_shape == (192, 2, 5, 5, 73), \
str(vertical_only_merged_tr_shape)
assert merged_fwd_shape == (192, 10, 5, 5, 73), \
str(merged_fwd_shape)
dbs = collections.OrderedDict()
# It is important that the name stays fairly similar to the original,
# as some tests match patterns in the name.
dbs["transposed_100s_db_bwd_displ_only"] = transposed_bw_db
dbs["transposed_and_back_100s_db_bwd_displ_only"] = \
transposed_and_back_bw_db
dbs["repacked_100s_db_bwd_displ_only"] = repacked_bw_db
dbs["repacked_transposed_100s_db_bwd_displ_only"] = \
repacked_transposed_bw_db
dbs["merged_100s_db_bwd_displ_only"] = merged_bw_db
dbs["merged_transposed_100s_db_bwd_displ_only"] = merged_transposed_bw_db
# Special databases.
dbs["horizontal_only_merged_database"] = horizontal_only_merged_db
dbs["vertical_only_merged_database"] = vertical_only_merged_db
# Forward databases.
dbs["merged_100s_db_fwd"] = merged_fwd_db
return {
"root_folder": root_folder,
"databases": dbs
}
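repack_databases() returns the mkdtemp() root folder alongside the generated databases but never deletes it; in a conftest.py that job typically falls to a session-scoped fixture. A minimal sketch of such a wrapper, assuming pytest is available and that repack_databases is the function above:

import shutil

import pytest

@pytest.fixture(scope="session")
def repacked_databases():
    # build the temporary databases once per test session
    info = repack_databases()
    yield info
    # remove the mkdtemp() root folder (and everything under it) afterwards
    if info["root_folder"] is not None:
        shutil.rmtree(info["root_folder"], ignore_errors=True)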