Here are examples of the Python API tempfile.mkdtemp taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
160 Examples
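A quick reminder of the API's contract before the examples: tempfile.mkdtemp(suffix, prefix, dir) creates a new directory readable, writable, and searchable only by the creating user and returns its path; unlike tempfile.TemporaryDirectory, nothing ever deletes it for you. A minimal sketch of the create/use/remove lifecycle:

import shutil
import tempfile

# mkdtemp creates the directory securely and returns its path.
work_dir = tempfile.mkdtemp(prefix='example-')
try:
    pass  # use work_dir as scratch space
finally:
    # The caller owns cleanup; mkdtemp never removes the directory itself.
    shutil.rmtree(work_dir)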
Example 51
Project: gitian-builder Source File: gitian_updater.py
def run():
full_prog = sys.argv[0]
prog = os.path.basename(full_prog)
parser = argparse.ArgumentParser(description='Download and verify a gitian package')
parser.add_argument('-u', '--url', metavar='URL', type=str, nargs='+', required=False,
help='one or more URLs where the package can be found')
parser.add_argument('-c', '--config', metavar='CONF', type=str, required=not have_injected_config,
help='a configuration file')
parser.add_argument('-d', '--dest', metavar='DEST', type=str, required=False,
help='the destination directory for unpacking')
parser.add_argument('-q', '--quiet', action='append_const', const=1, default=[], help='be quiet')
parser.add_argument('-f', '--force', action='store_true', help='force downgrades and such')
parser.add_argument('-n', '--dryrun', action='store_true', help='do not actually copy to destination')
parser.add_argument('-m', '--customize', metavar='OUTPUT', type=str, help='generate a customized version of the script with the given config')
parser.add_argument('-w', '--wait', type=float, metavar='HOURS', help='observe a waiting period or use zero for no waiting')
parser.add_argument('-g', '--gpg', metavar='GPG', type=str, help='path to GnuPG')
parser.add_argument('-p', '--post', metavar='COMMAND', type=str, help='Run after a successful install')
args = parser.parse_args()
quiet = len(args.quiet)
if args.config:
f = file(args.config, 'r')
if args.customize:
s = file(full_prog, 'r')
script = s.read()
s.close()
config = f.read()
script = script.replace(inject_config_string, config)
s = file(args.customize, 'w')
s.write(script)
s.close()
os.chmod(args.customize, 0750)
sys.exit(0)
config = yaml.safe_load(f)
f.close()
else:
config = yaml.safe_load(injected_config)
dest_path = args.dest
if not dest_path:
parser.error('argument -d/--dest is required unless -m is specified')
if args.wait is not None:
config['waiting_period'] = args.wait
gpg_path = args.gpg
if not gpg_path:
gpg_path = 'gpg'
rsses = []
if args.url:
urls = [{ 'url' : url, 'version_url' : None} for url in args.url]
else:
urls = config.get('urls')
if not urls:
parser.error('argument -u/--url is required since config does not specify it')
if config.has_key('rss'):
rsses = config['rss']
# TODO: rss, atom, etc.
old_manifest = None
if path.exists(dest_path):
files = os.listdir(dest_path)
if path.dirname(full_prog) == dest_path:
files.remove(prog)
if not files.count('.gitian-manifest') and len(files) > 0:
print>>sys.stderr, "destination already exists, no .gitian-manifest and directory not empty. Please empty destination."
sys.exit(1)
f = file(os.path.join(dest_path,'.gitian-manifest'), 'r')
old_manifest = yaml.load(f, OrderedDictYAMLLoader)
f.close()
if config.get('waiting_period', 0) > 0:
waiting_file = path.join(dest_path, '.gitian-waiting')
if path.exists(waiting_file):
f = file(waiting_file, 'r')
waiting = yaml.load(f)
f.close()
wait_start = waiting['time']
out_manifest = waiting['out_manifest']
waiting_path = waiting['waiting_path']
wait_time = wait_start + config['waiting_period'] * 3600 - time.time()
if wait_time > 0:
print>>sys.stderr, "Waiting another %.2f hours before applying update in %s"%(wait_time / 3600, waiting_path)
sys.exit(100)
os.remove(waiting_file)
if args.dryrun:
print>>sys.stderr, "Dry run, not copying"
else:
copy_to_destination(path.join(waiting_path, 'unpack'), dest_path, out_manifest, old_manifest)
if args.post:
os.system(args.post)
if quiet == 0:
print>>sys.stderr, "Copied from waiting area to destination"
shutil.rmtree(waiting_path)
sys.exit(0)
temp_dir = tempfile.mkdtemp('', prog)
atexit.register(remove_temp, temp_dir)
package_file = path.join(temp_dir, 'package')
downloaded = False
checked = False
if rsses:
import libxml2
for rss in rsses:
try:
feed = libxml2.parseDoc(urllib2.urlopen(rss['url']).read())
url = None
release = None
# Find the first matching node
for node in feed.xpathEval(rss['xpath']):
m = re.search(rss['pattern'], str(node))
if m:
if len(m.groups()) > 0:
release = m.group(1)
url = str(node)
break
# Make sure it's a new release
if old_manifest and release == old_manifest['release'] and not args.force:
checked = True
else:
try:
download(url, package_file)
downloaded = True
break
except:
print>>sys.stderr, "could not download from %s, trying next rss"%(url)
pass
except:
print>>sys.stderr, "could read not from rss %s"%(rss)
pass
if not downloaded:
for url in urls:
try:
release = None
if url['version_url']:
f = urllib2.urlopen(url['version_url'])
release = f.read(100).strip()
f.close()
if old_manifest and release == old_manifest['release'] and not args.force:
checked = True
else:
download(url['url'], package_file)
downloaded = True
except:
print>>sys.stderr, "could not download from %s, trying next url"%(url)
if not downloaded:
if checked:
if quiet == 0:
print>>sys.stderr, "same release, not downloading"
else:
print>>sys.stderr, "out of places to try downloading from, try later"
sys.exit(2)
unpack_dir = path.join(temp_dir, 'unpack')
files = extract(unpack_dir, package_file)
import_keys(gpg_path, temp_dir, config)
(success, assertions, out_manifest) = get_assertions(gpg_path, temp_dir, unpack_dir, files)
if old_manifest:
check_name_and_version(out_manifest, old_manifest)
if not success and quiet <= 1:
print>>sys.stderr, "There were errors getting assertions"
total_weight = check_assertions(config, assertions)
if total_weight is None:
print>>sys.stderr, "There were errors checking assertions, build is untrusted, aborting"
sys.exit(5)
if quiet == 0:
print>>sys.stderr, "Successful with signature weight %d"%(total_weight)
if config.get('waiting_period', 0) > 0 and path.exists(dest_path):
waiting_path = tempfile.mkdtemp('', prog)
shutil.copytree(unpack_dir, path.join(waiting_path, 'unpack'))
f = file(path.join(dest_path, '.gitian-waiting'), 'w')
yaml.dump({'time': time.time(), 'out_manifest': out_manifest, 'waiting_path': waiting_path}, f)
f.close()
if quiet == 0:
print>>sys.stderr, "Started waiting period"
else:
if args.dryrun:
print>>sys.stderr, "Dry run, not copying"
else:
copy_to_destination(unpack_dir, dest_path, out_manifest, old_manifest)
if args.post:
os.system(args.post)
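Two details in the call above are easy to misread: mkdtemp's first two positional parameters are suffix then prefix, so mkdtemp('', prog) uses the program name as the prefix; and cleanup is delegated to atexit rather than a finally block. A minimal sketch of that pattern, with remove_temp as a hypothetical stand-in for the helper defined elsewhere in gitian_updater.py:

import atexit
import shutil
import tempfile

def remove_temp(path):
    # Hypothetical stand-in for the module's own cleanup helper.
    shutil.rmtree(path, ignore_errors=True)

temp_dir = tempfile.mkdtemp('', 'gitian-updater')  # positional args: suffix, then prefix
atexit.register(remove_temp, temp_dir)  # runs on normal interpreter exit, including sys.exit()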
Example 52
Project: anima Source File: test_previs.py
def setUp(self):
"""create test data
"""
database_url = 'sqlite:///:memory:'
db.setup({'sqlalchemy.url': database_url})
db.init()
self.temp_repo_path = tempfile.mkdtemp()
self.user1 = User(
name='User 1',
login='User 1',
email='[email protected]',
password='12345'
)
self.repo1 = Repository(
name='Test Project Repository',
linux_path=self.temp_repo_path,
windows_path=self.temp_repo_path,
osx_path=self.temp_repo_path
)
self.status_new = Status.query.filter_by(code='NEW').first()
self.status_wip = Status.query.filter_by(code='WIP').first()
self.status_comp = Status.query.filter_by(code='CMPL').first()
self.task_template = FilenameTemplate(
name='Task Template',
target_entity_type='Task',
path='{{project.code}}/'
'{%- for parent_task in parent_tasks -%}'
'{{parent_task.nice_name}}/'
'{%- endfor -%}',
filename='{{version.nice_name}}'
'_v{{"%03d"|format(version.version_number)}}',
)
self.asset_template = FilenameTemplate(
name='Asset Template',
target_entity_type='Asset',
path='{{project.code}}/'
'{%- for parent_task in parent_tasks -%}'
'{{parent_task.nice_name}}/'
'{%- endfor -%}',
filename='{{version.nice_name}}'
'_v{{"%03d"|format(version.version_number)}}',
)
self.shot_template = FilenameTemplate(
name='Shot Template',
target_entity_type='Shot',
path='{{project.code}}/'
'{%- for parent_task in parent_tasks -%}'
'{{parent_task.nice_name}}/'
'{%- endfor -%}',
filename='{{version.nice_name}}'
'_v{{"%03d"|format(version.version_number)}}',
)
self.sequence_template = FilenameTemplate(
name='Sequence Template',
target_entity_type='Sequence',
path='{{project.code}}/'
'{%- for parent_task in parent_tasks -%}'
'{{parent_task.nice_name}}/'
'{%- endfor -%}',
filename='{{version.nice_name}}'
'_v{{"%03d"|format(version.version_number)}}',
)
self.structure = Structure(
name='Project Structure',
templates=[self.task_template, self.asset_template,
self.shot_template, self.sequence_template]
)
self.project_status_list = StatusList(
name='Project Statuses',
target_entity_type='Project',
statuses=[self.status_new, self.status_wip, self.status_comp]
)
self.image_format = ImageFormat(
name='HD 1080',
width=1920,
height=1080,
pixel_aspect=1.0
)
# create a test project
self.project = Project(
name='Test Project',
code='TP',
repository=self.repo1,
status_list=self.project_status_list,
structure=self.structure,
image_format=self.image_format
)
# create task hierarchy
#
# ASSETS
#
self.assets = Task(
name='Assets',
project=self.project,
responsible=[self.user1]
)
#
# SEQUENCES
#
self.sequences = Task(
name='Sequences',
project=self.project,
responsible=[self.user1]
)
self.seq001 = Sequence(
name='Seq001',
code='Seq001',
parent=self.sequences
)
self.scene_task = Task(
name='001_IST',
parent=self.seq001
)
self.scene_previs_type = Type(
name='Scene Previs',
code='Scene Previs',
target_entity_type='Task'
)
self.scene_previs = Task(
name='Scene Previs',
parent=self.scene_task,
type=self.scene_previs_type
)
self.shots = Task(
name='Shots',
parent=self.scene_task
)
self.shot1 = Shot(
name='Seq001_001_IST_0010',
code='Seq001_001_IST_0010',
parent=self.shots
)
# create shot tasks
self.previs = Task(
name='Previs',
parent=self.shot1
)
self.camera = Task(
name='Camera',
parent=self.shot1
)
self.animation = Task(
name='Animation',
parent=self.shot1
)
self.scene_assembly = Task(
name='SceneAssembly',
parent=self.shot1
)
self.lighting = Task(
name='Lighting',
parent=self.shot1
)
self.comp = Task(
name='Comp',
parent=self.shot1
)
# create maya files
self.maya_env = Maya()
pm.newFile(force=True)
sm = pm.PyNode('sequenceManager1')
seq1 = sm.create_sequence('001_IST')
# create 3 shots
shot1 = seq1.create_shot('shot1')
shot2 = seq1.create_shot('shot2')
shot3 = seq1.create_shot('shot3')
# set shot ranges
shot1.startFrame.set(1)
shot1.endFrame.set(100)
shot2.startFrame.set(101)
shot2.endFrame.set(200)
shot2.sequenceStartFrame.set(101)
shot3.startFrame.set(201)
shot3.endFrame.set(300)
shot3.sequenceStartFrame.set(201)
# save the file under scene previs
v = Version(task=self.scene_previs)
self.maya_env.save_as(v)
pm.newFile(force=1)
print(v.absolute_full_path)
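When mkdtemp backs a unittest fixture, as temp_repo_path does here, registering removal with addCleanup keeps the directory from leaking even if a later line of setUp raises. A minimal sketch, independent of the anima test suite:

import os
import shutil
import tempfile
import unittest

class RepoTestCase(unittest.TestCase):
    def setUp(self):
        # A fresh, uniquely named directory per test.
        self.temp_repo_path = tempfile.mkdtemp()
        # Registered cleanups run even when setUp fails after this point.
        self.addCleanup(shutil.rmtree, self.temp_repo_path, True)

    def test_repo_path_exists(self):
        self.assertTrue(os.path.isdir(self.temp_repo_path))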
Example 53
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--executable', help='override executable.')
parser.add_argument('-v', '--verbose', help='print more diagnostic messages.',
action='store_true')
parser.add_argument('-l', '--list', help='list all tests.',
action='store_true')
parser.add_argument('--list-exes',
help='list all executables needed for the tests.',
action='store_true')
parser.add_argument('-r', '--rebase',
help='rebase a test to its current output.',
action='store_true')
parser.add_argument('-j', '--jobs', help='number of jobs to use to run tests',
type=int, default=multiprocessing.cpu_count())
parser.add_argument('-t', '--timeout', type=float, default=DEFAULT_TIMEOUT,
help='per test timeout in seconds')
parser.add_argument('patterns', metavar='pattern', nargs='*',
help='test patterns.')
options = parser.parse_args(args)
if options.patterns:
pattern_re = '|'.join(fnmatch.translate('*%s*' % p)
for p in options.patterns)
else:
pattern_re = '.*'
test_names = FindTestFiles(SCRIPT_DIR, '.c', pattern_re)
if options.list:
for test_name in test_names:
print test_name
return 0
if not test_names:
print 'no tests match that filter'
return 1
if options.executable:
if not os.path.exists(options.executable):
parser.error('executable %s does not exist' % options.executable)
options.executable = os.path.abspath(options.executable)
isatty = os.isatty(1)
status = Status(options.verbose)
infos = GetAllTestInfo(test_names, status)
if options.list_exes:
exes = set([info.exe for info in infos])
if None in exes:
exes.remove(None)
exes.add(os.path.relpath(DEFAULT_EXE, os.getcwd()))
print '\n'.join(exes)
return 0
inq = multiprocessing.Queue()
test_count = 0
for info in infos:
if info.skip:
status.Skipped(info)
continue
inq.put(info)
test_count += 1
outq = multiprocessing.Queue()
num_proc = options.jobs
processes = []
status.Start(test_count)
def Worker(i, options, inq, outq):
try:
while True:
try:
info = inq.get(False)
try:
out = info.Run(options.timeout, temp_dir, options.executable)
except Exception as e:
outq.put((info, e))
continue
outq.put((info, out))
except Queue.Empty:
# Seems this can be fired even when the queue isn't actually empty.
# Double-check, via inq.empty()
if inq.empty():
break
except KeyboardInterrupt:
pass
temp_dir = tempfile.mkdtemp(prefix='wasm-e2e-')
try:
for i, p in enumerate(range(num_proc)):
proc = multiprocessing.Process(target=Worker,
args=(i, options, inq, outq))
processes.append(proc)
proc.start()
finished_tests = 0
while finished_tests < test_count:
try:
info, result = outq.get(True, 0.01)
except Queue.Empty:
status.Timeout()
continue
finished_tests += 1
try:
if isinstance(result, Exception):
raise result
stdout, stderr, returncode, duration = result
if returncode != info.expected_error:
# This test has already failed, but diff it anyway.
msg = 'expected error code %d, got %d.' % (info.expected_error,
returncode)
try:
info.Diff(stdout, stderr)
except Error as e:
msg += '\n' + str(e)
raise Error(msg)
else:
if options.rebase:
info.Rebase(stdout, stderr)
else:
info.Diff(stdout, stderr)
status.Passed(info, duration)
except Exception as e:
status.Failed(info, str(e))
except KeyboardInterrupt:
for proc in processes:
proc.join()
finally:
for proc in processes:
proc.terminate()
proc.join()
shutil.rmtree(temp_dir)
status.Clear()
ret = 0
if status.failed:
sys.stderr.write('**** FAILED %s\n' % ('*' * (80 - 14)))
for info in status.failed_tests:
sys.stderr.write('- %s\n' % info.name)
ret = 1
status.Print()
return ret
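The driver above shows the usual lifetime for a shared scratch directory: create it before spawning workers, pass the path down, and remove it in a finally block so failures and KeyboardInterrupt still clean up. A reduced sketch of that shape, with run_tests as a hypothetical stand-in for the queue/worker machinery:

import shutil
import tempfile

def run_tests(scratch):
    # Hypothetical stand-in for the worker pool above.
    pass

temp_dir = tempfile.mkdtemp(prefix='wasm-e2e-')  # a recognizable prefix makes leftovers easy to spot
try:
    run_tests(temp_dir)
finally:
    shutil.rmtree(temp_dir)  # reached on success, failure, or Ctrl-C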
Example 54
Project: esky Source File: f_bbfreeze.py
def freeze(dist):
"""Freeze the given distribution data using bbfreeze."""
includes = dist.includes
excludes = dist.excludes
options = dist.freezer_options
# Merge in any includes/excludes given in freezer_options
for inc in options.pop("includes",()):
includes.append(inc)
for exc in options.pop("excludes",()):
excludes.append(exc)
if "pypy" not in includes and "pypy" not in excludes:
excludes.append("pypy")
# Freeze up the given scripts
f = bbfreeze.Freezer(dist.freeze_dir,includes=includes,excludes=excludes)
for (nm,val) in options.iteritems():
setattr(f,nm,val)
f.addModule("esky")
tdir = tempfile.mkdtemp()
try:
for exe in dist.get_executables():
f.addScript(exe.script,gui_only=exe.gui_only)
if "include_py" not in options:
f.include_py = False
if "linkmethod" not in options:
# Since we're going to zip it up, the benefits of hard-
# or sym-linking the loader exe will mostly be lost.
f.linkmethod = "loader"
f()
finally:
shutil.rmtree(tdir)
# Copy data files into the freeze dir
for (src,dst) in dist.get_data_files():
dst = os.path.join(dist.freeze_dir,dst)
dstdir = os.path.dirname(dst)
if not os.path.isdir(dstdir):
dist.mkpath(dstdir)
dist.copy_file(src,dst)
# Copy package data into the library.zip
lib = zipfile.ZipFile(os.path.join(dist.freeze_dir,"library.zip"),"a")
for (src,arcnm) in dist.get_package_data():
lib.write(src,arcnm)
lib.close()
# Create the bootstrap code, using custom code if specified.
# For win32 we include a special chainloader that can suck the selected
# version into the running process rather than spawn a new proc.
code_source = ["__name__ = '__main__'"]
esky_name = dist.distribution.get_name()
code_source.append("__esky_name__ = %r" % (esky_name,))
code_source.append(inspect.getsource(esky.bootstrap))
if dist.compile_bootstrap_exes:
if sys.platform == "win32":
# The pypy-compiled bootstrap exe will try to load a python env
# into its own process and run this "take2" code to bootstrap.
take2_code = code_source[1:]
take2_code.append(_CUSTOM_WIN32_CHAINLOADER)
take2_code.append(dist.get_bootstrap_code())
take2_code = compile("\n".join(take2_code),"<string>","exec")
take2_code = marshal.dumps(take2_code)
clscript = "import marshal; "
clscript += "exec marshal.loads(%r); " % (take2_code,)
clscript = clscript.replace("%","%%")
clscript += "chainload(\"%s\")"
# Here's the actual source for the compiled bootstrap exe.
from esky.bdist_esky import pypy_libpython
code_source.append(inspect.getsource(pypy_libpython))
code_source.append("_PYPY_CHAINLOADER_SCRIPT = %r" % (clscript,))
code_source.append(_CUSTOM_PYPY_CHAINLOADER)
code_source.append(dist.get_bootstrap_code())
code_source = "\n".join(code_source)
for exe in dist.get_executables(normalise=False):
if not exe.include_in_bootstrap_env:
continue
bsexe = dist.compile_to_bootstrap_exe(exe,code_source)
if sys.platform == "win32":
fexe = os.path.join(dist.freeze_dir,exe.name)
winres.copy_safe_resources(fexe,bsexe)
# We may also need the bundled MSVCRT libs
if sys.platform == "win32":
for nm in os.listdir(dist.freeze_dir):
if is_core_dependency(nm) and nm.startswith("Microsoft"):
dist.copy_to_bootstrap_env(nm)
else:
if sys.platform == "win32":
code_source.append(_CUSTOM_WIN32_CHAINLOADER)
code_source.append(dist.get_bootstrap_code())
code_source.append("bootstrap()")
code_source = "\n".join(code_source)
# For non-compiled bootstrap exe, store the bootstrapping code
# into the library.zip as __main__.
maincode = imp.get_magic() + struct.pack("<i",0)
maincode += marshal.dumps(compile(code_source,"__main__.py","exec"))
# Create code for a fake esky.bootstrap module
eskycode = imp.get_magic() + struct.pack("<i",0)
eskycode += marshal.dumps(compile("","esky/__init__.py","exec"))
eskybscode = imp.get_magic() + struct.pack("<i",0)
eskybscode += marshal.dumps(compile("","esky/bootstrap.py","exec"))
# Store bootstrap code as __main__ in the bootstrap library.zip.
# The frozen library.zip might have the loader prepended to it, but
# that gets overwritten here.
bslib_path = dist.copy_to_bootstrap_env("library.zip")
bslib = zipfile.PyZipFile(bslib_path,"w",zipfile.ZIP_STORED)
cdate = (2000,1,1,0,0,0)
bslib.writestr(zipfile.ZipInfo("__main__.pyc",cdate),maincode)
bslib.writestr(zipfile.ZipInfo("esky/__init__.pyc",cdate),eskycode)
bslib.writestr(zipfile.ZipInfo("esky/bootstrap.pyc",cdate),eskybscode)
bslib.close()
# Copy any core dependencies
if "fcntl" not in sys.builtin_module_names:
for nm in os.listdir(dist.freeze_dir):
if nm.startswith("fcntl"):
dist.copy_to_bootstrap_env(nm)
for nm in os.listdir(dist.freeze_dir):
if is_core_dependency(nm):
dist.copy_to_bootstrap_env(nm)
# Copy the bbfreeze interpreter if necessary
if f.include_py:
if sys.platform == "win32":
dist.copy_to_bootstrap_env("py.exe")
else:
dist.copy_to_bootstrap_env("py")
# Copy the loader program for each script.
# We explicitly strip the loader binaries, in case they were made
# by linking to the library.zip.
for exe in dist.get_executables(normalise=False):
if not exe.include_in_bootstrap_env:
continue
exepath = dist.copy_to_bootstrap_env(exe.name)
f.stripBinary(exepath)
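The mkdtemp-then-finally-rmtree pairing in freeze() is exactly what Python 3's tempfile.TemporaryDirectory packages as a context manager; where Python 3.2+ is available, the same guarantee reads as:

import tempfile

def build_artifacts(path):
    # Hypothetical placeholder for the freeze work done above.
    pass

# Equivalent to mkdtemp() plus a finally: shutil.rmtree(...)
with tempfile.TemporaryDirectory() as tdir:
    build_artifacts(tdir)
# The directory and its contents are removed on leaving the with block.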
Example 55
Project: baculafs Source File: FileSystem.py
def initialize(self, version):
'''
initialize database, catalog
'''
self._setup_logging()
# batch mode
self.batch_mode = (self.batch_list or
self.batch_bsr or
self.batch_extract)
# disable INFO level logging in batch mode
if self.batch_mode and self.loglevel == logging.INFO:
self.loglevel = logging.WARNING
self.logger.setLevel(self.loglevel)
self.logger.info('Populating file system ... ')
# setup cache
if self.user_cache_path:
self.cache_prefix = self.user_cache_path
else:
self.cache_prefix = tempfile.mkdtemp(prefix='baculafs-')
self.cache_path = os.path.normpath(self.cache_prefix + '/files')
makedirs(self.cache_path)
self.cache_bsrpath = os.path.normpath(self.cache_prefix + '/bsr')
makedirs(self.cache_bsrpath)
self.cache_symlinks = os.path.normpath(self.cache_prefix + '/symlinks')
makedirs(self.cache_symlinks)
# test for old version (2.x) of bacula
self.bsr_compat = int(version[0]) < 3
if self.bsr_compat:
self.logger.debug('Detected old Bacula: %s' % version)
# test access to sd conf file
open(self.conf, 'r').close()
# init bextract failure pattern
self.fail_pattern = (
'Mount Volume "([^"]+)" on device "%s" (.*) '
'and press return when ready:' %
self.device)
# init database and catalog
self.db = Database(self.driver,
self.host,
self.port,
self.database,
self.username,
self.password,
self.logger)
self.catalog = Catalog(self.db)
self.base64 = Base64()
files = self.catalog.query(self.client,
self.fileset,
self.datetime,
self.recent_job,
self.joblist)
# validated values
self.client = self.catalog.client
self.fileset = self.catalog.fileset[1]
self.datetime = self.catalog.datetime
# we don't need the database anymore
self.db.close()
prefetches = []
difflist = {}
# validate prefetch conditions
if self.prefetch_everything:
self.prefetch_recent = False
self.prefetch_regex = None
self.prefetch_diff = None
self.prefetch_difflist = None
self.prefetch_list = None
self.prefetch_symlinks = True
if self.prefetch_regex:
try:
regex = re.compile(self.prefetch_regex)
self.prefetch_attrs = True
except:
# bad regex: show traceback and ignore
self.logger.warning(traceback.format_exc())
self.prefetch_regex = None
if self.prefetch_diff:
self.prefetch_diff = os.path.normpath(
os.path.expanduser(self.prefetch_diff))
try:
if os.path.isdir(self.prefetch_diff):
self.prefetch_symlinks = True
else:
self.prefetch_diff = None
except:
# can't access target directory: show traceback and ignore
self.logger.warning(traceback.format_exc())
self.prefetch_diff = None
if self.prefetch_difflist:
self.prefetch_difflist = os.path.normpath(
os.path.expanduser(self.prefetch_difflist))
try:
difflistfile = (sys.stdin
if self.prefetch_difflist == '-'
else open(self.prefetch_difflist, 'rt'))
for line in difflistfile.readlines():
date = ' '.join(line.split()[:5])
difflist[line[(len(date) + 1):].strip()] = \
time.strptime(date, '%a %b %d %H:%M:%S %Y')
difflistfile.close()
self.prefetch_symlinks = True
except:
# can't access/parse difflist: show traceback and ignore
self.logger.warning(traceback.format_exc())
self.prefetch_difflist = None
if self.prefetch_list:
self.prefetch_list = os.path.normpath(
os.path.expanduser(self.prefetch_list))
try:
listfile = (sys.stdin
if self.prefetch_list == '-'
else open(self.prefetch_list, 'rt'))
matchlist = [line.strip() for line in listfile.readlines()]
listfile.close()
self.prefetch_symlinks = True
except:
# can't access/parse list: show traceback and ignore
self.logger.warning(traceback.format_exc())
self.prefetch_list = None
if self.prefetch_recent:
self.prefetch_symlinks = True
if self.prefetch_symlinks:
self.prefetch_attrs = True
if 'use_ino' in self.fuse_args.optlist:
self.use_ino = True
self.prefetch_attrs = True # must figure out max st_ino
for file in files:
head = file[0]
tail = file[1]
# handle windows directories
if not head.startswith('/'):
head = '/' + head
# make file entry
if self.prefetch_attrs:
entry = file[2:] + self._bacula_stat(file[-2])
# find max st_ino
if self.use_ino:
if entry[-1].st_ino > self.max_ino:
self.max_ino = entry[-1].st_ino
# determine if we need to prefetch this entry
filepath = head + tail
if (not stat.S_ISDIR(entry[-1].st_mode) and
(self.prefetch_everything or
(self.prefetch_recent and
file[3] == self.catalog.most_recent_jobid) or
(self.prefetch_regex and
regex.search(filepath)) or
(self.prefetch_diff and
not self._match_stat(self.prefetch_diff + filepath,
entry[-1])) or
(self.prefetch_difflist and
(filepath[1:] not in difflist or
difflist[filepath[1:]][:-1] != \
time.localtime(entry[-1].st_mtime)[:-1])) or
(self.prefetch_list and
filepath in matchlist) or
(self.prefetch_symlinks and
stat.S_ISLNK(entry[-1].st_mode)))):
prefetches.append(filepath)
else:
entry = file[2:] + (None,) # stat info placeholder
# new directory
if head not in self.dirs:
self.dirs[head] = {}
# add parent directories
self._add_parent_dirs(head)
# directories are added to their parents
if head != '/' and tail == '':
head, tail = self._split(head[:-1])
# and finally
self.dirs[head][tail] = entry
# fix st_ino
if self.use_ino:
self._update_inodes('/')
npf = len(prefetches)
if npf > 0:
self.logger.info('Prefetching %d objects ... ' % npf)
self._extract(prefetches)
self.logger.debug('Cache directory is: %s' % self.cache_prefix)
self.joblist = ' '.join([str(job[0]) for job in self.catalog.jobs])
self.logger.debug('Job ids in file system: %s' % self.joblist)
self.logger.info('BaculaFS ready (%d files).' % len(files))
self._initialized = True
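BaculaFS only falls back to mkdtemp when the user has not pinned a cache location, then lays out named subdirectories under whichever prefix won. A condensed sketch of that decision, assuming nothing beyond an optional user-supplied path:

import os
import tempfile

def pick_cache_prefix(user_cache_path=None):
    # Respect an explicit cache path; otherwise create a fresh private one.
    prefix = user_cache_path or tempfile.mkdtemp(prefix='baculafs-')
    for name in ('files', 'bsr', 'symlinks'):
        os.makedirs(os.path.join(prefix, name), exist_ok=True)
    return prefix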
Example 56
Project: INGInious Source File: docker_agent.py
async def handle_new_job(self, message: BackendNewJob):
"""
Handles a new job: starts the grading container
"""
try:
self._logger.info("Received request for jobid %s", message.job_id)
course_id = message.course_id
task_id = message.task_id
debug = message.debug
environment_name = message.environment
enable_network = message.enable_network
time_limit = message.time_limit
hard_time_limit = message.hard_time_limit or time_limit * 3
mem_limit = message.mem_limit
if not os.path.exists(os.path.join(self.task_directory, course_id, task_id)):
self._logger.warning("Task %s/%s unavailable on this agent", course_id, task_id)
await self.send_job_result(message.job_id, "crash",
'Task unavailable on agent. Please retry later, the agents should synchronize soon. If the error '
'persists, please contact your course administrator.')
return
# Check for realistic memory limit value
if mem_limit < 20:
mem_limit = 20
elif mem_limit > self._max_memory_per_slot:
self._logger.warning("Task %s/%s ask for too much memory (%dMB)! Available: %dMB", course_id, task_id, mem_limit, self._max_memory_per_slot)
await self.send_job_result(message.job_id, "crash", 'Not enough memory on agent (available: %dMB). Please contact your course administrator.' % self._max_memory_per_slot)
return
if environment_name not in self._containers:
self._logger.warning("Task %s/%s ask for an unknown environment %s (not in aliases)", course_id, task_id, environment_name)
await self.send_job_result(message.job_id, "crash", 'Unknown container. Please contact your course administrator.')
return
environment = self._containers[environment_name]["id"]
# Handle ssh debugging
ssh_port = None
if debug == "ssh":
# allow 30 minutes of real time.
time_limit = 30 * 60
hard_time_limit = 30 * 60
# select a port
if len(self.ssh_ports) == 0:
self._logger.warning("User asked for an ssh debug but no ports are available")
await self.send_job_result(message.job_id, "crash", 'No slots are available for SSH debug right now. Please retry later.')
return
ssh_port = self.ssh_ports.pop()
# Create directories for storing all the data for the job
try:
container_path = tempfile.mkdtemp(dir=self.tmp_dir)
except Exception as e:
self._logger.error("Cannot make container temp directory! %s", str(e), exc_info=True)
await self.send_job_result(message.job_id, "crash", 'Cannot make container temp directory.')
if ssh_port is not None:
self.ssh_ports.add(ssh_port)
return
task_path = os.path.join(container_path, 'task') # tmp_dir/id/task/
sockets_path = os.path.join(container_path, 'sockets') # tmp_dir/id/socket/
student_path = os.path.join(task_path, 'student') # tmp_dir/id/task/student/
systemfiles_path = os.path.join(task_path, 'systemfiles') # tmp_dir/id/task/systemfiles/
# Create the needed directories
os.mkdir(sockets_path)
os.chmod(container_path, 0o777)
os.chmod(sockets_path, 0o777)
# TODO: avoid copy
copytree(os.path.join(self.task_directory, course_id, task_id), task_path)
os.chmod(task_path, 0o777)
if not os.path.exists(student_path):
os.mkdir(student_path)
os.chmod(student_path, 0o777)
# Run the container
try:
container_id = await self._loop.run_in_executor(None, lambda: self._docker.create_container(environment, enable_network, mem_limit,
task_path, sockets_path, ssh_port))
except Exception as e:
self._logger.warning("Cannot create container! %s", str(e), exc_info=True)
await self.send_job_result(message.job_id, "crash", 'Cannot create container.')
rmtree(container_path)
if ssh_port is not None:
self.ssh_ports.add(ssh_port)
return
# Store info
future_results = asyncio.Future()
self._containers_running[container_id] = message, container_path, future_results
self._container_for_job[message.job_id] = container_id
self._student_containers_for_job[message.job_id] = set()
if ssh_port is not None:
self.running_ssh_debug[container_id] = ssh_port
try:
# Start the container
await self._loop.run_in_executor(None, lambda: self._docker.start_container(container_id))
except Exception as e:
self._logger.warning("Cannot start container! %s", str(e), exc_info=True)
await self.send_job_result(message.job_id, "crash", 'Cannot start container')
rmtree(container_path)
if ssh_port is not None:
self.ssh_ports.add(ssh_port)
return
# Talk to the container
self._loop.create_task(self.handle_running_container(message.job_id, container_id, message.inputdata, debug, ssh_port,
environment_name, mem_limit, time_limit, hard_time_limit,
sockets_path, student_path, systemfiles_path,
future_results))
# Ask the "cgroup" thread to verify the timeout/memory limit
await ZMQUtils.send(self._killer_watcher_push.get_push_socket(), KWPRegisterContainer(container_id, mem_limit, time_limit, hard_time_limit))
# Tell the backend/client the job has started
await ZMQUtils.send(self._backend_socket, AgentJobStarted(message.job_id))
except:
self._logger.exception("Exception in handle_new_job")
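The dir argument is the detail to notice here: mkdtemp(dir=self.tmp_dir) places every job's scratch tree under one agent-managed parent instead of the system default, so a single sweep of that parent reclaims everything. A minimal sketch, with tmp_dir as an assumed agent-wide path:

import os
import tempfile

tmp_dir = '/var/tmp/agent'  # assumed parent directory, created at agent startup
os.makedirs(tmp_dir, exist_ok=True)

# Each job gets a unique directory under the shared parent, not under /tmp.
container_path = tempfile.mkdtemp(dir=tmp_dir)
os.mkdir(os.path.join(container_path, 'sockets'))
os.chmod(container_path, 0o777)  # the agent loosens modes so containers can write here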
Example 57
Project: ges Source File: ges.py
def assisted_start(options):
_help = r'''
ges.py - Git Enablement Server v1.1
Note only the folder that contains folders and object that you normally see
in .git folder is considered a "repo folder." This means that either a
"bare" folder name or a working folder's ".git" folder will be a "repo" folder
discussed in the examples below.
This server automatically creates "bare" repo folders on push.
Note, the folder does NOT have to have ".git" in the name to be a "repo" folder.
You can name bare repo folders whatever you like. If the signature (right files
and folders are found inside) matches a typical git repo, it's a "repo."
Options:
--content_path (Defaults to random temp folder)
Serving contents of folder path passed in. Accepts relative paths,
including things like "./../" and resolves them against current path.
(If you set this to actual .git folder, you don't need to specify the
folder's name on URI as the git repo will be served at the root level
of the URI.)
If not specified, a random, temp folder is created in the OS-specific
temporary storage path. This folder will NOT be deleted after
server exits unless the switch "--remove_temp" is used.
--remove_temp (Defaults to False)
When --content_path is not specified, this server will create a folder
in a temporary file storage location that is OS-specific and will NOT
remove it after the server shuts down.
This switch, if included on command line, enables automatic removal of
the created folder and all of its contents.
--uri_marker (Defaults to '')
Acts as a "virtual folder" - separator between decorative URI portion
and the actual (relative to path_prefix) path that will be appended
to path_prefix and used for pulling an actual file.
The URI does not have to start with contents of repo_uri_marker. It can
be preceded by any number of "virtual" folders.
For --repo_uri_marker 'my' all of these will take you to the same repo:
http://localhost/my/HEAD
http://localhost/admysf/mylar/zxmy/my/HEAD
If you are using reverse proxy server, pick the virtual, decorative URI
prefix / path of your choice. This handler will cut and rebase the URI.
Default of '' means that no cutting marker is used, and whole URI after
FQDN is used to find file relative to path_prefix.
--port (Defaults to 8888)
--demo (Defaults to False)
You do not have to provide any arguments for this option. It's a switch.
If "--demo" is part of the command-line options, a sample tree of folders
with some repos will be extracted into the folder specified as content_path.
If --content_path was not specified (we use temp folder) and "--demo"
switch is present, we assume --remove_temp is on.
Examples:
ges.py
(no arguments)
A random temp folder is created on the file system and now behaves as the
root of the served git repos folder tree.
ges.py --demo
This server is shipped with a small demo tree of Git repositories. This
command deploys that tree into a temp folder and deletes that temp folder
after the server is shut down.
ges.py --content_path "~/somepath/repofolder" --uri_marker "myrepo"
Will serve chosen repo folder as http://localhost/myrepo/ or
http://localhost:8888/does/not/matter/what/you/type/here/myrepo/
This "repo uri marker" is useful for making a repo server appear as part of
a server applications structure while serving from behind a reverse proxy.
cd c:\myproject_workingfolder\.git
ges.py --port 80 --content_path '.'
This project's repo will be one and only served directly over
http://localhost/
'''
# options = dict([
# ['content_path',None],
# ['static_content_path', None],
# ['uri_marker',''],
# ['port', None],
# ['devel', False],
# ['demo',False],
# ['remove_temp',False]
# ])
# let's decide what port to serve on.
port = options['port']
if not port:
import socket
# let's see if we can reuse our preferred default of 8888
s = socket.socket()
try:
s.bind(('',8888))
ip, port = s.getsockname()
except:
pass
s.close()
del s
if not port:
# looks like our default of 8888 is already occupied.
# taking next available port.
s = socket.socket()
s.bind(('',0))
ip, port = s.getsockname()
s.close()
del s
options['port'] = port
# next we determine if the static server contents folder is visible to us.
if not options['static_content_path'] or not os.path.isfile(
os.path.join(
options['static_content_path'],
'static',
'favicon.ico'
)):
if sys.path[0] and os.path.isfile(os.path.join(sys.path[0],'static','favicon.ico')):
options['static_content_path'] = os.path.join(sys.path[0],'static')
else:
raise Exception('G.E.S.: Specified static content directory - "%s" - does not contain expected files. Please, provide correct "static_content_path" variable value.' % options['static_content_path'])
# now we pick a random temp folder for Git folders tree if none were specified.
if options['content_path']:
CONTENT_PATH_IS_TEMP = False
else:
import tempfile
import shutil
CONTENT_PATH_IS_TEMP = True
options['content_path'] = tempfile.mkdtemp()
if options['demo']:
import zipfile
demo_repos_zip = os.path.join(sys.path[0],'test','sample_tree_of_repos_v2.zip')
try:
zipfile.ZipFile(demo_repos_zip).extractall(options['content_path'])
except:
pass
if 'help' in options:
print _help
else:
app = assemble_ges_app(**options)
import wsgiserver
httpd = wsgiserver.CherryPyWSGIServer(('0.0.0.0',int(options['port'])),app)
if options['uri_marker']:
_s = '"/%s/".' % options['uri_marker']
example_URI = '''http://localhost:%s/whatever/you/want/here/%s/myrepo.git
(Note: "whatever/you/want/here" cannot include the "/%s/" segment)''' % (
options['port'],
options['uri_marker'],
options['uri_marker'])
else:
_s = 'not chosen.'
example_URI = 'http://localhost:%s/' % (options['port'])
print '''
===========================================================================
Run this command with "--help" option to see available command-line options
Chosen repo folders' base file system path:
%s
Starting GES server on port %s
URI segment indicating start of git repo folder name is %s
Application URI:
%s
Use Keyboard Interrupt key combination (usually CTRL+C) to stop the server
===========================================================================
''' % (os.path.abspath(options['content_path']),
options['port'],
_s,
example_URI)
# running with CherryPy's WSGI Server
try:
httpd.start()
except KeyboardInterrupt:
pass
finally:
httpd.stop()
if (CONTENT_PATH_IS_TEMP and options['remove_temp']) or (CONTENT_PATH_IS_TEMP and options['demo']):
shutil.rmtree(options['content_path'], True)
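The CONTENT_PATH_IS_TEMP flag is the pattern worth copying: record whether this process created the directory, and only then consider deleting it, so a user-supplied path is never destroyed. A reduced sketch of that bookkeeping:

import shutil
import tempfile

def resolve_content_path(options):
    # Only a directory this process created is a candidate for removal.
    created = not options.get('content_path')
    if created:
        options['content_path'] = tempfile.mkdtemp()
    return created

options = {'content_path': None, 'remove_temp': True}
created_here = resolve_content_path(options)
# ... serve files from options['content_path'] ...
if created_here and options['remove_temp']:
    shutil.rmtree(options['content_path'], True)  # True ignores removal errors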
Example 58
Project: make-profile-pkg Source File: make_profile_pkg.py
def main():
usage = "%prog [options] path/to/mobileconfig/file"
o = optparse.OptionParser(usage=usage)
m_opts = optparse.OptionGroup(o, "Munki options")
m_opts.add_option("-m", "--munki-import", action="store_true",
default=False,
help=("Import resulting package into Munki. "))
m_opts.add_option("-d", "--munki-repo-destination", default=default_repo_destination,
help=("Destination directory in Munki repo. Defaults to '%s'. "
% default_repo_destination))
o.add_option_group(m_opts)
o.add_option("-o", "--output-dir", default=os.getcwd(),
help=("Output directory for built package and uninstall script. "
"Directory must already exist. Defaults to the current "
"working directory."))
o.add_option("-f", "--format-name", default=default_name_format_string,
metavar="FORMAT-STRING",
help=("A format string specifying the desired file/pkginfo name, which "
"may contain tokens that are substituted. Current tokens "
"supported are '%filename%' (name component of file's basename), "
"and '%id%' (profile's PayloadIdentifier key). "
"Defaults to '%filename%'."))
o.add_option("-p", "--installed-path", default=default_installed_path,
help=("Installed path for the profile. Defaults to '%s'. "
% default_installed_path))
o.add_option("--pkg-prefix", default=default_pkg_prefix,
help=("Installer pkg identifier prefix. Defaults to '%s'. "
% default_pkg_prefix))
o.add_option("-U", dest="username", metavar="USERNAME",
help=("Includes a '-U <username>' option that will be passed to the "
"`profiles` command. Please see the `profiles` manpage for "
"more details on the -U option. This is also only supported on "
"10.11 and up, and the package may fail if installed on an "
"earlier OS version."))
o.add_option("-v", "--version",
help=("Version of the built pkg. Defaults to 'YYYY.MM.DD' "
"derived from today's date."))
o.add_option("--delete-after-install", action="store_true",
default=False,
help=("Configure pkg postinstall script to remove mobileconfig file "
"after installation."))
o.add_option("--sign",
help=("Sign the resulting package with the specified identity."))
opts, args = o.parse_args()
if len(args) < 1:
o.print_help()
sys.exit(1)
if not opts.installed_path.startswith("/"):
print >> sys.stderr, (
"WARNING: Omitted leading slash for --installed-path %s, "
"automatically adding one." % opts.installed_path)
opts.installed_path = "/" + opts.installed_path
profile_path = args[0]
pkgbuild = "/usr/bin/pkgbuild"
munkiimport = "/usr/local/munki/munkiimport"
security = "/usr/bin/security"
req_executables = [pkgbuild]
if opts.munki_import:
req_executables.append(munkiimport)
for executable in req_executables:
if not os.path.isfile(executable) or not os.access(executable, os.X_OK):
sys.exit("A required exeuctable, '%s', could not be found "
"or is not executable!" % executable)
output_dir = opts.output_dir
if not os.path.isdir(output_dir) or not os.access(output_dir, os.W_OK):
sys.exit("Output directory '%s' either doesn't exist or is not writable!"
% output_dir)
# Grab the profile's identifier for use later in the uninstall_script
try:
pdata = plistlib.readPlist(profile_path)
except ExpatError as e:
print >> sys.stderr, (
"Profile is either malformed or signed. Attempting to "
"unsign the profile. Message: %s" % e.message)
try:
profile_data = subprocess.check_output([
security,
"cms",
"-D",
"-i", profile_path])
pdata = plistlib.readPlistFromString(profile_data)
except subprocess.CalledProcessError as e:
print >> sys.stderr, (
"Profile could not be unsigned.")
sys.exit("Error %s: %s" % (e.returncode, e.message))
try:
profile_identifier = pdata["PayloadIdentifier"]
except KeyError:
sys.exit("Expected 'PayloadIdentifier' key in profile, but none found!")
except ExpatError as e:
print >> sys.stderr, (
"Profile is malformed.")
sys.exit("Error: %s" % e.message)
# Grab other profile metadata for use in Munki's pkginfo
profile_display_name = pdata.get("PayloadDisplayName")
profile_description = pdata.get("PayloadDescription", '')
# Version
version = opts.version
if not version:
now = localtime()
version = "%04d.%02d.%02d" % (now.tm_year, now.tm_mon, now.tm_mday)
# Naming of item
profile_name = os.path.basename(profile_path).split(".mobileconfig")[0]
replaced_template = Template(re.sub("%(?P<token>.+?)%", "${\g<token>}", opts.format_name))
templatables = {
"filename": profile_name,
"id": profile_identifier
}
item_name = replaced_template.safe_substitute(templatables)
# Installer package-related
pkg_filename = "%s-%s.pkg" % (item_name, version)
pkg_identifier = "%s.%s" % (opts.pkg_prefix, item_name)
pkg_output_path = os.path.join(output_dir, pkg_filename)
root = tempfile.mkdtemp()
pkg_payload_destination = os.path.join(root, opts.installed_path.lstrip("/"))
profile_installed_path = os.path.join(
opts.installed_path, os.path.basename(profile_path))
os.makedirs(pkg_payload_destination)
shutil.copy(profile_path, pkg_payload_destination)
# -- postinstall script
script_root = tempfile.mkdtemp()
script_path = os.path.join(script_root, "postinstall")
config_profile = profile_name + '.mobileconfig'
additional_opts = ""
if opts.username:
additional_opts += "-U %s" % opts.username
install_script = """#!/bin/sh
if [ "$3" = "/" ] ; then
/usr/bin/profiles -I -F %s %s
else
/bin/mkdir -p "$3/private/var/db/ConfigurationProfiles/Setup"
/bin/cp %s %s
/bin/rm -f "$3/private/var/db/ConfigurationProfiles/Setup/.profileSetupDone"
fi
""" % (
quote(profile_installed_path),
quote(additional_opts),
"\"$3\"" + quote(profile_installed_path),
"\"$3\"" + quote('/private/var/db/ConfigurationProfiles/Setup/' + config_profile)
)
if opts.delete_after_install:
install_script += """\n/bin/rm -f %s""" % quote(profile_installed_path)
with open(script_path, "w") as fd:
fd.write(install_script)
os.chmod(script_path, 0755)
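Note that main() calls mkdtemp twice, mirroring pkgbuild's split between --root (the payload) and --scripts: the profile is copied into a path under the payload root that re-creates its installed location. A condensed sketch of that staging, with the profile path and install location as assumed inputs:

import os
import shutil
import tempfile

# Assumed inputs; any real .mobileconfig path and install location would do.
profile_path = os.path.join(tempfile.mkdtemp(), 'example.mobileconfig')
open(profile_path, 'w').close()  # placeholder profile so the sketch runs
installed_path = '/Profiles'

root = tempfile.mkdtemp()         # staged as pkgbuild's --root
script_root = tempfile.mkdtemp()  # staged as pkgbuild's --scripts
payload_dir = os.path.join(root, installed_path.lstrip('/'))  # lstrip keeps the join under root
os.makedirs(payload_dir)
shutil.copy(profile_path, payload_dir)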
Example 59
Project: anima Source File: test_version_updater.py
def setUp(self):
"""setup the tests
"""
# -----------------------------------------------------------------
# start of the setUp
# create the environment variable and point it to a temp directory
db.setup()
db.init()
self.temp_repo_path = tempfile.mkdtemp()
self.user1 = User(
name='User 1',
login='user1',
email='[email protected]',
password='12345'
)
db.DBSession.add(self.user1)
db.DBSession.commit()
# login as self.user1
from stalker import LocalSession
local_session = LocalSession()
local_session.store_user(self.user1)
local_session.save()
self.repo1 = Repository(
name='Test Project Repository',
linux_path=self.temp_repo_path,
windows_path=self.temp_repo_path,
osx_path=self.temp_repo_path
)
self.status_new = Status.query.filter_by(code='NEW').first()
self.status_wip = Status.query.filter_by(code='WIP').first()
self.status_comp = Status.query.filter_by(code='CMPL').first()
self.task_template = FilenameTemplate(
name='Task Template',
target_entity_type='Task',
path='{{project.code}}/'
'{%- for parent_task in parent_tasks -%}'
'{{parent_task.nice_name}}/'
'{%- endfor -%}',
filename='{{version.nice_name}}'
'_v{{"%03d"|format(version.version_number)}}',
)
self.asset_template = FilenameTemplate(
name='Asset Template',
target_entity_type='Asset',
path='{{project.code}}/'
'{%- for parent_task in parent_tasks -%}'
'{{parent_task.nice_name}}/'
'{%- endfor -%}',
filename='{{version.nice_name}}'
'_v{{"%03d"|format(version.version_number)}}',
)
self.shot_template = FilenameTemplate(
name='Shot Template',
target_entity_type='Shot',
path='{{project.code}}/'
'{%- for parent_task in parent_tasks -%}'
'{{parent_task.nice_name}}/'
'{%- endfor -%}',
filename='{{version.nice_name}}'
'_v{{"%03d"|format(version.version_number)}}',
)
self.sequence_template = FilenameTemplate(
name='Sequence Template',
target_entity_type='Sequence',
path='{{project.code}}/'
'{%- for parent_task in parent_tasks -%}'
'{{parent_task.nice_name}}/'
'{%- endfor -%}',
filename='{{version.nice_name}}'
'_v{{"%03d"|format(version.version_number)}}',
)
self.structure = Structure(
name='Project Structure',
templates=[self.task_template, self.asset_template,
self.shot_template, self.sequence_template]
)
self.project_status_list = StatusList(
name='Project Statuses',
target_entity_type='Project',
statuses=[self.status_new, self.status_wip, self.status_comp]
)
self.image_format = ImageFormat(
name='HD 1080',
width=1920,
height=1080,
pixel_aspect=1.0
)
# create a test project
self.project = Project(
name='Test Project',
code='TP',
repository=self.repo1,
status_list=self.project_status_list,
structure=self.structure,
image_format=self.image_format
)
self.task_status_list =\
StatusList.query.filter_by(target_entity_type='Task').first()
self.asset_status_list =\
StatusList.query.filter_by(target_entity_type='Asset').first()
self.shot_status_list =\
StatusList.query.filter_by(target_entity_type='Shot').first()
self.sequence_status_list =\
StatusList.query.filter_by(target_entity_type='Sequence').first()
self.character_type = Type(
name='Character',
code='CHAR',
target_entity_type='Asset'
)
# create a test series of root task
self.task1 = Task(
name='Test Task 1',
project=self.project
)
self.task2 = Task(
name='Test Task 2',
project=self.project
)
self.task3 = Task(
name='Test Task 3',
project=self.project
)
# then a couple of child tasks
self.task4 = Task(
name='Test Task 4',
parent=self.task1
)
self.task5 = Task(
name='Test Task 5',
parent=self.task1
)
self.task6 = Task(
name='Test Task 6',
parent=self.task1
)
# create a root asset
self.asset1 = Asset(
name='Asset 1',
code='asset1',
type=self.character_type,
project=self.project
)
# create a child asset
self.asset2 = Asset(
name='Asset 2',
code='asset2',
type=self.character_type,
parent=self.task4
)
# create a root Sequence
self.sequence1 = Sequence(
name='Sequence1',
code='SEQ1',
project=self.project
)
# create a child Sequence
self.sequence2 = Sequence(
name='Sequence2',
code='SEQ2',
parent=self.task2
)
# create a root Shot
self.shot1 = Shot(
name='SH001',
code='SH001',
project=self.project
)
# create a child Shot (child of a Sequence)
self.shot2 = Shot(
name='SH002',
code='SH002',
parent=self.sequence1
)
# create a child Shot (child of a child Sequence)
self.shot3 = Shot(
name='SH003',
code='SH003',
parent=self.sequence2
)
# commit everything
db.DBSession.add_all([
self.repo1, self.status_new, self.status_wip, self.status_comp,
self.project_status_list, self.project, self.task_status_list,
self.asset_status_list, self.shot_status_list,
self.sequence_status_list, self.task1, self.task2, self.task3,
self.task4, self.task5, self.task6, self.asset1, self.asset2,
self.shot1, self.shot2, self.shot3, self.sequence1, self.sequence2,
self.task_template, self.asset_template, self.shot_template,
self.sequence_template
])
db.DBSession.commit()
# now create versions
def create_version(task, take_name):
"""Creates a new version
:param task: the task
:param take_name: the take_name name
:return: the version
"""
# just renew the scene
#pymel.core.newFile(force=True)
v = Version(task=task, take_name=take_name)
v.update_paths()
db.DBSession.add(v)
db.DBSession.commit()
#self.maya_env.save_as(v)
return v
# asset2
self.version1 = create_version(self.asset2, 'Main')
self.version2 = create_version(self.asset2, 'Main')
self.version3 = create_version(self.asset2, 'Main')
self.version3.description = 'Test Description'
self.version4 = create_version(self.asset2, 'Take1')
self.version5 = create_version(self.asset2, 'Take1')
self.version6 = create_version(self.asset2, 'Take1')
# task5
self.version7 = create_version(self.task5, 'Main')
self.version8 = create_version(self.task5, 'Main')
self.version9 = create_version(self.task5, 'Main')
self.version10 = create_version(self.task5, 'Take1')
self.version11 = create_version(self.task5, 'Take1')
self.version12 = create_version(self.task5, 'Take1')
# task6
self.version13 = create_version(self.task6, 'Main')
self.version14 = create_version(self.task6, 'Main')
self.version15 = create_version(self.task6, 'Main')
self.version16 = create_version(self.task6, 'Take1')
self.version17 = create_version(self.task6, 'Take1')
self.version18 = create_version(self.task6, 'Take1')
# shot3
self.version19 = create_version(self.shot3, 'Main')
self.version20 = create_version(self.shot3, 'Main')
self.version21 = create_version(self.shot3, 'Main')
self.version22 = create_version(self.shot3, 'Take1')
self.version23 = create_version(self.shot3, 'Take1')
self.version24 = create_version(self.shot3, 'Take1')
# task3
self.version25 = create_version(self.task3, 'Main')
self.version26 = create_version(self.task3, 'Main')
self.version27 = create_version(self.task3, 'Main')
self.version28 = create_version(self.task3, 'Take1')
self.version29 = create_version(self.task3, 'Take1')
self.version30 = create_version(self.task3, 'Take1')
# asset1
self.version31 = create_version(self.asset1, 'Main')
self.version32 = create_version(self.asset1, 'Main')
self.version33 = create_version(self.asset1, 'Main')
self.version34 = create_version(self.asset1, 'Take1')
self.version35 = create_version(self.asset1, 'Take1')
self.version36 = create_version(self.asset1, 'Take1')
# shot2
self.version37 = create_version(self.shot2, 'Main')
self.version38 = create_version(self.shot2, 'Main')
self.version39 = create_version(self.shot2, 'Main')
self.version40 = create_version(self.shot2, 'Take1')
self.version41 = create_version(self.shot2, 'Take1')
self.version42 = create_version(self.shot2, 'Take1')
# shot1
self.version43 = create_version(self.shot1, 'Main')
self.version44 = create_version(self.shot1, 'Main')
self.version45 = create_version(self.shot1, 'Main')
self.version46 = create_version(self.shot1, 'Take1')
self.version47 = create_version(self.shot1, 'Take1')
self.version48 = create_version(self.shot1, 'Take1')
# +- task1
# | |
# | +- task4
# | | |
# | | +- asset2
# | | +- Main
# | | | +- version1
# | | | +- version2 (P)
# | | | +- version3 (P)
# | | +- Take1
# | | +- version4 (P)
# | | +- version5
# | | +- version6 (P)
# | |
# | +- task5
# | | +- Main
# | | | +- version7
# | | | +- version8
# | | | +- version9
# | | +- Take1
# | | +- version10
# | | +- version11
# | | +- version12 (P)
# | |
# | +- task6
# | +- Main
# | | +- version13
# | | +- version14
# | | +- version15
# | +- Take1
# | +- version16 (P)
# | +- version17
# | +- version18 (P)
# |
# +- task2
# | |
# | +- sequence2
# | |
# | +- shot3
# | +- Main
# | | +- version19
# | | +- version20
# | | +- version21
# | +- Take1
# | +- version22
# | +- version23
# | +- version24
# |
# +- task3
# | +- Main
# | | +- version25
# | | +- version26
# | | +- version27
# | +- Take1
# | +- version28
# | +- version29
# | +- version30
# |
# +- asset1
# | +- Main
# | | +- version31
# | | +- version32
# | | +- version33
# | +- Take1
# | +- version34
# | +- version35
# | +- version36
# |
# +- sequence1
# | |
# | +- shot2
# | +- Main
# | | +- version37
# | | +- version38
# | | +- version39
# | +- Take1
# | +- version40
# | +- version41
# | +- version42
# |
# +- shot1
# +- Main
# | +- version43
# | +- version44
# | +- version45
# +- Take1
# +- version46
# +- version47
# +- version48
# Start Condition:
#
# version15
# version12
# version5
# version2 -> has new published version (version3)
# version5 -> Referenced a second time
# version2 -> has new published version (version3)
# version12 -> Referenced a second time
# version5
# version2 -> has new published version (version3)
# version5
# version2 -> has new published version (version3)
# version45 -> no change
# version48 -> no change
#
# Expected Final Result
# version15A -> Derived from version15
# version12A -> Derived from version12
# version5A -> Derived from version5
# version3 -> has new published version (version3)
# version5A -> Derived from version5
# version3 -> has new published version (version3)
# version12A -> Derived from version12 - The second reference
# version5A -> Derived from version5
# version3 -> has new published version (version3)
# version5A -> Derived from version5
# version3 -> has new published version (version3)
# version45 -> no change
# version48 -> no change
# create a deep relation
self.version2.is_published = True
self.version3.is_published = True
# new scene
# version5 references version2
self.version5.inputs.append(self.version2)
self.version5.is_published = True
# version12 references version5
self.version12.inputs.append(self.version5)
self.version12.is_published = True
# version45 references version48
self.version45.is_published = True
self.version48.is_published = True
self.version45.inputs.append(self.version48)
# version15 references version12 and version48
self.version15.inputs.append(self.version12)
self.version15.inputs.append(self.version45)
# reference_resolution
self.reference_resolution = {
'root': [self.version12, self.version45],
'leave': [self.version48, self.version45],
'update': [self.version2],
'create': [self.version5, self.version12]
}
# create a buffer for extra created files, which are to be removed
self.remove_these_files_buffer = []
self.test_environment = TestEnvironment(name='Test Environment')
self.test_environment._version = self.version15
if not QtGui.QApplication.instance():
logger.debug('creating a new QApplication')
self.app = QtGui.QApplication(sys.argv)
else:
logger.debug('using the present QApplication: %s' % QtGui.qApp)
# self.app = QtGui.qApp
self.app = QtGui.QApplication.instance()
self.dialog = version_updater.MainDialog(
environment=self.test_environment,
reference_resolution=self.reference_resolution
)
Example 60
Project: deep_recommend_system Source File: mnist_replica.py
def main(unused_argv):
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
if FLAGS.download_only:
sys.exit(0)
if FLAGS.job_name is None or FLAGS.job_name == "":
raise ValueError("Must specify an explicit `job_name`")
if FLAGS.task_index is None or FLAGS.task_index =="":
raise ValueError("Must specify an explicit `task_index`")
print("job name = %s" % FLAGS.job_name)
print("task index = %d" % FLAGS.task_index)
#Construct the cluster and start the server
ps_spec = FLAGS.ps_hosts.split(",")
worker_spec = FLAGS.worker_hosts.split(",")
# Get the number of workers.
num_workers = len(worker_spec)
cluster = tf.train.ClusterSpec({
"ps": ps_spec,
"worker": worker_spec})
if not FLAGS.existing_servers:
# Not using existing servers. Create an in-process server.
server = tf.train.Server(
cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
if FLAGS.job_name == "ps":
server.join()
is_chief = (FLAGS.task_index == 0)
if FLAGS.num_gpus > 0:
if FLAGS.num_gpus < num_workers:
raise ValueError("number of gpus is less than number of workers")
# Avoid gpu allocation conflict: now allocate task_num -> #gpu
# for each worker in the corresponding machine
gpu = (FLAGS.task_index % FLAGS.num_gpus)
worker_device = "/job:worker/task:%d/gpu:%d" % (FLAGS.task_index, gpu)
elif FLAGS.num_gpus == 0:
# Just allocate the CPU to worker server
cpu = 0
worker_device = "/job:worker/task:%d/cpu:%d" % (FLAGS.task_index, cpu)
# The device setter will automatically place Variables ops on separate
# parameter servers (ps). The non-Variable ops will be placed on the workers.
# The ps use CPU and workers use corresponding GPU
with tf.device(
tf.train.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/cpu:0",
cluster=cluster)):
global_step = tf.Variable(0, name="global_step", trainable=False)
# Variables of the hidden layer
hid_w = tf.Variable(
tf.truncated_normal(
[IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
stddev=1.0 / IMAGE_PIXELS),
name="hid_w")
hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")
# Variables of the softmax layer
sm_w = tf.Variable(
tf.truncated_normal(
[FLAGS.hidden_units, 10],
stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
name="sm_w")
sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
# Ops: located on the worker specified with FLAGS.task_index
x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
y_ = tf.placeholder(tf.float32, [None, 10])
hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
hid = tf.nn.relu(hid_lin)
y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
if FLAGS.sync_replicas:
if FLAGS.replicas_to_aggregate is None:
replicas_to_aggregate = num_workers
else:
replicas_to_aggregate = FLAGS.replicas_to_aggregate
opt = tf.train.SyncReplicasOptimizerV2(
opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers,
name="mnist_sync_replicas")
train_step = opt.minimize(cross_entropy, global_step=global_step)
if FLAGS.sync_replicas:
local_init_op = opt.local_step_init_op
if is_chief:
local_init_op = opt.chief_init_op
ready_for_local_init_op = opt.ready_for_local_init_op
# Initial token and chief queue runners required by the sync_replicas mode
chief_queue_runner = opt.get_chief_queue_runner()
sync_init_op = opt.get_init_tokens_op()
init_op = tf.global_variables_initializer()
train_dir = tempfile.mkdtemp()
if FLAGS.sync_replicas:
sv = tf.train.Supervisor(
is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op,
recovery_wait_secs=1,
global_step=global_step)
else:
sv = tf.train.Supervisor(
is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
recovery_wait_secs=1,
global_step=global_step)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index])
# The chief worker (task_index==0) session will prepare the session,
# while the remaining workers will wait for the preparation to complete.
if is_chief:
print("Worker %d: Initializing session..." % FLAGS.task_index)
else:
print("Worker %d: Waiting for session to be initialized..." %
FLAGS.task_index)
if FLAGS.existing_servers:
server_grpc_url = "grpc://" + worker_spec[FLAGS.task_index]
print("Using existing server at: %s" % server_grpc_url)
sess = sv.prepare_or_wait_for_session(server_grpc_url,
config=sess_config)
else:
sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
print("Worker %d: Session initialization complete." % FLAGS.task_index)
if FLAGS.sync_replicas and is_chief:
# Chief worker will start the chief queue runner and call the init op.
sess.run(sync_init_op)
sv.start_queue_runners(sess, [chief_queue_runner])
# Perform training
time_begin = time.time()
print("Training begins @ %f" % time_begin)
local_step = 0
while True:
# Training feed
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
train_feed = {x: batch_xs, y_: batch_ys}
_, step = sess.run([train_step, global_step], feed_dict=train_feed)
local_step += 1
now = time.time()
print("%f: Worker %d: training step %d done (global step: %d)" %
(now, FLAGS.task_index, local_step, step))
if step >= FLAGS.train_steps:
break
time_end = time.time()
print("Training ends @ %f" % time_end)
training_time = time_end - time_begin
print("Training elapsed time: %f s" % training_time)
# Validation feed
val_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
val_xent = sess.run(cross_entropy, feed_dict=val_feed)
print("After %d training step(s), validation cross entropy = %g" %
(FLAGS.train_steps, val_xent))
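Note that tempfile.mkdtemp() only creates the directory and returns its path; nothing in this example ever deletes the Supervisor's logdir. A minimal sketch of the cleanup step the example omits (the prefix keyword is an illustrative choice, not part of the original):
import shutil
import tempfile

train_dir = tempfile.mkdtemp(prefix="mnist_train_")  # hypothetical prefix
try:
    pass  # write checkpoints and summaries under train_dir here
finally:
    # mkdtemp never removes the directory itself; the caller must
    shutil.rmtree(train_dir, ignore_errors=True)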
Example 61
Project: openmoltools Source File: forcefield_generators.py
def generateResidueTemplate(molecule, residue_atoms=None):
"""
Generate a residue template for simtk.openmm.app.ForceField using GAFF/AM1-BCC.
This requires the OpenEye toolkit.
Parameters
----------
molecule : openeye.oechem.OEMol
The molecule to be parameterized.
The molecule must have explicit hydrogens.
Net charge will be inferred from the net formal charge on each molecule.
Partial charges will be determined automatically using oequacpac and canonical AM1-BCC charging rules.
residue_atoms : set of OEAtom, optional, default=None
If not None, only the atoms in this set will be used to construct the residue template
Returns
-------
template : simtk.openmm.app.forcefield._TemplateData
Residue template for ForceField using atom types and parameters from `gaff.xml`.
additional_parameters_ffxml : str
Contents of ForceField `ffxml` file defining additional parameters from parmchk(2).
Notes
-----
The residue template will be named after the molecule title.
This method preserves stereochemistry during AM1-BCC charge parameterization.
Atom names in molecules will be assigned Tripos atom names if any are blank or not unique.
"""
# Set the template name based on the molecule title plus a globally unique UUID.
from uuid import uuid4
template_name = molecule.GetTitle() + '-' + str(uuid4())
# If any atom names are blank or not unique, assign new unique atom names.
_ensureUniqueAtomNames(molecule)
# Compute net formal charge.
net_charge = _computeNetCharge(molecule)
# Generate canonical AM1-BCC charges and a reference conformation.
molecule = get_charges(molecule, strictStereo=False, keep_confs=1)
# DEBUG: This may be necessary.
molecule.SetTitle('MOL')
# Create temporary directory for running antechamber.
import tempfile
tmpdir = tempfile.mkdtemp()
prefix = 'molecule'
input_mol2_filename = os.path.join(tmpdir, prefix + '.tripos.mol2')
gaff_mol2_filename = os.path.join(tmpdir, prefix + '.gaff.mol2')
frcmod_filename = os.path.join(tmpdir, prefix + '.frcmod')
# Write Tripos mol2 file as antechamber input.
_writeMolecule(molecule, input_mol2_filename)
# Parameterize the molecule with antechamber.
run_antechamber(template_name, input_mol2_filename, charge_method=None, net_charge=net_charge, gaff_mol2_filename=gaff_mol2_filename, frcmod_filename=frcmod_filename)
# Read the resulting GAFF mol2 file as a ParmEd structure.
from openeye import oechem
ifs = oechem.oemolistream(gaff_mol2_filename)
ifs.SetFlavor(oechem.OEFormat_MOL2, oechem.OEIFlavor_MOL2_DEFAULT | oechem.OEIFlavor_MOL2_M2H | oechem.OEIFlavor_MOL2_Forcefield)
m2h = True
oechem.OEReadMolecule(ifs, molecule)
ifs.close()
# If residue_atoms is None, add all atoms to the residue
if residue_atoms is None:
residue_atoms = [ atom for atom in molecule.GetAtoms() ]
# Modify partial charges so that charge on residue atoms is integral.
residue_charge = 0.0
sum_of_absolute_charge = 0.0
for atom in residue_atoms:
charge = atom.GetPartialCharge()
residue_charge += charge
sum_of_absolute_charge += abs(charge)
excess_charge = residue_charge - net_charge
if sum_of_absolute_charge == 0.0:
sum_of_absolute_charge = 1.0
for atom in residue_atoms:
charge = atom.GetPartialCharge()
atom.SetPartialCharge( charge + excess_charge * (abs(charge) / sum_of_absolute_charge) )
# Create residue template.
template = ForceField._TemplateData(template_name)
for (index, atom) in enumerate(molecule.GetAtoms()):
atomname = atom.GetName()
typename = atom.GetType()
element = Element.getByAtomicNumber(atom.GetAtomicNum())
charge = atom.GetPartialCharge()
parameters = { 'charge' : charge }
atom_template = ForceField._TemplateAtomData(atomname, typename, element, parameters)
template.atoms.append(atom_template)
for bond in molecule.GetBonds():
if (bond.GetBgn() in residue_atoms) and (bond.GetEnd() in residue_atoms):
template.addBondByName(bond.GetBgn().GetName(), bond.GetEnd().GetName())
elif (bond.GetBgn() in residue_atoms) and (bond.GetEnd() not in residue_atoms):
template.addExternalBondByName(bond.GetBgn().GetName())
elif (bond.GetBgn() not in residue_atoms) and (bond.GetEnd() in residue_atoms):
template.addExternalBondByName(bond.GetEnd().GetName())
# Generate ffxml file contents for parmchk-generated frcmod output.
leaprc = StringIO('parm = loadamberparams %s' % frcmod_filename)
params = parmed.amber.AmberParameterSet.from_leaprc(leaprc)
params = parmed.openmm.OpenMMParameterSet.from_parameterset(params)
ffxml = StringIO()
params.write(ffxml)
return template, ffxml.getvalue()
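The temporary directory above is left behind once antechamber finishes. On Python 3, the same scratch-directory pattern can be written with tempfile.TemporaryDirectory, which removes the whole tree automatically; a small sketch (the file name is illustrative):
import os
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    input_mol2_filename = os.path.join(tmpdir, 'molecule.tripos.mol2')
    # run antechamber and parmchk against files under tmpdir here;
    # the directory and its contents are deleted when the block exits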
Example 62
def main(options, args):
log = logging.getLogger("superzippy")
packages = args[0:-1]
entry_point = args[-1]
# Append any requirements.txt files to the packages list.
packages += ["-r %s" % i for i in options.requirements]
# Create the virtualenv directory
virtualenv_dir = tempfile.mkdtemp()
_dirty_files.append(virtualenv_dir)
#### Create virtual environment
log.debug("Creating virtual environment at %s.", virtualenv_dir)
output_target = None if options.verbose >= 3 else DEVNULL
return_value = subprocess.call(
["virtualenv", virtualenv_dir],
stdout = output_target,
stderr = subprocess.STDOUT
)
if return_value != 0:
log.critical(
"virtualenv returned non-zero exit status (%d).", return_value
)
return 1
##### Install package and dependencies
pip_path = os.path.join(virtualenv_dir, "bin", "pip")
for i in packages:
log.debug("Installing package with `pip install %s`.", i)
command = [pip_path, "install"] + shlex.split(i)
return_value = subprocess.call(
command,
stdout = output_target,
stderr = subprocess.STDOUT
)
if return_value != 0:
log.critical("pip returned non-zero exit status (%d).", return_value)
return 1
if not packages:
log.warn("No packages specified.")
#### Uninstall extraneous packages (pip and setuptools)
return_value = subprocess.call(
[pip_path, "uninstall", "--yes", "pip", "setuptools"],
stdout = output_target,
stderr = subprocess.STDOUT
)
if return_value != 0:
log.critical("pip returned non-zero exit status (%d).",
return_value)
return 1
#### Move site packages over to build directory
# TODO: We should look at pip's source code and figure out how it decides
# where site-packages is and use the same algorithm.
build_dir = tempfile.mkdtemp()
_dirty_files.append(build_dir)
site_package_dir = None
for root, dirs, files in os.walk(virtualenv_dir):
if "site-packages" in dirs:
found = os.path.join(root, "site-packages")
# We'll only use the first one, but we want to detect them all.
if site_package_dir is not None:
log.warn(
"Multiple site-packages directories found. `%s` will be "
"used. `%s` was found afterwards.",
site_package_dir,
found
)
else:
site_package_dir = found
# A couple .pth files are consistently left over from the previous step,
# delete them.
extraneous_pth_files = ["easy-install.pth", "setuptools.pth"]
for i in extraneous_pth_files:
path = os.path.join(site_package_dir, i)
if os.path.exists(path):
os.remove(path)
shutil.move(site_package_dir, build_dir)
#### Perform any necessary raw copies.
raw_copies = options.raw_copy_rename
for i in options.raw_copy:
if i[-1] == "/":
i = i[0:-1]
raw_copies.append((i, os.path.basename(i)))
for file_path, dest_name in raw_copies:
log.debug(
"Performing raw copy of `%s`, destination name: `%s`.",
file_path,
dest_name
)
dest = os.path.join(build_dir, "site-packages", dest_name)
try:
shutil.copytree(file_path, dest)
except OSError as e:
if e.errno == errno.ENOTDIR:
shutil.copy(file_path, dest)
else:
raise
##### Install bootstrapper
log.debug("Adding bootstrapper to the archive.")
bootstrap_files = {
"__init__.py": "__init__.py",
"bootstrapper.py": "__main__.py",
"zipsite.py": "zipsite.py",
"module_locator.py": "module_locator.py"
}
for k, v in bootstrap_files.items():
source = pkg_resources.resource_stream("superzippy.bootstrapper", k)
dest = open(os.path.join(build_dir, v), "wb")
shutil.copyfileobj(source, dest)
source.close()
dest.close()
##### Install configuration
log.debug("Adding configuration file to archive.")
with open(os.path.join(build_dir, "superconfig.py"), "w") as f:
f.write("entry_point = '%s'" % entry_point)
##### Zip everything up into final file
log.debug("Zipping up %s.", build_dir)
if options.output:
output_file = options.output
elif packages:
last_package = shlex.split(packages[-1])[0]
if os.path.isdir(last_package):
# Figure out the name of the package the user pointed at on their
# system.
setup_program = subprocess.Popen(["/usr/bin/env", "python",
os.path.join(last_package, "setup.py"), "--name"],
stdout = subprocess.PIPE, stderr = DEVNULL)
if setup_program.wait() != 0:
log.critical("Could not determine name of package at %s.",
last_package)
return 1
# Grab the output of the setup program
package_name_raw = setup_program.stdout.read()
# Decode the output into text. Our encoding is probably the
# same as what the setup.py program spat out.
package_name_txt = package_name_raw.decode(
sys.stdout.encoding or "UTF-8")
# Strip any leading and trailing whitespace
package_name = package_name_txt.strip()
# Verify that what we got was a valid package name (this handles
# most cases where an error occurs in the setup.py program).
if re.match("[A-Za-z0-9_-]+", package_name) is None:
log.critical("Could nto determine name of package. setup.py "
"is reporting an illegal name of %s", package_name)
return 1
output_file = package_name + ".sz"
else:
# Just use the name of a package we're going to pull down from
# the cheese shop, but cut off any versioning information (ex:
# bla==2.3 will become bla).
for k, c in enumerate(last_package):
if c in ("=", ">", "<"):
output_file = last_package[0:k] + ".sz"
break
else:
output_file = last_package + ".sz"
else:
log.critical("No output file or packages specified.")
return 1
try:
zipdir.zip_directory(build_dir, output_file)
except IOError:
log.critical(
"Could not write to output file at '%s'.",
output_file,
exc_info = sys.exc_info()
)
return 1
#### Make that file executable
with open(output_file, "rb") as f:
data = f.read()
with open(output_file, "wb") as f:
f.write(b"#!/usr/bin/env python\n" + data)
os.chmod(output_file, 0o755)
return 0
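Every mkdtemp() result above is appended to a module-level _dirty_files list and cleaned up elsewhere. One plausible way to wire that cleanup (an assumption about the surrounding module, not its actual code) is an atexit hook:
import atexit
import shutil
import tempfile

_dirty_files = []

def make_scratch_dir():
    path = tempfile.mkdtemp()
    _dirty_files.append(path)  # remember it so it can be removed later
    return path

@atexit.register
def _cleanup_scratch_dirs():
    for path in _dirty_files:
        shutil.rmtree(path, ignore_errors=True)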
Example 63
Project: pelisalacarta Source File: mct.py
def play(url, xlistitem, is_view=None, subtitle=""):
# -- Needed for some sites ----------------------------
if not url.endswith(".torrent") and not url.startswith("magnet"):
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
# -- Create two folders in downloads for the files ------
save_path_videos = os.path.join( config.get_setting("downloadpath") , "torrent-videos" )
save_path_torrents = os.path.join( config.get_setting("downloadpath") , "torrent-torrents" )
if not os.path.exists( save_path_torrents ): os.mkdir(save_path_torrents)
# -- Use a torrent file from the web, a magnet link or HD ---------
if not os.path.isfile(url) and not url.startswith("magnet"):
# -- http - create the torrent file -----------------------
data = url_get(url)
# -- The torrent name will be the one contained in the --
# -- data. -
re_name = urllib.unquote( scrapertools.get_match(data,':name\d+:(.*?)\d+:') )
torrent_file = filetools.join(save_path_torrents, filetools.encode(re_name + '.torrent'))
f = open(torrent_file,'wb')
f.write(data)
f.close()
elif os.path.isfile(url):
# -- file - to use torrents from the HD ---------------
torrent_file = url
else:
# -- magnet ---------------------------------------------
torrent_file = url
# -----------------------------------------------------------
# -- MCT - MiniClienteTorrent -------------------------------
ses = lt.session()
print "### Init session ########"
print lt.version
print "#########################"
ses.add_dht_router("router.bittorrent.com",6881)
ses.add_dht_router("router.utorrent.com",6881)
ses.add_dht_router("router.bitcomet.com",554)
ses.add_dht_router("dht.transmissionbt.com",6881)
trackers = [
"http://exodus.desync.com:6969/announce",
"udp://tracker.publicbt.com:80/announce",
"udp://tracker.openbittorrent.com:80/announce",
"http://tracker.torrentbay.to:6969/announce",
"http://fr33dom.h33t.com:3310/announce",
"http://tracker.pow7.com/announce",
"udp://tracker.ccc.de:80/announce",
"http://tracker.bittorrent.am:80/announce",
"http://denis.stalker.h3q.com:6969/announce",
"udp://tracker.prq.to:80/announce",
"udp://tracker.istole.it:80/announce",
"udp://open.demonii.com:1337",
"http://9.rarbg.com:2710/announce",
"http://announce.torrentsmd.com:6969/announce",
"http://bt.careland.com.cn:6969/announce",
"http://explodie.org:6969/announce",
"http://mgtracker.org:2710/announce",
"http://tracker.best-torrents.net:6969/announce",
"http://tracker.tfile.me/announce",
"http://tracker.torrenty.org:6969/announce",
"http://tracker1.wasabii.com.tw:6969/announce",
"udp://9.rarbg.com:2710/announce",
"udp://9.rarbg.me:2710/announce",
"udp://coppersurfer.tk:6969/announce",
"udp://tracker.btzoo.eu:80/announce",
"http://www.spanishtracker.com:2710/announce",
"http://www.todotorrents.com:2710/announce",
]
video_file = ""
# -- magnet2torrent -----------------------------------------
if torrent_file.startswith("magnet"):
try:
tempdir = tempfile.mkdtemp()
except IOError:
tempdir = os.path.join(save_path_torrents , "temp")
if not os.path.exists(tempdir): os.mkdir(tempdir)
params = {
'save_path': tempdir,
'trackers':trackers,
'storage_mode': lt.storage_mode_t.storage_mode_allocate,
'paused': False,
'auto_managed': True,
'duplicate_is_error': True
}
h = lt.add_magnet_uri(ses, torrent_file, params)
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
while not h.has_metadata():
message, porcent, msg_file, s, download = getProgress(h, "Creando torrent desde magnet")
dp.update(porcent, message, msg_file)
if s.state == 1: download = 1
if dp.iscanceled():
dp.close()
remove_files( download, torrent_file, video_file, ses, h )
return
dp.close()
info = h.get_torrent_info()
data = lt.bencode( lt.create_torrent(info).generate() )
#torrent_file = os.path.join(save_path_torrents, info.name() + ".torrent")
torrent_file = os.path.join(save_path_torrents, unicode(info.name(), "utf-8", errors="replace") + ".torrent")
f = open(torrent_file,'wb')
f.write(data)
f.close()
ses.remove_torrent(h)
shutil.rmtree(tempdir)
# -----------------------------------------------------------
# -- Torrent files ---------------------------------------
e = lt.bdecode(open(torrent_file, 'rb').read())
info = lt.torrent_info(e)
# -- The biggest file (or one of the biggest) is assumed -
# -- to be the video, or the video used as a reference -
# -- for the file type -
print "##### Archivos ## %s ##" % len(info.files())
_index_file, _video_file, _size_file = get_video_file(info)
_video_file_ext = os.path.splitext( _video_file )[1]
if _video_file_ext == ".avi" or _video_file_ext == ".mp4":
print "##### storage_mode_t.storage_mode_allocate ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_allocate } )
else:
print "##### storage_mode: none ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_sparse } )
# -----------------------------------------------------------
# -- Sequential download - piece 1, piece 2, ... ------------
h.set_sequential_download(True)
h.force_reannounce()
h.force_dht_announce()
# -- Prioritize/select the file -----------------------
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1:
_index = _index_file
video_file = _video_file
video_size = _size_file
# -- Initialize variables for automatic 'pause' when the -
# -- video gets close to an incomplete piece -
is_greater_num_pieces = False
is_greater_num_pieces_plus = False
is_greater_num_pieces_pause = False
#porcent4first_pieces = int( video_size / 1073741824 )
porcent4first_pieces = int( video_size * 0.000000005 )
if porcent4first_pieces < 10: porcent4first_pieces = 10
if porcent4first_pieces > 100: porcent4first_pieces = 100
#num_pieces_to_resume = int( video_size / 1610612736 )
num_pieces_to_resume = int( video_size * 0.0000000025 )
if num_pieces_to_resume < 5: num_pieces_to_resume = 5
if num_pieces_to_resume > 25: num_pieces_to_resume = 25
print "##### porcent4first_pieces ## %s ##" % porcent4first_pieces
print "##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume
# -- Prioritize or select the pieces of the file to be -
# -- played using 'file_priorities' -
piece_set = set_priority_pieces(h, _index, video_file, video_size)
# -- Create the progress dialog for the first loop ---------
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
_pieces_info = {}
# -- Two nested loops ------------------------------------
# -- Download - first loop -
while not h.is_seed():
s = h.status()
xbmc.sleep(100)
# -- Retrieve the progress data -------------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
# -- If the state is 'checking', a download exists ------
# -- 'download' is used to know whether there is data -
# -- downloaded for the 'remove_files' dialog -
if s.state == 1: download = 1
# -- Player - play --------------------------------------
# -- Check whether the pieces needed to start the -
# -- video have been completed -
first_pieces = True
_p = ""
_c = 0
for i in range( piece_set[0], piece_set[porcent4first_pieces] ):
_p+= "[%s:%s]" % ( i, h.have_piece(i) )
first_pieces&= h.have_piece(i)
if h.have_piece(i): _c+= 1
_pieces_info = {'current': 0, 'continuous': "%s/%s" % (_c,porcent4first_pieces), 'have': h.status().num_pieces, 'len': len(piece_set)}
_p = "##### first_pieces [%s/%s][%s]: " % ( _c, porcent4first_pieces, len(piece_set) ) + _p
print _p
# -- -------------------------------------------------- -
if is_view != "Ok" and first_pieces:
print "##### porcent [%.2f%%]" % (s.progress * 100)
is_view = "Ok"
dp.close()
# -- Player - watch the video --------------------------
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
#ren_video_file = os.path.join( save_path_videos, video_file ).replace('\\','\\\\')
ren_video_file = os.path.join( save_path_videos, video_file )
playlist.add( ren_video_file, xlistitem )
#playlist.add( os.path.join( save_path_videos, video_file ), xlistitem )
#playlist.add( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20'), xlistitem )
player = play_video( xbmc.PLAYER_CORE_AUTO )
player.play(playlist)
'''
# -- Player - watch the video --------------------------
player = play_video()
#player.play( os.path.join( save_path_videos, video_file ) )
player.play( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20') )
'''
#player.play( os.path.join( save_path_videos, video_file ) )
# -- Cancellation counter for the automatic -
# -- 'pause' window -
is_greater_num_pieces_canceled = 0
continuous_pieces = 0
porcent_time = 0.00
current_piece = 0
# -- Prevent Kodi from resuming a file that was ----
# -- played before and then removed, so it does -
# -- not attempt playback at a piece that is not -
# -- yet complete and trigger the automatic -
# -- 'pause' -
not_resume = True
# -- Subtitles flag
_sub = False
# -- Second loop - Player - event handling ----
while player.isPlaying():
xbmc.sleep(100)
# -- Add subtitles
if subtitle!="" and not _sub:
_sub = True
player.setSubtitles(subtitle)
# -- Prevent Kodi from resuming at the start ----
# -- of the download of a known file -
if not_resume:
player.seekTime(0)
not_resume = False
#xbmc.sleep(1000)
# -- Automatic 'pause' control -
continuous_pieces = count_completed_continuous_pieces(h, piece_set)
if xbmc.Player().isPlaying():
# -- Video progress percentage ------
porcent_time = player.getTime() / player.getTotalTime() * 100
# -- Piece currently being played --------
current_piece = int( porcent_time / 100 * len(piece_set) )
# -- Control flags --------------------
is_greater_num_pieces = (current_piece > continuous_pieces - num_pieces_to_resume)
is_greater_num_pieces_plus = (current_piece + porcent4first_pieces > continuous_pieces)
is_greater_num_pieces_finished = (current_piece + porcent4first_pieces >= len(piece_set))
# -- Trigger automatic 'pause' --------------
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
is_greater_num_pieces_pause = True
player.pause()
# -- Log ------------------------------------
_TotalTime = player.getTotalTime()
_Time = player.getTime()
_print_log = "\n##### Player ##################################"
_print_log+= "\nTamaño del vídeo: %s" % video_size
_print_log+= "\nTotal piezas: %s" % len(piece_set)
_print_log+= "\nPiezas contiguas: %s" % continuous_pieces
_print_log+= "\n-----------------------------------------------"
_print_log+= "\nVídeo-Total segundos: %s" % _TotalTime
_print_log+= "\nVídeo-Progreso segundos: %s" % _Time
_print_log+= "\nVídeo-Progreso porcentaje: %.2f%%" % porcent_time
_print_log+= "\n-----------------------------------------------"
_print_log+= "\ncurrent_piece: %s" % current_piece
_print_log+= "\nis_greater_num_pieces: %s" % is_greater_num_pieces
_print_log+= "\nis_greater_num_pieces_plus: %s" % is_greater_num_pieces_plus
_print_log+= "\nis_greater_num_pieces_pause: %s" % is_greater_num_pieces_pause
_print_log+= "\nis_greater_num_pieces_finished: %s" % is_greater_num_pieces_finished
_print_log+= "\nPieza que se está visionando: %.2f" % ( porcent_time / 100 * len(piece_set) )
_print_log+= "\nOffset que se está visionando: %.2f" % ( porcent_time / 100 * video_size )
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
_print_log+= "\n+++++++++++++++++++++++++++++++++++++++++++++++"
_print_log+= "\nPausa con:"
_print_log+= "\n current_piece = %s" % current_piece
_print_log+= "\n continuous_pieces = %s" % continuous_pieces
_print_log+= "\n###############################################"
print _print_log
# -------------------------------------------
_pieces_info = {'current': current_piece, 'continuous': continuous_pieces, 'have': h.status().num_pieces, 'len': len(piece_set)}
# -- Close the progress dialog --------------
if player.resumed:
dp.close()
# -- Show the progress dialog -------------
if player.paused:
# -- Create the dialog if it does not exist -------------
if not player.statusDialogoProgress:
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
player.setDialogoProgress()
# -- Status dialogs while viewing -----
if not h.is_seed():
# -- Retrieve the progress data ---
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
dp.update(porcent, message, msg_file)
else:
dp.update(100, "Descarga completa: " + video_file)
# -- The progress dialog was cancelled while viewing -
# -- Continue -
if dp.iscanceled():
dp.close()
player.pause()
# -- The progress dialog was cancelled while viewing -
# -- in the automatic 'pause' window. -
# -- Stop if the counter reaches 3 -
if dp.iscanceled() and is_greater_num_pieces_pause:
is_greater_num_pieces_canceled+= 1
if is_greater_num_pieces_canceled == 3:
player.stop()
# -- Disable automatic 'pause' and ---------
# -- reset the cancellation counter -
if not dp.iscanceled() and not is_greater_num_pieces_plus and is_greater_num_pieces_pause:
dp.close()
player.pause()
is_greater_num_pieces_pause = False
is_greater_num_pieces_canceled = 0
# -- The user cancelled viewing --------
# -- Finish -
if player.ended:
# -- Remove-files dialog ----------
remove_files( download, torrent_file, video_file, ses, h )
return
# -- Kodi - viewing was closed -----------------------
# -- Continue | Finish -
if is_view == "Ok" and not xbmc.Player().isPlaying():
if info.num_files() == 1:
# -- Continue-or-finish dialog ---------------
d = xbmcgui.Dialog()
ok = d.yesno('pelisalacarta-MCT', 'XBMC-Kodi closed the video.', 'Continue with the session?')
else: ok = False
# -- YES ---------------------------------------------
if ok:
# -- Continue: ---------------------------------
is_view=None
else:
# -- Finish: ----------------------------------
# -- Check whether the video belongs to a ------
# -- list of files -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Remove-files dialog --------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- List of files. Options dialog -
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
# -- Show progress before viewing -----------------
if is_view != "Ok" :
dp.update(porcent, message, msg_file)
# -- The progress dialog was cancelled before viewing ---------
# -- Finish -
if dp.iscanceled():
dp.close()
# -- Check whether the video belongs to a list of -
# -- files -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Remove-files dialog ------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- List of files. Options dialog -----
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
# -- Kodi - Error? - should never get here -----------------
if is_view == "Ok" and not xbmc.Player().isPlaying():
dp.close()
# -- Remove-files dialog --------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
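The magnet branch above falls back to a directory under save_path_torrents when mkdtemp() fails. The fallback reduced to a sketch (save_path_torrents is assumed, per the example; note that mkdtemp signals failure with OSError, of which IOError is an alias on Python 3):
import os
import tempfile

save_path_torrents = '/tmp/torrent-torrents'  # assumed, per the example
try:
    tempdir = tempfile.mkdtemp()
except (IOError, OSError):
    tempdir = os.path.join(save_path_torrents, 'temp')
    if not os.path.exists(tempdir):
        os.makedirs(tempdir)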
Example 64
Project: kicad-pcblib Source File: download_ipc.py
def main ():
# Get args
from argparse import ArgumentParser
description = "Download FreePCB IPC libraries and convert to KiCad " + \
"format."
p = ArgumentParser (description=description)
p.add_argument ("-v", "--version", action="version",
version="%(prog)s " + VERSION)
p.add_argument ("src", metavar="SRC", type=str,
help="URL or path to IPC FreePCB zipfile")
p.add_argument ("dest", metavar="DEST", type=str,
help="Path to KiCad output")
p.add_argument ("fp2kicad", metavar="FP2KICAD", type=str,
help="Path to freepcb2kicad.py")
p.add_argument ("--no-confirm-license", dest="no_confirm_license",
action="store_const", const=True, default=False,
help="Do not ask the user to accept the GPL")
p.add_argument ("--3dmap", dest="threedmap", type=str,
help="Module-3D model map. See freepcb2kicad.py docuementation.")
p.add_argument ("--rounded-pads", dest="roundedpads", action="store_const", const="all", default=None)
p.add_argument ("--rounded-except-1", dest="roundedpads", action="store_const", const="allbut1", default=None)
p.add_argument ("--rounded-pad-exceptions", dest="rpexcept", type=str,
help="Rounded pad exception file. See freepcb2kicad.py for " + \
"docuementation.")
p.add_argument ("--rounded-center-exceptions", dest="rcexcept", type=str,
help="Rounded center pad exception file. See freepcb2kicad.py for " + \
"docuementation.")
p.add_argument ("--add-courtyard", dest="courtyard", type=str,
default=None,
help="Add a courtyard a fixed number of mm outside the bounding box")
p.add_argument ("--hash-time", dest="hashtime", action="store_const",
const=True, default=False,
help="Set a fake edit time on the footprints using a hash")
args = p.parse_args ()
if args.threedmap is not None:
FREEPCB2KICAD_ARGS.extend (["--3dmap", args.threedmap])
if args.rpexcept is not None:
FREEPCB2KICAD_ARGS.extend (["--rounded-pad-exceptions", args.rpexcept])
if args.rcexcept is not None:
FREEPCB2KICAD_ARGS.extend (["--rounded-center-exceptions", args.rcexcept])
if args.courtyard is not None:
FREEPCB2KICAD_ARGS.extend (["--add-courtyard", args.courtyard])
if args.roundedpads == "all":
FREEPCB2KICAD_ARGS.append ("--rounded-pads")
elif args.roundedpads == "allbut1":
FREEPCB2KICAD_ARGS.append ("--rounded-except-1")
if args.hashtime:
FREEPCB2KICAD_ARGS.append ("--hash-time")
# Download, if necessary, then open file
if args.src.startswith ("http:/"):
if not args.no_confirm_license:
confirm_license ()
url = urlopen (args.src)
print ("Downloading FreePCB library...")
try:
data = url.read ()
except Exception as e:
url.close ()
raise
else:
url.close ()
ipc_f = BytesIO (data) # data is bytes in Py3
else:
ipc_f = open (args.src, 'rb')
ipc_zip = zipfile.ZipFile (ipc_f)
# Create a temporary working directory, and extract the IPC files
# into it.
tempdir = tempfile.mkdtemp ()
# Wrap the rest of the code in an exception catcher so we can clean up
# the files.
try:
main_2 (args, tempdir, ipc_zip)
except:
try:
ipc_f.close ()
except Exception as e:
print (e)
try:
shutil.rmtree (tempdir)
except Exception as e:
print (e)
raise
else:
exceptions = []
try:
ipc_f.close ()
except Exception as e:
exceptions.append (e)
try:
shutil.rmtree (tempdir)
except Exception as e:
exceptions.append (e)
for exc in exceptions:
print (exc)
if exceptions:
raise Exception ("Errors occurred.")
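The try/except/else bookkeeping above exists only to guarantee that ipc_f is closed and tempdir removed whether or not main_2 raises. The same guarantee packaged as a context manager, as a sketch (scratch_dir is a hypothetical helper, not part of the project):
import contextlib
import shutil
import tempfile

@contextlib.contextmanager
def scratch_dir():
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path, ignore_errors=True)

# usage: with scratch_dir() as tempdir: extract the IPC files, then convert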
Example 65
Project: defend_against_fruit Source File: virtualenv_util.py
def _stage_virtualenv(options, install_virtualenv=True):
'''Creates a staging virtual environment to help with the "real"
installation. Returns the path to the staged virtual env. If
install_virtualenv is False, the virtual env package is unpacked
but no virtual environment is actually created.
'''
# Create a temporary directory to put the virtual env. in.
temp_dir = tempfile.mkdtemp()
try:
# Was a fixed virtualenv version specified? If not, we need to check
# the PyPI server for the latest version.
if options.virtualenv_version is None:
options.virtualenv_version, virtualenv_url = (
_get_newest_package_version_and_url(
options.pypi_pull_server,
options.virtualenv_package_name))
else:
virtualenv_url = '/'.join([
options.pypi_pull_server,
options.virtualenv_package_name,
'{}-{}.tar.gz'.format(
options.virtualenv_package_name,
options.virtualenv_version)])
virtualenv_tar_filename = os.path.basename(
urlparse.urlparse(virtualenv_url).path)
f_remote = urllib.urlopen(virtualenv_url)
f_local = open(os.path.join(temp_dir, virtualenv_tar_filename), 'wb')
f_local.write(f_remote.read())
f_local.close()
# If a download dir or download cache directory was specified,
# copy the virtualenv package file to that directory if it is not
# already there.
for directory in [options.download_dir, options.download_cache_dir]:
if directory is None:
directory = ''
virtualenv_tar_exists = os.path.isfile(
os.path.join(directory, virtualenv_tar_filename))
if directory and not virtualenv_tar_exists:
shutil.copy2(
os.path.join(temp_dir, virtualenv_tar_filename),
directory)
# Unpack the tarball to the temporary directory.
tarf = tarfile.open(
os.path.join(temp_dir, virtualenv_tar_filename),
'r:gz')
tarf.extractall(temp_dir)
tarf.close()
unpacked_tar_directory = os.path.join(
temp_dir, virtualenv_tar_filename.replace('.tar.gz', ''))
bootstrap_vm_directory = os.path.join(
temp_dir, VIRTUALENV_BOOTSTRAP_NAME)
# Create the bootstrap virtualenv in the temporary directory using the
# current python executable we are using plus the virtualenv stuff we
# unpacked.
if install_virtualenv:
arguments = [sys.executable,
os.path.join(unpacked_tar_directory, 'virtualenv.py'),
'--distribute',
bootstrap_vm_directory]
try:
if options.quiet:
subprocess.check_output(arguments, shell=True)
else:
subprocess.check_call(arguments, shell=True)
except subprocess.CalledProcessError as e:
if options.quiet:
print e.output
print 'Bootstrap VM create failed, return code', e.returncode
raise
# Get the right options to pass to pip to install virtualenv
# to the bootstrap environment. Again, this is necessary because
# pip does not support file:// index urls.
if urlparse.urlparse(options.pypi_pull_server).scheme == 'file':
install_options = [
'--no-index',
'--find-links',
options.pypi_pull_server]
else:
install_options = [
'-i',
options.pypi_pull_server]
# Install virtualenv into this bootstrap environment using pip,
# pointing at the right server.
subprocess.check_call(
[
os.path.join(bootstrap_vm_directory, 'Scripts', 'pip'),
'install'
]
+ install_options
+ [
'{}=={}'.format(
options.virtualenv_package_name,
options.virtualenv_version)
])
except Exception:
# Even though the calling code is normally responsible for cleaning
# up the temp dir, if an exception occurs, we do it here because we
# won't be able to return the temp_dir to the caller
_cleanup_virtualenv(temp_dir)
raise
# Return the bootstrap vm dir that was created
return temp_dir
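A distilled sketch of the download-cache step above, which copies the fetched tarball into each configured directory only if it is not already cached (the helper name is illustrative):
import os
import shutil

def cache_tarball(tar_path, directories):
    """Copy tar_path into every configured directory that lacks it."""
    for directory in directories:
        if not directory:
            continue  # skip unset download/cache directories
        dest = os.path.join(directory, os.path.basename(tar_path))
        if not os.path.isfile(dest):
            shutil.copy2(tar_path, dest)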
Example 66
Project: dopey Source File: document.py
def load_ora(self, filename, feedback_cb=None):
"""Loads from an OpenRaster file"""
logger.info('load_ora: %r', filename)
t0 = time.time()
tempdir = tempfile.mkdtemp('mypaint')
if not isinstance(tempdir, unicode):
tempdir = tempdir.decode(sys.getfilesystemencoding())
z = zipfile.ZipFile(filename)
logger.debug('mimetype: %r', z.read('mimetype').strip())
xml = z.read('stack.xml')
image = ET.fromstring(xml)
stack = image.find('stack')
image_w = int(image.attrib['w'])
image_h = int(image.attrib['h'])
def get_pixbuf(filename):
t1 = time.time()
try:
fp = z.open(filename, mode='r')
except KeyError:
# support for bad zip files (saved by old versions of the GIMP ORA plugin)
fp = z.open(filename.encode('utf-8'), mode='r')
logger.warning('Bad OpenRaster ZIP file. There is a utf-8 '
'encoded filename that does not have the '
'utf-8 flag set: %r', filename)
res = self._pixbuf_from_stream(fp, feedback_cb)
fp.close()
logger.debug('%.3fs loading pixbuf %s', time.time() - t1, filename)
return res
def get_layers_list(root, x=0,y=0):
res = []
for item in root:
if item.tag == 'layer':
if 'x' in item.attrib:
item.attrib['x'] = int(item.attrib['x']) + x
if 'y' in item.attrib:
item.attrib['y'] = int(item.attrib['y']) + y
res.append(item)
elif item.tag == 'stack':
stack_x = int( item.attrib.get('x', 0) )
stack_y = int( item.attrib.get('y', 0) )
res += get_layers_list(item, stack_x, stack_y)
else:
logger.warning('ignoring unsupported tag %r', item.tag)
return res
self.clear() # this leaves one empty layer
no_background = True
selected_layer = None
for layer in get_layers_list(stack):
a = layer.attrib
if 'background_tile' in a:
assert no_background
try:
logger.debug("background tile: %r", a['background_tile'])
self.set_background(get_pixbuf(a['background_tile']))
no_background = False
continue
except tiledsurface.BackgroundError, e:
logger.warning('ORA background tile not usable: %r', e)
src = a.get('src', '')
if not src.lower().endswith('.png'):
logger.warning('Ignoring non-png layer %r', src)
continue
name = a.get('name', '')
x = int(a.get('x', '0'))
y = int(a.get('y', '0'))
opac = float(a.get('opacity', '1.0'))
compositeop = str(a.get('composite-op', DEFAULT_COMPOSITE_OP))
if compositeop not in VALID_COMPOSITE_OPS:
compositeop = DEFAULT_COMPOSITE_OP
selected = self.__xsd2bool(a.get("selected", 'false'))
locked = self.__xsd2bool(a.get("edit-locked", 'false'))
visible = not 'hidden' in a.get('visibility', 'visible')
self.add_layer(insert_idx=0, name=name)
t1 = time.time()
# extract the png from the zip into a file first
# the overhead for doing so seems to be negligible (around 5%)
z.extract(src, tempdir)
tmp_filename = join(tempdir, src)
self.load_layer_from_png(tmp_filename, x, y, feedback_cb)
os.remove(tmp_filename)
layer = self.layers[0]
self.set_layer_opacity(helpers.clamp(opac, 0.0, 1.0), layer)
self.set_layer_compositeop(compositeop, layer)
self.set_layer_visibility(visible, layer)
self.set_layer_locked(locked, layer)
if selected:
selected_layer = layer
logger.debug('%.3fs loading and converting layer png',
time.time() - t1)
# strokemap
fname = a.get('mypaint_strokemap_v2', None)
if fname:
sio = StringIO(z.read(fname))
layer.load_strokemap_from_file(sio, x, y)
sio.close()
if len(self.layers) == 1:
# no assertion (allow empty documents)
logger.error('Could not load any layer, document is empty.')
if len(self.layers) > 1:
# remove the still present initial empty top layer
self.select_layer(len(self.layers)-1)
self.remove_layer()
# this leaves the topmost layer selected
try:
ani_data = z.read('animation.xsheet')
self.ani.str_to_xsheet(ani_data)
except KeyError:
self.ani.load_xsheet(filename)
if selected_layer is not None:
for i, layer in zip(range(len(self.layers)), self.layers):
if layer is selected_layer:
self.select_layer(i)
break
# Set the frame size to that saved in the image.
self.update_frame(x=0, y=0, width=image_w, height=image_h,
user_initiated=False)
# Enable frame if the saved image size is something other than the
# calculated bounding box. Goal: if the user saves an "infinite
# canvas", it loads as an infinite canvas.
bbox_c = helpers.Rect(x=0, y=0, w=image_w, h=image_h)
bbox = self.get_bbox()
frame_enab = not (bbox_c==bbox or bbox.empty() or bbox_c.empty())
self.set_frame_enabled(frame_enab, user_initiated=False)
z.close()
# remove empty directories created by zipfile's extract()
for root, dirs, files in os.walk(tempdir, topdown=False):
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(tempdir)
logger.info('%.3fs load_ora total', time.time() - t0)
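mkdtemp's first positional argument is a suffix, so mkdtemp('mypaint') above yields a name like /tmp/tmpXXXXXXmypaint rather than a 'mypaint' prefix. All three keyword arguments, for reference:
import tempfile

d1 = tempfile.mkdtemp(suffix='mypaint')  # e.g. /tmp/tmpXXXXXXmypaint
d2 = tempfile.mkdtemp(prefix='ora-')     # e.g. /tmp/ora-XXXXXX
d3 = tempfile.mkdtemp(dir=d1)            # the parent directory can be chosen too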
Example 67
def render_tikz(self,tikz,libs='',stringsubst=False):
hashkey = tikz.encode('utf-8')
fname = 'tikz-%s.png' % (sha(hashkey).hexdigest())
relfn = posixpath.join(self.builder.imgpath, fname)
outfn = path.join(self.builder.outdir, '_images', fname)
if path.isfile(outfn):
return relfn
if hasattr(self.builder, '_tikz_warned'):
return None
ensuredir(path.dirname(outfn))
curdir = getcwd()
latex = DOC_HEAD % libs
latex += self.builder.config.tikz_latex_preamble
if stringsubst:
tikz = tikz % {'wd': curdir}
latex += DOC_BODY % tikz
if isinstance(latex, str):
latex = latex.encode('utf-8')
if not hasattr(self.builder, '_tikz_tempdir'):
tempdir = self.builder._tikz_tempdir = tempfile.mkdtemp()
else:
tempdir = self.builder._tikz_tempdir
chdir(tempdir)
tf = open('tikz.tex', 'w')
tf.write(latex)
tf.close()
try:
try:
p = Popen(['pdflatex', '--interaction=nonstopmode', 'tikz.tex'],
stdout=PIPE, stderr=PIPE)
except OSError as err:
if err.errno != ENOENT: # No such file or directory
raise
self.builder.warn('LaTeX command cannot be run')
self.builder._tikz_warned = True
return None
finally:
chdir(curdir)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise TikzExtError('Error (tikz extension): latex exited with error:\n'
'[stderr]\n%s\n[stdout]\n%s' % (stderr, stdout))
chdir(tempdir)
# the following does not work for pdf patterns
# p1 = Popen(['convert', '-density', '120', '-colorspace', 'rgb',
# '-trim', 'tikz.pdf', outfn], stdout=PIPE, stderr=PIPE)
# stdout, stderr = p1.communicate()
try:
p = Popen(['pdftoppm', '-r', '120', 'tikz.pdf', 'tikz'],
stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno != ENOENT: # No such file or directory
raise
self.builder.warn('pdftoppm command cannot be run')
self.builder.warn(e)
self.builder._tikz_warned = True
chdir(curdir)
return None
stdout, stderr = p.communicate()
if p.returncode != 0:
self.builder._tikz_warned = True
raise TikzExtError('Error (tikz extension): pdftoppm exited with error:'
'\n[stderr]\n%s\n[stdout]\n%s' % (stderr, stdout))
if self.builder.config.tikz_proc_suite == 'ImageMagick':
convert_args = []
if self.builder.config.tikz_transparent:
convert_args = ['-fuzz', '2%', '-transparent', 'white']
try:
p1 = Popen(['convert', '-trim'] + convert_args +
['tikz-1.ppm', outfn],
stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno != ENOENT: # No such file or directory
raise
self.builder.warn('convert command cannot be run')
self.builder.warn(e)
self.builder._tikz_warned = True
chdir(curdir)
return None
stdout, stderr = p1.communicate()
if p1.returncode != 0:
self.builder._tikz_warned = True
chdir(curdir)
raise TikzExtError('Error (tikz extension): convert exited with '
'error:\n[stderr]\n%s\n[stdout]\n%s'
% (stderr, stdout))
elif self.builder.config.tikz_proc_suite == 'Netpbm':
try:
p1 = Popen(['pnmcrop', 'tikz-1.ppm'], stdout=PIPE, stderr=PIPE)
except OSError as err:
if err.errno != ENOENT: # No such file or directory
raise
self.builder.warn('pnmcrop command cannot be run:')
self.builder.warn(err)
self.builder._tikz_warned = True
chdir(curdir)
return None
pnm_args = []
if self.builder.config.tikz_transparent:
pnm_args = ['-transparent', 'white']
try:
p2 = Popen(['pnmtopng'] + pnm_args, stdin=p1.stdout,
stdout=PIPE, stderr=PIPE)
except OSError as err:
if err.errno != ENOENT: # No such file or directory
raise
self.builder.warn('pnmtopng command cannot be run:')
self.builder.warn(err)
self.builder._tikz_warned = True
chdir(curdir)
return None
pngdata, stderr2 = p2.communicate()
dummy, stderr1 = p1.communicate()
if p1.returncode != 0:
self.builder._tikz_warned = True
raise TikzExtError('Error (tikz extension): pnmcrop exited with '
'error:\n[stderr]\n%s' % (stderr1))
if p2.returncode != 0:
self.builder._tikz_warned = True
raise TikzExtError('Error (tikz extension): pnmtopng exited with '
'error:\n[stderr]\n%s' % (stderr2))
f = open(outfn,'wb')
f.write(pngdata)
f.close()
else:
self.builder._tikz_warned = True
chdir(curdir)
raise TikzExtError('Error (tikz extension): Invalid configuration '
'value for tikz_proc_suite')
chdir(curdir)
return relfn
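The renderer above creates its scratch directory lazily and caches it on the builder, so every TikZ picture in one build shares a single directory. The pattern in isolation (builder stands in for any long-lived object):
import tempfile

def get_tempdir(builder):
    """Create the shared scratch directory on first use, then reuse it."""
    if not hasattr(builder, '_tikz_tempdir'):
        builder._tikz_tempdir = tempfile.mkdtemp()
    return builder._tikz_tempdir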
Example 68
Project: pip-update-requirements Source File: install.py
def run(self, options, args):
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
if options.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.download_dir:
warnings.warn(
"pip install --download has been deprecated and will be "
"removed in the future. Pip now has a download command that "
"should be used instead.",
RemovedInPip10Warning,
)
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if options.prefix_path:
raise CommandError(
"Can not combine '--user' and '--prefix' as they imply "
"different installation locations"
)
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
install_options.append('--prefix=')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
with self._build_session(options) as session:
finder = self._build_package_finder(options, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
if options.cache_dir and not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"by the current user and caching wheels has been "
"disabled. check the permissions and owner of that "
"directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
require_hashes=options.require_hashes,
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
if (options.download_dir or not wheel or not
options.cache_dir):
# on -d don't do complex things like building
# wheels, and don't try to build wheels when wheel is
# not installed.
requirement_set.prepare_files(finder)
else:
# build wheels before install.
wb = WheelBuilder(
requirement_set,
finder,
build_options=[],
global_options=[],
)
# Ignore the result: a failed wheel will be
# installed from the sdist/vcs whatever.
wb.build(autobuilding=True)
if not options.download_dir:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
prefix=options.prefix_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
if options.target_dir:
ensure_dir(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
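Here mkdtemp() provides a staging area for --target installs: pip installs into the scratch --home, then moves each item into the real target directory and deletes the staging tree. That final step reduced to a sketch (promote_staging is a hypothetical name):
import os
import shutil

def promote_staging(staging_dir, target_dir):
    """Move every installed item from the staging dir into the real target."""
    for item in os.listdir(staging_dir):
        shutil.move(os.path.join(staging_dir, item),
                    os.path.join(target_dir, item))
    shutil.rmtree(staging_dir)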
Example 69
Project: pystan Source File: model.py
def vb(self, data=None, pars=None, iter=10000,
seed=None, init='random', sample_file=None, diagnostic_file=None, verbose=False,
algorithm=None, **kwargs):
"""Call Stan's variational Bayes methods.
Parameters
----------
data : dict
A Python dictionary providing the data for the model. Variables
for Stan are stored in the dictionary as expected. Variable
names are the keys and the values are their associated values.
Stan only accepts certain kinds of values; see Notes.
pars : list of string, optional
A list of strings indicating parameters of interest. By default
all parameters specified in the model will be stored.
seed : int or np.random.RandomState, optional
The seed, a positive integer for random number generation. Only
one seed is needed when multiple chains are used, as the other
chain's seeds are generated from the first chain's to prevent
dependency among random number streams. By default, seed is
``random.randint(0, MAX_UINT)``.
sample_file : string, optional
File name specifying where samples for *all* parameters and other
saved quantities will be written. If not provided, samples will be
written to a temporary file and read back in. If the folder given is
not writable, a temporary directory will be used. When there are
multiple chains, an underscore and chain number are appended to the
file name. By default do not write samples to file.
diagnostic_file : string, optional
File name specifying where diagnostics for the variational fit
will be written.
iter : int, 10000 by default
Positive integer specifying how many iterations for each chain
including warmup.
algorithm : {'meanfield', 'fullrank'}
One of "meanfield" and "fullrank" indicating which
variational inference algorithm is used. meanfield: mean-field
approximation; fullrank: full-rank covariance. The default is
'meanfield'.
verbose : boolean, False by default
Indicates whether intermediate output should be piped to the
console. This output may be useful for debugging.
Other optional parameters, refer to the manuals for both CmdStan
and Stan.
- `iter`: the maximum number of iterations, defaults to 10000
- `grad_samples` the number of samples for Monte Carlo estimate of
gradients, defaults to 1.
- `elbo_samples` the number of samples for Monte Carlo estimate of ELBO
(objective function), defaults to 100. (ELBO stands for "the evidence
lower bound".)
- `eta` positive stepsize weighting parameter for variational
inference; it is ignored if adaptation is engaged, which is the case
by default.
- `adapt_engaged` flag indicating whether to automatically adapt the
stepsize; defaults to True.
- `tol_rel_obj` convergence tolerance on the relative norm of the
objective, defaults to 0.01.
- `eval_elbo`, evaluate ELBO every Nth iteration, defaults to 100
- `output_samples` number of posterior samples to draw and save,
defaults to 1000.
- `adapt_iter` number of iterations to adapt the stepsize if
`adapt_engaged` is True and ignored otherwise.
Returns
-------
results : dict
Dictionary containing information related to results.
Examples
--------
>>> from pystan import StanModel
>>> m = StanModel(model_code='parameters {real y;} model {y ~ normal(0,1);}')
>>> results = m.vb()
>>> # results saved on disk in format inspired by CSV
>>> print(results['args']['sample_file'])
"""
if data is None:
data = {}
algorithms = ("meanfield", "fullrank")
algorithm = "meanfield" if algorithm is None else algorithm
if algorithm not in algorithms:
raise ValueError("Algorithm must be one of {}".format(algorithms))
fit = self.fit_class(data)
m_pars = fit._get_param_names()
p_dims = fit._get_param_dims()
if isinstance(init, Number):
init = str(init)
elif isinstance(init, Callable):
init = init()
elif not isinstance(init, Iterable) and \
not isinstance(init, string_types):
raise ValueError("Wrong specification of initial values.")
seed = pystan.misc._check_seed(seed)
stan_args = dict(iter=iter,
init=init,
chain_id=1,
seed=seed,
method="variational",
algorithm=algorithm)
if sample_file is not None:
stan_args['sample_file'] = pystan.misc._writable_sample_file(sample_file)
else:
stan_args['sample_file'] = os.path.join(tempfile.mkdtemp(), 'output.csv')
if diagnostic_file is not None:
stan_args['diagnostic_file'] = diagnostic_file
# check that arguments in kwargs are valid
valid_args = {'elbo_samples', 'eta', 'adapt_engaged', 'eval_elbo',
'grad_samples', 'output_samples', 'adapt_iter',
'tol_rel_obj'}
for arg in kwargs:
if arg not in valid_args:
raise ValueError("Parameter `{}` is not recognized.".format(arg))
stan_args.update(kwargs)
stan_args = pystan.misc._get_valid_stan_args(stan_args)
ret, sample = fit._call_sampler(stan_args)
logger.warning('Automatic Differentiation Variational Inference (ADVI) is an EXPERIMENTAL ALGORITHM.')
logger.warning('ADVI samples may be found on the filesystem in the file `{}`'.format(sample.args['sample_file'].decode('utf8')))
return OrderedDict([('args', sample.args), ('mean_pars', sample.mean_pars)])
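Joining a fresh mkdtemp() result with a fixed file name, as the sample_file default above does, gives a collision-free path: the directory is newly created with mode 0700 and private to the calling user. The idiom on its own:
import os
import tempfile

sample_file = os.path.join(tempfile.mkdtemp(), 'output.csv')
# the directory exists and is readable only by this user;
# 'output.csv' inside it does not exist yet, so it can be created safely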
Example 70
Project: anima Source File: test_version_mover.py
def setUp(self):
"""sets up the test
"""
db.setup()
db.init()
self.test_repo_path = tempfile.mkdtemp()
# create test data
self.test_repo = Repository(
name='Test Repository',
linux_path=self.test_repo_path,
windows_path=self.test_repo_path,
osx_path=self.test_repo_path
)
self.test_task_template = FilenameTemplate(
name='Task Template',
target_entity_type='Task',
path='{{project.code}}/{%- for parent_task in parent_tasks -%}'
'{{parent_task.nice_name}}/{%- endfor -%}',
filename='{{version.nice_name}}'
'_v{{"%03d"|format(version.version_number)}}',
)
self.test_structure = Structure(
name='Test Project Structure',
templates=[self.test_task_template]
)
self.status_new = Status.query.filter_by(code='NEW').first()
self.status_wip = Status.query.filter_by(code='WIP').first()
self.status_cmpl = Status.query.filter_by(code='CMPL').first()
self.test_project_status_list = StatusList(
name='Project Statuses',
statuses=[self.status_new, self.status_wip, self.status_cmpl],
target_entity_type='Project'
)
self.test_project1 = Project(
name='Test Project 1',
code='TP1',
repository=self.test_repo,
structure=self.test_structure,
status_list=self.test_project_status_list
)
db.DBSession.add(self.test_project1)
db.DBSession.commit()
# now create tasks
# root tasks
self.test_task1 = Task(
name='Task1',
project=self.test_project1
)
self.test_task2 = Task(
name='Task2',
project=self.test_project1
)
self.test_task3 = Task(
name='Task3',
project=self.test_project1
)
# child of Task1
self.test_task4 = Task(
name='Task4',
parent=self.test_task1
)
self.test_task5 = Task(
name='Task5',
parent=self.test_task1
)
self.test_task6 = Task(
name='Task6',
parent=self.test_task1
)
# child of Task2
self.test_task7 = Task(
name='Task7',
parent=self.test_task2
)
self.test_task8 = Task(
name='Task8',
parent=self.test_task2
)
self.test_task9 = Task(
name='Task9',
parent=self.test_task2
)
# child of Task10
self.test_task10 = Task(
name='Task10',
parent=self.test_task3
)
self.test_task11 = Task(
name='Task11',
parent=self.test_task3
)
self.test_task12 = Task(
name='Task12',
parent=self.test_task3
)
db.DBSession.add_all([
self.test_task1, self.test_task2, self.test_task3, self.test_task4,
self.test_task5, self.test_task6, self.test_task7, self.test_task8,
self.test_task9, self.test_task10, self.test_task11,
self.test_task12
])
# now create versions for each of leaf tasks
# Task4
# Main
self.test_version1 = self.create_version(self.test_task4,
take_name='Main')
self.test_version2 = self.create_version(self.test_task4,
take_name='Main')
self.test_version3 = self.create_version(self.test_task4,
take_name='Main')
# Take1
self.test_version4 = self.create_version(self.test_task4,
take_name='Take1')
self.test_version5 = self.create_version(self.test_task4,
take_name='Take1')
self.test_version6 = self.create_version(self.test_task4,
take_name='Take1')
# Take2
self.test_version7 = self.create_version(self.test_task4,
take_name='Take2')
self.test_version8 = self.create_version(self.test_task4,
take_name='Take2')
self.test_version9 = self.create_version(self.test_task4,
take_name='Take2')
# Task5
# Main
self.test_version10 = self.create_version(self.test_task5,
take_name='Main')
self.test_version11 = self.create_version(self.test_task5,
take_name='Main')
self.test_version12 = self.create_version(self.test_task5,
take_name='Main')
# Take1
self.test_version13 = self.create_version(self.test_task5,
take_name='Take1')
self.test_version14 = self.create_version(self.test_task5,
take_name='Take1')
self.test_version15 = self.create_version(self.test_task5,
take_name='Take1')
# Take2
self.test_version16 = self.create_version(self.test_task5,
take_name='Take2')
self.test_version17 = self.create_version(self.test_task5,
take_name='Take2')
self.test_version18 = self.create_version(self.test_task5,
take_name='Take2')
# Task6
# Main
self.test_version19 = self.create_version(self.test_task6,
take_name='Main')
self.test_version20 = self.create_version(self.test_task6,
take_name='Main')
self.test_version21 = self.create_version(self.test_task6,
take_name='Main')
# Take1
self.test_version22 = self.create_version(self.test_task6,
take_name='Take1')
self.test_version23 = self.create_version(self.test_task6,
take_name='Take1')
self.test_version24 = self.create_version(self.test_task6,
take_name='Take1')
# Take2
self.test_version25 = self.create_version(self.test_task6,
take_name='Take2')
self.test_version26 = self.create_version(self.test_task6,
take_name='Take2')
self.test_version27 = self.create_version(self.test_task6,
take_name='Take2')
# Task7
# Main
self.test_version28 = self.create_version(self.test_task7,
take_name='Main')
self.test_version29 = self.create_version(self.test_task7,
take_name='Main')
self.test_version30 = self.create_version(self.test_task7,
take_name='Main')
# Take1
self.test_version31 = self.create_version(self.test_task7,
take_name='Take1')
self.test_version32 = self.create_version(self.test_task7,
take_name='Take1')
self.test_version33 = self.create_version(self.test_task7,
take_name='Take1')
# Take2
self.test_version34 = self.create_version(self.test_task7,
take_name='Take2')
self.test_version35 = self.create_version(self.test_task7,
take_name='Take2')
self.test_version36 = self.create_version(self.test_task7,
take_name='Take2')
# Task8 - will have no versions
# Task9 - a destination task that already has versions
# Main
self.test_version37 = self.create_version(self.test_task9,
take_name='Main')
self.test_version38 = self.create_version(self.test_task9,
take_name='Main')
self.test_version39 = self.create_version(self.test_task9,
take_name='Main')
# Take1 - an existing take
self.test_version40 = self.create_version(self.test_task9,
take_name='Take1')
self.test_version41 = self.create_version(self.test_task9,
take_name='Take1')
self.test_version42 = self.create_version(self.test_task9,
take_name='Take1')
# Take3 - a non-existing take
self.test_version43 = self.create_version(self.test_task9,
take_name='Take3')
self.test_version44 = self.create_version(self.test_task9,
take_name='Take3')
self.test_version45 = self.create_version(self.test_task9,
take_name='Take3')
if not QtGui.QApplication.instance():
self.app = QtGui.QApplication(sys.argv)
else:
# self.app = QtGui.qApp
self.app = QtGui.QApplication.instance()
self.dialog = VersionMover()
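The setUp above allocates a temporary repository root, but the matching tearDown is not shown. A minimal sketch of the usual pairing with plain unittest (class and test names here are invented):

import os
import shutil
import tempfile
import unittest

class RepositoryTestCase(unittest.TestCase):

    def setUp(self):
        # Each test gets an isolated on-disk repository root.
        self.test_repo_path = tempfile.mkdtemp()

    def tearDown(self):
        # Remove it so repeated runs do not accumulate directories.
        shutil.rmtree(self.test_repo_path, ignore_errors=True)

    def test_repo_path_exists(self):
        self.assertTrue(os.path.isdir(self.test_repo_path))

if __name__ == '__main__':
    unittest.main()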
Example 71
Project: zerocloud Source File: test_queue.py
def do_setup(the_object_server):
utils.HASH_PATH_SUFFIX = 'endcap'
global _testdir, _test_servers, _test_sockets, \
_orig_container_listing_limit, _test_coros, _orig_SysLogHandler, \
_orig_POLICIES, _test_POLICIES
_orig_POLICIES = storage_policy._POLICIES
_orig_SysLogHandler = utils.SysLogHandler
utils.SysLogHandler = mock.MagicMock()
# Since we're starting up a lot here, we're going to test more than
# just chunked puts; we're also going to test parts of
# proxy_server.Application we couldn't get to easily otherwise.
_testdir = \
os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
mkdirs(_testdir)
rmtree(_testdir)
mkdirs(os.path.join(_testdir, 'sda1'))
mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
mkdirs(os.path.join(_testdir, 'sdb1'))
mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false',
'allowed_headers': 'content-encoding, x-object-manifest, '
'content-disposition, foo',
'disable_fallocate': 'true',
'allow_versions': 'True',
'zerovm_maxoutput': 1024 * 1024 * 10}
prolis = listen(('localhost', 0))
acc1lis = listen(('localhost', 0))
acc2lis = listen(('localhost', 0))
con1lis = listen(('localhost', 0))
con2lis = listen(('localhost', 0))
obj1lis = listen(('localhost', 0))
obj2lis = listen(('localhost', 0))
_test_sockets = \
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
account_devs = [
{'port': acc1lis.getsockname()[1]},
{'port': acc2lis.getsockname()[1]},
]
write_fake_ring(account_ring_path, *account_devs)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
container_devs = [
{'port': con1lis.getsockname()[1]},
{'port': con2lis.getsockname()[1]},
]
write_fake_ring(container_ring_path, *container_devs)
storage_policy._POLICIES = StoragePolicyCollection([
StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)])
obj_rings = {
0: ('sda1', 'sdb1'),
1: ('sdc1', 'sdd1'),
2: ('sde1', 'sdf1'),
}
for policy_index, devices in obj_rings.items():
policy = POLICIES[policy_index]
dev1, dev2 = devices
obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
obj_devs = [
{'port': obj1lis.getsockname()[1], 'device': dev1},
{'port': obj2lis.getsockname()[1], 'device': dev2},
]
write_fake_ring(obj_ring_path, *obj_devs)
prosrv = proxy_server.Application(conf, FakeMemcacheReturnsNone(),
logger=debug_logger('proxy'))
for policy in POLICIES:
# make sure all the rings are loaded
prosrv.get_object_ring(policy.idx)
# don't lose this one!
_test_POLICIES = storage_policy._POLICIES
acc1srv = account_server.AccountController(
conf, logger=debug_logger('acct1'))
acc2srv = account_server.AccountController(
conf, logger=debug_logger('acct2'))
con1srv = container_server.ContainerController(
conf, logger=debug_logger('cont1'))
con2srv = container_server.ContainerController(
conf, logger=debug_logger('cont2'))
obj1srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj2'))
queuesrv = queue.QueueMiddleware(prosrv, conf,
logger=prosrv.logger)
nl = NullLogger()
logging_prosv = proxy_logging.ProxyLoggingMiddleware(queuesrv, conf,
logger=prosrv.logger)
prospa = spawn(wsgi.server, prolis, logging_prosv, nl)
acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl)
acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl)
con1spa = spawn(wsgi.server, con1lis, con1srv, nl)
con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
_test_servers = \
(queuesrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv)
_test_coros = \
(prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa)
# Create account
ts = normalize_timestamp(time())
partition, nodes = prosrv.account_ring.get_nodes('a')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT', '/a',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert(resp.status == 201)
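The setup above joins a fixed name onto mkdtemp() before creating device subdirectories. A minimal sketch of that trick in isolation: mkdtemp() guarantees a unique parent, so the joined leaf path is also unique but does not yet exist:

import os
import shutil
import tempfile

testdir = os.path.join(tempfile.mkdtemp(), 'tmp_test_proxy_server_chunked')
os.makedirs(os.path.join(testdir, 'sda1', 'tmp'))
os.makedirs(os.path.join(testdir, 'sdb1', 'tmp'))
print(sorted(os.listdir(testdir)))  # ['sda1', 'sdb1']

# Removing the mkdtemp() parent cleans up everything beneath it.
shutil.rmtree(os.path.dirname(testdir))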
Example 72
Project: zest.releaser Source File: functional.py
def setup(test):
# Reset constants to original settings:
utils.AUTO_RESPONSE = False
utils.TESTMODE = False
partstestdir = os.getcwd()  # Buildout's tests run in parts/test
test.orig_dir = partstestdir
test.tempdir = tempfile.mkdtemp(prefix='testtemp')
test.orig_argv = sys.argv[1:]
sys.argv[1:] = []
# Monkey patch sys.exit
test.orig_exit = sys.exit
def _exit(code=None):
msg = "SYSTEM EXIT (code=%s)" % code
raise RuntimeError(msg)
sys.exit = _exit
# Monkey patch urllib for pypi access mocking.
test.orig_urlopen = urllib2.urlopen
test.mock_pypi_available = []
def _make_mock_urlopen(mock_pypi_available):
def _mock_urlopen(url):
# print "Mock opening", url
package = url.replace('https://pypi.python.org/simple/', '')
if package not in mock_pypi_available:
raise HTTPError(
url, 404,
'HTTP Error 404: Not Found (%s does not have any releases)'
% package, None, None)
else:
answer = ' '.join(mock_pypi_available)
return StringIO(answer)
return _mock_urlopen
urllib2.urlopen = _make_mock_urlopen(test.mock_pypi_available)
# Extract example project
example_tar = pkg_resources.resource_filename(
'zest.releaser.tests', 'example.tar')
tf = tarfile.TarFile(example_tar)
tf.extractall(path=test.tempdir)
sourcedir = os.path.join(test.tempdir, 'tha.example')
# Init svn repo.
repodir = os.path.join(test.tempdir, 'svnrepo')
# With newer svn versions (1.8), we need to add the --compatible-version
# argument, because some 'git svn' versions cannot handle higher versions.
# You get this error when doing 'git svn clone':
# "Expected FS format between '1' and '4'; found format '6'"
# But on svn 1.6, this option is not available so it crashes. So we try.
result = execute_command(
'svnadmin create --compatible-version=1.6 %s' % repodir)
if Fore.RED in result:
execute_command('svnadmin create %s' % repodir)
repo_url = 'file://' + repodir # TODO: urllib or so for windows
# Import example project
execute_command('svn mkdir %s/tha.example -m "mkdir"' % repo_url)
execute_command('svn mkdir %s/tha.example/tags -m "mkdir"' % repo_url)
execute_command(
'svn import %s %s/tha.example/trunk -m "import"' % (sourcedir,
repo_url))
# Subversion checkout
svnsourcedir = os.path.join(test.tempdir, 'tha.example-svn')
execute_command(
'svn co %s/tha.example/trunk %s' % (repo_url, svnsourcedir))
execute_command(
'svn propset svn:ignore "tha.example.egg-info *.pyc" %s/src ' %
svnsourcedir)
execute_command('svn up %s' % svnsourcedir)
execute_command('svn commit %s -m "ignoring egginfo"' % svnsourcedir)
# Mercurial initialization
hgsourcedir = os.path.join(test.tempdir, 'tha.example-hg')
shutil.copytree(sourcedir, hgsourcedir)
execute_command("hg init %s" % hgsourcedir)
with open(os.path.join(hgsourcedir, '.hgignore'), 'wb') as f:
f.write('tha.example.egg-info\n\\.pyc$\n'.encode('utf-8'))
execute_command("hg add %s" % hgsourcedir)
execute_command("hg commit -m 'init' %s" % hgsourcedir)
# Bazaar initialization
bzrsourcedir = os.path.join(test.tempdir, 'tha.example-bzr')
shutil.copytree(sourcedir, bzrsourcedir)
execute_command("bzr init %s" % bzrsourcedir)
with open(os.path.join(bzrsourcedir, '.bzrignore'), 'w') as f:
f.write('tha.example.egg-info\n*.pyc\n')
execute_command("bzr add %s" % bzrsourcedir)
execute_command("bzr commit -m 'init' %s" % bzrsourcedir)
# Git initialization
gitsourcedir = os.path.join(test.tempdir, 'tha.example-git')
shutil.copytree(sourcedir, gitsourcedir)
os.chdir(gitsourcedir)
execute_command("git init")
with open(os.path.join(gitsourcedir, '.gitignore'), 'w') as f:
f.write('tha.example.egg-info\n*.pyc\n')
execute_command("git add .")
execute_command("git commit -a -m 'init'")
os.chdir(test.orig_dir)
# Git svn initialization
gitsvnsourcedir = os.path.join(test.tempdir, 'tha.example-gitsvn')
execute_command(
'git svn clone -s %s/tha.example %s' % (repo_url, gitsvnsourcedir))
os.chdir(test.orig_dir)
def svnhead(*filename_parts):
filename = os.path.join(svnsourcedir, *filename_parts)
with open(filename) as f:
lines = f.readlines()
for line in lines[:5]:
print(line.strip())
def hghead(*filename_parts):
filename = os.path.join(hgsourcedir, *filename_parts)
with open(filename) as f:
lines = f.readlines()
for line in lines[:5]:
print(line.strip())
def bzrhead(*filename_parts):
filename = os.path.join(bzrsourcedir, *filename_parts)
with open(filename) as f:
lines = f.readlines()
for line in lines[:5]:
print(line.strip())
def githead(*filename_parts):
filename = os.path.join(gitsourcedir, *filename_parts)
with open(filename) as f:
lines = f.readlines()
for line in lines[:5]:
print(line.strip())
def add_changelog_entry():
# Replace '- Nothing changed yet.' by a different entry.
with open('CHANGES.txt') as f:
orig_changes = f.read()
new_changes = orig_changes.replace(
NOTHING_CHANGED_YET, '- Brown bag release.')
with open('CHANGES.txt', 'w') as f:
f.write(new_changes)
test.globs.update({'unicode_literals': unicode_literals,
'tempdir': test.tempdir,
'repo_url': repo_url,
'svnsourcedir': svnsourcedir,
'hgsourcedir': hgsourcedir,
'bzrsourcedir': bzrsourcedir,
'gitsourcedir': gitsourcedir,
'gitsvnsourcedir': gitsvnsourcedir,
'svnhead': svnhead,
'hghead': hghead,
'bzrhead': bzrhead,
'githead': githead,
'mock_pypi_available': test.mock_pypi_available,
'add_changelog_entry': add_changelog_entry,
})
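The setup above passes prefix='testtemp' so its directory is easy to spot among other entries in the system temp location. A short sketch of mkdtemp()'s optional keywords (directory names will vary per run):

import shutil
import tempfile

d1 = tempfile.mkdtemp(prefix='testtemp')
d2 = tempfile.mkdtemp(suffix='-work', prefix='testtemp')
print(d1)  # e.g. /tmp/testtempa1b2c3; created with mode 0700
print(d2)  # e.g. /tmp/testtempd4e5f6-work

shutil.rmtree(d1)
shutil.rmtree(d2)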
Example 73
Project: storjnode Source File: queued_file_transfer.py
def test_queued():
from crochet import setup
setup()
# Alice sample node.
alice_wallet = BtcTxStore(testnet=False, dryrun=True)
alice_wif = alice_wallet.create_key()
alice_node_id = address_to_node_id(alice_wallet.get_address(alice_wif))
alice_dht = pyp2p.dht_msg.DHT(
node_id=alice_node_id,
networking=0
)
alice = FileTransfer(
pyp2p.net.Net(
net_type="direct",
node_type="passive",
nat_type="preserving",
passive_port=63400,
dht_node=alice_dht,
wan_ip="8.8.8.8",
debug=1
),
BandwidthLimit(),
wif=alice_wif,
store_config={tempfile.mkdtemp(): None},
)
# Bob sample node.
bob_wallet = BtcTxStore(testnet=False, dryrun=True)
bob_wif = bob_wallet.create_key()
bob_node_id = address_to_node_id(bob_wallet.get_address(bob_wif))
bob_dht = pyp2p.dht_msg.DHT(
node_id=bob_node_id,
networking=0
)
bob = FileTransfer(
pyp2p.net.Net(
net_type="direct",
node_type="passive",
nat_type="preserving",
passive_port=63401,
dht_node=bob_dht,
wan_ip="8.8.8.8",
debug=1
),
BandwidthLimit(),
wif=bob_wif,
store_config={tempfile.mkdtemp(): None}
)
# Simulate Alice + Bob "connecting"
alice_dht.add_relay_link(bob_dht)
bob_dht.add_relay_link(alice_dht)
# Accept all transfers.
def accept_handler(contract_id, src_unl, data_id, file_size):
return 1
# Add accept handler.
alice.handlers["accept"].add(accept_handler)
bob.handlers["accept"].add(accept_handler)
# Create the file we're supposed to be uploading.
data_id = ("5feceb66ffc86f38d952786c6d696c"
"79c2dbc239dd4e91b46729d73a27fb57e9")
path = os.path.join(list(alice.store_config)[0], data_id)
if not os.path.exists(path):
with open(path, "w") as fp:
fp.write("0")
# Alice wants to upload data to Bob.
upload_contract_id = alice.data_request(
"download",
data_id,
0,
bob.net.unl.value
)
# Delete source file.
def callback_builder(path, alice, bob, data_id):
def callback(client, contract_id, con):
print("Upload succeeded")
print("Removing content and downloading back")
os.remove(path)
# Fix transfers.
bob.handlers["complete"] = []
# Synchronize cons and check con.unl.
time.sleep(1)
clients = {"alice": alice, "bob": bob}
for client in list({"alice": alice, "bob": bob}):
print()
print(client)
clients[client].net.synchronize()
nodes_out = clients[client].net.outbound
nodes_in = clients[client].net.inbound
for node in nodes_out + nodes_in:
print(node["con"].unl)
print(clients[client].cons)
# Queued transfer:
download_contract_id = alice.data_request(
"upload",
data_id,
0,
bob.net.unl.value
)
print("Download contract ID =")
print(download_contract_id)
# Indicate Bob's download succeeded.
def alice_callback(val):
print("Download succeeded")
global queue_succeeded
queue_succeeded = 1
def alice_errback(val):
print("Download failed! Error:")
print(val)
# Hook upload from bob.
d = alice.defers[download_contract_id]
d.addCallback(alice_callback)
d.addErrback(alice_errback)
return callback
# Register callback for bob (fires when he has downloaded the data).
bob.handlers["complete"] = [
callback_builder(path, alice, bob, data_id)
]
# d = alice.defers[upload_contract_id]
# d.addCallback(callback_builder(path, alice, bob, data_id))
# Main event loop.
timeout = time.time() + 40
while not queue_succeeded and time.time() < timeout:
for client in [alice, bob]:
if client == alice:
_log.debug("Alice")
else:
_log.debug("Bob")
process_transfers(client)
time.sleep(1)
if not queue_succeeded:
print("\a")
for client in [alice, bob]:
client.net.stop()
assert(queue_succeeded == 1)
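The test above keys each node's store_config dict by a fresh temp directory and later resolves a data_id against list(store_config)[0]. The same lookup in miniature:

import os
import tempfile

store_config = {tempfile.mkdtemp(): None}
data_id = ('5feceb66ffc86f38d952786c6d696c'
           '79c2dbc239dd4e91b46729d73a27fb57e9')

# The first (and only) key of the dict is the storage root.
path = os.path.join(list(store_config)[0], data_id)
with open(path, 'w') as fp:
    fp.write('0')
print(os.path.exists(path))  # True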
Example 74
Project: faf Source File: c2p.py
def run(self, cmdline, db):
build_ids = []
missing = []
self.log_info("Executing eu-unstrip")
child = safe_popen("eu-unstrip", "-n", "--core", cmdline.COREDUMP)
if child is None:
self.log_error("Failed to execute eu-unstrip")
return 1
for line in child.stdout.splitlines():
match = Coredump2Packages.UNSTRIP_LINE_PARSER.match(line)
if not match:
self.log_warn("Unable to parse line: {0}".format(line))
continue
if not all(c in string.printable for c in line):
self.log_warn("Skipping line with non-printable characters")
self.log_debug(line)
continue
if match.group(2):
if match.group(3).startswith("/"):
build_ids.append((match.group(2), match.group(3)))
elif (match.group(5) != "-" and
not match.group(5).startswith("[")):
build_ids.append((match.group(2), match.group(5)))
else:
build_ids.append((match.group(2), None))
else:
missing.append(match.group(3))
self.log_info("Mapping build-ids into debuginfo packages")
build_id_maps = {}
debuginfos = {}
for build_id, soname in build_ids:
debug_file = self._build_id_to_debug_file(build_id)
db_packages = get_packages_by_file(db, debug_file)
db_packages = [p for p in db_packages if p.has_lob("package")]
if len(db_packages) < 1:
self.log_warn("No debuginfo found for '{0}' ({1})"
.format(build_id, soname))
continue
else:
self.log_debug("Found {0} debuginfo packages for '{1}' ({2}): "
"{3}".format(len(db_packages), build_id, soname,
[p.nvra() for p in db_packages]))
if build_id not in build_id_maps:
build_id_maps[build_id] = set()
for db_package in db_packages:
pkgname = db_package.name
pkgnvra = db_package.nvra()
build_id_maps[build_id].add(pkgname)
if pkgname not in debuginfos:
debuginfos[pkgname] = {}
if pkgnvra not in debuginfos[pkgname]:
debuginfos[pkgname][pkgnvra] = { "count": 0,
"package": db_package }
debuginfos[pkgname][pkgnvra]["count"] += 1
for build_id, debug_pkgs in build_id_maps.items():
if len(debug_pkgs) > 1:
self.log_warn("Debuginfo conflict: '{0}' is provided by {1}"
.format(build_id, debug_pkgs))
build_id_maps[build_id] = debug_pkgs.pop()
result = set()
debuginfo_maps = {}
debuginfo_packages = []
for pkgname in sorted(debuginfos):
best = { "count": -1, "package": None }
for pkgnvra in debuginfos[pkgname]:
if debuginfos[pkgname][pkgnvra]["count"] > best["count"]:
best = debuginfos[pkgname][pkgnvra]
if best["package"]:
basename = best["package"].build.base_package_name
if basename in Coredump2Packages.SKIP_PACKAGES:
self.log_debug("Skipping '{0}'".format(basename))
continue
self.log_debug("Picking '{0}' for '{1}' with {2} build_id "
"matches".format(best["package"].nvra(),
best["package"].name,
best["count"]))
debuginfo_packages.append(best["package"])
debuginfo_maps[best["package"].name] = best["package"]
result.add(best["package"])
else:
# paranoia - this should never happen
self.log_warn("Unable to determine best version of '{0}'"
.format(pkgname))
self.log_info("Getting binary packages from debuginfos")
archs = {}
db_build_ids = [dp.build.id for dp in debuginfo_packages]
postprocess = set()
for build_id, soname in build_ids:
if build_id not in build_id_maps:
continue
if soname is None:
if (build_id in build_id_maps and
isinstance(build_id_maps[build_id], basestring) and
build_id_maps[build_id] in debuginfo_maps):
nvra = debuginfo_maps[build_id_maps[build_id]].nvra()
self.log_info("No shared object name for '{0}' ({1})"
.format(build_id, nvra))
db_build = debuginfo_maps[build_id_maps[build_id]].build
postprocess.add(db_build)
else:
debuginfo_name = build_id_maps[build_id]
if debuginfo_name in Coredump2Packages.SKIP_PACKAGES:
self.log_debug("Skipping {0}".format(debuginfo_name))
continue
db_arch = debuginfo_maps[debuginfo_name].arch
abspath = soname.startswith("/")
db_packages = get_packages_by_file_builds_arch(db,
soname,
db_build_ids,
db_arch,
abspath=abspath)
if abspath and len(db_packages) < 1:
new_soname = usrmove(soname)
db_packages = get_packages_by_file_builds_arch(db,
new_soname,
db_build_ids,
db_arch)
if len(db_packages) < 1:
self.log_warn("Unable to find binary package for '{0}' "
"({1})".format(build_id, soname))
continue
for db_package in db_packages:
result.add(db_package)
arch = db_arch.name
if arch not in archs:
archs[arch] = 0
archs[arch] += 1
if len(postprocess) > 0 and len(archs) > 0:
self.log_info("Post-processing records without shared object name")
arch = None
archmax = 0
for archname, archcount in archs.items():
if archcount > archmax:
archmax = archcount
arch = archname
self.log_info("Determined architecture: {0}".format(arch))
for db_build in postprocess:
basename = db_build.base_package_name
if basename in Coredump2Packages.SKIP_PACKAGES:
self.log_info("Skipping {0}".format(basename))
continue
for db_package in db_build.packages:
if db_package.arch.name == arch:
self.log_debug("Picking {0} for {1}"
.format(db_package.nvra(), basename))
result.add(db_package)
link = None
tmpdir = None
if cmdline.symlink_dir:
tmpdir = tempfile.mkdtemp(dir=cmdline.symlink_dir)
link = os.symlink
elif cmdline.hardlink_dir:
tmpdir = tempfile.mkdtemp(dir=cmdline.hardlink_dir)
link = os.link
for db_package in result:
if link is None:
print(db_package.nvra())
continue
path_from = db_package.get_lob_path("package")
path_to = os.path.join(tmpdir, "{0}.rpm".format(db_package.nvra()))
try:
link(path_from, path_to)
except OSError:
if cmdline.no_copy:
continue
shutil.copy2(path_from, path_to)
if tmpdir is not None:
print(tmpdir)
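The command above uses mkdtemp(dir=...) so the link farm lands inside a user-chosen parent. A minimal sketch of the dir= keyword (the parent here is itself a temp dir standing in for cmdline.symlink_dir):

import os
import tempfile

parent = tempfile.mkdtemp()            # stands in for cmdline.symlink_dir
tmpdir = tempfile.mkdtemp(dir=parent)  # guaranteed to live under parent
assert os.path.dirname(tmpdir) == parent

# run() then populates tmpdir with os.symlink or os.link, falling back
# to shutil.copy2 when linking fails and copying is allowed.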
Example 75
Project: WAPT Source File: install.py
def run(self, options, args):
if (
options.no_install or
options.no_download or
(options.build_dir != build_prefix) or
options.no_clean
):
logger.deprecated('1.7', 'DEPRECATION: --no-install, --no-download, --build, '
'and --no-clean are deprecated. See https://github.com/pypa/pip/issues/906.')
if options.download_dir:
options.no_install = True
options.ignore_installed = True
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError("Can not perform a '--user' install. User site-packages are not visible in this virtualenv.")
install_options.append('--user')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if os.path.exists(options.target_dir) and not os.path.isdir(options.target_dir):
raise CommandError("Target path exists but is not a directory, will not continue.")
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
index_urls = []
if options.use_mirrors:
logger.deprecated("1.7",
"--use-mirrors has been deprecated and will be removed"
" in the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
if options.mirrors:
logger.deprecated("1.7",
"--mirrors has been deprecated and will be removed in "
" the future. Explicit uses of --index-url and/or "
"--extra-index-url is suggested.")
index_urls += options.mirrors
session = self._build_session(options)
finder = self._build_package_finder(options, index_urls, session)
requirement_set = RequirementSet(
build_dir=options.build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
download_cache=options.download_cache,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name, None))
for name in options.editables:
requirement_set.add_requirement(
InstallRequirement.from_editable(name, default_vcs=options.default_vcs))
for filename in options.requirements:
for req in parse_requirements(filename, finder=finder, options=options, session=session):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
opts = {'name': self.name}
if options.find_links:
msg = ('You must give at least one requirement to %(name)s '
'(maybe you meant "pip %(name)s %(links)s"?)' %
dict(opts, links=' '.join(options.find_links)))
else:
msg = ('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % opts)
logger.warn(msg)
return
try:
if not options.no_download:
requirement_set.prepare_files(finder, force_root_egg_info=self.bundle, bundle=self.bundle)
else:
requirement_set.locate_files()
if not options.no_install and not self.bundle:
requirement_set.install(install_options, global_options, root=options.root_path)
installed = ' '.join([req.name for req in
requirement_set.successfully_installed])
if installed:
logger.notify('Successfully installed %s' % installed)
elif not self.bundle:
downloaded = ' '.join([req.name for req in
requirement_set.successfully_downloaded])
if downloaded:
logger.notify('Successfully downloaded %s' % downloaded)
elif self.bundle:
requirement_set.create_bundle(self.bundle_filename)
logger.notify('Created bundle in %s' % self.bundle_filename)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if (not options.no_clean) and ((not options.no_install) or options.download_dir):
requirement_set.cleanup_files(bundle=self.bundle)
if options.target_dir:
if not os.path.exists(options.target_dir):
os.makedirs(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
shutil.move(
os.path.join(lib_dir, item),
os.path.join(options.target_dir, item)
)
shutil.rmtree(temp_target_dir)
return requirement_set
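For --target installs, the command above installs into a throwaway mkdtemp() directory, then moves the results into the real target and deletes the staging area. That staging pattern, reduced:

import os
import shutil
import tempfile

temp_target_dir = tempfile.mkdtemp()
target_dir = tempfile.mkdtemp()  # stands in for options.target_dir

# Pretend an installer dropped a module into the staging area.
open(os.path.join(temp_target_dir, 'pkg.py'), 'w').close()

for item in os.listdir(temp_target_dir):
    shutil.move(os.path.join(temp_target_dir, item),
                os.path.join(target_dir, item))
shutil.rmtree(temp_target_dir)
print(os.listdir(target_dir))  # ['pkg.py']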
Example 76
Project: cgat Source File: runZinba.py
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-f", "--input-format", dest="input_format",
type="choice",
choices=("bed", "bam"),
help="input file format [default=%default].")
parser.add_option("-s", "--fragment-size", dest="fragment_size",
type="int",
help="fragment size, used for the extension parameter "
"in Zinba [default=%default].")
parser.add_option("-m", "--zinba-mappability-dir", dest="mappability_dir",
type="string",
help="mappability_dir [default=%default].")
parser.add_option("-b", "--bit-file", dest="bit_filename",
type="string",
help="2bit genome filename [default=%default].")
parser.add_option("-c", "--control-filename", dest="control_filename",
type="string",
help="filename of input/control data in bed format "
"[default=%default].")
parser.add_option("-i", "--zinba-index-dir", dest="index_dir", type="string",
help="index directory [default=%default].")
parser.add_option("-t", "--threads", dest="threads", type="int",
help="number of threads to use [default=%default].")
parser.add_option("-q", "--fdr-threshold", dest="fdr_threshold",
type="float",
help="fdr threshold [default=%default].")
parser.add_option("-a", "--zinba-alignability-threshold",
dest="alignability_threshold", type="int",
help="alignability threshold [default=%default].")
parser.add_option("-p", "--aggregate-by-contig", dest="per_contig",
action="store_true",
help="run analysis per chromosome [default=%default]")
parser.add_option("-w", "--temp-dir", dest="tempdir", type="string",
help="use existing directory as temporary directory "
"[default=%default].")
parser.add_option("--keep-temp", dest="keep_temp", action="store_true",
help="keep temporary directory [default=%default]")
parser.add_option("--action", dest="action", type="choice",
choices=("full", "count", "predict", "model"),
help="action to perform [default=%default]")
parser.add_option("--zinba-improvement", dest="improvement", type="float",
help="relative improvement of likelihood until "
"convergence [default=%default]")
parser.add_option("--min-insert-size", dest="min_insert_size", type="int",
help="minimum insert size for paired end data "
"[default=%default]")
parser.add_option("--max-insert-size", dest="max_insert_size", type="int",
help="maximum insert size for paired end data "
"[default=%default]")
parser.set_defaults(
input_format="bed",
fragment_size=200,
mappability_dir=None,
threads=1,
alignability_threshold=1,
bit_filename=None,
fdr_threshold=0.05,
tempdir=None,
winsize=250,
offset=125,
cnvWinSize=1e+05,
cnvOffset=2500,
per_contig=False,
keep_temp=False,
min_insert_size=0,
max_insert_size=1000,
filelist="files.list",
selectchr="chr19",
action="full",
improvement=0.00001,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
if len(args) != 2:
raise ValueError(
"please specify a filename with sample data and an output file")
filename_sample, filename_output = args[0], args[1]
filename_sample = os.path.abspath(filename_sample)
filename_output = os.path.abspath(filename_output)
if options.control_filename:
filename_control = os.path.abspath(options.control_filename)
else:
filename_control = None
# load Zinba
R.library('zinba')
if not options.tempdir:
tmpdir = tempfile.mkdtemp()
else:
tmpdir = options.tempdir
E.info("changing to temporary directory %s" % tmpdir)
os.chdir(tmpdir)
if options.input_format == "bam":
E.info("converting bam files to bed")
if not os.path.exists(os.path.join(tmpdir, "sample.bed")):
filename_sample = bamToBed(
filename_sample,
os.path.join(tmpdir, "sample.bed"))
else:
E.info("using existing file %(tmpdir)s/sample.bed" %
locals())
filename_sample = os.path.join(
tmpdir, "sample.bed")
if filename_control:
if not os.path.exists(os.path.join(tmpdir, "control.bed")):
filename_control = bamToBed(
filename_control,
os.path.join(tmpdir, "control.bed"))
else:
E.info("using existing file %(tmpdir)s/control.bed" %
locals())
filename_control = os.path.join(
tmpdir, "control.bed")
fragment_size = options.fragment_size
threads = options.threads
bit_filename = options.bit_filename
mappability_dir = options.mappability_dir
fdr_threshold = options.fdr_threshold
tol = options.improvement
contigs = E.run(
"twoBitInfo %(bit_filename)s %(tmpdir)s/contig_sizes" % locals())
contig2size = dict(
[x.split() for x in IOTools.openFile(
os.path.join(tmpdir, "contig_sizes"))])
outdir = filename_output + "_files"
E.info('saving intermediate results in %s' % outdir)
if not os.path.exists(outdir):
os.mkdir(outdir)
filelist = os.path.join(outdir, filename_output + ".list")
modelfile = os.path.join(outdir, filename_output + ".model")
winfile = os.path.join(outdir, filename_output + ".wins")
winSize = 250
offset = 125
cnvWinSize = 100000
cnvOffset = 0
winGap = 0
peakconfidence = 1.0 - fdr_threshold
selectchr = options.selectchr
if not os.path.exists(os.path.join(tmpdir, "basecount")):
E.info("computing counts")
R('''basealigncount(inputfile='%(filename_sample)s',
outputfile='%(tmpdir)s/basecount',
extension=%(fragment_size)i,
filetype='bed',
twoBitFile='%(bit_filename)s' )
''' % locals())
else:
E.info("using existing counts")
# tried incremental updates
# for contig, size in contig2size.iteritems():
# for size in
# fn = os.path.join( tmpdir, "sample_%(contig)s_win%(size)ibp_offset(offset)ibp.txt" % locals() )
if options.action == "count":
E.info("computing window counts only - saving results in %s" % outdir)
R('''buildwindowdata(
seq='%(filename_sample)s',
align='%(mappability_dir)s',
input='%(filename_control)s',
twoBit='%(bit_filename)s',
winSize=%(winSize)i,
offset=%(offset)i,
cnvWinSize=%(cnvWinSize)i,
cnvOffset=%(cnvOffset)i,
filelist='%(filelist)s',
filetype='bed',
extension=%(fragment_size)s,
outdir='%(outdir)s/') ''' % locals())
elif options.action == "model":
# The important option is buildwin = 0
# parameterized for broad == FALSE and input present
# see zinba.R
# model selection only on chr19.
R('''run.zinba(
filelist='%(filelist)s',
formula=NULL,formulaE=NULL,formulaZ=NULL,
outfile='%(filename_output)s',
seq='%(filename_sample)s',
input='%(filename_control)s',
filetype='bed',
align='%(mappability_dir)s',
twoBit='%(bit_filename)s',
extension=%(fragment_size)s,
winSize=%(winSize)i,
offset=%(offset)i,
cnvWinSize=%(cnvWinSize)i,
cnvOffset=%(cnvOffset)i,
basecountfile='%(tmpdir)s/basecount',
buildwin=0,
threshold=%(fdr_threshold)f,
pquant=1,
peakconfidence=%(peakconfidence)f,
winGap=%(winGap)i,
tol=%(tol)f,
initmethod="count",
method="mixture",
numProc=%(threads)i,
printFullOut=1,
interaction=FALSE,
selectmodel=TRUE,
selectchr='%(selectchr)s',
selectcovs=c("input_count"),
selecttype="complete",
FDR=TRUE)''' % locals())
elif options.action == "predict":
# The important option is buildwin = 0 and selectmodel = FALSE
# parameterized for broad == FALSE and input present
# see zinba.R
# model selection only on chr19.
if not os.path.exists(modelfile):
raise OSError("model file %s does not exist" % modelfile)
E.info("reading model from %s" % modelfile)
R('''
final=read.table('%(modelfile)s', header=T, sep="\t")
final=final[final$fail==0,]
bestBIC=which.min(final$BIC)
formula=as.formula(paste("exp_count~",final$formula[bestBIC]))
formulaE=as.formula(paste("exp_count~",final$formulaE[bestBIC]))
formulaZ=as.formula(paste("exp_count~",final$formulaZ[bestBIC]))
cat("Background formula is:\n\t")
print(formula)
cat("Enrichment formula is:\n\t")
print(formulaE)
cat("Zero-inflated formula is:\n\t")
print(formulaE)
''' % locals())
E.info("predicting peaks")
R('''run.zinba(
filelist='%(filelist)s',
outfile='%(filename_output)s',
seq='%(filename_sample)s',
input='%(filename_control)s',
filetype='bed',
align='%(mappability_dir)s',
twoBit='%(bit_filename)s',
extension=%(fragment_size)s,
winSize=%(winSize)i,
offset=%(offset)i,
cnvWinSize=%(cnvWinSize)i,
cnvOffset=%(cnvOffset)i,
basecountfile='%(tmpdir)s/basecount',
buildwin=0,
threshold=%(fdr_threshold)f,
pquant=1,
winGap=%(winGap)i,
initmethod="count",
selectchr='%(selectchr)s',
tol=%(tol)f,
method="mixture",
numProc=%(threads)i,
printFullOut=1,
interaction=FALSE,
selectmodel=FALSE,
formula=formula,
formulaE=formulaE,
formulaZ=formulaZ,
peakconfidence=%(peakconfidence)f,
FDR=TRUE)''' % locals())
elif options.action == "per_contig":
E.info("processing per chromosome")
for contig, size in contig2size.items():
if contig not in ("chr16",):
continue
E.info("processing contig %s" % contig)
filename_sample_contig = filename_sample + "_%s" % contig
filename_control_contig = filename_control + "_%s" % contig
if not os.path.exists(filename_output + "_files"):
os.mkdir(filename_output + "_files")
filename_output_contig = os.path.join(
filename_output + "_files", contig)
filename_basecounts_contig = os.path.join(
tmpdir, "basecount_%s" % contig)
E.run(
"grep %(contig)s < %(filename_sample)s > %(filename_sample_contig)s" % locals())
E.run(
"grep %(contig)s < %(filename_control)s > %(filename_control_contig)s" % locals())
if not os.path.exists(filename_basecounts_contig):
E.info("computing counts")
R('''basealigncount( inputfile='%(filename_sample_contig)s',
outputfile='%(filename_basecounts_contig)s',
extension=%(fragment_size)i,
filetype='bed',
twoBitFile='%(bit_filename)s' )
''' % locals())
else:
E.info("using existing counts")
# run zinba, do not build window data
R('''zinba( refinepeaks=1,
seq='%(filename_sample_contig)s',
input='%(filename_control_contig)s',
filetype='bed',
align='%(mappability_dir)s',
twoBit='%(bit_filename)s',
outfile='%(filename_output_contig)s',
extension=%(fragment_size)s,
basecountfile='%(filename_basecounts_contig)s',
numProc=%(threads)i,
threshold=%(fdr_threshold)f,
broad=FALSE,
printFullOut=0,
interaction=FALSE,
mode='peaks',
FDR=TRUE) ''' % locals())
elif options.action == "full":
# run zinba, build window data and refine peaks
# Note that zinba() uses 'chr22' to select model
# which is not present in mouse. So call run.zinba
# directly.
R('''run.zinba(
refinepeaks=1,
buildwin=1,
seq='%(filename_sample)s',
input='%(filename_control)s',
filetype='bed',
align='%(mappability_dir)s',
twoBit='%(bit_filename)s',
outfile='%(filename_output)s',
extension=%(fragment_size)s,
winSize=%(winSize)i,
offset=%(offset)i,
basecountfile='%(tmpdir)s/basecount',
numProc=%(threads)i,
threshold=%(fdr_threshold)f,
pquant=1,
winGap=%(winGap)i,
selectchr='%(selectchr)s',
interaction=FALSE,
method="mixture",
cnvWinSize=%(cnvWinSize)i,
cnvOffset=%(cnvOffset)i,
selectmodel=TRUE,
selectcovs=c("input_count"),
selecttype="complete",
initmethod="count",
printFullOut=1,
diff=0,
pWinSize=200,
peakconfidence=%(peakconfidence)f,
FDR=TRUE) ''' % locals())
if not (options.tempdir or options.keep_temp):
shutil.rmtree(tmpdir)
# write footer and output benchmark information.
E.Stop()
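The script above removes tmpdir only when it created the directory itself and --keep-temp was not given. A sketch of that ownership rule as a context manager (the helper name is invented):

import contextlib
import shutil
import tempfile

@contextlib.contextmanager
def working_dir(user_tempdir=None, keep_temp=False):
    # Fall back to a fresh mkdtemp() when no directory was supplied.
    tmpdir = user_tempdir or tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        # Delete only what we created, and only if not asked to keep it.
        if user_tempdir is None and not keep_temp:
            shutil.rmtree(tmpdir)

with working_dir() as tmpdir:
    print('working in', tmpdir)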
Example 77
Project: cgat Source File: bam2wiggle.py
def main(argv=None):
"""script main.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-o", "--output-format", dest="output_format",
type="choice",
choices=(
"bedgraph", "wiggle", "bigbed",
"bigwig", "bed"),
help="output format [default=%default]")
parser.add_option("-s", "--shift-size", dest="shift", type="int",
help="shift reads by a certain amount (ChIP-Seq) "
"[%default]")
parser.add_option("-e", "--extend", dest="extend", type="int",
help="extend reads by a certain amount "
"(ChIP-Seq) [%default]")
parser.add_option("-p", "--wiggle-span", dest="span", type="int",
help="span of a window in wiggle tracks "
"[%default]")
parser.add_option("-m", "--merge-pairs", dest="merge_pairs",
action="store_true",
help="merge paired-ended reads into a single "
"bed interval [default=%default].")
parser.add_option("--scale-base", dest="scale_base", type="float",
help="number of reads/pairs to scale bigwig file to. "
"The default is to scale to 1M reads "
"[default=%default]")
parser.add_option("--scale-method", dest="scale_method", type="choice",
choices=("none", "reads",),
help="scale bigwig output. 'reads' will normalize by "
"the total number reads in the bam file that are used "
"to construct the bigwig file. If --merge-pairs is used "
"the number of pairs output will be used for "
"normalization. 'none' will not scale the bigwig file"
"[default=%default]")
parser.add_option("--max-insert-size", dest="max_insert_size",
type="int",
help="only merge if insert size less that "
"# bases. 0 turns of this filter "
"[default=%default].")
parser.add_option("--min-insert-size", dest="min_insert_size",
type="int",
help="only merge paired-end reads if they are "
"at least # bases apart. "
"0 turns of this filter. [default=%default]")
parser.set_defaults(
samfile=None,
output_format="wiggle",
shift=0,
extend=0,
span=1,
merge_pairs=None,
min_insert_size=0,
max_insert_size=0,
scale_method='none',
scale_base=1000000,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv, add_output_options=True)
if len(args) >= 1:
options.samfile = args[0]
if len(args) == 2:
options.output_filename_pattern = args[1]
if not options.samfile:
raise ValueError("please provide a bam file")
# Read BAM file using Pysam
samfile = pysam.Samfile(options.samfile, "rb")
# Create temporary files / folders
tmpdir = tempfile.mkdtemp()
E.debug("temporary files are in %s" % tmpdir)
tmpfile_wig = os.path.join(tmpdir, "wig")
tmpfile_sizes = os.path.join(tmpdir, "sizes")
# Create dictionary of contig sizes
contig_sizes = dict(list(zip(samfile.references, samfile.lengths)))
# write contig sizes
outfile_size = IOTools.openFile(tmpfile_sizes, "w")
for contig, size in sorted(contig_sizes.items()):
outfile_size.write("%s\t%s\n" % (contig, size))
outfile_size.close()
# Shift and extend only available for bigwig format
if options.shift or options.extend:
if options.output_format != "bigwig":
raise ValueError(
"shift and extend only available for bigwig output")
# Output filename required for bigwig / bigbed computation
if options.output_format == "bigwig":
if not options.output_filename_pattern:
raise ValueError(
"please specify an output file for bigwig computation.")
# Define executable to use for binary conversion
if options.output_format == "bigwig":
executable_name = "wigToBigWig"
else:
raise ValueError("unknown output format `%s`" %
options.output_format)
# check required executable file is in the path
executable = IOTools.which(executable_name)
if not executable:
raise OSError("could not find %s in path." % executable_name)
# Open output file
outfile = IOTools.openFile(tmpfile_wig, "w")
E.info("starting output to %s" % tmpfile_wig)
else:
outfile = IOTools.openFile(tmpfile_wig, "w")
E.info("starting output to stdout")
# Set up output write functions
if options.output_format in ("wiggle", "bigwig"):
# wiggle is one-based, so add 1, also step-size is 1, so need
# to output all bases
if options.span == 1:
outf = lambda outfile, contig, start, end, val: \
outfile.write(
"".join(["%i\t%i\n" % (x, val)
for x in range(start + 1, end + 1)]))
else:
outf = SpanWriter(options.span)
elif options.output_format == "bedgraph":
# bed is 0-based, open-closed
outf = lambda outfile, contig, start, end, val: \
outfile.write("%s\t%i\t%i\t%i\n" % (contig, start, end, val))
# initialise counters
ninput, nskipped, ncontigs = 0, 0, 0
# set output file name
output_filename_pattern = options.output_filename_pattern
if output_filename_pattern:
output_filename = os.path.abspath(output_filename_pattern)
# Shift and extend or merge pairs. Output a temporary bed file.
if options.shift > 0 or options.extend > 0 or options.merge_pairs:
# Workflow 1: convert to bed intervals and use bedtools
# genomecov to build a coverage file.
# Convert to bigwig with UCSC tools bedGraph2BigWig
if options.merge_pairs:
# merge pairs using bam2bed
E.info("merging pairs to temporary file")
counter = _bam2bed.merge_pairs(
samfile,
outfile,
min_insert_size=options.min_insert_size,
max_insert_size=options.max_insert_size,
bed_format=3)
E.info("merging results: {}".format(counter))
if counter.output == 0:
raise ValueError("no pairs output after merging")
else:
# create bed file with shifted/extended tags
shift, extend = options.shift, options.extend
shift_extend = shift + extend
counter = E.Counter()
for contig in samfile.references:
E.debug("output for %s" % contig)
lcontig = contig_sizes[contig]
for read in samfile.fetch(contig):
pos = read.pos
if read.is_reverse:
start = max(0, read.pos + read.alen - shift_extend)
else:
start = max(0, read.pos + shift)
# intervals extending beyond contig are removed
if start >= lcontig:
continue
end = min(lcontig, start + extend)
outfile.write("%s\t%i\t%i\n" % (contig, start, end))
counter.output += 1
outfile.close()
if options.scale_method == "reads":
scale_factor = float(options.scale_base) / counter.output
E.info("scaling: method=%s scale_quantity=%i scale_factor=%f" %
(options.scale_method,
counter.output,
scale_factor))
scale = "-scale %f" % scale_factor
else:
scale = ""
# Convert bed file to coverage file (bedgraph)
tmpfile_bed = os.path.join(tmpdir, "bed")
E.info("computing coverage")
# calculate coverage - format is bedgraph
statement = """bedtools genomecov -bg -i %(tmpfile_wig)s %(scale)s
-g %(tmpfile_sizes)s > %(tmpfile_bed)s""" % locals()
E.run(statement)
# Convert bedgraph to bigwig
E.info("converting to bigwig")
tmpfile_sorted = os.path.join(tmpdir, "sorted")
statement = ("sort -k 1,1 -k2,2n %(tmpfile_bed)s > %(tmpfile_sorted)s;"
"bedGraphToBigWig %(tmpfile_sorted)s %(tmpfile_sizes)s "
"%(output_filename_pattern)s" % locals())
E.run(statement)
else:
# Workflow 2: use pysam column iterator to build a
# wig file. Then convert to bigwig of bedgraph file
# with UCSC tools.
def column_iter(iterator):
start = None
end = 0
n = None
for t in iterator:
if t.pos - end > 1 or n != t.n:
if start is not None:
yield start, end, n
start = t.pos
end = t.pos
n = t.n
end = t.pos
yield start, end, n
if options.scale_method != "none":
raise NotImplementedError(
"scaling not implemented for pileup method")
# Bedgraph track definition
if options.output_format == "bedgraph":
outfile.write("track type=bedGraph\n")
for contig in samfile.references:
# if contig != "chrX": continue
E.debug("output for %s" % contig)
lcontig = contig_sizes[contig]
# Write wiggle header
if options.output_format in ("wiggle", "bigwig"):
outfile.write("variableStep chrom=%s span=%i\n" %
(contig, options.span))
# Generate pileup per contig using pysam and iterate over columns
for start, end, val in column_iter(samfile.pileup(contig)):
# patch: there was a problem with bam files and reads
# overextending at the end. These are usually Ns, but
# need to check as otherwise wigToBigWig fails.
if lcontig <= end:
E.warn("read extending beyond contig: %s: %i > %i" %
(contig, end, lcontig))
end = lcontig
if start >= end:
continue
if val > 0:
outf(outfile, contig, start, end, val)
ncontigs += 1
# Close output file
if isinstance(outf, SpanWriter):
outf.flush(outfile)
else:
outfile.flush()
E.info("finished output")
# Report counters
E.info("ninput=%i, ncontigs=%i, nskipped=%i" %
(ninput, ncontigs, nskipped))
# Convert to binary formats
if options.output_format == "bigwig":
outfile.close()
E.info("starting %s conversion" % executable)
try:
retcode = subprocess.call(
" ".join((executable,
tmpfile_wig,
tmpfile_sizes,
output_filename_pattern)),
shell=True)
if retcode != 0:
E.warn("%s terminated with signal: %i" %
(executable, -retcode))
return -retcode
except OSError as msg:
E.warn("Error while executing bigwig: %s" % msg)
return 1
E.info("finished bigwig conversion")
else:
with open(tmpfile_wig) as inf:
sys.stdout.write(inf.read())
# Cleanup temp files
shutil.rmtree(tmpdir)
E.Stop()
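The script above pairs mkdtemp() at the top with shutil.rmtree() at the bottom. On Python 3.2+, tempfile.TemporaryDirectory bundles that pair into a context manager; a sketch of the same scratch-file layout (the script itself keeps the explicit pair):

import os
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    tmpfile_wig = os.path.join(tmpdir, 'wig')
    tmpfile_sizes = os.path.join(tmpdir, 'sizes')
    open(tmpfile_wig, 'w').close()
    open(tmpfile_sizes, 'w').close()
print(os.path.exists(tmpdir))  # False: removed on exiting the block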
Example 78
Project: tuskar Source File: lockutils.py
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
The external keyword argument denotes whether this lock should work across
multiple processes. This means that if two different workers both run a
method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
The lock_path keyword argument is used to specify a special location for
external lock files to live. If nothing is set, then CONF.lock_path is
used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" '
'for method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at '
'%(path)s for method '
'"%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
LOG.debug(_('Released file lock "%(lock)s" at '
'%(path)s for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to
# cleanup the locks left behind by unit
# tests.
if cleanup_dir:
shutil.rmtree(local_lock_path)
else:
retval = f(*args, **kwargs)
finally:
local.strong_store.locks_held.remove(name)
return retval
return inner
return wrap
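When no lock_path is configured, the decorator above falls back to mkdtemp() and flags the directory for removal once the lock is released. That fallback, reduced to a sketch (the helper is invented):

import os
import shutil
import tempfile

def lock_dir(configured_path=None):
    # Use the configured lock path when set; otherwise fall back to a
    # fresh mkdtemp() and remember that it must be cleaned up afterwards.
    cleanup_dir = configured_path is None
    return (configured_path or tempfile.mkdtemp()), cleanup_dir

path, cleanup_dir = lock_dir()
lock_file_path = os.path.join(path, 'prefix-mylock')
open(lock_file_path, 'w').close()
if cleanup_dir:
    shutil.rmtree(path)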
Example 79
@cfy.command(name='upgrade',
short_help='Upgrade a manager to a new version [manager only]')
@cfy.argument('blueprint-path')
@cfy.options.inputs
@cfy.options.validate_only
@cfy.options.skip_validations
@cfy.options.install_plugins
@cfy.options.task_retries(5)
@cfy.options.task_retry_interval(30)
@cfy.options.task_thread_pool_size()
@cfy.options.verbose()
@cfy.assert_manager_active()
@cfy.pass_logger
@cfy.pass_client(skip_version_check=True)
def upgrade(blueprint_path,
inputs,
validate_only,
skip_validations,
install_plugins,
task_retries,
task_retry_interval,
task_thread_pool_size,
logger,
client):
"""Upgrade a manager to a newer version
Note that you must supply a simple-manager-blueprint to perform
the upgrade and provide it with the relevant inputs.
`BLUEPRINT_PATH` is the path of the manager blueprint to use for upgrade.
"""
manager_ip = profile.manager_ip
verify_and_wait_for_maintenance_mode_activation(client)
if skip_validations:
# The user expects that `--skip-validations` will also ignore
# bootstrap validations and not only creation_validations
utils.add_ignore_bootstrap_validations_input(inputs)
inputs = update_inputs(inputs)
env_name = 'manager-upgrade'
# init local workflow execution environment
working_env = local.initialize_blueprint(blueprint_path,
storage=None,
install_plugins=install_plugins,
name=env_name,
inputs=inputs)
logger.info('Upgrading manager...')
put_workflow_state_file(is_upgrade=True,
key_filename=inputs['ssh_key_filename'],
user=inputs['ssh_user'],
port=inputs['ssh_port'])
if not skip_validations:
logger.info('Executing upgrade validations...')
working_env.execute(workflow='execute_operation',
parameters={
'operation':
'cloudify.interfaces.validation.creation'},
task_retries=task_retries,
task_retry_interval=task_retry_interval,
task_thread_pool_size=task_thread_pool_size)
logger.info('Upgrade validation completed successfully')
if not validate_only:
try:
logger.info('Executing manager upgrade...')
working_env.execute('install',
task_retries=task_retries,
task_retry_interval=task_retry_interval,
task_thread_pool_size=task_thread_pool_size)
except Exception as e:
msg = 'Upgrade failed! ({0})'.format(e)
error = exceptions.CloudifyCliError(msg)
error.possible_solutions = [
"Rerun upgrade: `cfy upgrade`",
"Execute rollback: `cfy rollback`"
]
raise error
manager_node = next(node for node in working_env.storage.get_nodes()
if node.id == 'manager_configuration')
upload_resources = \
manager_node.properties['cloudify'].get('upload_resources', {})
dsl_resources = upload_resources.get('dsl_resources', ())
if dsl_resources:
fetch_timeout = upload_resources.get('parameters', {}) \
.get('fetch_timeout', 30)
fabric_env = bs.build_fabric_env(manager_ip,
inputs['ssh_user'],
inputs['ssh_key_filename'],
inputs['ssh_port'])
temp_dir = tempfile.mkdtemp()
try:
logger.info('Uploading dsl resources...')
bs.upload_dsl_resources(dsl_resources,
temp_dir=temp_dir,
fabric_env=fabric_env,
retries=task_retries,
wait_interval=task_retry_interval,
timeout=fetch_timeout)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
plugin_resources = upload_resources.get('plugin_resources', ())
if plugin_resources:
logger.warn('Plugins upload is not supported for upgrade. Plugins '
'{0} will not be uploaded'
.format(plugin_resources))
logger.info('Upgrade complete')
logger.info('Manager is up at {0}'.format(
profile.manager_ip))
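The dsl-resources step above creates its scratch directory before the try block and removes it in finally with ignore_errors=True, so a failed upload still cleans up. The skeleton of that pattern:

import shutil
import tempfile

temp_dir = tempfile.mkdtemp()
try:
    pass  # stands in for bs.upload_dsl_resources(..., temp_dir=temp_dir)
finally:
    # ignore_errors=True: never mask the original exception with a
    # cleanup failure.
    shutil.rmtree(temp_dir, ignore_errors=True)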
Example 80
Project: sympy Source File: preview.py
@doctest_depends_on(exe=('latex', 'dvipng'), modules=('pyglet',),
disable_viewers=('evince', 'gimp', 'superior-dvi-viewer'))
def preview(expr, output='png', viewer=None, euler=True, packages=(),
filename=None, outputbuffer=None, preamble=None, dvioptions=None,
outputTexFile=None, **latex_settings):
r"""
View expression or LaTeX markup in PNG, DVI, PostScript or PDF form.
If the expr argument is an expression, it will be exported to LaTeX and
then compiled using the available TeX distribution. The first argument,
'expr', may also be a LaTeX string. The function will then run the
appropriate viewer for the given output format or use the user defined
one. By default png output is generated.
By default pretty Euler fonts are used for typesetting (they were used to
typeset the well-known "Concrete Mathematics" book). For that to work, you
need the 'eulervm.sty' LaTeX style (in Debian/Ubuntu, install the
texlive-fonts-extra package). If you prefer the default AMS fonts, or your
system lacks the 'eulervm' LaTeX package, then unset the 'euler' keyword
argument.
To use viewer auto-detection, let's say for 'png' output, issue
>>> from sympy import symbols, preview, Symbol
>>> x, y = symbols("x,y")
>>> preview(x + y, output='png')
This will choose 'pyglet' by default. To select a different one, do
>>> preview(x + y, output='png', viewer='gimp')
The 'png' format is considered special. For all other formats the rules
are slightly different. As an example we will take the 'dvi' output format.
If you run
>>> preview(x + y, output='dvi')
then 'view' will look for available 'dvi' viewers on your system
(predefined in the function, so it will try evince first, then kdvi and
xdvi). If nothing is found you will need to set the viewer explicitly.
>>> preview(x + y, output='dvi', viewer='superior-dvi-viewer')
This will skip auto-detection and will run user specified
'superior-dvi-viewer'. If 'view' fails to find it on your system it will
gracefully raise an exception.
You may also enter 'file' for the viewer argument. Doing so will cause
this function to return a file object in read-only mode, if 'filename'
is unset. However, if it was set, then 'preview' writes the generated
file to this filename instead.
There is also support for writing to a BytesIO like object, which needs
to be passed to the 'outputbuffer' argument.
>>> from io import BytesIO
>>> obj = BytesIO()
>>> preview(x + y, output='png', viewer='BytesIO',
... outputbuffer=obj)
The LaTeX preamble can be customized by setting the 'preamble' keyword
argument. This can be used, e.g., to set a different font size, use a
custom documentclass, or import a certain set of LaTeX packages.
>>> preamble = "\\docuementclass[10pt]{article}\n" \
... "\\usepackage{amsmath,amsfonts}\\begin{docuement}"
>>> preview(x + y, output='png', preamble=preamble)
If the value of 'output' is different from 'dvi' then command line
options can be set ('dvioptions' argument) for the execution of the
'dvi'+output conversion tool. These options have to be in the form of a
list of strings (see subprocess.Popen).
Additional keyword args will be passed to the latex call, e.g., the
symbol_names flag.
>>> phidd = Symbol('phidd')
>>> preview(phidd, symbol_names={phidd:r'\ddot{\varphi}'})
For post-processing, the generated TeX file can be written to a file by
passing the desired filename to the 'outputTexFile' keyword
argument. To write the TeX code to a file named
"sample.tex" and run the default png viewer to display the resulting
bitmap, do
>>> preview(x + y, outputTexFile="sample.tex")
"""
special = [ 'pyglet' ]
if viewer is None:
if output == "png":
viewer = "pyglet"
else:
# sorted in order from most pretty to most ugly
# very debatable, but indeed 'gv' looks awful :)
# TODO add candidates for windows to list
candidates = {
"dvi": [ "evince", "okular", "kdvi", "xdvi" ],
"ps": [ "evince", "okular", "gsview", "gv" ],
"pdf": [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
}
try:
for candidate in candidates[output]:
path = find_executable(candidate)
if path is not None:
viewer = path
break
else:
raise SystemError(
"No viewers found for '%s' output format." % output)
except KeyError:
raise SystemError("Invalid output format: %s" % output)
else:
if viewer == "file":
if filename is None:
SymPyDeprecationWarning(feature="Using viewer=\"file\" without a "
"specified filename", deprecated_since_version="0.7.3",
useinstead="viewer=\"file\" and filename=\"desiredname\"",
issue=7018).warn()
elif viewer == "StringIO":
SymPyDeprecationWarning(feature="The preview() viewer StringIO",
useinstead="BytesIO", deprecated_since_version="0.7.4",
issue=7083).warn()
viewer = "BytesIO"
if outputbuffer is None:
raise ValueError("outputbuffer has to be a BytesIO "
"compatible object if viewer=\"StringIO\"")
elif viewer == "BytesIO":
if outputbuffer is None:
raise ValueError("outputbuffer has to be a BytesIO "
"compatible object if viewer=\"BytesIO\"")
elif viewer not in special and not find_executable(viewer):
raise SystemError("Unrecognized viewer: %s" % viewer)
if preamble is None:
actual_packages = packages + ("amsmath", "amsfonts")
if euler:
actual_packages += ("euler",)
package_includes = "\n" + "\n".join(["\\usepackage{%s}" % p
for p in actual_packages])
preamble = r"""\docuementclass[12pt]{article}
\pagestyle{empty}
%s
\begin{docuement}
""" % (package_includes)
else:
if len(packages) > 0:
raise ValueError("The \"packages\" keyword must not be set if a "
"custom LaTeX preamble was specified")
latex_main = preamble + '\n%s\n\n' + r"\end{document}"
if isinstance(expr, str):
latex_string = expr
else:
latex_string = latex(expr, mode='inline', **latex_settings)
try:
workdir = tempfile.mkdtemp()
with io.open(join(workdir, 'texput.tex'), 'w', encoding='utf-8') as fh:
fh.write(unicode(latex_main) % u_decode(latex_string))
if outputTexFile is not None:
shutil.copyfile(join(workdir, 'texput.tex'), outputTexFile)
if not find_executable('latex'):
raise RuntimeError("latex program is not installed")
try:
check_output(['latex', '-halt-on-error', '-interaction=nonstopmode',
'texput.tex'], cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'latex' exited abnormally with the following output:\n%s" %
e.output)
if output != "dvi":
defaultoptions = {
"ps": [],
"pdf": [],
"png": ["-T", "tight", "-z", "9", "--truecolor"],
"svg": ["--no-fonts"],
}
commandend = {
"ps": ["-o", "texput.ps", "texput.dvi"],
"pdf": ["texput.dvi", "texput.pdf"],
"png": ["-o", "texput.png", "texput.dvi"],
"svg": ["-o", "texput.svg", "texput.dvi"],
}
if output == "svg":
cmd = ["dvisvgm"]
else:
cmd = ["dvi" + output]
if not find_executable(cmd[0]):
raise RuntimeError("%s is not installed" % cmd[0])
try:
if dvioptions is not None:
cmd.extend(dvioptions)
else:
cmd.extend(defaultoptions[output])
cmd.extend(commandend[output])
except KeyError:
raise SystemError("Invalid output format: %s" % output)
try:
check_output(cmd, cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'%s' exited abnormally with the following output:\n%s" %
(' '.join(cmd), e.output))
src = "texput.%s" % (output)
if viewer == "file":
if filename is None:
buffer = BytesIO()
with open(join(workdir, src), 'rb') as fh:
buffer.write(fh.read())
return buffer
else:
shutil.move(join(workdir,src), filename)
elif viewer == "BytesIO":
with open(join(workdir, src), 'rb') as fh:
outputbuffer.write(fh.read())
elif viewer == "pyglet":
try:
from pyglet import window, image, gl
from pyglet.window import key
except ImportError:
raise ImportError("pyglet is required for preview.\n visit http://www.pyglet.org/")
if output == "png":
from pyglet.image.codecs.png import PNGImageDecoder
img = image.load(join(workdir, src), decoder=PNGImageDecoder())
else:
raise SystemError("pyglet preview works only for 'png' files.")
offset = 25
config = gl.Config(double_buffer=False)
win = window.Window(
width=img.width + 2*offset,
height=img.height + 2*offset,
caption="sympy",
resizable=False,
config=config
)
win.set_vsync(False)
try:
def on_close():
win.has_exit = True
win.on_close = on_close
def on_key_press(symbol, modifiers):
if symbol in [key.Q, key.ESCAPE]:
on_close()
win.on_key_press = on_key_press
def on_expose():
gl.glClearColor(1.0, 1.0, 1.0, 1.0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
img.blit(
(win.width - img.width) / 2,
(win.height - img.height) / 2
)
win.on_expose = on_expose
while not win.has_exit:
win.dispatch_events()
win.flip()
except KeyboardInterrupt:
pass
win.close()
else:
try:
check_output([viewer, src], cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'%s %s' exited abnormally with the following output:\n%s" %
(viewer, src, e.output))
finally:
try:
shutil.rmtree(workdir) # delete directory
except OSError as e:
if e.errno != 2: # code 2 - no such file or directory
raise
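The durable pattern in this example is the try/finally around tempfile.mkdtemp(): the working directory is removed whether the external tool succeeds or raises. Here is a minimal sketch of the same shape, assuming a 'latex' binary on PATH; the function name and file names are only illustrative:
import os
import shutil
import subprocess
import tempfile

def render_in_tempdir(tex_source):
    workdir = tempfile.mkdtemp()   # private scratch directory
    try:
        with open(os.path.join(workdir, 'texput.tex'), 'w') as fh:
            fh.write(tex_source)
        # Run the tool with the temp dir as its cwd, as preview() does.
        subprocess.check_call(['latex', '-halt-on-error',
                               '-interaction=nonstopmode', 'texput.tex'],
                              cwd=workdir)
        with open(os.path.join(workdir, 'texput.dvi'), 'rb') as fh:
            return fh.read()
    finally:
        # Mirrors the finally block above: never leak the scratch dir.
        shutil.rmtree(workdir, ignore_errors=True)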
0
Example 81
Project: elijah-openstack Source File: cloudlet_client.py
def request_import_basevm(server_address, token,
endpoint, glance_endpoint,
import_filepath, basevm_name):
def _create_param(filepath, image_name, image_type, disk_size, mem_size):
properties = {
"image_type": "snapshot",
"image_location": "snapshot",
CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET: "True",
CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE: image_type,
CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID: base_hashvalue,
}
param = {
"name": "%s" % image_name,
"data": open(filepath, "rb"),
"size": os.path.getsize(filepath),
"is_public": True,
"disk_format": "raw",
"container_format": "bare",
"min_disk": disk_size,
"min_ram": mem_size,
"properties": properties,
}
return param
(base_hashvalue, disk_name, memory_name, diskhash_name, memoryhash_name) = \
PackagingUtil._get_basevm_attribute(import_filepath)
# check duplicated base VM
image_list = get_list(server_address, token, endpoint, "images")
for image in image_list:
properties = image.get("metadata", None)
if properties is None or len(properties) == 0:
continue
if properties.get(CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE) != \
CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
continue
base_sha256_uuid = properties.get(CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID)
if base_sha256_uuid == base_hashvalue:
msg = "Duplicated base VM is already exists on the system\n"
msg += "Image UUID of duplicated Base VM: %s\n" % image['id']
raise CloudletClientError(msg)
# decompress files
temp_dir = mkdtemp(prefix="cloudlet-base-")
sys.stdout.write(
"Decompressing zipfile(%s) to temp dir(%s)\n" %
(import_filepath, temp_dir))
zipbase = zipfile.ZipFile(
_FileFile("file:///%s" % os.path.abspath(import_filepath)), 'r')
zipbase.extractall(temp_dir)
disk_path = os.path.join(temp_dir, disk_name)
memory_path = os.path.join(temp_dir, memory_name)
diskhash_path = os.path.join(temp_dir, diskhash_name)
memoryhash_path = os.path.join(temp_dir, memoryhash_name)
# create new flavor if nothing matches
memory_header = elijah_memory_util._QemuMemoryHeader(open(memory_path))
libvirt_xml_str = memory_header.xml
cpu_count, memory_size_mb = get_resource_size(libvirt_xml_str)
disk_gb = int(math.ceil(os.path.getsize(disk_path)/1024/1024/1024))
flavor_list = get_list(server_address, token, endpoint, "flavors")
flavor_ref, flavor_id = find_matching_flavor(flavor_list, cpu_count,
memory_size_mb, disk_gb)
if flavor_id is None:
flavor_name = "cloudlet-flavor-%s" % basevm_name
flavor_ref, flavor_id = create_flavor(server_address,
token,
endpoint,
cpu_count,
memory_size_mb,
disk_gb,
flavor_name)
sys.stdout.write("Create new flavor for the base VM\n")
# upload Base VM
disk_param = _create_param(disk_path, basevm_name + "-disk",
CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK,
disk_gb, memory_size_mb)
memory_param = _create_param(memory_path, basevm_name + "-memory",
CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM,
disk_gb, memory_size_mb)
diskhash_param = _create_param(diskhash_path, basevm_name + "-diskhash",
CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK_HASH,
disk_gb, memory_size_mb)
memoryhash_param = _create_param(memoryhash_path, basevm_name + "-memhash",
CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM_HASH,
disk_gb, memory_size_mb)
url = "://".join((glance_endpoint.scheme, glance_endpoint.netloc))
gclient = glance_client.Client('1', url, token=token, insecure=True)
sys.stdout.write("upload base memory to glance\n")
glance_memory = gclient.images.create(**memory_param)
sys.stdout.write("upload base disk hash to glance\n")
glance_diskhash = gclient.images.create(**diskhash_param)
sys.stdout.write("upload base memory hash to glance\n")
glance_memoryhash = gclient.images.create(**memoryhash_param)
# upload Base disk at the last to have references for other image files
glance_ref = {
CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM: glance_memory.id,
CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK_HASH: glance_diskhash.id,
CLOUDLET_TYPE.IMAGE_TYPE_BASE_MEM_HASH: glance_memoryhash.id,
CLOUDLET_TYPE.PROPERTY_KEY_BASE_RESOURCE:
libvirt_xml_str.replace("\n", "") # API cannot send '\n'
}
disk_param['properties'].update(glance_ref)
sys.stdout.write("upload base disk to glance\n")
glance_disk = gclient.images.create(**disk_param)
# delete temp dir
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
return glance_disk
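Two details in this example generalize well: mkdtemp(prefix=...) labels the scratch directory so stray leftovers in /tmp are attributable, and the archive is extracted into it before any uploads happen. A reduced sketch of just the extraction step (function name illustrative):
import tempfile
import zipfile

def extract_to_tempdir(zip_path):
    # The prefix makes orphaned directories easy to identify later.
    temp_dir = tempfile.mkdtemp(prefix="cloudlet-base-")
    with zipfile.ZipFile(zip_path, 'r') as zf:
        zf.extractall(temp_dir)
    return temp_dir  # caller is responsible for shutil.rmtree(temp_dir)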
0
Example 82
def setup_experiment(debug=True, verbose=False, app=None):
"""Check the app and, if it's compatible with Wallace, freeze its state."""
print_header()
# Verify that the package is usable.
log("Verifying that directory is compatible with Wallace...")
if not verify_package(verbose=verbose):
raise AssertionError(
"This is not a valid Wallace app. " +
"Fix the errors and then try running 'wallace verify'.")
# Verify that the Postgres server is running.
try:
psycopg2.connect(database="x", user="postgres", password="nada")
except psycopg2.OperationalError, e:
if "could not connect to server" in str(e):
raise RuntimeError("The Postgres server isn't running.")
# Load psiTurk configuration.
config = PsiturkConfig()
config.load_config()
# Check that the demo-specific requirements are satisfied.
try:
with open("requirements.txt", "r") as f:
dependencies = f.readlines()
except:
dependencies = []
pkg_resources.require(dependencies)
# Generate a unique id for this experiment.
id = "w" + str(uuid.uuid4())[0:28]
# If the user provided an app name, use it everywhere that's user-facing.
if app:
id_long = id
id = str(app)
log("Running as experiment " + id + "...")
# Copy this directory into a temporary folder, ignoring .git
dst = os.path.join(tempfile.mkdtemp(), id)
to_ignore = shutil.ignore_patterns(
".git/*",
"*.db",
"snapshots",
"data",
"server.log"
)
shutil.copytree(os.getcwd(), dst, ignore=to_ignore)
click.echo(dst)
# Save the experiment id
with open(os.path.join(dst, "experiment_id.txt"), "w") as file:
if app:
file.write(id_long)
else:
file.write(id)
# Zip up the temporary directory and place it in the cwd.
if not debug:
log("Freezing the experiment package...")
shutil.make_archive(
os.path.join("snapshots", id + "-code"), "zip", dst)
# Change directory to the temporary folder.
cwd = os.getcwd()
os.chdir(dst)
# Check directories.
if not os.path.exists("static/scripts"):
os.makedirs("static/scripts")
if not os.path.exists("templates"):
os.makedirs("templates")
if not os.path.exists("static/css"):
os.makedirs("static/css")
# Rename experiment.py to wallace_experiment.py to avoid a psiTurk conflict.
os.rename(
os.path.join(dst, "experiment.py"),
os.path.join(dst, "wallace_experiment.py"))
# Copy files into this experiment package.
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"custom.py")
shutil.copy(src, os.path.join(dst, "custom.py"))
heroku_files = [
"Procfile",
"requirements.txt",
"psiturkapp.py",
"worker.py",
"clock.py",
]
for filename in heroku_files:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"heroku",
filename)
shutil.copy(src, os.path.join(dst, filename))
clock_on = config.getboolean('Server Parameters', 'clock_on')
# If the clock process has been disabled, overwrite the Procfile.
if not clock_on:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"heroku",
"Procfile_no_clock")
shutil.copy(src, os.path.join(dst, "Procfile"))
frontend_files = [
"static/css/wallace.css",
"static/scripts/wallace.js",
"static/scripts/reqwest.min.js",
"templates/error_wallace.html",
"templates/launch.html",
"templates/complete.html",
"static/robots.txt"
]
for filename in frontend_files:
src = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"frontend",
filename)
shutil.copy(src, os.path.join(dst, filename))
time.sleep(0.25)
os.chdir(cwd)
return (id, dst)
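Note how the destination is a child of the fresh temp directory rather than the temp directory itself: shutil.copytree() requires a destination that does not exist yet, while mkdtemp() creates its directory. A sketch of just that step, with illustrative ignore patterns:
import os
import shutil
import tempfile

def snapshot_cwd(experiment_id):
    # copytree() must create its destination, so join onto the temp dir.
    dst = os.path.join(tempfile.mkdtemp(), experiment_id)
    ignore = shutil.ignore_patterns(".git/*", "*.db", "snapshots")
    shutil.copytree(os.getcwd(), dst, ignore=ignore)
    return dst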
0
Example 83
Project: trtools Source File: rmagic.py
@skip_doctest
@magic_arguments()
@argument(
'-i', '--input', action='append',
help='Names of input variables from shell.user_ns to be assigned to R variables of the same names after calling self.pyconverter. Multiple names can be passed, separated only by commas with no whitespace.'
)
@argument(
'-o', '--output', action='append',
help='Names of variables to be pushed from rpy2 to shell.user_ns after executing the cell body and applying self.Rconverter. Multiple names can be passed, separated only by commas with no whitespace.'
)
@argument(
'-w', '--width', type=int,
help='Width of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-h', '--height', type=int,
help='Height of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-d', '--dataframe', action='append',
help='Convert these objects to data.frames and return as structured arrays.'
)
@argument(
'-u', '--units', type=int,
help='Units of png plotting device sent as an argument to *png* in R. One of ["px", "in", "cm", "mm"].'
)
@argument(
'-p', '--pointsize', type=int,
help='Pointsize of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-b', '--bg',
help='Background of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-n', '--noreturn',
help='Force the magic to not return anything.',
action='store_true',
default=False
)
@argument(
'code',
nargs='*',
)
@needs_local_scope
@line_cell_magic
def R(self, line, cell=None, local_ns=None):
'''
Execute code in R, and pull some of the results back into the Python namespace.
In line mode, this will evaluate an expression and convert the returned value to a Python object.
The return value is determined by rpy2's behaviour of returning the result of evaluating the
final line.
Multiple R lines can be executed by joining them with semicolons::
In [9]: %R X=c(1,4,5,7); sd(X); mean(X)
Out[9]: array([ 4.25])
As a cell, this will run a block of R code, without bringing anything back by default::
In [10]: %%R
....: Y = c(2,4,3,9)
....: print(summary(lm(Y~X)))
....:
Call:
lm(formula = Y ~ X)
Residuals:
1 2 3 4
0.88 -0.24 -2.28 1.64
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.0800 2.3000 0.035 0.975
X 1.0400 0.4822 2.157 0.164
Residual standard error: 2.088 on 2 degrees of freedom
Multiple R-squared: 0.6993,Adjusted R-squared: 0.549
F-statistic: 4.651 on 1 and 2 DF, p-value: 0.1638
In the notebook, plots are published as the output of the cell.
%R plot(X, Y)
will create a scatter plot of X vs Y.
If cell is not None and line has some R code, it is prepended to
the R code in cell.
Objects can be passed back and forth between rpy2 and python via the -i -o flags in line::
In [14]: Z = np.array([1,4,5,10])
In [15]: %R -i Z mean(Z)
Out[15]: array([ 5.])
In [16]: %R -o W W=Z*mean(Z)
Out[16]: array([ 5., 20., 25., 50.])
In [17]: W
Out[17]: array([ 5., 20., 25., 50.])
The return value is determined by these rules:
* If the cell is not None, the magic returns None.
* If the cell evaluates as False, the resulting value is returned
unless the final line prints something to the console, in
which case None is returned.
* If the final line results in a NULL value when evaluated
by rpy2, then None is returned.
* No attempt is made to convert the final value to a structured array.
Use the --dataframe flag or %Rget to push / return a structured array.
* If the -n flag is present, there is no return value.
* A trailing ';' will also result in no return value as the last
value in the line is an empty string.
The --dataframe argument will attempt to return structured arrays.
This is useful for dataframes with
mixed data types. Note also that for a data.frame,
if it is returned as an ndarray, it is transposed::
In [18]: dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')]
In [19]: datapy = np.array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5, 'e')], dtype=dtype)
In [20]: %%R -o datar
datar = datapy
....:
In [21]: datar
Out[21]:
array([['1', '2', '3', '4'],
['2', '3', '2', '5'],
['a', 'b', 'c', 'e']],
dtype='|S1')
In [22]: %%R -d datar
datar = datapy
....:
In [23]: datar
Out[23]:
array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5.0, 'e')],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')])
The --dataframe argument first tries colnames, then names.
If both are NULL, it returns an ndarray (i.e. unstructured)::
In [1]: %R mydata=c(4,6,8.3); NULL
In [2]: %R -d mydata
In [3]: mydata
Out[3]: array([ 4. , 6. , 8.3])
In [4]: %R names(mydata) = c('a','b','c'); NULL
In [5]: %R -d mydata
In [6]: mydata
Out[6]:
array((4.0, 6.0, 8.3),
dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
In [7]: %R -o mydata
In [8]: mydata
Out[8]: array([ 4. , 6. , 8.3])
'''
args = parse_argstring(self.R, line)
# arguments 'code' in line are prepended to
# the cell lines
if cell is None:
code = ''
return_output = True
line_mode = True
else:
code = cell
return_output = False
line_mode = False
code = ' '.join(args.code) + code
# if there is no local namespace then default to an empty dict
if local_ns is None:
local_ns = {}
if args.input:
for input in ','.join(args.input).split(','):
try:
val = local_ns[input]
except KeyError:
val = self.shell.user_ns[input]
self.r.assign(input, self.pyconverter(val))
png_argdict = dict([(n, getattr(args, n)) for n in ['units', 'height', 'width', 'bg', 'pointsize']])
png_args = ','.join(['%s=%s' % (o,v) for o, v in list(png_argdict.items()) if v is not None])
# execute the R code in a temporary directory
tmpd = tempfile.mkdtemp()
self.r('png("%s/Rplots%%03d.png",%s)' % (tmpd, png_args))
text_output = ''
if line_mode:
for line in code.split(';'):
text_result, result = self.eval(line)
text_output += text_result
if text_result:
# the last line printed something to the console so we won't return it
return_output = False
else:
text_result, result = self.eval(code)
text_output += text_result
self.r('dev.off()')
# read out all the saved .png files
images = [open(imgfile, 'rb').read() for imgfile in glob("%s/Rplots*png" % tmpd)]
# now publish the images
# mimicking IPython/zmq/pylab/backend_inline.py
fmt = 'png'
mimetypes = { 'png' : 'image/png', 'svg' : 'image/svg+xml' }
mime = mimetypes[fmt]
# publish the printed R objects, if any
display_data = []
if text_output:
display_data.append(('RMagic.R', {'text/plain':text_output}))
# flush text streams before sending figures, helps a little with output
# synchronization in the console (though it's a band-aid, not a real solution)
for image in images:
sys.stdout.flush(); sys.stderr.flush()
display_data.append(('RMagic.R', {mime: image}))
# kill the temporary directory
rmtree(tmpd)
# try to turn every output into a numpy array
# this means that output are assumed to be castable
# as numpy arrays
if args.output:
for output in ','.join(args.output).split(','):
self.shell.push({output:self.Rconverter(self.r(output), dataframe=False)})
if args.dataframe:
for output in ','.join(args.dataframe).split(','):
self.shell.push({output:self.Rconverter(self.r(output), dataframe=True)})
for tag, disp_d in display_data:
publish_display_data(tag, disp_d)
# this will keep a reference to the display_data
# which might be useful to other objects who happen to use
# this method
if self.cache_display_data:
self.display_cache = display_data
# if in line mode and return_output, return the result as an ndarray
if return_output and not args.noreturn:
if result != ri.NULL:
return self.Rconverter(result, dataframe=False)
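This magic uses the temp directory as a spool for files produced by another process (the R png() device), globs them back, and removes the directory afterwards. The same shape, with the R call replaced by an arbitrary callable purely for illustration:
import os
import shutil
import tempfile
from glob import glob

def collect_generated_pngs(generate):
    # 'generate' stands in for the R png() device: it writes *.png
    # files into whatever directory it is handed.
    tmpd = tempfile.mkdtemp()
    try:
        generate(tmpd)
        images = []
        for imgfile in sorted(glob(os.path.join(tmpd, '*.png'))):
            with open(imgfile, 'rb') as fh:
                images.append(fh.read())
        return images
    finally:
        shutil.rmtree(tmpd)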
0
Example 84
Project: pystan Source File: model.py
def __init__(self, file=None, charset='utf-8', model_name="anon_model",
model_code=None, stanc_ret=None, boost_lib=None,
eigen_lib=None, verbose=False, obfuscate_model_name=True,
extra_compile_args=None):
if stanc_ret is None:
stanc_ret = pystan.api.stanc(file=file,
charset=charset,
model_code=model_code,
model_name=model_name,
verbose=verbose,
obfuscate_model_name=obfuscate_model_name)
if not isinstance(stanc_ret, dict):
raise ValueError("stanc_ret must be an object returned by stanc.")
stanc_ret_keys = {'status', 'model_code', 'model_cppname',
'cppcode', 'model_name'}
if not all(n in stanc_ret_keys for n in stanc_ret):
raise ValueError("stanc_ret lacks one or more of the keys: "
"{}".format(str(stanc_ret_keys)))
elif stanc_ret['status'] != 0: # success == 0
raise ValueError("stanc_ret is not a successfully returned "
"dictionary from stanc.")
self.model_cppname = stanc_ret['model_cppname']
self.model_name = stanc_ret['model_name']
self.model_code = stanc_ret['model_code']
self.model_cppcode = stanc_ret['cppcode']
msg = "COMPILING THE C++ CODE FOR MODEL {} NOW."
logger.info(msg.format(self.model_name))
if verbose:
msg = "OS: {}, Python: {}, Cython {}".format(sys.platform,
sys.version,
Cython.__version__)
logger.info(msg)
if boost_lib is not None:
# FIXME: allow boost_lib, eigen_lib to be specified
raise NotImplementedError
if eigen_lib is not None:
raise NotImplementedError
# module_name needs to be unique so that each model instance has its own module
nonce = abs(hash((self.model_name, time.time())))
self.module_name = 'stanfit4{}_{}'.format(self.model_name, nonce)
lib_dir = tempfile.mkdtemp()
pystan_dir = os.path.dirname(__file__)
include_dirs = [
lib_dir,
pystan_dir,
os.path.join(pystan_dir, "stan", "src"),
os.path.join(pystan_dir, "stan", "lib", "stan_math_2.12.0"),
os.path.join(pystan_dir, "stan", "lib", "stan_math_2.12.0", "lib", "eigen_3.2.9"),
os.path.join(pystan_dir, "stan", "lib", "stan_math_2.12.0", "lib", "boost_1.60.0"),
os.path.join(pystan_dir, "stan", "lib", "stan_math_2.12.0", "lib", "cvodes_2.8.2", "include"),
np.get_include(),
]
model_cpp_file = os.path.join(lib_dir, self.model_cppname + '.hpp')
with io.open(model_cpp_file, 'w', encoding='utf-8') as outfile:
outfile.write(self.model_cppcode)
pyx_file = os.path.join(lib_dir, self.module_name + '.pyx')
pyx_template_file = os.path.join(pystan_dir, 'stanfit4model.pyx')
with io.open(pyx_template_file, 'r', encoding='utf-8') as infile:
s = infile.read()
template = string.Template(s)
with io.open(pyx_file, 'w', encoding='utf-8') as outfile:
s = template.safe_substitute(model_cppname=self.model_cppname)
outfile.write(s)
stan_macros = [
('BOOST_RESULT_OF_USE_TR1', None),
('BOOST_NO_DECLTYPE', None),
('BOOST_DISABLE_ASSERTS', None),
]
# compile stan models with optimization (-O2)
# (stanc is compiled without optimization (-O0) currently, see #33)
if extra_compile_args is None:
extra_compile_args = [
'-O2',
'-ftemplate-depth-256',
'-Wno-unused-function',
'-Wno-uninitialized',
]
if platform.platform().startswith('Win'):
extra_compile_args = ['/EHsc', '-DBOOST_DATE_TIME_NO_LIB']
distutils.log.set_verbosity(verbose)
extension = Extension(name=self.module_name,
language="c++",
sources=[pyx_file],
define_macros=stan_macros,
include_dirs=include_dirs,
extra_compile_args=extra_compile_args)
cython_include_dirs = ['.', pystan_dir]
build_extension = _get_build_extension()
build_extension.extensions = cythonize([extension],
include_path=cython_include_dirs,
quiet=not verbose)
build_extension.build_temp = os.path.dirname(pyx_file)
build_extension.build_lib = lib_dir
redirect_stderr = not verbose and pystan.misc._has_fileno(sys.stderr)
if redirect_stderr:
# silence stderr for compilation
orig_stderr = pystan.misc._redirect_stderr()
try:
build_extension.run()
finally:
if redirect_stderr:
# restore stderr
os.dup2(orig_stderr, sys.stderr.fileno())
self.module = load_module(self.module_name, lib_dir)
self.module_filename = os.path.basename(self.module.__file__)
# once the module is in memory, we no longer need the file on disk
# but we do need a copy of the file for pickling and the module name
with io.open(os.path.join(lib_dir, self.module_filename), 'rb') as f:
self.module_bytes = f.read()
shutil.rmtree(lib_dir, ignore_errors=True)
self.fit_class = getattr(self.module, "StanFit4Model")
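The compilation here happens entirely inside a temp directory; the only thing kept is the built artifact's bytes, read into memory before the directory is deleted (which is what makes the model picklable later). A sketch of that keep-bytes-drop-files pattern, with the build step abstracted into a callable:
import os
import shutil
import tempfile

def build_and_capture(build, artifact_name):
    # 'build' stands in for the Cython/distutils step above: it writes
    # 'artifact_name' into the directory it is given.
    lib_dir = tempfile.mkdtemp()
    try:
        build(lib_dir)
        with open(os.path.join(lib_dir, artifact_name), 'rb') as f:
            return f.read()   # the bytes survive; the files need not
    finally:
        # ignore_errors=True: cleanup problems should not mask success.
        shutil.rmtree(lib_dir, ignore_errors=True)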
0
Example 85
def _submitPilots( self, workDir, taskQueueDict, pilotOptions, pilotsToSubmit,
ceMask, submitPrivatePilot, privateTQ, proxy, pilotsPerJob ):
"""
This method does the actual pilot submission to the Grid RB
The logic is as follows:
- If there is no available RB, it returns an error
- If there is no VOMS extension in the proxy, it returns an error
- It creates a temp directory
- It prepares a JDL, which has a part common to gLite and LCG (the
payload description) and a part specific to each middleware
"""
taskQueueID = taskQueueDict['TaskQueueID']
# ownerDN = taskQueueDict['OwnerDN']
credDict = proxy.getCredentials()['Value']
ownerDN = credDict['identity']
ownerGroup = credDict[ 'group' ]
if not self.resourceBrokers:
# Since we can exclude RBs from the list, it may become empty
return S_ERROR( ERROR_RB )
# Need to get VOMS extension for the later interactions with WMS
ret = gProxyManager.getVOMSAttributes( proxy )
if not ret['OK']:
self.log.error( ERROR_VOMS, ret['Message'] )
return S_ERROR( ERROR_VOMS )
if not ret['Value']:
return S_ERROR( ERROR_VOMS )
workingDirectory = tempfile.mkdtemp( prefix = 'TQ_%s_' % taskQueueID, dir = workDir )
self.log.verbose( 'Using working Directory:', workingDirectory )
# Write JDL
retDict = self._prepareJDL( taskQueueDict, workingDirectory, pilotOptions, pilotsPerJob,
ceMask, submitPrivatePilot, privateTQ )
jdl = retDict['JDL']
pilotRequirements = retDict['Requirements']
rb = retDict['RB']
if not jdl:
try:
shutil.rmtree( workingDirectory )
except:
pass
return S_ERROR( ERROR_JDL )
# Check that there are available queues for the Job:
if self.enableListMatch:
availableCEs = []
now = Time.dateTime()
availableCEs = self.listMatchCache.get( pilotRequirements )
if availableCEs is None:
availableCEs = self._listMatch( proxy, jdl, taskQueueID, rb )
if availableCEs != False:
self.log.verbose( 'LastListMatch', now )
self.log.verbose( 'AvailableCEs ', availableCEs )
self.listMatchCache.add( pilotRequirements, self.listMatchDelay * 60,
value = availableCEs ) # it is given in minutes
if not availableCEs:
try:
shutil.rmtree( workingDirectory )
except:
pass
return S_ERROR( ERROR_CE + ' TQ: %d' % taskQueueID )
# Now we are ready for the actual submission, so
self.log.verbose( 'Submitting Pilots for TaskQueue', taskQueueID )
# FIXME: what is this?? If it goes on the super class, it is doomed
submitRet = self._submitPilot( proxy, pilotsPerJob, jdl, taskQueueID, rb )
try:
shutil.rmtree( workingDirectory )
except:
pass
if not submitRet:
return S_ERROR( 'Pilot Submission Failed for TQ %d ' % taskQueueID )
# pilotReference, resourceBroker = submitRet
submittedPilots = 0
if pilotsPerJob != 1 and len( submitRet ) != pilotsPerJob:
# Parametric jobs are used
for pilotReference, resourceBroker in submitRet:
pilotReference = self._getChildrenReferences( proxy, pilotReference, taskQueueID )
submittedPilots += len( pilotReference )
pilotAgentsDB.addPilotTQReference( pilotReference, taskQueueID, ownerDN,
ownerGroup, resourceBroker, self.gridMiddleware,
pilotRequirements )
else:
for pilotReference, resourceBroker in submitRet:
pilotReference = [pilotReference]
submittedPilots += len( pilotReference )
pilotAgentsDB.addPilotTQReference( pilotReference, taskQueueID, ownerDN,
ownerGroup, resourceBroker, self.gridMiddleware, pilotRequirements )
# add some sleep here
time.sleep( 0.1 * submittedPilots )
if pilotsToSubmit > pilotsPerJob:
# Additional submissions are necessary, need to get a new token and iterate.
pilotsToSubmit -= pilotsPerJob
result = gProxyManager.requestToken( ownerDN, ownerGroup, max( pilotsToSubmit, self.maxJobsInFillMode ) )
if not result[ 'OK' ]:
self.log.error( ERROR_TOKEN, result['Message'] )
result = S_ERROR( ERROR_TOKEN )
result['Value'] = submittedPilots
return result
( token, numberOfUses ) = result[ 'Value' ]
for option in pilotOptions:
if option.find( '-o /Security/ProxyToken=' ) == 0:
pilotOptions.remove( option )
pilotOptions.append( '-o /Security/ProxyToken=%s' % token )
pilotsPerJob = max( 1, min( pilotsPerJob, int( numberOfUses / self.maxJobsInFillMode ) ) )
result = self._submitPilots( workDir, taskQueueDict, pilotOptions,
pilotsToSubmit, ceMask,
submitPrivatePilot, privateTQ,
proxy, pilotsPerJob )
if not result['OK']:
if 'Value' not in result:
result['Value'] = 0
result['Value'] += submittedPilots
return result
submittedPilots += result['Value']
return S_OK( submittedPilots )
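Worth noting is the mkdtemp() signature used here: both prefix and dir are set, so each task queue gets a labeled directory under the agent's own workDir rather than the system default. A minimal self-contained illustration (the parent directory and prefix value are only stand-ins):
import tempfile

parent = tempfile.mkdtemp()  # stand-in for the agent's workDir
working_directory = tempfile.mkdtemp(prefix='TQ_123_', dir=parent)
print(working_directory)     # e.g. <parent>/TQ_123_a1b2c3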
0
Example 86
Project: galah Source File: zip_bulk_submissions.py
def _zip_bulk_submissions(archive_id, requester, assignment, email = ""):
archive_id = ObjectId(archive_id)
archive_file = temp_directory = ""
# Find any expired archives and remove them
deleted_files = []
for i in Archive.objects(expires__lt = datetime.datetime.today()):
deleted_files.append(i.file_location)
if i.file_location:
try:
os.remove(i.file_location)
except OSError as e:
logger.warning(
"Could not remove expired archive at %s: %s.",
i.file_location, str(e)
)
i.delete()
if deleted_files:
logger.info("Deleted archives %s.", str(deleted_files))
# This is the archive object we will eventually add to the database
new_archive = Archive(
id = archive_id,
requester = requester,
archive_type = "assignment_package"
)
temp_directory = archive_file = None
try:
# Form the query
query = {"assignment": ObjectId(assignment)}
# Only mention email in the query if it's not None or the empty
# string, otherwise mongo will look for submissions that list the
# user as None or the empty string (which should be exactly none of
# the submissions in the system).
if email:
query["user"] = email
else:
# Otherwise, we need to be careful not to get teacher/TA submissions.
assn = Assignment.objects.get(id = ObjectId(assignment))
students = User.objects(
account_type="student",
classes = assn.for_class
)
query["user__in"] = [i.id for i in students]
# Grab all the submissions
submissions = list(Submission.objects(**query))
if not submissions:
logger.info("No submissions found matching query.")
return
# Organize all the submissions by user name, as this will closely
# match the structure of the archive we will build.
submission_map = {}
for i in submissions:
if i.user in submission_map:
submission_map[i.user].append(i)
else:
submission_map[i.user] = [i]
# Create a temporary directory we will create our archive in.
temp_directory = tempfile.mkdtemp()
# Create our directory tree. Instead of making new folders for each
# submission and copying the user's files over however, we will
# create symlinks to save space and time.
for user, user_submissions in submission_map.items():
# Create a directory for the user
os.makedirs(os.path.join(temp_directory, user))
# Create symlinks for all his submissions. Each symlink is
# named after the submission date.
for i in user_submissions:
time_stamp = i.timestamp.strftime("%Y-%m-%d-%H-%M-%S")
symlink_path = \
os.path.join(temp_directory, user, time_stamp)
# In the highly unlikely event that two of the same user's
# submissions have the same exact time stamp, we'll need to
# add a marker to the end of the timestamp.
marker = 0
while os.path.exists(symlink_path +
("-%d" % marker if marker > 0 else "")):
marker += 1
if marker > 0:
symlink_path += "-%d" % marker
original_path = i.getFilePath()
# Detect if the submission's files are still on the filesystem
if os.path.isdir(original_path):
# Create a symlink pointing to the actual submission
# directory with the name we generated
os.symlink(original_path, symlink_path)
else:
# Create an empty text file marking the fact that a
# submissions existed but is no longer available.
open(symlink_path, "w").close()
# Create the actual archive file.
# TODO: Create it in galah's /var/ directory
file_descriptor, archive_file = tempfile.mkstemp(suffix = ".zip")
os.close(file_descriptor)
# Run zip and do the actual archiving. Will block until it's finished.
zipdir(temp_directory, archive_file)
new_archive.file_location = archive_file
new_archive.expires = \
datetime.datetime.today() + config["TEACHER_ARCHIVE_LIFETIME"]
new_archive.save(force_insert = True)
except Exception as e:
# If we created a temporary archive file we need to delete it.
new_archive.file_location = None
if archive_file:
os.remove(archive_file)
new_archive.error_string = str(e)
new_archive.save(force_insert = True)
raise
finally:
if temp_directory:
shutil.rmtree(temp_directory)
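The staging trick in this example is to fill the temp directory with symlinks instead of copies, so the archiver can walk one root without duplicating the underlying data. A reduced sketch of the staging step only, assuming a POSIX filesystem; zipping is left to the caller, as the example leaves it to its zipdir() helper:
import os
import tempfile

def stage_symlinks(paths):
    # Build a throwaway tree of links; nothing is copied.
    temp_directory = tempfile.mkdtemp()
    for p in paths:
        os.symlink(os.path.abspath(p),
                   os.path.join(temp_directory, os.path.basename(p)))
    return temp_directory  # archive it, then shutil.rmtree() it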
0
Example 87
Project: tfmesos Source File: mnist_replica.py
def main(unused_argv):
ps_hosts = FLAGS.ps_hosts.split(",")
worker_hosts = FLAGS.worker_hosts.split(",")
cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
server = tf.train.Server(cluster,
job_name=FLAGS.job_name,
task_index=FLAGS.worker_index)
if FLAGS.job_name == "ps":
server.join()
sys.exit(0)
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
if FLAGS.download_only:
sys.exit(0)
num_workers = len(worker_hosts)
worker_grpc_url = 'grpc://' + worker_hosts[0]
print("Worker GRPC URL: %s" % worker_grpc_url)
print("Worker index = %d" % FLAGS.worker_index)
print("Number of workers = %d" % num_workers)
is_chief = (FLAGS.worker_index == 0)
if FLAGS.sync_replicas:
if FLAGS.replicas_to_aggregate is None:
replicas_to_aggregate = num_workers
else:
replicas_to_aggregate = FLAGS.replicas_to_aggregate
# Construct device setter object
device_setter = tf.train.replica_device_setter(cluster=cluster)
# The device setter will automatically place Variables ops on separate
# parameter servers (ps). The non-Variable ops will be placed on the workers.
with tf.device(device_setter):
global_step = tf.Variable(0, name="global_step", trainable=False)
# Variables of the hidden layer
hid_w = tf.Variable(
tf.truncated_normal([IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
stddev=1.0 / IMAGE_PIXELS), name="hid_w")
hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")
# Variables of the softmax layer
sm_w = tf.Variable(
tf.truncated_normal([FLAGS.hidden_units, 10],
stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
name="sm_w")
sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
# Ops: located on the worker specified with FLAGS.worker_index
x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
y_ = tf.placeholder(tf.float32, [None, 10])
hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
hid = tf.nn.relu(hid_lin)
y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
cross_entropy = -tf.reduce_sum(y_ *
tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
if FLAGS.sync_replicas:
opt = tf.train.SyncReplicasOptimizer(
opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers,
replica_id=FLAGS.worker_index,
name="mnist_sync_replicas")
train_step = opt.minimize(cross_entropy,
global_step=global_step)
if FLAGS.sync_replicas and is_chief:
# Initial token and chief queue runners required by the sync_replicas mode
chief_queue_runner = opt.get_chief_queue_runner()
init_tokens_op = opt.get_init_tokens_op()
init_op = tf.initialize_all_variables()
train_dir = tempfile.mkdtemp()
sv = tf.train.Supervisor(is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
recovery_wait_secs=1,
global_step=global_step)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=True,
device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.worker_index])
# The chief worker (worker_index==0) session will prepare the session,
# while the remaining workers will wait for the preparation to complete.
if is_chief:
print("Worker %d: Initializing session..." % FLAGS.worker_index)
else:
print("Worker %d: Waiting for session to be initialized..." %
FLAGS.worker_index)
with sv.prepare_or_wait_for_session(worker_grpc_url, config=sess_config) as sess:
print("Worker %d: Session initialization complete." % FLAGS.worker_index)
if FLAGS.sync_replicas and is_chief:
# Chief worker will start the chief queue runner and call the init op
print("Starting chief queue runner and running init_tokens_op")
sv.start_queue_runners(sess, [chief_queue_runner])
sess.run(init_tokens_op)
# Perform training
time_begin = time.time()
print("Training begins @ %f" % time_begin)
local_step = 0
step = 0
while not sv.should_stop() and step < FLAGS.train_steps:
# Training feed
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
train_feed = {x: batch_xs,
y_: batch_ys}
_, step = sess.run([train_step, global_step], feed_dict=train_feed)
local_step += 1
now = time.time()
if is_chief:
print("%f: Worker %d: training step %d done (global step: %d)" %
(now, FLAGS.worker_index, local_step, step))
sv.stop()
if is_chief:
time_end = time.time()
print("Training ends @ %f" % time_end)
training_time = time_end - time_begin
print("Training elapsed time: %f s" % training_time)
# Validation feed
val_feed = {x: mnist.validation.images,
y_: mnist.validation.labels}
val_xent = sess.run(cross_entropy, feed_dict=val_feed)
print("After %d training step(s), validation cross entropy = %g" %
(FLAGS.train_steps, val_xent))
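Here mkdtemp() supplies the Supervisor's logdir, so every run checkpoints into a fresh path and can never accidentally restore another run's state. The trade-off is that nothing ever deletes the directory; a minimal sketch of the idiom:
import tempfile

# Fresh path per run: no stale checkpoints are ever restored.
train_dir = tempfile.mkdtemp()
print("writing checkpoints and logs to %s" % train_dir)
# For anything long-lived, prefer a durable, explicitly managed logdir,
# since this directory is never cleaned up by the script itself.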
0
Example 88
Project: sphinx-contrib Source File: tikz.py
def render_tikz(self,tikz,libs='',stringsubst=False):
hashkey = tikz.encode('utf-8')
fname = 'tikz-%s.png' % (sha(hashkey).hexdigest())
relfn = posixpath.join(self.builder.imgpath, fname)
outfn = path.join(self.builder.outdir, '_images', fname)
if path.isfile(outfn):
return relfn
if hasattr(self.builder, '_tikz_warned'):
return None
ensuredir(path.dirname(outfn))
curdir = getcwd()
latex = DOC_HEAD % libs
latex += self.builder.config.tikz_latex_preamble
if stringsubst:
tikz = tikz % {'wd': curdir}
latex += DOC_BODY % tikz
if isinstance(latex, unicode):
latex = latex.encode('utf-8')
if not hasattr(self.builder, '_tikz_tempdir'):
tempdir = self.builder._tikz_tempdir = tempfile.mkdtemp()
else:
tempdir = self.builder._tikz_tempdir
chdir(tempdir)
tf = open('tikz.tex', 'wb')
tf.write(latex)
tf.close()
try:
try:
p = Popen(['pdflatex', '--interaction=nonstopmode', 'tikz.tex'],
stdout=PIPE, stderr=PIPE)
except OSError, err:
if err.errno != ENOENT: # No such file or directory
raise
self.builder.warn('LaTeX command cannot be run')
self.builder._tikz_warned = True
return None
finally:
chdir(curdir)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise TikzExtError('Error (tikz extension): latex exited with error:\n'
'[stderr]\n%s\n[stdout]\n%s' % (stderr, stdout))
chdir(tempdir)
# the following does not work for pdf patterns
# p1 = Popen(['convert', '-density', '120', '-colorspace', 'rgb',
# '-trim', 'tikz.pdf', outfn], stdout=PIPE, stderr=PIPE)
# stdout, stderr = p1.communicate()
try:
p = Popen(['pdftoppm', '-r', '120', 'tikz.pdf', 'tikz'],
stdout=PIPE, stderr=PIPE)
except OSError, err:
if err.errno != ENOENT: # No such file or directory
raise
self.builder.warn('pdftoppm command cannot be run')
self.builder.warn(err)
self.builder._tikz_warned = True
chdir(curdir)
return None
stdout, stderr = p.communicate()
if p.returncode != 0:
self.builder._tikz_warned = True
raise TikzExtError('Error (tikz extension): pdftoppm exited with error:'
'\n[stderr]\n%s\n[stdout]\n%s' % (stderr, stdout))
if self.builder.config.tikz_proc_suite == 'ImageMagick':
convert_args = []
if self.builder.config.tikz_transparent:
convert_args = ['-fuzz', '2%', '-transparent', 'white']
try:
p1 = Popen(['convert', '-trim'] + convert_args +
['tikz-1.ppm', outfn],
stdout=PIPE, stderr=PIPE)
except OSError, err:
if err.errno != ENOENT: # No such file or directory
raise
self.builder.warn('convert command cannot be run')
self.builder.warn(err)
self.builder._tikz_warned = True
chdir(curdir)
return None
stdout, stderr = p1.communicate()
if p1.returncode != 0:
self.builder._tikz_warned = True
chdir(curdir)
raise TikzExtError('Error (tikz extension): convert exited with '
'error:\n[stderr]\n%s\n[stdout]\n%s'
% (stderr, stdout))
elif self.builder.config.tikz_proc_suite == 'Netpbm':
try:
p1 = Popen(['pnmcrop', 'tikz-1.ppm'], stdout=PIPE, stderr=PIPE)
except OSError, err:
if err.errno != ENOENT: # No such file or directory
raise
self.builder.warn('pnmcrop command cannot be run:')
self.builder.warn(err)
self.builder._tikz_warned = True
chdir(curdir)
return None
pnm_args = []
if self.builder.config.tikz_transparent:
pnm_args = ['-transparent', 'white']
try:
p2 = Popen(['pnmtopng'] + pnm_args, stdin=p1.stdout,
stdout=PIPE, stderr=PIPE)
except OSError, err:
if err.errno != ENOENT: # No such file or directory
raise
self.builder.warn('pnmtopng command cannot be run:')
self.builder.warn(err)
self.builder._tikz_warned = True
chdir(curdir)
return None
pngdata, stderr2 = p2.communicate()
dummy, stderr1 = p1.communicate()
if p1.returncode != 0:
self.builder._tikz_warned = True
raise TikzExtError('Error (tikz extension): pnmcrop exited with '
'error:\n[stderr]\n%s' % (stderr1))
if p2.returncode != 0:
self.builder._tikz_warned = True
raise TikzExtError('Error (tikz extension): pnmtopng exited with '
'error:\n[stderr]\n%s' % (stderr2))
f = open(outfn,'wb')
f.write(pngdata)
f.close()
else:
self.builder._tikz_warned = True
chdir(curdir)
raise TikzExtError('Error (tikz extension): Invalid configuration '
'value for tikz_proc_suite')
chdir(curdir)
return relfn
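Unlike most examples on this page, the temp directory here is created once and cached on a long-lived object (the builder), so every TikZ render in the same Sphinx build shares one scratch area. The caching idiom in isolation, with 'builder' standing in for any long-lived object:
import tempfile

def get_shared_tempdir(builder):
    # Lazily create one scratch dir per builder and reuse it, as
    # render_tikz() does with builder._tikz_tempdir.
    if not hasattr(builder, '_tikz_tempdir'):
        builder._tikz_tempdir = tempfile.mkdtemp()
    return builder._tikz_tempdir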
0
Example 89
Project: HealthStarter Source File: install.py
def run(self, options, args):
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
if options.download_dir:
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
install_options.append('--prefix=')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
if (options.download_dir or not wheel or not
options.cache_dir):
# on -d don't do complex things like building
# wheels, and don't try to build wheels when wheel is
# not installed.
requirement_set.prepare_files(finder)
else:
# build wheels before install.
wb = WheelBuilder(
requirement_set,
finder,
build_options=[],
global_options=[],
)
# Ignore the result: a failed wheel will be
# installed from the sdist/vcs whatever.
wb.build(autobuilding=True)
if not options.download_dir:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
if options.target_dir:
ensure_dir(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
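For --target installs, pip installs into a temp directory first and only then moves items into the real target, so a failed install never leaves a half-populated target directory. The move-then-cleanup shape, with the install step abstracted into a callable:
import os
import shutil
import tempfile

def install_into_target(target_dir, install):
    # 'install' stands in for the actual install step; it writes its
    # output into the directory it is given.
    temp_target_dir = tempfile.mkdtemp()
    try:
        install(temp_target_dir)
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)
        for item in os.listdir(temp_target_dir):
            shutil.move(os.path.join(temp_target_dir, item),
                        os.path.join(target_dir, item))
    finally:
        shutil.rmtree(temp_target_dir)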
0
Example 90
Project: viper Source File: pe.py
def resources(self):
# Use this function to retrieve resources for the given PE instance.
# Returns all the identified resources with indicators and attributes.
def get_resources(pe):
resources = []
if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
count = 1
for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
try:
resource = {}
if resource_type.name is not None:
name = str(resource_type.name)
else:
name = str(pefile.RESOURCE_TYPE.get(resource_type.struct.Id))
if name is None:
name = str(resource_type.struct.Id)
if hasattr(resource_type, 'directory'):
for resource_id in resource_type.directory.entries:
if hasattr(resource_id, 'directory'):
for resource_lang in resource_id.directory.entries:
data = pe.get_data(resource_lang.data.struct.OffsetToData, resource_lang.data.struct.Size)
filetype = get_type(data)
md5 = get_md5(data)
language = pefile.LANG.get(resource_lang.data.lang, None)
sublanguage = pefile.get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)
offset = ('%-8s' % hex(resource_lang.data.struct.OffsetToData)).strip()
size = ('%-8s' % hex(resource_lang.data.struct.Size)).strip()
resource = [count, name, offset, md5, size, filetype, language, sublanguage]
# Dump resources if requested to and if the file currently being
# processed is the opened session file.
# This prevents every resource scanned during a --scan from being
# dumped as well.
if (self.args.open or self.args.dump) and pe == self.pe:
if self.args.dump:
folder = self.args.dump
else:
folder = tempfile.mkdtemp()
resource_path = os.path.join(folder, '{0}_{1}_{2}'.format(__sessions__.current.file.md5, offset, name))
resource.append(resource_path)
with open(resource_path, 'wb') as resource_handle:
resource_handle.write(data)
resources.append(resource)
count += 1
except Exception as e:
self.log('error', e)
continue
return resources
if not self.__check_session():
return
# Obtain resources for the currently opened file.
resources = get_resources(self.pe)
if not resources:
self.log('warning', "No resources found")
return
headers = ['#', 'Name', 'Offset', 'MD5', 'Size', 'File Type', 'Language', 'Sublanguage']
if self.args.dump or self.args.open:
headers.append('Dumped To')
self.log('table', dict(header=headers, rows=resources))
# If instructed, open a session on the given resource.
if self.args.open:
for resource in resources:
if resource[0] == self.args.open:
__sessions__.new(resource[8])
return
# If instructed to perform a scan across the repository, start looping
# through all available files.
elif self.args.scan:
self.log('info', "Scanning the repository for matching samples...")
# Retrieve list of samples stored locally and available in the
# database.
db = Database()
samples = db.find(key='all')
matches = []
for sample in samples:
# Skip if it's the same file.
if sample.sha256 == __sessions__.current.file.sha256:
continue
# Obtain path to the binary.
sample_path = get_sample_path(sample.sha256)
if not os.path.exists(sample_path):
continue
# Open PE instance.
try:
cur_pe = pefile.PE(sample_path)
except:
continue
# Obtain the list of resources for the current iteration.
cur_resources = get_resources(cur_pe)
matched_resources = []
# Loop through entry's resources.
for cur_resource in cur_resources:
# Loop through opened file's resources.
for resource in resources:
# If there is a common resource, add it to the list.
if cur_resource[3] == resource[3]:
matched_resources.append(resource[3])
# If there are any common resources, add the entry to the list
# of matched samples.
if len(matched_resources) > 0:
matches.append([sample.name, sample.md5, '\n'.join(r for r in matched_resources)])
self.log('info', "{0} relevant matches found".format(bold(len(matches))))
if len(matches) > 0:
self.log('table', dict(header=['Name', 'MD5', 'Resource MD5'], rows=matches))
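The mkdtemp() use in this example is small but common: fall back to a fresh temp directory when the user did not supply a dump folder. The branch in isolation:
import tempfile

def resolve_dump_folder(user_folder=None):
    # Mirror the branch above: honor --dump if given, otherwise
    # create a throwaway directory for the extracted resources.
    return user_folder if user_folder else tempfile.mkdtemp()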
0
Example 91
Project: barman Source File: recovery_executor.py
def _xlog_copy(self, required_xlog_files, wal_dest, remote_command):
"""
Restore WAL segments
:param required_xlog_files: list of all required WAL files
:param wal_dest: the destination directory for xlog recover
:param remote_command: default None. The remote command to recover
the xlog, in case of remote backup.
"""
# List of required WAL files partitioned by containing directory
xlogs = collections.defaultdict(list)
# add '/' suffix to ensure it is a directory
wal_dest = '%s/' % wal_dest
# Map of every compressor used with any WAL file in the archive,
# to be used during this recovery
compressors = {}
compression_manager = self.backup_manager.compression_manager
# Fill xlogs and compressors maps from required_xlog_files
for wal_info in required_xlog_files:
hashdir = xlog.hash_dir(wal_info.name)
xlogs[hashdir].append(wal_info)
# If a compressor is required, make sure it exists in the cache
if wal_info.compression is not None and \
wal_info.compression not in compressors:
compressors[wal_info.compression] = \
compression_manager.get_compressor(
compression=wal_info.compression)
rsync = RsyncPgData(
path=self.server.path,
ssh=remote_command,
bwlimit=self.config.bandwidth_limit,
network_compression=self.config.network_compression)
# If compression is used and this is a remote recovery, we need a
# temporary directory where to spool uncompressed files,
# otherwise we either decompress every WAL file in the local
# destination, or we ship the uncompressed file remotely
if compressors:
if remote_command:
# Decompress to a temporary spool directory
wal_decompression_dest = tempfile.mkdtemp(
prefix='barman_xlog-')
else:
# Decompress directly to the destination directory
wal_decompression_dest = wal_dest
# Make sure wal_decompression_dest exists
mkpath(wal_decompression_dest)
else:
# If no compression
wal_decompression_dest = None
if remote_command:
# If remote recovery tell rsync to copy them remotely
# add ':' prefix to mark it as remote
wal_dest = ':%s' % wal_dest
total_wals = sum(map(len, xlogs.values()))
partial_count = 0
for prefix in sorted(xlogs):
batch_len = len(xlogs[prefix])
partial_count += batch_len
source_dir = os.path.join(self.config.wals_directory, prefix)
_logger.info(
"Starting copy of %s WAL files %s/%s from %s to %s",
batch_len,
partial_count,
total_wals,
xlogs[prefix][0],
xlogs[prefix][-1])
# If at least one compressed file has been found, activate
# compression check and decompression for each WAL files
if compressors:
for segment in xlogs[prefix]:
dst_file = os.path.join(wal_decompression_dest,
segment.name)
if segment.compression is not None:
compressors[segment.compression].decompress(
os.path.join(source_dir, segment.name),
dst_file)
else:
shutil.copy2(os.path.join(source_dir, segment.name),
dst_file)
if remote_command:
try:
# Transfer the WAL files
rsync.from_file_list(
list(segment.name for segment in xlogs[prefix]),
wal_decompression_dest, wal_dest)
except CommandFailedException as e:
msg = ("data transfer failure while copying WAL files "
"to directory '%s'") % (wal_dest[1:],)
raise DataTransferFailure.from_command_error(
'rsync', e, msg)
# Cleanup files after the transfer
for segment in xlogs[prefix]:
file_name = os.path.join(wal_decompression_dest,
segment.name)
try:
os.unlink(file_name)
except OSError as e:
output.warning(
"Error removing temporary file '%s': %s",
file_name, e)
else:
try:
rsync.from_file_list(
list(segment.name for segment in xlogs[prefix]),
"%s/" % os.path.join(self.config.wals_directory,
prefix),
wal_dest)
except CommandFailedException as e:
msg = "data transfer failure while copying WAL files " \
"to directory '%s'" % (wal_dest[1:],)
raise DataTransferFailure.from_command_error(
'rsync', e, msg)
_logger.info("Finished copying %s WAL files.", total_wals)
# Remove local decompression target directory if different from the
# destination directory (it happens when compression is in use during a
# remote recovery)
if wal_decompression_dest and wal_decompression_dest != wal_dest:
shutil.rmtree(wal_decompression_dest)
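The spool directory here is conditional: it is only created when files must be decompressed locally before being shipped to a remote host; otherwise decompression (or a plain copy) goes straight to the destination. The decision in isolation, as a sketch:
import tempfile

def pick_decompression_dest(remote_command, compressors, wal_dest):
    # Only pay for a spool dir when both compression and a remote
    # destination are in play, as in _xlog_copy() above.
    if compressors and remote_command:
        return tempfile.mkdtemp(prefix='barman_xlog-')
    return wal_dest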
0
Example 92
def run_benchmarks(self, env, show_stderr=False, quick=False, profile=False,
skip=None):
"""
Run all of the benchmarks in the given `Environment`.
Parameters
----------
env : Environment object
Environment in which to run the benchmarks.
show_stderr : bool, optional
When `True`, display any stderr emitted by the benchmark.
quick : bool, optional
When `True`, run each benchmark function exactly once.
This is useful to quickly find errors in the benchmark
functions, without taking the time necessary to get
accurate timings.
profile : bool, optional
When `True`, run the benchmark through the `cProfile`
profiler.
skip : set, optional
Benchmark names to skip.
Returns
-------
result : dict
Returns a dictionary where the keys are benchmark names
and the values are dictionaries containing information
about running that benchmark.
Each of the values in the dictionary has the following
keys:
- `result`: The numeric value of the benchmark (usually
the runtime in seconds for a timing benchmark), but may
be an arbitrary JSON data structure. For parameterized tests,
this is a dictionary with keys 'params' and 'result', where
the value of 'params' contains a list of lists of parameter values,
and 'result' is a list of results, corresponding to itertools.product
iteration over parameters.
Set to `None` if the benchmark failed.
- `profile`: If `profile` is `True`, this key will exist,
and be a byte string containing the cProfile data.
"""
log.info("Benchmarking {0}".format(env.name))
with log.indent():
benchmarks = sorted(list(six.iteritems(self)))
# Remove skipped benchmarks
if skip:
benchmarks = [
(name, benchmark) for (name, benchmark) in
benchmarks if name not in skip]
# Organize benchmarks by the setup_cache_key
benchmark_order = {}
benchmark_timeout = {}
for name, benchmark in benchmarks:
key = benchmark.get('setup_cache_key')
benchmark_order.setdefault(key, []).append((name, benchmark))
# setup_cache timeout
benchmark_timeout[key] = max(benchmark.get('setup_cache_timeout',
benchmark['timeout']),
benchmark_timeout.get(key, 0))
times = {}
for setup_cache_key, benchmark_set in six.iteritems(benchmark_order):
tmpdir = tempfile.mkdtemp()
try:
if setup_cache_key is not None:
timeout = benchmark_timeout[setup_cache_key]
log.info("Setting up {0}".format(setup_cache_key))
out, err, errcode = env.run(
[BENCHMARK_RUN_SCRIPT, 'setup_cache',
os.path.abspath(self._benchmark_dir),
benchmark_set[0][0]],
dots=False, display_error=False,
return_stderr=True, valid_return_codes=None,
cwd=tmpdir, timeout=timeout)
if errcode:
# Dump program output
if show_stderr and err:
with log.indent():
log.error(err)
for name, benchmark in benchmark_set:
# TODO: Store more information about failure
timestamp = datetime.datetime.utcnow()
times[name] = {'result': None,
'stderr': err,
'started_at': timestamp,
'ended_at': timestamp}
continue
for name, benchmark in benchmark_set:
times[name] = run_benchmark(
benchmark, self._benchmark_dir, env,
show_stderr=show_stderr,
quick=quick, profile=profile,
cwd=tmpdir)
finally:
shutil.rmtree(tmpdir, True)
return times
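Each setup_cache group gets its own temp directory used as the child process's cwd, so benchmarks cannot pick up stray files from earlier runs; note also the positional True passed to rmtree, which is its ignore_errors argument. A sketch of the isolation idiom:
import shutil
import subprocess
import tempfile

def run_isolated(cmd):
    # An empty cwd per run keeps the child from seeing leftovers.
    tmpdir = tempfile.mkdtemp()
    try:
        return subprocess.call(cmd, cwd=tmpdir)
    finally:
        shutil.rmtree(tmpdir, True)  # second positional arg: ignore_errors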
0
Example 93
Project: anima Source File: test_version_creator.py
@classmethod
def setUpClass(cls):
"""setup once
"""
# remove the transaction manager
db.DBSession.remove()
cls.repo_path = tempfile.mkdtemp()
defaults.local_storage_path = tempfile.mktemp()
db.setup({
'sqlalchemy.url': 'sqlite:///:memory:',
'sqlalchemy.echo': 'false'
})
db.init()
# create Power Users Group
cls.power_users_group = Group(name='Power Users')
db.DBSession.add(cls.power_users_group)
db.DBSession.commit()
# create a LocalSession first
cls.admin = User.query.all()[0]
cls.lsession = LocalSession()
cls.lsession.store_user(cls.admin)
cls.lsession.save()
# create a repository
cls.test_repo1 = Repository(
name='Test Repository',
windows_path='T:/TestRepo/',
linux_path='/mnt/T/TestRepo/',
osx_path='/Volumes/T/TestRepo/'
)
cls.test_structure1 = Structure(
name='Test Project Structure',
templates=[],
custom_template=''
)
cls.status_new = Status.query.filter_by(code='NEW').first()
cls.status_wip = Status.query.filter_by(code='WIP').first()
cls.status_cmpl = Status.query.filter_by(code='CMPL').first()
cls.project_status_list = StatusList(
name='Project Statuses',
statuses=[cls.status_new, cls.status_wip, cls.status_cmpl],
target_entity_type=Project
)
# create a couple of projects
cls.test_project1 = Project(
name='Project 1',
code='P1',
repository=cls.test_repo1,
structure=cls.test_structure1,
status_list=cls.project_status_list
)
cls.test_project2 = Project(
name='Project 2',
code='P2',
repository=cls.test_repo1,
structure=cls.test_structure1,
status_list=cls.project_status_list
)
cls.test_project3 = Project(
name='Project 3',
code='P3',
repository=cls.test_repo1,
structure=cls.test_structure1,
status_list=cls.project_status_list
)
cls.projects = [
cls.test_project1,
cls.test_project2,
cls.test_project3
]
cls.test_user1 = User(
name='Test User',
# groups=[self.power_users_group],
login='tuser',
email='[email protected]',
password='secret'
)
db.DBSession.add(cls.test_user1)
db.DBSession.commit()
cls.admin.projects.append(cls.test_project1)
cls.admin.projects.append(cls.test_project2)
cls.admin.projects.append(cls.test_project3)
cls.test_user1.projects.append(cls.test_project1)
cls.test_user1.projects.append(cls.test_project2)
cls.test_user1.projects.append(cls.test_project3)
# project 1
cls.test_task1 = Task(
name='Test Task 1',
project=cls.test_project1,
resources=[cls.admin],
)
cls.test_task2 = Task(
name='Test Task 2',
project=cls.test_project1,
resources=[cls.admin],
)
cls.test_task3 = Task(
name='Test Task 3',
project=cls.test_project1,
resources=[cls.admin],
)
# project 2
cls.test_task4 = Task(
name='Test Task 4',
project=cls.test_project2,
resources=[cls.admin],
)
cls.test_task5 = Task(
name='Test Task 5',
project=cls.test_project2,
resources=[cls.admin],
)
cls.test_task6 = Task(
name='Test Task 6',
parent=cls.test_task5,
resources=[cls.admin],
)
cls.test_task7 = Task(
name='Test Task 7',
parent=cls.test_task5,
resources=[],
)
cls.test_task8 = Task(
name='Test Task 8',
parent=cls.test_task5,
resources=[],
)
cls.test_task9 = Task(
name='Test Task 9',
parent=cls.test_task5,
resources=[],
)
# +-> Project 1
# | |
# | +-> Task1
# | |
# | +-> Task2
# | |
# | +-> Task3
# |
# +-> Project 2
# | |
# | +-> Task4
# | |
# | +-> Task5
# | |
# | +-> Task6
# | |
# | +-> Task7 (no resource)
# | |
# | +-> Task8 (no resource)
# | |
# | +-> Task9 (no resource)
# |
# +-> Project 3
# record them all to the db
db.DBSession.add_all([
cls.admin, cls.test_project1, cls.test_project2, cls.test_project3,
cls.test_task1, cls.test_task2, cls.test_task3, cls.test_task4,
cls.test_task5, cls.test_task6, cls.test_task7, cls.test_task8,
cls.test_task9
])
db.DBSession.commit()
cls.all_tasks = [
cls.test_task1, cls.test_task2, cls.test_task3, cls.test_task4,
cls.test_task5, cls.test_task6, cls.test_task7, cls.test_task8,
cls.test_task9
]
# create versions
cls.test_version1 = Version(
cls.test_task1,
created_by=cls.admin,
created_with='Test',
description='Test Description'
)
db.DBSession.add(cls.test_version1)
db.DBSession.commit()
cls.test_version2 = Version(
cls.test_task1,
created_by=cls.admin,
created_with='Test',
description='Test Description'
)
db.DBSession.add(cls.test_version2)
db.DBSession.commit()
cls.test_version3 = Version(
cls.test_task1,
created_by=cls.admin,
created_with='Test',
description='Test Description'
)
cls.test_version3.is_published = True
db.DBSession.add(cls.test_version3)
db.DBSession.commit()
cls.test_version4 = Version(
cls.test_task1,
take_name='Main@GPU',
created_by=cls.admin,
created_with='Test',
description='Test Description'
)
cls.test_version4.is_published = True
db.DBSession.add(cls.test_version4)
db.DBSession.commit()
if not QtGui.QApplication.instance():
logger.debug('creating a new QApplication')
cls.app = QtGui.QApplication(sys.argv)
else:
logger.debug('using the present QApplication: %s' % QtGui.qApp)
# self.app = QtGui.qApp
cls.app = QtGui.QApplication.instance()
# cls.test_environment = TestEnvironment()
cls.dialog = version_creator.MainDialog()
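Two details of this fixture are worth flagging. cls.repo_path = tempfile.mkdtemp() creates a real directory once per test class, whereas tempfile.mktemp() (used for local_storage_path) only invents a name without creating anything and is race-prone; prefer mkdtemp()/mkstemp() where possible. A class-level mkdtemp() is normally paired with cleanup in tearDownClass, as in this minimal sketch (a hypothetical test case, not part of this suite):

import os
import shutil
import tempfile
import unittest

class RepoFixtureTest(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        # One scratch repository shared by every test in the class
        cls.repo_path = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        # Counterpart cleanup so the directory does not leak between runs
        shutil.rmtree(cls.repo_path, ignore_errors=True)

    def test_repo_path_is_a_directory(self):
        self.assertTrue(os.path.isdir(self.repo_path))

if __name__ == '__main__':
    unittest.main()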
Example 94
Project: qiime Source File: test_beta_diversity.py
def single_file_beta(
self, otu_table_string, tree_string, missing_sams=None,
use_metric_list=False):
""" running single_file_beta should give same result using --rows"""
if missing_sams is None:
missing_sams = []
# setup
fd, input_path = mkstemp(suffix='.txt')
os.close(fd)
in_fname = os.path.split(input_path)[1]
f = open(input_path, 'w')
f.write(otu_table_string)
f.close()
fd, tree_path = mkstemp(suffix='.tre')
os.close(fd)
f = open(tree_path, 'w')
f.write(tree_string)
f.close()
metrics = list_known_nonphylogenetic_metrics()
metrics.extend(list_known_phylogenetic_metrics())
output_dir = mkdtemp()
# new metrics that don't trivially parallelize must be dealt with
# carefully
warnings.filterwarnings('ignore', 'dissimilarity binary_dist_chisq is\
not parallelized, calculating the whole matrix...')
warnings.filterwarnings('ignore', 'dissimilarity dist_chisq is not\
parallelized, calculating the whole matrix...')
warnings.filterwarnings('ignore', 'dissimilarity dist_gower is not\
parallelized, calculating the whole matrix...')
warnings.filterwarnings('ignore', 'dissimilarity dist_hellinger is\
not parallelized, calculating the whole matrix...')
warnings.filterwarnings('ignore', 'unifrac had no information for\
sample M*')
self.files_to_remove.extend([input_path, tree_path])
self.folders_to_remove.append(output_dir)
os.mkdir(output_dir + '/ft/')
for metric in metrics:
# do it
if use_metric_list:
single_file_beta(input_path, [metric], tree_path, output_dir,
rowids=None)
else:
single_file_beta(input_path, metric, tree_path, output_dir,
rowids=None)
sams, dmtx = parse_distmat(open(output_dir + '/' +
metric + '_' + in_fname))
# do it by rows
for i in range(len(sams)):
if sams[i] in missing_sams:
continue
rows = sams[i]
row_outname = output_dir + '/' + metric + '_' +\
in_fname
if use_metric_list:
single_file_beta(input_path, [metric], tree_path,
output_dir, rowids=rows)
else:
single_file_beta(input_path, metric, tree_path, output_dir,
rowids=rows)
col_sams, row_sams, row_dmtx = parse_matrix(open(row_outname))
self.assertEqual(row_dmtx.shape, (len(rows.split(',')),
len(sams)))
# make sure rows same as full
for j in range(len(rows.split(','))):
for k in range(len(sams)):
row_v1 = row_dmtx[j, k]
full_v1 =\
dmtx[sams.index(row_sams[j]),
sams.index(col_sams[k])]
npt.assert_almost_equal(row_v1, full_v1)
# full tree run:
if 'full_tree' in str(metric).lower():
continue
# do it by rows with full tree
for i in range(len(sams)):
if sams[i] in missing_sams:
continue
rows = sams[i]
row_outname = output_dir + '/ft/' + metric + '_' +\
in_fname
if use_metric_list:
single_file_beta(input_path, [metric], tree_path,
output_dir + '/ft/', rowids=rows, full_tree=True)
else:
single_file_beta(input_path, metric, tree_path,
output_dir + '/ft/', rowids=rows, full_tree=True)
col_sams, row_sams, row_dmtx = parse_matrix(open(row_outname))
self.assertEqual(row_dmtx.shape, (len(rows.split(',')),
len(sams)))
# make sure rows same as full
for j in range(len(rows.split(','))):
for k in range(len(sams)):
row_v1 = row_dmtx[j, k]
full_v1 =\
dmtx[sams.index(row_sams[j]),
sams.index(col_sams[k])]
npt.assert_almost_equal(row_v1, full_v1)
# do it with full tree
if use_metric_list:
single_file_beta(input_path, [metric], tree_path,
output_dir + '/ft/', rowids=None, full_tree=True)
else:
single_file_beta(input_path, metric, tree_path,
output_dir + '/ft/', rowids=None, full_tree=True)
sams_ft, dmtx_ft = parse_distmat(open(output_dir + '/ft/' +
metric + '_' + in_fname))
self.assertEqual(sams_ft, sams)
npt.assert_almost_equal(dmtx_ft, dmtx)
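The setup here combines mkstemp() for input files (close the returned descriptor, then rewrite the path with a normal open()) with mkdtemp() for an output directory, registering both in files_to_remove/folders_to_remove so tearDown can clean them. A compact sketch of that bookkeeping, with the analysis step left as a placeholder:

import os
import shutil
from tempfile import mkdtemp, mkstemp

files_to_remove, folders_to_remove = [], []

# mkstemp() returns an OS-level descriptor and a path; close the
# descriptor first, then reopen the path like a normal file.
fd, input_path = mkstemp(suffix='.txt')
os.close(fd)
with open(input_path, 'w') as f:
    f.write('example input\n')
files_to_remove.append(input_path)

# mkdtemp() creates the output directory and returns its path.
output_dir = mkdtemp()
folders_to_remove.append(output_dir)

# ... run the code under test against input_path and output_dir ...

# What a tearDown would do with the bookkeeping lists above:
for path in files_to_remove:
    os.remove(path)
for folder in folders_to_remove:
    shutil.rmtree(folder)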
Example 95
Project: littlechef Source File: runner.py
def _readconfig():
"""Configures environment variables"""
config = ConfigParser.SafeConfigParser()
try:
found = config.read(littlechef.CONFIGFILE)
except ConfigParser.ParsingError as e:
abort(str(e))
if not len(found):
try:
found = config.read(['config.cfg', 'auth.cfg'])
except ConfigParser.ParsingError as e:
abort(str(e))
if len(found):
print('\nDeprecationWarning: deprecated config file name \'{0}\'.'
' Use {1}'.format(found[0], littlechef.CONFIGFILE))
else:
abort('No {0} file found in the current '
'directory'.format(littlechef.CONFIGFILE))
in_a_kitchen, missing = _check_appliances()
missing_str = lambda m: ' and '.join(', '.join(m).rsplit(', ', 1))
if not in_a_kitchen:
abort("Couldn't find {0}. "
"Are you executing 'fix' outside of a kitchen?\n"
"To create a new kitchen in the current directory "
" type 'fix new_kitchen'".format(missing_str(missing)))
# We expect an ssh_config file here,
# and/or a (user, password/keyfile) pair
try:
env.ssh_config_path = config.get('userinfo', 'ssh-config')
except ConfigParser.NoSectionError:
abort('You need to define a "userinfo" section'
' in the config file. Refer to the README for help '
'(http://github.com/tobami/littlechef)')
except ConfigParser.NoOptionError:
env.ssh_config_path = None
if env.ssh_config_path:
env.ssh_config = _SSHConfig()
env.ssh_config_path = os.path.expanduser(env.ssh_config_path)
env.use_ssh_config = True
try:
env.ssh_config.parse(open(env.ssh_config_path))
except IOError:
abort("Couldn't open the ssh-config file "
"'{0}'".format(env.ssh_config_path))
except Exception:
abort("Couldn't parse the ssh-config file "
"'{0}'".format(env.ssh_config_path))
else:
env.ssh_config = None
# check for a gateway
try:
env.gateway = config.get('connection', 'gateway')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
env.gateway = None
# check for http_proxy which will be put into solo.rb
try:
env.http_proxy = config.get('connection', 'http_proxy')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
env.http_proxy = None
try:
env.https_proxy = config.get('connection', 'https_proxy')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
env.https_proxy = None
try:
env.remove_data_bags = config.get('userinfo', 'remove_data_bags')
except ConfigParser.NoOptionError:
env.remove_data_bags = False
# Check for an encrypted_data_bag_secret file and set the env option
try:
env.encrypted_data_bag_secret = config.get('userinfo',
'encrypted_data_bag_secret')
except ConfigParser.NoOptionError:
env.encrypted_data_bag_secret = None
if env.encrypted_data_bag_secret:
env.encrypted_data_bag_secret = os.path.expanduser(
env.encrypted_data_bag_secret)
try:
open(env.encrypted_data_bag_secret)
except IOError as e:
abort("Failed to open encrypted_data_bag_secret file at "
"'{0}'".format(env.encrypted_data_bag_secret))
try:
sudo_prefix = config.get('ssh', 'sudo_prefix', raw=True)
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
pass
else:
env.sudo_prefix = sudo_prefix
try:
env.user = config.get('userinfo', 'user')
except ConfigParser.NoOptionError:
if not env.ssh_config_path:
msg = 'You need to define a user in the "userinfo" section'
msg += ' of {0}. Refer to the README for help'
msg += ' (http://github.com/tobami/littlechef)'
abort(msg.format(littlechef.CONFIGFILE))
user_specified = False
else:
user_specified = True
try:
env.password = config.get('userinfo', 'password') or None
except ConfigParser.NoOptionError:
pass
try:
# If keypair-file is empty, assign None or fabric will try to read key
env.key_filename = config.get('userinfo', 'keypair-file') or None
except ConfigParser.NoOptionError:
pass
if (user_specified and not env.password and not env.key_filename
and not env.ssh_config):
abort('You need to define a password, keypair file, or ssh-config '
'file in {0}'.format(littlechef.CONFIGFILE))
# Node's Chef Solo working directory for storing cookbooks, roles, etc.
try:
env.node_work_path = os.path.expanduser(config.get('kitchen',
'node_work_path'))
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
env.node_work_path = littlechef.node_work_path
else:
if not env.node_work_path:
abort('The "node_work_path" option cannot be empty')
# Follow symlinks
try:
env.follow_symlinks = config.getboolean('kitchen', 'follow_symlinks')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
env.follow_symlinks = False
try:
env.berksfile = config.get('kitchen', 'berksfile')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:
env.berksfile = None
else:
try:
env.berksfile_cookbooks_directory = config.get('kitchen', 'berksfile_cookbooks_directory')
littlechef.cookbook_paths.append(env.berksfile_cookbooks_directory)
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:
if env.berksfile:
env.berksfile_cookbooks_directory = tempfile.mkdtemp('littlechef-berks')
littlechef.cookbook_paths.append(env.berksfile_cookbooks_directory)
else:
env.berksfile_cookbooks_directory = None
chef.ensure_berksfile_cookbooks_are_installed()
# Upload Directory
try:
env.sync_packages_dest_dir = config.get('sync-packages',
'dest-dir')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
env.sync_packages_dest_dir = None
# Local Directory
try:
env.sync_packages_local_dir = config.get('sync-packages',
'local-dir')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
env.sync_packages_local_dir = None
try:
env.autodeploy_chef = config.get('userinfo', 'autodeploy_chef') or None
except ConfigParser.NoOptionError:
env.autodeploy_chef = None
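Note how the berksfile branch only falls back to tempfile.mkdtemp('littlechef-berks') when no cookbooks directory is configured. Also note that mkdtemp's first positional parameter is suffix, so that call produces a name ending in 'littlechef-berks' (e.g. /tmp/tmpab12cdlittlechef-berks), not starting with it. A small sketch of the fallback:

import tempfile

def cookbooks_directory(configured=None):
    """Prefer an explicitly configured directory, else a scratch one.

    mkdtemp(suffix, prefix, dir): the first positional argument is the
    suffix, so the tag lands at the end of the directory name.
    """
    if configured:
        return configured
    return tempfile.mkdtemp('littlechef-berks')

print(cookbooks_directory())          # e.g. /tmp/tmpab12cdlittlechef-berks
print(cookbooks_directory('/srv/cookbooks'))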
Example 96
Project: sdaps Source File: generic.py
def create_stamp_pdf(survey, output_filename, questionnaire_ids):
sheets = 1 if questionnaire_ids is None else len(questionnaire_ids)
questionnaire_length = survey.questionnaire.page_count
have_pdftk = False
# Test if pdftk is present, if it is we can use it to be faster
try:
result = subprocess.Popen(['pdftk', '--version'], stdout=subprocess.PIPE)
# Just assume pdftk is there, if it was executed successfully
if result is not None:
have_pdftk = True
except OSError:
pass
if not have_pdftk:
try:
import pyPdf
except:
log.error(_(u'You need to have either pdftk or pyPdf installed. pdftk is the faster method.'))
sys.exit(1)
# Write the "stamp" out to tmp.pdf if are using pdftk.
if have_pdftk:
stampsfile = file(survey.path('tmp.pdf'), 'wb')
else:
stampsfile = StringIO.StringIO()
canvas = \
reportlab.pdfgen.canvas.Canvas(stampsfile,
bottomup=False,
pagesize=(survey.defs.paper_width * mm,
survey.defs.paper_height * mm))
# bottomup = False =>(0, 0) is the upper left corner
print ungettext(u'Creating stamp PDF for %i sheet', u'Creating stamp PDF for %i sheets', sheets) % sheets
log.progressbar.start(sheets)
for i in range(sheets):
if questionnaire_ids is not None:
id = questionnaire_ids.pop(0)
for j in range(questionnaire_length):
if survey.defs.style == "classic":
draw_corner_marks(survey, canvas)
draw_corner_boxes(survey, canvas, j)
if not survey.defs.duplex or j % 2:
if questionnaire_ids is not None:
draw_questionnaire_id(canvas, survey, id)
if survey.defs.print_survey_id:
draw_survey_id(canvas, survey)
elif survey.defs.style == "code128":
draw_corner_marks(survey, canvas)
if not survey.defs.duplex or j % 2:
if questionnaire_ids is not None:
draw_code128_questionnaire_id(canvas, survey, id)
# Survey ID has to be printed in CODE128 mode, because it
# contains the page number and rotation.
draw_code128_sdaps_info(canvas, survey, j + 1)
if survey.global_id is not None:
draw_code128_global_id(canvas, survey)
elif survey.defs.style == "qr":
draw_corner_marks(survey, canvas)
if not survey.defs.duplex or j % 2:
if questionnaire_ids is not None:
draw_qr_questionnaire_id(canvas, survey, id)
# Survey ID has to be printed in QR mode, because it
# contains the page number and rotation.
draw_qr_sdaps_info(canvas, survey, j + 1)
if survey.global_id is not None:
draw_qr_global_id(canvas, survey)
elif survey.defs.style == "custom":
# Only draw corner marker
draw_corner_marks(survey, canvas)
pass
else:
raise AssertionError()
canvas.showPage()
log.progressbar.update(i + 1)
canvas.save()
print ungettext(u'%i sheet; %f seconds per sheet', u'%i sheet; %f seconds per sheet', log.progressbar.max_value) % (
log.progressbar.max_value,
float(log.progressbar.elapsed_time) /
float(log.progressbar.max_value)
)
if have_pdftk:
stampsfile.close()
# Merge using pdftk
print _("Stamping using pdftk")
tmp_dir = tempfile.mkdtemp()
if sheets == 1:
# Shortcut if we only have one sheet.
# In this case form data in the PDF will *not* break, in
# the other code path it *will* break.
print _(u"pdftk: Overlaying the original PDF with the markings.")
subprocess.call(['pdftk',
survey.path('questionnaire.pdf'),
'multistamp',
survey.path('tmp.pdf'),
'output',
output_filename])
else:
for page in xrange(1, questionnaire_length + 1):
print ungettext(u"pdftk: Splitting out page %d of each sheet.", u"pdftk: Splitting out page %d of each sheet.", page) % page
args = []
args.append('pdftk')
args.append(survey.path('tmp.pdf'))
args.append('cat')
cur = page
for i in range(sheets):
args.append('%d' % cur)
cur += questionnaire_length
args.append('output')
args.append(os.path.join(tmp_dir, 'stamp-%d.pdf' % page))
subprocess.call(args)
print _(u"pdftk: Splitting the questionnaire for watermarking.")
subprocess.call(['pdftk', survey.path('questionnaire.pdf'),
'dump_data', 'output',
os.path.join(tmp_dir, 'doc_data.txt')])
for page in xrange(1, questionnaire_length + 1):
subprocess.call(['pdftk', survey.path('questionnaire.pdf'), 'cat',
'%d' % page, 'output',
os.path.join(tmp_dir, 'watermark-%d.pdf' % page)])
if sheets == 1:
for page in xrange(1, questionnaire_length + 1):
print ungettext(u"pdftk: Watermarking page %d of all sheets.", u"pdftk: Watermarking page %d of all sheets.", page) % page
subprocess.call(['pdftk',
os.path.join(tmp_dir, 'stamp-%d.pdf' % page),
'background',
os.path.join(tmp_dir, 'watermark-%d.pdf' % page),
'output',
os.path.join(tmp_dir, 'watermarked-%d.pdf' % page)])
else:
for page in xrange(1, questionnaire_length + 1):
print ungettext(u"pdftk: Watermarking page %d of all sheets.", u"pdftk: Watermarking page %d of all sheets.", page) % page
subprocess.call(['pdftk',
os.path.join(tmp_dir, 'stamp-%d.pdf' % page),
'background',
os.path.join(tmp_dir, 'watermark-%d.pdf' % page),
'output',
os.path.join(tmp_dir, 'watermarked-%d.pdf' % page)])
args = []
args.append('pdftk')
for page in xrange(1, questionnaire_length + 1):
char = chr(ord('A') + page - 1)
args.append('%s=' % char + os.path.join(tmp_dir, 'watermarked-%d.pdf' % page))
args.append('cat')
for i in range(sheets):
for page in xrange(1, questionnaire_length + 1):
char = chr(ord('A') + page - 1)
args.append('%s%d' % (char, i + 1))
args.append('output')
args.append(os.path.join(tmp_dir, 'final.pdf'))
print _(u"pdftk: Assembling everything into the final PDF.")
subprocess.call(args)
subprocess.call(['pdftk', os.path.join(tmp_dir, 'final.pdf'),
'update_info', os.path.join(tmp_dir, 'doc_data.txt'),
'output', output_filename])
# Remove tmp.pdf
os.unlink(survey.path('tmp.pdf'))
# Remove all the temporary files
shutil.rmtree(tmp_dir)
else:
# Merge using pyPdf
stamped = pyPdf.PdfFileWriter()
stamped._info.getObject().update({
pyPdf.generic.NameObject('/Producer'): pyPdf.generic.createStringObject(u'sdaps'),
pyPdf.generic.NameObject('/Title'): pyPdf.generic.createStringObject(survey.title),
})
subject = []
for key, value in survey.info.iteritems():
subject.append(u'%(key)s: %(value)s' % {'key': key, 'value': value})
subject = u'\n'.join(subject)
stamped._info.getObject().update({
pyPdf.generic.NameObject('/Subject'): pyPdf.generic.createStringObject(subject),
})
stamps = pyPdf.PdfFileReader(stampsfile)
del stampsfile
questionnaire = pyPdf.PdfFileReader(
file(survey.path('questionnaire.pdf'), 'rb')
)
print _(u'Stamping using pyPdf. For faster stamping, install pdftk.')
log.progressbar.start(sheets)
for i in range(sheets):
for j in range(questionnaire_length):
s = stamps.getPage(i * questionnaire_length + j)
if not have_pdftk:
q = questionnaire.getPage(j)
s.mergePage(q)
stamped.addPage(s)
log.progressbar.update(i + 1)
stamped.write(open(output_filename, 'wb'))
print ungettext(u'%i sheet; %f seconds per sheet', u'%i sheet; %f seconds per sheet',
log.progressbar.max_value) % (
log.progressbar.max_value,
float(log.progressbar.elapsed_time) /
float(log.progressbar.max_value))
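In the pdftk branch all the per-page intermediates (stamp-N.pdf, watermark-N.pdf, doc_data.txt, final.pdf) live inside a single tempfile.mkdtemp() directory, so one shutil.rmtree(tmp_dir) at the end removes everything without tracking individual files. A reduced sketch of that arrangement (assumes pdftk is on PATH; the merge step is a placeholder):

import os
import shutil
import subprocess
import tempfile

def split_pdf_pages(pdf_path, page_count):
    """Split a PDF into per-page files inside one scratch directory."""
    tmp_dir = tempfile.mkdtemp()
    try:
        for page in range(1, page_count + 1):
            subprocess.call(['pdftk', pdf_path, 'cat', str(page), 'output',
                             os.path.join(tmp_dir, 'page-%d.pdf' % page)])
        # ... watermark/merge the per-page files here ...
    finally:
        # One call cleans up every intermediate file at once
        shutil.rmtree(tmp_dir)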
Example 97
Project: luigi Source File: hadoop.py
def run_job(self, job, tracking_url_callback=None):
if tracking_url_callback is not None:
warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
"used instead.", DeprecationWarning)
packages = [luigi] + self.modules + job.extra_modules() + list(_attached_packages)
# find the module containing the job
packages.append(__import__(job.__module__, None, None, 'dummy'))
# find the path to our runner.py
runner_path = mrrunner.__file__
# assume source is next to compiled
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
base_tmp_dir = configuration.get_config().get('core', 'tmp-dir', None)
if base_tmp_dir:
warnings.warn("The core.tmp-dir configuration item is"
" deprecated, please use the TMPDIR"
" environment variable if you wish"
" to control where luigi.contrib.hadoop may"
" create temporary files and directories.")
self.tmp_dir = os.path.join(base_tmp_dir, 'hadoop_job_%016x' % random.getrandbits(64))
os.makedirs(self.tmp_dir)
else:
self.tmp_dir = tempfile.mkdtemp()
logger.debug("Tmp dir: %s", self.tmp_dir)
# build arguments
config = configuration.get_config()
python_executable = config.get('hadoop', 'python-executable', 'python')
runner_arg = 'mrrunner.pex' if job.package_binary is not None else 'mrrunner.py'
command = '{0} {1} {{step}}'.format(python_executable, runner_arg)
map_cmd = command.format(step='map')
cmb_cmd = command.format(step='combiner')
red_cmd = command.format(step='reduce')
output_final = job.output().path
# atomic output: replace output with a temporary work directory
if self.end_job_with_atomic_move_dir:
illegal_targets = (
luigi.s3.S3FlagTarget, luigi.contrib.gcs.GCSFlagTarget)
if isinstance(job.output(), illegal_targets):
raise TypeError("end_job_with_atomic_move_dir is not supported"
" for {}".format(illegal_targets))
output_hadoop = '{output}-temp-{time}'.format(
output=output_final,
time=datetime.datetime.now().isoformat().replace(':', '-'))
else:
output_hadoop = output_final
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', self.streaming_jar]
# 'libjars' is a generic option, so place it first
libjars = [libjar for libjar in self.libjars]
for libjar in self.libjars_in_hdfs:
run_cmd = luigi.contrib.hdfs.load_hadoop_cmd() + ['fs', '-get', libjar, self.tmp_dir]
logger.debug(subprocess.list2cmdline(run_cmd))
subprocess.call(run_cmd)
libjars.append(os.path.join(self.tmp_dir, os.path.basename(libjar)))
if libjars:
arglist += ['-libjars', ','.join(libjars)]
# 'archives' is also a generic option
if self.archives:
arglist += ['-archives', ','.join(self.archives)]
# Add static files and directories
extra_files = get_extra_files(job.extra_files())
files = []
for src, dst in extra_files:
dst_tmp = '%s_%09d' % (dst.replace('/', '_'), random.randint(0, 999999999))
files += ['%s#%s' % (src, dst_tmp)]
# -files doesn't support subdirectories, so we need to create the dst_tmp -> dst manually
job.add_link(dst_tmp, dst)
if files:
arglist += ['-files', ','.join(files)]
jobconfs = job.jobconfs()
for k, v in six.iteritems(self.jobconfs):
jobconfs.append('%s=%s' % (k, v))
for conf in jobconfs:
arglist += ['-D', conf]
arglist += self.streaming_args
arglist += ['-mapper', map_cmd]
if job.combiner != NotImplemented:
arglist += ['-combiner', cmb_cmd]
if job.reducer != NotImplemented:
arglist += ['-reducer', red_cmd]
packages_fn = 'mrrunner.pex' if job.package_binary is not None else 'packages.tar'
files = [
runner_path if job.package_binary is None else None,
os.path.join(self.tmp_dir, packages_fn),
os.path.join(self.tmp_dir, 'job-instance.pickle'),
]
for f in filter(None, files):
arglist += ['-file', f]
if self.output_format:
arglist += ['-outputformat', self.output_format]
if self.input_format:
arglist += ['-inputformat', self.input_format]
allowed_input_targets = (
luigi.contrib.hdfs.HdfsTarget,
luigi.s3.S3Target,
luigi.contrib.gcs.GCSTarget)
for target in luigi.task.flatten(job.input_hadoop()):
if not isinstance(target, allowed_input_targets):
raise TypeError('target must be one of: {}'.format(
allowed_input_targets))
arglist += ['-input', target.path]
allowed_output_targets = (
luigi.contrib.hdfs.HdfsTarget,
luigi.s3.S3FlagTarget,
luigi.contrib.gcs.GCSFlagTarget)
if not isinstance(job.output(), allowed_output_targets):
raise TypeError('output must be one of: {}'.format(
allowed_output_targets))
arglist += ['-output', output_hadoop]
# submit job
if job.package_binary is not None:
shutil.copy(job.package_binary, os.path.join(self.tmp_dir, 'mrrunner.pex'))
else:
create_packages_archive(packages, os.path.join(self.tmp_dir, 'packages.tar'))
job.dump(self.tmp_dir)
run_and_track_hadoop_job(arglist, tracking_url_callback=job.set_tracking_url)
if self.end_job_with_atomic_move_dir:
luigi.contrib.hdfs.HdfsTarget(output_hadoop).move_dir(output_final)
self.finish()
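The temporary-directory choice at the top of run_job is the interesting part: a configured core.tmp-dir takes precedence (with a manually composed, randomized subdirectory), otherwise tempfile.mkdtemp() is used, which already honours the TMPDIR environment variable that the deprecation warning points users at. Isolated as a sketch:

import os
import random
import tempfile

def make_job_tmp_dir(base_tmp_dir=None):
    """Explicit base directory if configured, else tempfile.mkdtemp().

    mkdtemp() consults tempfile.gettempdir(), which honours TMPDIR,
    so dropping the legacy option keeps behaviour controllable.
    """
    if base_tmp_dir:
        tmp_dir = os.path.join(base_tmp_dir,
                               'hadoop_job_%016x' % random.getrandbits(64))
        os.makedirs(tmp_dir)
        return tmp_dir
    return tempfile.mkdtemp()

print(make_job_tmp_dir())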
Example 98
Project: plugin.video.streamondemand Source File: mct.py
def play(url, xlistitem, is_view=None, subtitle=""):
# -- Needed for some websites --------------------------------
if not url.endswith(".torrent") and not url.startswith("magnet"):
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
# -- Create two folders in downloads for the files ----------
save_path_videos = os.path.join( config.get_setting("downloadpath") , "torrent-videos" )
save_path_torrents = os.path.join( config.get_setting("downloadpath") , "torrent-torrents" )
if not os.path.exists( save_path_torrents ): os.mkdir(save_path_torrents)
# -- Use a torrent file from the web, a magnet link or disk -
if not os.path.isfile(url) and not url.startswith("magnet"):
# -- http - create the torrent file ----------------------
data = url_get(url)
# -- The torrent name is the one contained in the --
# -- downloaded data. -
re_name = urllib.unquote( scrapertools.get_match(data,':name\d+:(.*?)\d+:') )
#torrent_file = os.path.join(save_path_torrents, re_name+'.torrent')
torrent_file = filetools.join(save_path_torrents, unicode(re_name, "utf-8", errors="replace")+'.torrent')
f = open(torrent_file,'wb')
f.write(data)
f.close()
elif os.path.isfile(url):
# -- file - to use torrents from the local disk ----------
torrent_file = url
else:
# -- magnet ---------------------------------------------
torrent_file = url
# -----------------------------------------------------------
# -- MCT - MiniClienteTorrent -------------------------------
ses = lt.session()
print "### Init session ########"
print lt.version
print "#########################"
ses.add_dht_router("router.bittorrent.com",6881)
ses.add_dht_router("router.utorrent.com",6881)
ses.add_dht_router("router.bitcomet.com",554)
ses.add_dht_router("dht.transmissionbt.com",6881)
trackers = [
"http://exodus.desync.com:6969/announce",
"udp://tracker.publicbt.com:80/announce",
"udp://tracker.openbittorrent.com:80/announce",
"http://tracker.torrentbay.to:6969/announce",
"http://fr33dom.h33t.com:3310/announce",
"http://tracker.pow7.com/announce",
"udp://tracker.ccc.de:80/announce",
"http://tracker.bittorrent.am:80/announce",
"http://denis.stalker.h3q.com:6969/announce",
"udp://tracker.prq.to:80/announce",
"udp://tracker.istole.it:80/announce",
"udp://open.demonii.com:1337",
"http://9.rarbg.com:2710/announce",
"http://announce.torrentsmd.com:6969/announce",
"http://bt.careland.com.cn:6969/announce",
"http://explodie.org:6969/announce",
"http://mgtracker.org:2710/announce",
"http://tracker.best-torrents.net:6969/announce",
"http://tracker.tfile.me/announce",
"http://tracker.torrenty.org:6969/announce",
"http://tracker1.wasabii.com.tw:6969/announce",
"udp://9.rarbg.com:2710/announce",
"udp://9.rarbg.me:2710/announce",
"udp://coppersurfer.tk:6969/announce",
"udp://tracker.btzoo.eu:80/announce",
"http://www.spanishtracker.com:2710/announce",
"http://www.todotorrents.com:2710/announce",
]
video_file = ""
# -- magnet2torrent -----------------------------------------
if torrent_file.startswith("magnet"):
tempdir = tempfile.mkdtemp()
params = {
'save_path': tempdir,
'trackers':trackers,
'storage_mode': lt.storage_mode_t.storage_mode_allocate,
'paused': False,
'auto_managed': True,
'duplicate_is_error': True
}
h = lt.add_magnet_uri(ses, torrent_file, params)
dp = xbmcgui.DialogProgress()
dp.create('streamondemand-MCT')
while not h.has_metadata():
message, porcent, msg_file, s, download = getProgress(h, "Creating torrent from magnet")
dp.update(porcent, message, msg_file)
if s.state == 1: download = 1
if dp.iscanceled():
dp.close()
remove_files( download, torrent_file, video_file, ses, h )
return
dp.close()
info = h.get_torrent_info()
data = lt.bencode( lt.create_torrent(info).generate() )
#torrent_file = os.path.join(save_path_torrents, info.name() + ".torrent")
torrent_file = os.path.join(save_path_torrents, unicode(info.name(), "utf-8", errors="replace") + ".torrent")
f = open(torrent_file,'wb')
f.write(data)
f.close()
ses.remove_torrent(h)
shutil.rmtree(tempdir)
# -----------------------------------------------------------
# -- Torrent files -------------------------------------------
e = lt.bdecode(open(torrent_file, 'rb').read())
info = lt.torrent_info(e)
# -- The largest file (or one of the largest) is assumed -
# -- to be the video, or at least the video used as the -
# -- reference for the file type -
print "##### Archivos ## %s ##" % len(info.files())
_index_file, _video_file, _size_file = get_video_file(info)
_video_file_ext = os.path.splitext( _video_file )[1]
if _video_file_ext == ".avi" or _video_file_ext == ".mp4":
print "##### storage_mode_t.storage_mode_allocate ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_allocate } )
else:
print "##### storage_mode: none ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_sparse } )
# -----------------------------------------------------------
# -- Sequential download - piece 1, piece 2, ... ------------
h.set_sequential_download(True)
h.force_reannounce()
h.force_dht_announce()
# -- Prioritize/select the file ------------------------------
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1:
_index = _index_file
video_file = _video_file
video_size = _size_file
# -- Initialize variables for the automatic 'pause' when -
# -- the video approaches a piece that is not complete -
is_greater_num_pieces = False
is_greater_num_pieces_plus = False
is_greater_num_pieces_pause = False
#porcent4first_pieces = int( video_size / 1073741824 )
porcent4first_pieces = int( video_size * 0.000000005 )
if porcent4first_pieces < 10: porcent4first_pieces = 10
if porcent4first_pieces > 100: porcent4first_pieces = 100
#num_pieces_to_resume = int( video_size / 1610612736 )
num_pieces_to_resume = int( video_size * 0.0000000025 )
if num_pieces_to_resume < 5: num_pieces_to_resume = 5
if num_pieces_to_resume > 25: num_pieces_to_resume = 25
print "##### porcent4first_pieces ## %s ##" % porcent4first_pieces
print "##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume
# -- Prioritize or select the pieces of the file to be -
# -- played using 'file_priorities' -
piece_set = set_priority_pieces(h, _index, video_file, video_size)
# -- Create the progress dialog for the first loop ----------
dp = xbmcgui.DialogProgress()
dp.create('streamondemand-MCT')
_pieces_info = {}
# -- Two nested loops ----------------------------------------
# -- Download - first loop -
while not h.is_seed():
s = h.status()
xbmc.sleep(100)
# -- Fetch progress data ---------------------------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
# -- If it is 'checking', a download exists --------------
# -- 'download' is used to know whether any data has -
# -- been downloaded for the 'remove_files' dialog -
if s.state == 1: download = 1
# -- Player - play --------------------------------------
# -- Check whether the pieces needed to start the -
# -- video have been completed -
first_pieces = True
_p = ""
_c = 0
for i in range( piece_set[0], piece_set[porcent4first_pieces] ):
_p+= "[%s:%s]" % ( i, h.have_piece(i) )
first_pieces&= h.have_piece(i)
if h.have_piece(i): _c+= 1
_pieces_info = {'current': 0, 'continuous': "%s/%s" % (_c,porcent4first_pieces), 'have': h.status().num_pieces, 'len': len(piece_set)}
_p = "##### first_pieces [%s/%s][%s]: " % ( _c, porcent4first_pieces, len(piece_set) ) + _p
print _p
# -- -------------------------------------------------- -
if is_view != "Ok" and first_pieces:
print "##### porcent [%.2f%%]" % (s.progress * 100)
is_view = "Ok"
dp.close()
# -- Player - play the video -------------------------
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
#ren_video_file = os.path.join( save_path_videos, video_file ).replace('\\','\\\\')
ren_video_file = os.path.join( save_path_videos, video_file )
playlist.add( ren_video_file, xlistitem )
#playlist.add( os.path.join( save_path_videos, video_file ), xlistitem )
#playlist.add( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20'), xlistitem )
player = play_video( xbmc.PLAYER_CORE_AUTO )
player.play(playlist)
'''
# -- Player - Ver el vídeo --------------------------
player = play_video()
#player.play( os.path.join( save_path_videos, video_file ) )
player.play( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20') )
'''
#player.play( os.path.join( save_path_videos, video_file ) )
# -- Cancellation counter for the automatic -
# -- 'pause' window -
is_greater_num_pieces_canceled = 0
continuous_pieces = 0
porcent_time = 0.00
current_piece = 0
# -- Keep Kodi from resuming a file that was played ----
# -- before and then deleted, which would make it -
# -- attempt playback at a piece that has not been -
# -- completed yet and trigger the automatic -
# -- 'pause' -
not_resume = True
# -- Subtitles flag
_sub = False
# -- Second loop - Player - event handling ----------
while player.isPlaying():
xbmc.sleep(100)
# -- Add subtitles
if subtitle!="" and not _sub:
_sub = True
player.setSubtitles(subtitle)
# -- Keep Kodi from resuming at the start of the ---
# -- download of a known file -
if not_resume:
player.seekTime(0)
not_resume = False
#xbmc.sleep(1000)
# -- Automatic 'pause' control -
continuous_pieces = count_completed_continuous_pieces(h, piece_set)
if xbmc.Player().isPlaying():
# -- Percentage of video progress ------------
porcent_time = player.getTime() / player.getTotalTime() * 100
# -- Piece currently being played ------------
current_piece = int( porcent_time / 100 * len(piece_set) )
# -- Control flags ---------------------------
is_greater_num_pieces = (current_piece > continuous_pieces - num_pieces_to_resume)
is_greater_num_pieces_plus = (current_piece + porcent4first_pieces > continuous_pieces)
is_greater_num_pieces_finished = (current_piece + porcent4first_pieces >= len(piece_set))
# -- Trigger the automatic 'pause' -----------
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
is_greater_num_pieces_pause = True
player.pause()
# -- Log ------------------------------------
_TotalTime = player.getTotalTime()
_Time = player.getTime()
_print_log = "\n##### Player ##################################"
_print_log+= "\nTamaño del vídeo: %s" % video_size
_print_log+= "\nTotal piezas: %s" % len(piece_set)
_print_log+= "\nPiezas contiguas: %s" % continuous_pieces
_print_log+= "\n-----------------------------------------------"
_print_log+= "\nVídeo-Total segundos: %s" % _TotalTime
_print_log+= "\nVídeo-Progreso segundos: %s" % _Time
_print_log+= "\nVídeo-Progreso porcentaje: %.2f%%" % porcent_time
_print_log+= "\n-----------------------------------------------"
_print_log+= "\ncurrent_piece: %s" % current_piece
_print_log+= "\nis_greater_num_pieces: %s" % is_greater_num_pieces
_print_log+= "\nis_greater_num_pieces_plus: %s" % is_greater_num_pieces_plus
_print_log+= "\nis_greater_num_pieces_pause: %s" % is_greater_num_pieces_pause
_print_log+= "\nis_greater_num_pieces_finished: %s" % is_greater_num_pieces_finished
_print_log+= "\nPieza que se está visionando: %.2f" % ( porcent_time / 100 * len(piece_set) )
_print_log+= "\nOffset que se está visionando: %.2f" % ( porcent_time / 100 * video_size )
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
_print_log+= "\n+++++++++++++++++++++++++++++++++++++++++++++++"
_print_log+= "\nPausa con:"
_print_log+= "\n current_piece = %s" % current_piece
_print_log+= "\n continuous_pieces = %s" % continuous_pieces
_print_log+= "\n###############################################"
print _print_log
# -------------------------------------------
_pieces_info = {'current': current_piece, 'continuous': continuous_pieces, 'have': h.status().num_pieces, 'len': len(piece_set)}
# -- Close the progress dialog ------------------
if player.resumed:
dp.close()
# -- Show the progress dialog -------------------
if player.paused:
# -- Create the dialog if it does not exist -
if not player.statusDialogoProgress:
dp = xbmcgui.DialogProgress()
dp.create('streamondemand-MCT')
player.setDialogoProgress()
# -- Status dialogs while playing -----------
if not h.is_seed():
# -- Fetch progress data ----------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
dp.update(porcent, message, msg_file)
else:
dp.update(100, "Download completo: " + video_file)
# -- Progress was cancelled during playback -
# -- Continue -
if dp.iscanceled():
dp.close()
player.pause()
# -- Progress was cancelled during playback -
# -- in the automatic 'pause' window. -
# -- Stop if the counter reaches 3 -
if dp.iscanceled() and is_greater_num_pieces_pause:
is_greater_num_pieces_canceled+= 1
if is_greater_num_pieces_canceled == 3:
player.stop()
# -- Disable the automatic 'pause' and ------
# -- reset the cancellation counter -
if not dp.iscanceled() and not is_greater_num_pieces_plus and is_greater_num_pieces_pause:
dp.close()
player.pause()
is_greater_num_pieces_pause = False
is_greater_num_pieces_canceled = 0
# -- The user cancelled playback ------------
# -- Finish -
if player.ended:
# -- Delete-files dialog ----------------
remove_files( download, torrent_file, video_file, ses, h )
return
# -- Kodi - playback was closed --------------------------
# -- Continue | Finish -
if is_view == "Ok" and not xbmc.Player().isPlaying():
if info.num_files() == 1:
# -- Continue-or-finish dialog ------------------
d = xbmcgui.Dialog()
ok = d.yesno('streamondemand-MCT', 'Kodi ha chiuso il video.', 'Continuare con la sessione?')
else: ok = False
# -- YES --------------------------------------------
if ok:
# -- Continue: ----------------------------------
is_view=None
else:
# -- Finish: ------------------------------------
# -- Check whether the video belongs to a -------
# -- list of files -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Delete-files dialog --------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- File list. Options dialog -
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('streamondemand-MCT')
# -- Show progress before playback ----------------------
if is_view != "Ok" :
dp.update(porcent, message, msg_file)
# -- Progress was cancelled before playback -------------
# -- Finish -
if dp.iscanceled():
dp.close()
# -- Check whether the video belongs to a list of -
# -- files -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Delete-files dialog ------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- File list. Options dialog ------------------
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('streamondemand-MCT')
# -- Kodi - Error? - We should never get here ---------------
if is_view == "Ok" and not xbmc.Player().isPlaying():
dp.close()
# -- Delete-files dialog --------------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
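Buried in this long player function is a self-contained mkdtemp() use: a throwaway directory serves as the libtorrent save_path just long enough to resolve a magnet link's metadata, the metadata is bencoded to a real .torrent file, and the directory is removed. A sketch of only that step, using the same python-libtorrent calls as above (the polling loop is simplified; real code should offer cancellation as the example does):

import shutil
import tempfile
import time

import libtorrent as lt

def magnet_to_torrent(magnet_uri, out_path):
    """Resolve magnet metadata via a scratch directory, save a .torrent."""
    ses = lt.session()
    tempdir = tempfile.mkdtemp()
    try:
        h = lt.add_magnet_uri(ses, magnet_uri, {'save_path': tempdir})
        while not h.has_metadata():
            time.sleep(0.1)  # simplified: no progress dialog or cancel
        info = h.get_torrent_info()
        with open(out_path, 'wb') as f:
            f.write(lt.bencode(lt.create_torrent(info).generate()))
        ses.remove_torrent(h)
    finally:
        shutil.rmtree(tempdir)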
Example 99
Project: virtaal Source File: tutorial.py
def create_localized_tutorial():
"""Save on disk a tutorial POT file with comments using current locale."""
# All the entries in the tutorial.
#
# It is a tuple of entries, in which entry is in the form of a tuple with a
# comment for the translator, a string (or list of source strings) and an
# optional string context (blank string if not provided).
tutorial_entries = (
# Translators: Don't translate the "Welcome" word.
(_(u"Welcome to the Virtaal tutorial. You can do the first translation by "
u"typing just the translation for \"Welcome\". Then press Enter."),
u"Welcome",
u""),
(_(u"Translate this slightly longer message. If a spell checker is "
u"available, spelling mistakes are indicated similarly to word "
u"processors. Make sure the correct language is selected in the bottom "
u"right of the window."),
u"With this file you can learn about translation using Virtaal",
u""),
(_(u"This tutorial will show you some of the things you might want to pay "
u"attention to while translating software programs. It will help you "
u"to avoid some problems and produce translations of a higher "
u"quality."),
u"Quality is important",
u""),
(_(u"Some of the advice will only be relevant to some languages. For "
u"example, if your language does not use the Latin alphabet, some of "
u"the advice might not be relevant to translation in your language. "
u"For many languages there are established translation rules."),
u"Languages are different",
u""),
(_(u"The correct use of capital letters are important in many languages. "
u"Translate this message with careful attention to write \"Virtaal\" "
u"with a capital letter."),
u"The product we use is called Virtaal",
u""),
(_(u"In this message the English uses a capital letter for almost every "
u"word. Almost no other language uses this style. Unless your language "
u"definitely needs to follow the English style (also called Title "
u"Case), translate this by following the normal capitalisation rules "
u"for your language. If your language does not use capital letters, "
u"simply translate it normally."),
u"Download the File Now",
u""),
(_(u"If you translated the previous message you should see a window with "
u"that translation and a percentage indicating how similar the source "
u"strings (English) are. It is Virtaal's translation memory at work. "
u"Press Ctrl+1 to copy the suggested translation to the current "
u"translation. Remember to always review suggestions before you use "
u"them."),
u"Download the files now",
u""),
(_(u"This is a simple message that starts with a capital letter in "
u"English. If your language uses capital letters, you almost "
u"definitely want to start your translation with a capital letter as "
u"well."),
u"Time",
u""),
(_(u"This is a simple message that starts with a lower case letter in "
u"English. If your language uses capital letters, you almost "
u"definitely want to start your translation with a lower case letter "
u"as well."),
u"later",
u""),
(_(u"This message is a question. Make sure that you use the correct "
u"question mark in your translation as well."),
u"What is your name?",
u""),
(_(u"This message is a label as part of a form. Note how it ends with a "
u"colon (:)."),
u"Name:",
u""),
(_(u"If the source will remain mostly or completely unchanged it is "
u"convenient to copy the entire source string with Alt+Down. Here is "
u"almost nothing to translate, so just press Alt+Down and make "
u"corrections if necessary."),
u"<b><a href=\"http://virtaal.org/\">Virtaal</a></b>",
u""),
(_(u"Placeables are special parts of the text, like the © symbol, that "
u"can be automatically highlighted and easily inserted into the "
u"translation. Select the © with Alt+Right and transfer it to the "
u"target with Alt+Down."),
u"© Virtaal Team",
u""),
(_(u"Recognised placeables include special symbols, numbers, variable "
u"placeholders, acronyms and many more. Move to each one with "
u"Alt+Right and transfer it down with Alt+Down."),
u"© 2009 contributors",
u""),
(_(u"This message ends with ... to indicate that clicking on this text "
u"will cause a dialogue to appear instead of just performing an "
u"action. Be sure to end your message with ... as well."),
u"Save As...",
u""),
(_(u"This message ends with a special character that looks like three "
u"dots. Select the special character with Alt+Right and copy it to "
u"your translation with Alt+Down. Don't just type three dot "
u"characters."),
u"Save As…",
u""),
(_(u"This message has two sentences. Translate them and make sure you "
u"start each with a capital letter if your language uses them, and end "
u"each sentence properly."),
u"Always try your best. Many people are available to learn from.",
u""),
(_(u"This message marks the word \"now\" as important with bold tags. "
u"These tags can be transferred from the source with Alt+Right and "
u"Alt+Down. Leave the <b> and </b> in the translation around the part "
u"that corresponds to \"now\". Read more about XML markup here: "
u"http://en.wikipedia.org/wiki/XML"),
u"Restart the program <b>now</b>",
u""),
(_(u"This message is very similar to the previous message. Use the "
u"suggestion of the previous translation with Ctrl+1. Note how the "
u"only difference is that this one ends with a full stop after the "
u"last tag."),
u"Restart the program <b>now</b>.",
u""),
(_(u"In this message \"%d\" is a placeholder (variable) that represents a "
u"number. Make sure your translation contains \"%d\" somewhere. In "
u"this case it refers a number of files. When this message is used the "
u"\"%d\" will be replaced with a number e.g. 'Number of files copied: "
u"5'. Note that \"%d\" does not refer to a percentage."),
u"Number of files copied: %d",
u""),
(_(u"In this message, \"%d\" refers again to the number of files, but "
u"note how the \"(s)\" is used to show that we don't know how many it "
u"will be. This is often hard to translate well. If you encounter this "
u"in software translation, you might want to hear from developers if "
u"this can be avoided. Read more about this and decide how to do it in "
u"your language: http://docs.translatehouse.org/projects/"
u"localization-guide/en/latest/guide/translation/plurals.html"),
u"%d file(s) will be downloaded",
u""),
# Entry with plurals.
(_(u"In this message the proper way of translating plurals are seen. You "
u"need to enter between 1 and 6 different versions of the translation "
u"to ensure the correct grammar in your language. Read more about this "
u"here: http://docs.translatehouse.org/projects/localization-guide/en/"
u"latest/guide/translation/plurals.html"),
[
u"%d file will be downloaded",
u"%d files will be downloaded",
],
u""),
(_(u"In this message, \"%s\" is a placeholder (variable) that represents "
u"a file name. Make sure your translation contains %s somewhere. When "
u"this message is used, the %s will be replaced with a file name e.g. "
u"'The file will be saved as example.odt'. Note that \"%s\" does not "
u"refer to a percentage."),
u"The file will be saved as %s",
u""),
(_(u"In this message the variable is surrounded by double quotes. Make "
u"sure your translation contains the variable %s and surround it "
u"similarly with quotes in the way required by your language. If your "
u"language uses the same quotes as English, type it exactly as shown "
u"for the English. If your language uses different quoting characters "
u"you can just type them around the variable."),
u"The file \"%s\" was not saved",
u""),
(_(u"In this message, \"%(name)s\" is a placeholder (variable). Note that "
u"the 's' is part of the variable, and the whole variable from '%' to "
u"the 's' should appear unchanged somewhere in your translation. These "
u"type of variables give you an idea of what they will contain. In "
u"this case, it will contain a name."),
u"Welcome back, %(name)s",
u""),
(_(u"In this message the user of the software is asked to do something. "
u"Make sure you translate it by being as polite or respectful as is "
u"necessary for your culture."),
u"Please enter your password here",
u""),
(_(u"In this message there is reference to \"Linux\" (a product name). "
u"Many languages will not translate it, but your language might use a "
u"transliteration if you don't use the Latin script for your "
u"language."),
u"This software runs on Linux",
u""),
(_(u"This message contains the URL (web address) of the project website. "
u"It must be transferred as a placeable or typed over exactly."),
u"Visit the project website at http://virtaal.org/",
u""),
(_(u"This message refers to a website with more information. Sometimes "
u"you might be allowed or encouraged to change the URL (web address) "
u"to a website in your language. In this case, replace the \"en\" at "
u"the start of the address to your language code so that the address "
u"points to the corresponding article in your language about XML."),
u"For more information about XML, visit http://en.wikipedia.org/wiki/XML",
u""),
# Entry with context message.
(_(u"This translation contains an ambiguous word - it has two possible "
u"meanings. Make sure you can see the context information showing that "
u"this is a verb (an action as in \"click here to view it\")."),
u"View",
u"verb"),
# Entry with context message.
(_(u"This translation contains an ambiguous word - it has two possible "
u"meanings. Make sure you can see the context information showing that "
u"this is a noun (a thing as in \"click to change the view to full "
u"screen\"). If Virtaal gives your previous translation as a "
u"suggestion, take care to only use it if it is definitely appropriate "
u"in this case as well."),
u"View",
u"noun"),
(_(u"An accelerator key is a key on your keyboard that you can press to "
u"quickly access a menu or function. It is also called a hot key, "
u"access key or mnemonic. In program interfaces they are shown as an "
u"underlined letter in the text label. In the translatable text they "
u"are marked using some character like the underscore here, but other "
u"characters are used for this as well. In this case the the "
u"accelerator key is \"f\" since the underscore is before this letter "
u"and it means that this accelerator could be triggered by pressing "
u"Alt+F."),
u"_File",
u""),
(_(u"In this entry you can see other kind of accelerator."),
u"&File",
u""),
(_(u"And another kind of accelerator."),
u"~File",
u""),
# Entry with context message.
(_(u"Virtaal is able to provide suggestions from several terminology "
u"glossaries and provides easy shortcuts to allow paste them in the "
u"translation field. Right now Virtaal has only one empty terminology "
u"glossary, but you can start filling it. In order to do that select "
u"the original text, press Ctrl+T, provide a translation for your "
u"language, and save."),
u"Filter",
u"verb"),
# Entry with context message.
(_(u"In the previous entry you have created one terminology entry for the "
u"\"filter\" verb. Now do the same for \"filter\" noun."),
u"Filter",
u"noun"),
(_(u"If you have created any terminology in the previous entries you may "
u"now see some of the words with a green background (or other color "
u"depending on your theme). This means that Virtaal has terminology "
u"suggestions for that word. Use Alt+Right to select the highlighted "
u"word, and then press Alt+Down. If only one suggestion is provided "
u"then Alt+Down just copies the suggestion to the translation field. "
u"But if several suggestions are available Alt+Down shows a suggestion "
u"list which you can navigate using Down and Up keys. Once you have "
u"selected the desired suggestion press Enter to copy it to the "
u"translation field."),
u"Filter the list by date using the \"filter by date\" filter.",
u""),
(_(u"This message has two lines. Make sure that your translation also "
u"contains two lines. You can separate lines with Shift+Enter or copy "
u"newline placeables (displayed as ¶)."),
(u"A camera has been connected to your computer.\nNo photos were found "
u"on the camera."),
u""),
(_(u"This message contains tab characters to separate some headings. Make "
u"sure you separate your translations in the same way."),
u"Heading 1\tHeading 2\tHeading 3",
u""),
(_(u"This message contains a large number that is formatted according to "
u"the American convention. Translate this but make sure to format the "
u"number according to your language's convention. You might need to "
u"change the comma (,) and full stop (.) to other characters, and you "
u"also might need to use a different number system. Make sure that you "
u"understand the American formatting: this number is bigger than one "
u"thousand."),
u"It will take 1,234.56 hours to do",
u""),
(_(u"This message refers to miles. If the programmers encourage it, you "
u"might want to change this to kilometres in your translation, if "
u"kilometers are more commonly used in your language. Note that 1 mile "
u"is about 1.6 kilometres. Note that automated tests for \"numbers\" "
u"will complain if the number is changed, but in this case it is safe "
u"to do so."),
u"The road is 10 miles long",
u""),
(_(u"This message contains a link that the user will be able to click on "
u"to visit the help page. Make sure you correctly keep the information "
u"between the angle brackets (<...>). The double quotes (\") should "
u"never be changed in tags, even if your language uses a different "
u"type of quotation marks."),
(u"Feel free to visit our <a "
u"href=\"http://docs.translatehouse.org/projects/virtaal/en/latest/\">"
u"help page</a>"),
u""),
(_(u"This message contains a similar link, but the programmers decided to "
u"rather insert the tags by using variables so that translators can't "
u"change them. Make sure you position the two variables (%s) so that "
u"they correspond to the opening and closing tags of the previous "
u"translation."),
u"Feel free to visit our %shelp page%s",
u""),
(_(u"This message contains the <b> and </b> tags to emphasize a word, "
u"while everything is within the <p> and </p> tags. Make sure your "
u"whole translation is within the <p> and </p> tags."),
u"<p>Restart the program <b>now</b></p>",
u""),
(_(u"This message contains a similar link that is contained within <span> "
u"and </span>. Make sure you correctly keep all the tags (<...>), and "
u"that the link is completely contained within the <span> and </span> "
u"tags in your translation. Make sure that the text inside the "
u"\"a\" tags correspond to \"help page\" and that your translation "
u"corresponding to the second sentence is contained in the <span> "
u"tags. Note how the full stop is still inside the </span> tag."),
(u"The software has many features. <span class=\"info\">Feel free to "
u"to visit our <a "
u"href=\"http://docs.translatehouse.org/projects/virtaal/en/latest/\">"
u"help page</a>.</span>"),
u""),
)
# Tutorial filename: a temporary file inside a fresh temporary directory.
filename = os.path.join(mkdtemp("", "tmp_virtaal_"), "virtaal_tutorial.pot")
tutorial_file = factory.getobject(filename)
for comment, source, context in tutorial_entries:
# The next call creates a unit with the provided source (even if plural)
# and returns it. In case of plural, source should be a list of strings
# instead of a string.
unit = tutorial_file.addsourceunit(source)
if isinstance(source, list):
# This may be unnecessary: Virtaal opens the file without crashing
# even when a plural entry has only a single msgstr, and it still
# shows the appropriate number of translation fields (for the
# target language).
unit.settarget([u"", u""])
unit.addnote(comment, "developer")
unit.setcontext(context)
tutorial_file.save()
# Return the filename to enable opening the file.
return filename
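
One detail of the mkdtemp call above is easy to misread: the first positional argument is the suffix and the second is the prefix. A minimal standalone sketch of the equivalent keyword form (the print is purely illustrative); note that mkdtemp, unlike a higher-level wrapper, never deletes the directory itself:

import os
import tempfile

# equivalent to mkdtemp("", "tmp_virtaal_"): the suffix comes first
# and the prefix second when the arguments are passed positionally
tmpdir = tempfile.mkdtemp(suffix="", prefix="tmp_virtaal_")
filename = os.path.join(tmpdir, "virtaal_tutorial.pot")

# the directory is created readable/writable only by the creating
# user and stays on disk until the caller removes it, e.g. with
# shutil.rmtree(tmpdir)
print(filename)  # e.g. /tmp/tmp_virtaal_abc123/virtaal_tutorial.pot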
0
Example 100
Project: cgat Source File: psl2wiggle.py
def main(argv=sys.argv):

    parser = E.OptionParser(
        version="%prog version: $Id: psl2wiggle.py 2834 2009-11-24 16:11:23Z andreas $", usage=globals()["__doc__"])

    parser.add_option("-g", "--genome-file", dest="genome_file", type="string",
                      help="filename with genome [default=%default].")

    parser.add_option("-b", "--output-filename-pattern", dest="output_filename", type="string",
                      help="filename for output [default=%default]")

    parser.add_option("-o", "--output-format", dest="output_format", type="choice",
                      choices=("bedgraph", "wiggle", "bigbed", "bigwig"),
                      help="output format [default=%default]")

    parser.set_defaults(genome_file=None,
                        typecode=numpy.int16,
                        output_filename=None,
                        output_format="wiggle",
                        test=None)

    (options, args) = E.Start(parser, add_pipe_options=True)

    typecode = options.typecode

    if options.genome_file:
        fasta = IndexedFasta.IndexedFasta(options.genome_file)
        counts = {}
        contig_sizes = fasta.getContigSizes(with_synonyms=False)
        E.info("allocating memory for %i contigs and %i bytes" %
               (len(contig_sizes), sum(contig_sizes.values()) * typecode().itemsize))
        for contig, size in list(contig_sizes.items()):
            E.debug("allocating %s: %i bases" % (contig, size))
            counts[contig] = numpy.zeros(size, typecode)
        E.info("allocated memory for %i contigs" % len(fasta))
    else:
        fasta = None
        contig_sizes = {}

    if options.output_format in ("bigwig", "bigbed"):

        if not options.genome_file:
            raise ValueError(
                "please supply a genome file for bigwig/bigbed computation.")
        if not options.output_filename:
            raise ValueError(
                "please supply an output file for bigwig/bigbed computation.")

        if options.output_format == "bigwig":
            executable_name = "wigToBigWig"
        elif options.output_format == "bigbed":
            executable_name = "bedToBigBed"
        else:
            raise ValueError("unknown output format `%s`" %
                             options.output_format)

        executable = IOTools.which(executable_name)

        if not executable:
            raise OSError("could not find %s in path." % executable_name)

        # intermediate output goes to a temporary directory which is
        # removed after a successful conversion
        tmpdir = tempfile.mkdtemp()
        E.debug("temporary files are in %s" % tmpdir)

        tmpfile_wig = os.path.join(tmpdir, "wig")
        tmpfile_sizes = os.path.join(tmpdir, "sizes")

        # write contig sizes
        outfile_size = IOTools.openFile(tmpfile_sizes, "w")
        for contig, size in list(contig_sizes.items()):
            outfile_size.write("%s\t%s\n" % (contig, size))
        outfile_size.close()

        outfile = IOTools.openFile(tmpfile_wig, "w")

    else:
        outfile = options.stdout

    iterator = Blat.BlatIterator(sys.stdin)

    ninput, ncontigs, nskipped = 0, 0, 0

    E.info("started counting")

    while 1:

        if options.test and ninput >= options.test:
            break

        match = next(iterator)

        if match is None:
            break

        ninput += 1

        contig = match.mSbjctId

        # accumulate per-base coverage on the target (Sbjct) sequence
        # for every alignment block of this match
        for start, length in zip(match.mSbjctBlockStarts, match.mBlockSizes):
            counts[contig][start:start + length] += 1

    E.info("finished counting")

    if options.output_format in ("wiggle", "bigwig"):
        E.info("starting wig output")

        for contig, vals in list(counts.items()):
            E.debug("output for %s" % contig)
            # group consecutive positions with the same count into blocks
            for val, group in itertools.groupby(enumerate(vals), lambda x: x[1]):
                block = list(group)
                start, end = block[0][0], block[-1][0]
                val = vals[start]
                if val > 0:
                    outfile.write("variableStep chrom=%s span=%i\n" %
                                  (contig, end - start + 1))
                    outfile.write("%i\t%i\n" % (start, val))

            ncontigs += 1

    elif options.output_format in ("bedgraph", "bigbed"):
        E.info("starting bedgraph output")

        for contig, vals in list(counts.items()):
            E.debug("output for %s" % contig)
            for val, group in itertools.groupby(enumerate(vals), lambda x: x[1]):
                block = list(group)
                start, end = block[0][0], block[-1][0]
                val = vals[start]
                if val > 0:
                    outfile.write("%s\t%i\t%i\t%i\n" %
                                  (contig, start, end + 1, val))

            ncontigs += 1

    E.info("finished output")

    if options.output_format in ("bigwig", "bigbed"):
        outfile.close()

        E.info("starting %s conversion" % options.output_format)
        try:
            retcode = subprocess.call(" ".join((executable,
                                                tmpfile_wig,
                                                tmpfile_sizes,
                                                os.path.abspath(options.output_filename))),
                                      shell=True)
            if retcode < 0:
                E.warn("%s terminated with signal: %i" %
                       (executable_name, -retcode))
                return -retcode
        except OSError as msg:
            E.warn("error while executing %s: %s" % (executable_name, msg))
            return 1

        shutil.rmtree(tmpdir)

        E.info("finished %s conversion" % options.output_format)

    E.info("ninput=%i, ncontigs=%i, nskipped=%i\n" %
           (ninput, ncontigs, nskipped))

    E.Stop()
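
Worth noting about the temporary-directory handling in this example: shutil.rmtree(tmpdir) is only reached when the external conversion succeeds, so an early return or an exception leaves the mkdtemp directory behind. A minimal sketch, not the cgat code, of the same pattern with the cleanup made unconditional via try/finally (the helper name and its arguments are illustrative):

import os
import shutil
import subprocess
import tempfile

def run_conversion(executable, wig_lines, sizes_lines, output_filename):
    # illustrative helper: write intermediate files into a fresh
    # temporary directory, run the converter, always clean up
    tmpdir = tempfile.mkdtemp()
    try:
        tmpfile_wig = os.path.join(tmpdir, "wig")
        tmpfile_sizes = os.path.join(tmpdir, "sizes")
        with open(tmpfile_wig, "w") as f:
            f.writelines(wig_lines)
        with open(tmpfile_sizes, "w") as f:
            f.writelines(sizes_lines)
        return subprocess.call(
            [executable, tmpfile_wig, tmpfile_sizes,
             os.path.abspath(output_filename)])
    finally:
        # runs on success, early return and exception alike
        shutil.rmtree(tmpdir)

On Python 3.2+ the same guarantee is available from tempfile.TemporaryDirectory used as a context manager, which removes the directory on exit.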