Here are examples of the Python API sys.path.append taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
154 Examples
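All of the snippets below share one pattern: append a directory to sys.path so that Python's import machinery can find modules living outside the normal search path, then import them. Here is a minimal sketch of that pattern; the directory and module names (plugins, my_plugin) are hypothetical placeholders, and the cleanup step mirrors what several examples below do with sys.path.remove or sys.path.pop.

import os
import sys

# Hypothetical directory holding a module named my_plugin.py
plugin_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "plugins")

sys.path.append(plugin_dir)  # make the directory importable
try:
    import my_plugin  # resolved through the appended path entry
finally:
    sys.path.remove(plugin_dir)  # undo the change so it does not leak

Appending puts the directory at the lowest import priority; some examples (e.g. Example 62) use sys.path.insert(0, ...) instead when the new directory must win over existing entries.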
Example 51
Project: mxnet-ssd Source File: train_net.py
def train_net(net, dataset, image_set, year, devkit_path, batch_size,
data_shape, mean_pixels, resume, finetune, pretrained, epoch, prefix,
ctx, begin_epoch, end_epoch, frequent, learning_rate,
momentum, weight_decay, val_set, val_year,
lr_refactor_epoch, lr_refactor_ratio,
iter_monitor=0, log_file=None):
"""
Wrapper for training module
Parameters:
---------
net : mx.Symbol
training network
dataset : str
pascal, imagenet...
image_set : str
train, trainval...
year : str
2007, 2012 or combinations split by comma
devkit_path : str
root directory of dataset
batch_size : int
training batch size
data_shape : int or (int, int)
resize image size
mean_pixels : tuple (float, float, float)
mean pixel values in (R, G, B)
resume : int
if > 0, will load trained epoch with name given by prefix
finetune : int
if > 0, will load trained epoch with name given by prefix, in this mode
all convolutional layers except the last (prediction layer) are fixed
pretrained : str
prefix of pretrained model name
epoch : int
epoch of pretrained model
prefix : str
prefix of new model
ctx : mx.gpu(?) or list of mx.gpu(?)
training context
begin_epoch : int
begin epoch, default should be 0
end_epoch : int
when to stop training
frequent : int
frequency to log out batch_end_callback
learning_rate : float
learning rate, will be divided by batch_size automatically
momentum : float
(0, 1), training momentum
weight_decay : float
decay weights regardless of gradient
val_set : str
similar to image_set, used for validation
val_year : str
similar to year, used for validation
lr_refactor_epoch : int
number of epoch to change learning rate
lr_refactor_ratio : float
new_lr = old_lr * lr_refactor_ratio
iter_monitor : int
if larger than 0, will print weights/gradients every iter_monitor iters
log_file : str
log to file if not None
Returns:
---------
None
"""
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if log_file:
fh = logging.FileHandler(log_file)
logger.addHandler(fh)
# check args
if isinstance(data_shape, int):
data_shape = (data_shape, data_shape)
assert len(data_shape) == 2, "data_shape must be (h, w) tuple or list or int"
prefix += '_' + str(data_shape[0])
if isinstance(mean_pixels, (int, float)):
mean_pixels = [mean_pixels, mean_pixels, mean_pixels]
assert len(mean_pixels) == 3, "must provide all RGB mean values"
# load dataset
if dataset == 'pascal':
imdb = load_pascal(image_set, year, devkit_path, cfg.TRAIN.INIT_SHUFFLE)
if val_set and val_year:
val_imdb = load_pascal(val_set, val_year, devkit_path, False)
else:
val_imdb = None
else:
raise NotImplementedError("Dataset " + dataset + " not supported")
# init data iterator
train_iter = DetIter(imdb, batch_size, data_shape, mean_pixels,
cfg.TRAIN.RAND_SAMPLERS, cfg.TRAIN.RAND_MIRROR,
cfg.TRAIN.EPOCH_SHUFFLE, cfg.TRAIN.RAND_SEED,
is_train=True)
# save per N epoch, avoid saving too frequently
resize_epoch = int(cfg.TRAIN.RESIZE_EPOCH)
if resize_epoch > 1:
batches_per_epoch = ((imdb.num_images - 1) / batch_size + 1) * resize_epoch
train_iter = mx.io.ResizeIter(train_iter, batches_per_epoch)
train_iter = mx.io.PrefetchingIter(train_iter)
if val_imdb:
val_iter = DetIter(val_imdb, batch_size, data_shape, mean_pixels,
cfg.VALID.RAND_SAMPLERS, cfg.VALID.RAND_MIRROR,
cfg.VALID.EPOCH_SHUFFLE, cfg.VALID.RAND_SEED,
is_train=True)
val_iter = mx.io.PrefetchingIter(val_iter)
else:
val_iter = None
# load symbol
sys.path.append(os.path.join(cfg.ROOT_DIR, 'symbol'))
net = importlib.import_module("symbol_" + net).get_symbol_train(imdb.num_classes)
# define layers with fixed weight/bias
fixed_param_names = [name for name in net.list_arguments() \
if name.startswith('conv1_') or name.startswith('conv2_')]
# load pretrained or resume from previous state
ctx_str = '('+ ','.join([str(c) for c in ctx]) + ')'
if resume > 0:
logger.info("Resume training with {} from epoch {}"
.format(ctx_str, resume))
_, args, auxs = mx.model.load_checkpoint(prefix, resume)
begin_epoch = resume
elif finetune > 0:
logger.info("Start finetuning with {} from epoch {}"
.format(ctx_str, finetune))
_, args, auxs = mx.model.load_checkpoint(prefix, finetune)
begin_epoch = finetune
# the prediction convolution layers' names start with relu, so it's fine
fixed_param_names = [name for name in net.list_arguments() \
if name.startswith('conv')]
elif pretrained:
logger.info("Start training with {} from pretrained model {}"
.format(ctx_str, pretrained))
_, args, auxs = mx.model.load_checkpoint(pretrained, epoch)
args = convert_pretrained(pretrained, args)
else:
logger.info("Experimental: start training from scratch with {}"
.format(ctx_str))
args = None
auxs = None
fixed_param_names = None
# helper information
if fixed_param_names:
logger.info("Freezed parameters: [" + ','.join(fixed_param_names) + ']')
# init training module
mod = mx.mod.Module(net, label_names=('label',), logger=logger, context=ctx,
fixed_param_names=fixed_param_names)
# fit
batch_end_callback = mx.callback.Speedometer(train_iter.batch_size, frequent=frequent)
epoch_end_callback = mx.callback.do_checkpoint(prefix)
iter_refactor = lr_refactor_epoch * imdb.num_images / train_iter.batch_size
lr_scheduler = mx.lr_scheduler.FactorScheduler(iter_refactor, lr_refactor_ratio)
optimizer_params={'learning_rate':learning_rate,
'momentum':momentum,
'wd':weight_decay,
'lr_scheduler':lr_scheduler,
'clip_gradient':None,
'rescale_grad': 1.0}
monitor = mx.mon.Monitor(iter_monitor, pattern=".*") if iter_monitor > 0 else None
mod.fit(train_iter,
eval_data=val_iter,
eval_metric=MultiBoxMetric(),
batch_end_callback=batch_end_callback,
epoch_end_callback=epoch_end_callback,
optimizer='sgd',
optimizer_params=optimizer_params,
begin_epoch=begin_epoch,
num_epoch=end_epoch,
initializer=CustomInitializer(factor_type="in", magnitude=1),
arg_params=args,
aux_params=auxs,
allow_missing=True,
monitor=monitor)
Example 52
Project: pymtl Source File: systemc.py
def __call__( self, *args, **kwargs ):
inst = super( SomeMeta, self ).__call__( *args, **kwargs )
# Get the full path of source folders based on the location of
# the python class and the relative path
inst._auto_init()
for i, x in enumerate(inst.sourcefolder):
if x.startswith("/"):
# already absolute path, pass
pass
else:
# relative path, do concatenation
x = os.path.dirname( inspect.getfile( inst.__class__ ) ) + os.sep + x
if not x.endswith(os.sep):
x += os.sep
inst.sourcefolder[i] = x
inst.vcd_file = '__dummy__'
inst.elaborate()
# Postpone port dict until elaboration.
if not inst._port_dict:
inst._port_dict = { port.name: port for port in inst.get_ports() }
# else:
# print(inst._port_dict)
sc_module_name = inst.__class__.__name__
model_name = inst.class_name
c_wrapper_file = model_name + '_sc.cpp'
py_wrapper_file = model_name + '_sc.py'
lib_file = 'lib{}_sc.so'.format( model_name )
obj_dir = 'obj_dir_' + model_name + os.sep
if not exists( obj_dir ):
os.mkdir( obj_dir )
include_dirs = deepcopy( inst.sourcefolder )
include_dirs.append( obj_dir )
# Copy all specified source file to obj folder for later compilation
# Also try to copy header files by inferring the file extension
# At the same time check caching status
#
# Check the combination of a path, a filename and an extension
# for both the header and the source. According to C++
# convention the header should have the same filename as the
# source file for the compiler to match.
#
# The reason why I split the source array and header array into
# two groups is for performance -- to hopefully reduce the number
# of disk inode lookups and disk accesses by breaking the loop
# when a header/source is found.
uncached = {}
src_ext = {}
tmp_objs = []
hashfile = obj_dir + "/.hashdict"
hashdict = {}
if exists( hashfile ):
with open( hashfile, "r" ) as f:
hashdict = json.load( f )
for path in inst.sourcefolder:
for filename in inst.sourcefile:
file_prefix = path + filename
temp_prefix = obj_dir + filename
for group in [ [".h", ".hh", ".hpp", ".h++" ], # header group
[".cc", ".cpp", ".c++", ".cxx"] ]:# source group
for ext in group:
target_file = file_prefix + ext
temp_obj = temp_prefix + ".o"
if not exists( target_file ):
# OK this is not the correct extension.
continue
tmp_objs.append( temp_obj )
if ext.startswith(".c"):
src_ext[temp_prefix] = ext
# July 11, 2016
# This piece of code copies all the files
# for caching/tracking purpose.
# Now I use SHA1 hash value to track the update of files,
# so I comment out these lines.
# temp_file = temp_prefix + ext
# 1. No .o file, then yeah it hasn't been cached.
# 2. No .c file, probably something unexpected happened.
# 3. See if the cached file is not up to date.
# if not exists( temp_obj ) or \
# not exists( temp_file ) or \
# not filecmp.cmp( temp_file, target_file ):
# if exists( temp_obj ):
# os.remove( temp_obj )
# copyfile( target_file, temp_file )
# uncached[temp_prefix] = target_file
# 1. No .o file
# 2. Not in the hash value dictionary
# 3. Hash value match?
def get_hash( filename ):
with open( filename, "r" ) as f:
return hashlib.sha1( f.read() ).hexdigest()
h = get_hash( target_file )
if not exists( temp_obj ) or \
target_file not in hashdict or \
h != hashdict[target_file]:
uncached[temp_prefix] = file_prefix
hashdict[target_file] = h
break
# This part handles missing source files. Specifically, if the
# user specifies "foo" in s.sourcefile, but the above code cannot
# find foo under any prefix in any folder of s.sourcefolder, we
# have to terminate the compilation.
unmatched = []
for x in inst.sourcefile:
matched = False
for y in src_ext:
if basename(y) == x:
matched = True
break
if not matched:
unmatched.append( "\""+ x + "\"" )
if unmatched:
raise SystemCSourceFileError( '\n'
'- Source file for [{}] not found.\n'
'- Please double check s.sourcefolder and s.sourcefile!'\
.format(", ".join( unmatched )) )
# Remake only if there are uncached files
if not uncached:
# print( "All Cached!")
pass
else:
# print( "Not Cached", uncached )
# Dump new hashdict
with open( hashfile, "w" ) as f:
json.dump( hashdict, f )
# Compile all uncached modules to .o object file
for obj, src in uncached.items():
compile_object( obj, src + src_ext[obj], include_dirs )
# Regenerate the shared library .so file if individual modules are
# updated or the .so file is missing.
if uncached or not exists( lib_file ):
# Use list for tmp_objs and all_objs to keep dependencies
# O(n^2) but maybe we could refine it later when we need to deal
# with thousands of files ...
all_objs = []
for o in tmp_objs:
if o not in all_objs:
all_objs.append(o)
systemc_to_pymtl( inst, # model instance
obj_dir, include_dirs, sc_module_name,
all_objs, c_wrapper_file, lib_file, # c wrapper
py_wrapper_file # py wrapper
)
# Follows are the same as Translation Tool
# Use some trickery to import the compiled version of the model
sys.path.append( os.getcwd() )
__import__( py_wrapper_file[:-3] )
imported_module = sys.modules[ py_wrapper_file[:-3] ]
# Get the model class from the module, instantiate and elaborate it
model_class = imported_module.__dict__[ model_name ]
new_inst = model_class()
new_inst.vcd_file = None
new_inst.__class__.__name__ = inst.__class__.__name__
new_inst.__class__.__bases__ = (SystemCModel,)
new_inst._args = inst._args
new_inst.modulename = inst.modulename
new_inst.sourcefile = inst.sourcefile
new_inst.sourcefolder = inst.sourcefolder
new_inst.sclinetrace = inst.sclinetrace
new_inst._param_dict = inst._param_dict
new_inst._port_dict = inst._port_dict
# TODO: THIS IS SUPER HACKY. FIXME
# This copies the user-defined line_trace method from the
# SystemCModel to the generated Python wrapper.
try:
new_inst.__class__.line_trace = inst.__class__.__dict__['line_trace']
# If we make it here, the user has set SystemC line tracing to
# true but has _also_ defined a PyMTL line_trace method; you
# can't have both.
if inst.sclinetrace:
raise SystemCImportError( "Cannot define a PyMTL line_trace\n"
"function and also use sclinetrace = True. Must use _either_\n"
"PyMTL line tracing _or_ SystemC line tracing." )
except KeyError:
pass
return new_inst
Example 53
def play_file(script, filename=None, args=None):
"""
Run a script.
:param script:
:param filename:
:param args:
"""
global runner
log.info('Running script %s' % script)
runner = Runner(script)
if filename is None:
fd = open(script)
data = fd.readline()[:-1] + fd.readline()[:-1]
# Check for run: lines in the doctests
# run: ....
pos = data.find('run:')
if pos != -1:
rest = data[pos + 5:]
# run: foo --arg
if ' ' in rest:
filename, args = rest.split(' ', 1)
args = [args]
# run: foo
else:
filename = rest
else:
if args is None:
args = []
mod_path = os.path.dirname(filename)
mod_name = os.path.basename(filename)[:-3]
# We are running __import__ instead of execfile so the module will be
# the file itself, not this one (kiwi.ui.test.runner).
try:
sys.path.append(mod_path)
__import__(mod_name, locals(), globals())
finally:
sys.path.remove(mod_path)
Example 54
def collect(self, device, ip, user, password):
"""
This function collects the metrics for one filer.
"""
sys.path.append(self.config['netappsdkpath'])
try:
import NaServer
except ImportError:
self.log.error("Unable to load NetApp SDK from %s" % (
self.config['netappsdkpath']))
return
# Set up the parameters
server = NaServer.NaServer(ip, 1, 3)
server.set_transport_type('HTTPS')
server.set_style('LOGIN')
server.set_admin_user(user, password)
# We're only able to query a single object at a time,
# so we'll loop over the objects.
for na_object in self.METRICS.keys():
# For easy reference later, generate a new dict for this object
LOCALMETRICS = {}
for metric in self.METRICS[na_object]:
metricname, prettyname, multiplier = metric
LOCALMETRICS[metricname] = {}
LOCALMETRICS[metricname]["prettyname"] = prettyname
LOCALMETRICS[metricname]["multiplier"] = multiplier
# Keep track of how long has passed since we checked last
CollectTime = time.time()
time_delta = None
if na_object in self.LastCollectTime.keys():
time_delta = CollectTime - self.LastCollectTime[na_object]
self.LastCollectTime[na_object] = CollectTime
self.log.debug("Collecting metric of object %s" % na_object)
query = NaServer.NaElement("perf-object-get-instances-iter-start")
query.child_add_string("objectname", na_object)
counters = NaServer.NaElement("counters")
for metric in LOCALMETRICS.keys():
counters.child_add_string("counter", metric)
query.child_add(counters)
res = server.invoke_elem(query)
if(res.results_status() == "failed"):
self.log.error("Connection to filer %s failed; %s" % (
device, res.results_reason()))
return
iter_tag = res.child_get_string("tag")
num_records = 1
max_records = 100
# For some metrics there are dependencies between metrics for
# a single object, so we'll need to collect all, so we can do
# calculations later.
raw = {}
while(num_records != 0):
query = NaServer.NaElement(
"perf-object-get-instances-iter-next")
query.child_add_string("tag", iter_tag)
query.child_add_string("maximum", max_records)
res = server.invoke_elem(query)
if(res.results_status() == "failed"):
print "Connection to filer %s failed; %s" % (
device, res.results_reason())
return
num_records = res.child_get_int("records")
if(num_records > 0):
instances_list = res.child_get("instances")
instances = instances_list.children_get()
for instance in instances:
raw_name = unicodedata.normalize(
'NFKD',
instance.child_get_string("name")).encode(
'ascii', 'ignore')
# Shorten the name for disks as they are very long and
# padded with zeroes, eg:
# 5000C500:3A236B0B:00000000:00000000:00000000:...
if na_object == "disk":
non_zero_blocks = [
block for block in raw_name.split(":")
if block != "00000000"
]
raw_name = "".join(non_zero_blocks)
instance_name = re.sub(r'\W', '_', raw_name)
counters_list = instance.child_get("counters")
counters = counters_list.children_get()
for counter in counters:
metricname = unicodedata.normalize(
'NFKD',
counter.child_get_string("name")).encode(
'ascii', 'ignore')
metricvalue = counter.child_get_string("value")
# We'll need a long complete pathname to not
# confuse self.derivative
pathname = ".".join([self.config["path_prefix"],
device, na_object,
instance_name, metricname])
raw[pathname] = int(metricvalue)
# Do the math
self.log.debug("Processing %i metrics for object %s" % (len(raw),
na_object))
# Since the derivative function both returns the derivative
# and saves a new point, we'll need to store all derivatives
# for local reference.
derivative = {}
for key in raw.keys():
derivative[key] = self.derivative(key, raw[key])
for key in raw.keys():
metricname = key.split(".")[-1]
prettyname = LOCALMETRICS[metricname]["prettyname"]
multiplier = LOCALMETRICS[metricname]["multiplier"]
if metricname in self.DROPMETRICS:
continue
elif metricname in self.DIVIDERS.keys():
self._gen_delta_depend(key, derivative, multiplier,
prettyname, device)
else:
self._gen_delta_per_sec(key, derivative[key], time_delta,
multiplier, prettyname, device)
Example 55
Project: sugar-toolkit-gtk3 Source File: bundlebuilder.py
def cmd_check(config, options):
"""Run tests for the activity"""
run_unit_test = True
run_integration_test = True
if options.choice == 'unit':
run_integration_test = False
if options.choice == 'integration':
run_unit_test = False
print "Running Tests"
test_path = os.path.join(config.source_dir, "tests")
if os.path.isdir(test_path):
unit_test_path = os.path.join(test_path, "unit")
integration_test_path = os.path.join(test_path, "integration")
sys.path.append(config.source_dir)
# Run Tests
if os.path.isdir(unit_test_path) and run_unit_test:
all_tests = unittest.defaultTestLoader.discover(unit_test_path)
unittest.TextTestRunner(verbosity=options.verbose).run(all_tests)
elif not run_unit_test:
print "Not running unit tests"
else:
print 'No "unit" directory found.'
if os.path.isdir(integration_test_path) and run_integration_test:
all_tests = unittest.defaultTestLoader.discover(
integration_test_path)
unittest.TextTestRunner(verbosity=options.verbose).run(all_tests)
elif not run_integration_test:
print "Not running integration tests"
else:
print 'No "integration" directory found.'
print "Finished testing"
else:
print "Error: No tests/ directory"
Example 56
Project: OpenSesame Source File: docstruct.py
def docpkg(folder, lvl=2):
"""
Documents a package.
Arguments:
folder -- The path to the package.
Keyword arguments:
lvl -- The depth in the hierarchy. (default=2)
Returns:
A full documentation string.
"""
md = u''
path = os.path.join(folder, u'__init__.py')
name = os.path.basename(folder)
full_name = folder.replace(u'/', '.')
header = u'#' * lvl
src = u'https://github.com/smathot/OpenSesame/blob/master/%s' % path
if not os.path.exists(path) or not ingit(path):
return md
sys.path.append(os.path.abspath(folder))
pkg = imp.load_source(u'dummy', path)
sys.path.pop()
doc = docstr(pkg)
contains = objcontains(pkg)
md += obj_doc % {u'header' : header, u'name' : name, u'full_name' : \
full_name, u'doc' : doc, u'src': src, u'type' : u'package', \
u'contains' : contains}
# Document modules
for fname in sorted(os.listdir(folder)):
path = os.path.join(folder, fname)
if path.endswith(u'.py') and fname != u'__init__.py' and ingit(path):
md += docmod(path, lvl+1)
# Document packages
for fname in sorted(os.listdir(folder)):
path = os.path.join(folder, fname)
if os.path.isdir(path):
md += docpkg(path, lvl+1)
return md
Example 57
Project: tensor Source File: service.py
def __init__(self, config):
self.running = 0
self.sources = []
self.lastEvents = {}
self.outputs = {}
self.evCache = {}
self.critical = {}
self.warn = {}
self.eventCounter = 0
self.factory = None
self.protocol = None
self.watchdog = None
self.config = config
both = lambda i1, i2, t: isinstance(i1, t) and isinstance(i2, t)
if os.path.exists('/var/lib/tensor'):
sys.path.append('/var/lib/tensor')
if 'include_path' in config:
ipath = config['include_path']
if os.path.exists(ipath):
files = [
os.path.join(ipath, f) for f in os.listdir(ipath)
if f[-4:] == '.yml'
]
for f in files:
conf = yaml.load(open(f, 'rt'))
for k,v in conf.items():
if k in self.config:
if both(v, self.config[k], dict):
# Merge dicts
for k2, v2 in v.items():
self.config[k][k2] = v2
elif both(v, self.config[k], list):
# Extend lists
self.config[k].extend(v)
else:
# Overwrite
self.config[k] = v
else:
self.config[k] = v
log.msg('Loaded additional configuration from %s' % f)
else:
log.msg('Config Error: include_path %s does not exist' % ipath)
# Read some config stuff
self.debug = float(self.config.get('debug', False))
self.ttl = float(self.config.get('ttl', 60.0))
self.stagger = float(self.config.get('stagger', 0.2))
# Backward compatibility
self.server = self.config.get('server', 'localhost')
self.port = int(self.config.get('port', 5555))
self.proto = self.config.get('proto', 'tcp')
self.inter = self.config.get('interval', 60.0)
if self.debug:
print "config:", repr(config)
self.setupSources(self.config)
Example 58
Project: yum Source File: yum-leak-test.py
def _leak_tst_ir():
print "Doing install/remove leak test. "
def _init():
yb = cli.YumBaseCli() # Need doTransaction() etc.
yb.preconf.debuglevel = 0
yb.preconf.errorlevel = 0
yb.repos.setCacheDir(yum.misc.getCacheDir())
yb.conf.assumeyes = True
yb.conf.downloadonly = False
return yb
sys.path.append('/usr/share/yum-cli')
import cli
yb = _init()
out_mem(os.getpid())
def _run(yb):
print " Run"
(code, msgs) = yb.buildTransaction()
if code == 1:
print "ERROR:", core, msgs
sys.exit(1)
returnval = yb.doTransaction()
if returnval != 0: # We could allow 1 too, but meh.
print "ERROR:", returnval
sys.exit(1)
yb.closeRpmDB()
last = None
while True:
if True:
yb = _init()
out_mem(os.getpid())
print " Install:", sys.argv[1:]
for pat in sys.argv[1:]:
yb.install(pattern=pat)
out_mem(os.getpid())
_run(yb)
out_mem(os.getpid())
print " Remove:", sys.argv[1:]
for pat in sys.argv[1:]:
yb.remove(pattern=pat)
out_mem(os.getpid())
_run(yb)
Example 59
Project: aetros-cli Source File: JobModel.py
def network_get_datasets(self, trainer):
datasets_dir = self.get_dataset_dir()
datasets = {}
from aetros.utils import get_option
from .auto_dataset import get_images, read_images_keras_generator, read_images_in_memory
# load placeholder, auto data
config = self.job['config']
for net in config['layer'][0]:
if 'datasetId' in net and net['datasetId']:
dataset = config['datasets'][net['datasetId']]
if not dataset:
raise Exception('Dataset of id %s does not exist. Available: %s' % (net['datasetId'], ','.join(list(config['datasets'].keys()))))
if dataset['type'] == 'images_upload' or dataset['type'] == 'images_search':
connected_to_net = self.get_connected_network(config['layer'], net)
if connected_to_net is None:
# this input is not in use, so we don't need to calculate its dataset
continue
datasets[net['datasetId']] = get_images(self, dataset, net, trainer)
elif dataset['type'] == 'images_local':
all_memory = get_option(dataset['config'], 'allMemory', False, 'bool')
if all_memory:
datasets[net['datasetId']] = read_images_in_memory(self, dataset, net, trainer)
else:
datasets[net['datasetId']] = read_images_keras_generator(self, dataset, net, trainer)
elif dataset['type'] == 'python':
name = dataset['id'].replace('/', '__')
sys.path.append(datasets_dir)
data_provider = __import__(name, '')
print("Imported dataset provider in %s " % (datasets_dir + '/' + name + '.py', ))
sys.path.pop()
datasets[dataset['id']] = data_provider.get_data()
return datasets
Example 60
def preprocess(self):
# files in directory
ignore = ['__init__.py']
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
sys.path.append(dname)
for afile in os.listdir(dname + "/preprocessor"):
if afile in ignore:
continue
if ".pyc" in afile:
continue
if len(afile.split(".")) > 2:
print "!" * 50
print "\t[!] Make sure there are no '.' in your preprocessor filename:", afile
print "!" * 50
return False
name = "preprocessor." + afile.strip(".py")
preprocessor_name = __import__( name, fromlist=[''])
if preprocessor_name.enabled is True:
print "[*] Executing preprocessor:", afile.strip(".py")
else:
continue
if preprocessor_name.file_format.lower() in ['elf', 'all']: #'elf', 'macho', 'mach-o']:
print '[*] Running preprocessor', os.path.splitext(afile)[0], "against", preprocessor_name.file_format, "formats"
else:
continue
# If any preprocessor asks to keep the temp file, honor it
if self.keep_temp is False:
self.keep_temp = preprocessor_name.keep_temp
# create tempfile here always
if self.tmp_file is None:
self.tmp_file = tempfile.NamedTemporaryFile()
self.tmp_file.write(open(self.FILE, 'rb').read())
self.tmp_file.seek(0)
print "[*] Creating temp file:", self.tmp_file.name
else:
print "[*] Using existing tempfile from prior preprocessor"
load_name = name + ".preprocessor"
preproc = self.loadthis(load_name)
m = preproc(self)
print "=" * 50
# execute preprocessor
result = m.run()
if result is False:
print "[!] Preprocessor Failure :("
print "=" * 50
# After running push it to BDF.
self.FILE = self.tmp_file.name[:]
# check for support after each modification
if preprocessor_name.recheck_support is True:
issupported = self.support_check()
if issupported is False:
print self.FILE, "is not supported."
return False
Example 61
Project: Omnik-Data-Logger Source File: OmnikExport.py
def run(self):
"""Get information from inverter and store is configured outputs."""
self.build_logger(self.config)
# Load output plugins
# Prepare path for plugin loading
sys.path.append(self.__expand_path('outputs'))
Plugin.config = self.config
Plugin.logger = self.logger
enabled_plugins = self.config.get('general', 'enabled_plugins')\
.split(',')
for plugin_name in enabled_plugins:
plugin_name = plugin_name.strip()
self.logger.debug('Importing output plugin ' + plugin_name)
__import__(plugin_name)
# Connect to inverter
ip = self.config.get('inverter', 'ip')
port = self.config.get('inverter', 'port')
for res in socket.getaddrinfo(ip, port, socket.AF_INET,
socket.SOCK_STREAM):
family, socktype, proto, canonname, sockadress = res
try:
self.logger.info('connecting to {0} port {1}'.format(ip, port))
inverter_socket = socket.socket(family, socktype, proto)
inverter_socket.settimeout(10)
inverter_socket.connect(sockadress)
except socket.error as msg:
self.logger.error('Could not open socket')
self.logger.error(msg)
sys.exit(1)
wifi_serial = self.config.getint('inverter', 'wifi_sn')
inverter_socket.sendall(OmnikExport.generate_string(wifi_serial))
data = inverter_socket.recv(1024)
inverter_socket.close()
msg = InverterMsg.InverterMsg(data)
self.logger.info("ID: {0}".format(msg.id))
for plugin in Plugin.plugins:
self.logger.debug('Run plugin ' + plugin.__class__.__name__)
plugin.process_message(msg)
Example 62
Project: TrustRouter Source File: scriptutils.py
def RunScript(defName=None, defArgs=None, bShowDialog = 1, debuggingType=None):
global lastScript, lastArgs, lastDebuggingType
_debugger_stop_frame_ = 1 # Magic variable so the debugger will hide me!
# Get the debugger - may be None!
debugger = GetDebugger()
if defName is None:
try:
pathName = GetActiveFileName()
except KeyboardInterrupt:
return # User cancelled save.
else:
pathName = defName
if not pathName:
pathName = lastScript
if defArgs is None:
args = ''
if pathName==lastScript:
args = lastArgs
else:
args = defArgs
if debuggingType is None: debuggingType = lastDebuggingType
if not pathName or bShowDialog:
dlg = DlgRunScript(debugger is not None)
dlg['script'] = pathName
dlg['args'] = args
dlg['debuggingType'] = debuggingType
if dlg.DoModal() != win32con.IDOK:
return
script=dlg['script']
args=dlg['args']
debuggingType = dlg['debuggingType']
if not script: return
if debuggingType == RS_DEBUGGER_GO and debugger is not None:
# This may surprise users - they select "Run under debugger", but
# it appears not to! Only warn when they pick from the dialog!
# First - ensure the debugger is activated to pickup any break-points
# set in the editor.
try:
# Create the debugger, but _dont_ init the debugger GUI.
rd = debugger._GetCurrentDebugger()
except AttributeError:
rd = None
if rd is not None and len(rd.breaks)==0:
msg = "There are no active break-points.\r\n\r\nSelecting this debug option without any\r\nbreak-points is unlikely to have the desired effect\r\nas the debugger is unlikely to be invoked..\r\n\r\nWould you like to step-through in the debugger instead?"
rc = win32ui.MessageBox(msg, win32ui.LoadString(win32ui.IDR_DEBUGGER), win32con.MB_YESNOCANCEL | win32con.MB_ICONINFORMATION)
if rc == win32con.IDCANCEL:
return
if rc == win32con.IDYES:
debuggingType = RS_DEBUGGER_STEP
lastDebuggingType = debuggingType
lastScript = script
lastArgs = args
else:
script = pathName
# try and open the script.
if len(os.path.splitext(script)[1])==0: # check if no extension supplied, and give one.
script = script + '.py'
# If no path specified, try and locate the file
path, fnameonly = os.path.split(script)
if len(path)==0:
try:
os.stat(fnameonly) # See if it is OK as is...
script = fnameonly
except os.error:
fullScript = LocatePythonFile(script)
if fullScript is None:
win32ui.MessageBox("The file '%s' can not be located" % script )
return
script = fullScript
else:
path = win32ui.FullPath(path)
if not IsOnPythonPath(path): sys.path.append(path)
# py3k fun: If we use text mode to open the file, we get \r\n
# translated so Python allows the syntax (good!), but we get back
# text already decoded from the default encoding (bad!) and Python
# ignores any encoding decls (bad!). If we use binary mode we get
# the raw bytes and Python looks at the encoding (good!) but \r\n
# chars stay in place so Python throws a syntax error (bad!).
# So: do the binary thing and manually normalize \r\n.
try:
f = open(script, 'rb')
except IOError as exc:
win32ui.MessageBox("The file could not be opened - %s (%d)" % (exc.strerror, exc.errno))
return
# Get the source-code - as above, normalize \r\n
code = f.read().replace(byte_crlf, byte_lf).replace(byte_cr, byte_lf) + byte_lf
# Remember and hack sys.argv for the script.
oldArgv = sys.argv
sys.argv = ParseArgs(args)
sys.argv.insert(0, script)
# sys.path[0] is the path of the script
oldPath0 = sys.path[0]
newPath0 = os.path.split(script)[0]
if not oldPath0: # if sys.path[0] is empty
sys.path[0] = newPath0
insertedPath0 = 0
else:
sys.path.insert(0, newPath0)
insertedPath0 = 1
bWorked = 0
win32ui.DoWaitCursor(1)
base = os.path.split(script)[1]
# Allow windows to repaint before starting.
win32ui.PumpWaitingMessages()
win32ui.SetStatusText('Running script %s...' % base,1 )
exitCode = 0
from pywin.framework import interact
# Check the debugger flags
if debugger is None and (debuggingType != RS_DEBUGGER_NONE):
win32ui.MessageBox("No debugger is installed. Debugging options have been ignored!")
debuggingType = RS_DEBUGGER_NONE
# Get a code object - ignore the debugger for this, as it is probably a syntax error
# at this point
try:
codeObject = compile(code, script, "exec")
except:
# Almost certainly a syntax error!
_HandlePythonFailure("run script", script)
# No code object to run/debug.
return
__main__.__file__=script
try:
if debuggingType == RS_DEBUGGER_STEP:
debugger.run(codeObject, __main__.__dict__, start_stepping=1)
elif debuggingType == RS_DEBUGGER_GO:
debugger.run(codeObject, __main__.__dict__, start_stepping=0)
else:
# Post mortem or no debugging
exec(codeObject, __main__.__dict__)
bWorked = 1
except bdb.BdbQuit:
# Don't print tracebacks when the debugger quit, but do print a message.
print("Debugging session cancelled.")
exitCode = 1
bWorked = 1
except SystemExit as code:
exitCode = code
bWorked = 1
except KeyboardInterrupt:
# Consider this successful, as we don't want the debugger.
# (but we do want a traceback!)
if interact.edit and interact.edit.currentView:
interact.edit.currentView.EnsureNoPrompt()
traceback.print_exc()
if interact.edit and interact.edit.currentView:
interact.edit.currentView.AppendToPrompt([])
bWorked = 1
except:
if interact.edit and interact.edit.currentView:
interact.edit.currentView.EnsureNoPrompt()
traceback.print_exc()
if interact.edit and interact.edit.currentView:
interact.edit.currentView.AppendToPrompt([])
if debuggingType == RS_DEBUGGER_PM:
debugger.pm()
del __main__.__file__
sys.argv = oldArgv
if insertedPath0:
del sys.path[0]
else:
sys.path[0] = oldPath0
f.close()
if bWorked:
win32ui.SetStatusText("Script '%s' returned exit code %s" %(script, exitCode))
else:
win32ui.SetStatusText('Exception raised while running script %s' % base)
try:
sys.stdout.flush()
except AttributeError:
pass
win32ui.DoWaitCursor(0)
Example 63
Project: deepcca Source File: utils.py
def load_vc(dataset='../gitlab/voice-conversion/src/test/data/clb_slt_MCEP24_static_span0.data'):
import sys
sys.path.append('../gitlab/voice-conversion/src')
import voice_conversion
import pickle
f=open(dataset,'r')
vcdata=pickle.load(f)
x=vcdata['aligned_data1'][:,:24]
y=vcdata['aligned_data2'][:,:24]
num = x.shape[0]
st_train = 0
en_train = int(num * (64.0/200.0))
st_valid = en_train
en_valid = en_train+int(num * (36.0/200.0))
st_test = en_valid
en_test = num
x_mean = x[st_train:en_train,:].mean(axis=0)
y_mean = y[st_train:en_train,:].mean(axis=0)
x_std = x[st_train:en_train,:].std(axis=0)
y_std = y[st_train:en_train,:].std(axis=0)
x -= x_mean
y -= y_mean
x /= x_std
y /= y_std
import theano
train_set_x = theano.shared(numpy.asarray(x[st_train:en_train,:],
dtype=theano.config.floatX),
borrow=True)
train_set_y = theano.shared(numpy.asarray(y[st_train:en_train,:],
dtype=theano.config.floatX),
borrow=True)
test_set_x = theano.shared(numpy.asarray(x[st_test:en_test,:],
dtype=theano.config.floatX),
borrow=True)
test_set_y = theano.shared(numpy.asarray(y[st_test:en_test,:],
dtype=theano.config.floatX),
borrow=True)
valid_set_x = theano.shared(numpy.asarray(x[st_valid:en_valid,:],
dtype=theano.config.floatX),
borrow=True)
valid_set_y = theano.shared(numpy.asarray(y[st_valid:en_valid,:],
dtype=theano.config.floatX),
borrow=True)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval, x_mean, y_mean, x_std, y_std
Example 64
Project: system-config-printer Source File: applet.py
@dbus.service.method(PDS_IFACE, in_signature='isssss', out_signature='')
def NewPrinter (self, status, name, mfg, mdl, des, cmd):
if name.find("/") >= 0:
# name is a URI, no queue was generated, because no suitable
# driver was found
title = _("Missing printer driver")
devid = "MFG:%s;MDL:%s;DES:%s;CMD:%s;" % (mfg, mdl, des, cmd)
if (mfg and mdl) or des:
if (mfg and mdl):
device = "%s %s" % (mfg, mdl)
else:
device = des
text = _("No printer driver for %s.") % device
else:
text = _("No driver for this printer.")
n = Notify.Notification.new (title, text, 'printer')
if "actions" in Notify.get_server_caps():
n.set_urgency (Notify.Urgency.CRITICAL)
n.set_timeout (Notify.EXPIRES_NEVER)
n.add_action ("setup-printer", _("Search"),
lambda x, y:
self.setup_printer (x, y, name, devid))
else:
self.setup_printer (None, None, name, devid)
else:
# name is the name of the queue which hal_lpadmin has set up
# automatically.
c = cups.Connection ()
try:
printer = c.getPrinters ()[name]
except KeyError:
return
try:
filename = c.getPPD (name)
except cups.IPPError:
return
del c
# Check for missing packages
cups.ppdSetConformance (cups.PPD_CONFORM_RELAXED)
ppd = cups.PPD (filename)
import os
os.unlink (filename)
import sys
sys.path.append (APPDIR)
import cupshelpers
(missing_pkgs,
missing_exes) = cupshelpers.missingPackagesAndExecutables (ppd)
from cupshelpers.ppds import ppdMakeModelSplit
(make, model) = ppdMakeModelSplit (printer['printer-make-and-model'])
driver = make + " " + model
if status < self.STATUS_GENERIC_DRIVER:
title = _("Printer added")
else:
title = _("Missing printer driver")
if len (missing_pkgs) > 0:
pkgs = reduce (lambda x,y: x + ", " + y, missing_pkgs)
title = _("Install printer driver")
text = (_("`%s' requires driver installation: %s.") %
(name, pkgs))
n = Notify.Notification.new (title, text, 'printer')
import installpackage
if "actions" in Notify.get_server_caps():
try:
self.packagekit = installpackage.PackageKit ()
n.set_timeout (Notify.EXPIRES_NEVER)
n.add_action ("install-driver", _("Install"),
lambda x, y:
self.install_driver (x, y,
missing_pkgs))
except:
pass
else:
try:
self.packagekit = installpackage.PackageKit ()
self.packagekit.InstallPackageName (0, 0,
missing_pkgs[0])
except:
pass
elif status == self.STATUS_SUCCESS:
devid = "MFG:%s;MDL:%s;DES:%s;CMD:%s;" % (mfg, mdl, des, cmd)
text = _("`%s' is ready for printing.") % name
n = Notify.Notification.new (title, text, 'printer')
if "actions" in Notify.get_server_caps():
n.set_urgency (Notify.Urgency.NORMAL)
n.add_action ("test-page", _("Print test page"),
lambda x, y:
self.print_test_page (x, y, name))
n.add_action ("configure", _("Configure"),
lambda x, y: self.configure (x, y, name))
else: # Model mismatch
devid = "MFG:%s;MDL:%s;DES:%s;CMD:%s;" % (mfg, mdl, des, cmd)
text = (_("`%s' has been added, using the `%s' driver.") %
(name, driver))
n = Notify.Notification.new (title, text, 'printer')
if "actions" in Notify.get_server_caps():
n.set_urgency (Notify.Urgency.CRITICAL)
n.add_action ("test-page", _("Print test page"),
lambda x, y:
self.print_test_page (x, y, name, devid))
n.add_action ("find-driver", _("Find driver"),
lambda x, y:
self.find_driver (x, y, name, devid))
n.set_timeout (Notify.EXPIRES_NEVER)
else:
self.configure (None, None, name)
self.timeout_ready ()
n.show ()
self.notification = n
Example 65
Project: AZOrange Source File: runTests.py
def test(self):
self.addLog("*Testing AZOrange")
#run the defined AfterInstall tests
os.chdir(self.testsDir)
sys.path.append(self.testsDir)
if not self.testScript:
self.addLog("#No test Script defined in setup.ini ")
self.testsOK = False
return
else:
self.addLog("#Running the tests in Test suite")
if not os.path.isfile("./"+self.testScript):
self.addLog("#ERROR: " + self.testScript + " not found in the tests directory.")
self.testsOK = False
return
status = commands.getstatusoutput("./" + self.testScript)
lines = status[1].split("\n")
lines.pop(0)
testsLogFileText = ""
testsLogDir = []
try:
for line in lines:
if "LOGFILE:" in line and line[-1]!=":":
testsLogFile = open(line.split(":")[1])
testsLogDir.append(os.path.split(line.split(":")[1])[0])
testsLogFileText += testsLogFile.read()
testsLogFile.close()
except:
testsLogFileText = "Tests log file not found!"
if testsLogFileText == "":
testsLogFileText = "No LOGFILE returned from test script " + self.testScript
for logDir in testsLogDir:
if os.path.isdir(logDir):
statusRM = commands.getstatusoutput("rm -rf "+logDir)
if statusRM[0] != 0:
self.addLog("#WARNING: Could nor remove dir %s : %s" % (logDir,statusRM[1]))
else:
self.addLog("#WARNING: Dir '%s' not found" % logDir)
self.addLog((status[0], status[1] + "\n" + testsLogFileText))
logStr = ""
for line in lines:
if "[34m" in line:
#line = filter(lambda x: x in string.printable, line)
#line = re.sub(r'[0-9]* %.',r'',re.sub(r'\[[0-9]*m',r'',line))
if "SKIPPED" in line:
self.testsSkipped = True
#if "== Report ==" in line:
# break
line = filter(lambda x: x in string.printable, line)
line = re.sub(r'[0-9]* %.',r'',re.sub(r'\[[0-9]*m',r'',line))
logStr+=line.strip()+"\n"
self.addLog("#"+logStr)
Example 66
Project: woven Source File: woven-admin.py
def start_distribution(project_name, template_dir, dist, noadmin):
"""
Custom startproject command to override django default
"""
directory = os.getcwd()
# Check that the project_name cannot be imported.
try:
import_module(project_name)
except ImportError:
pass
else:
raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as a project name. Please try another name." % project_name)
#woven override
copy_helper('project', project_name, directory, dist, template_dir, noadmin)
#Create a random SECRET_KEY hash, and put it in the main settings.
main_settings_file = os.path.join(directory, dist, project_name, 'settings.py')
settings_contents = open(main_settings_file, 'r').read()
fp = open(main_settings_file, 'w')
secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
settings_contents = re.sub(r"(?<=SECRET_KEY = ')'", secret_key + "'", settings_contents)
fp.write(settings_contents)
fp.close()
#import settings and create start directories
sys.path.append(os.path.join(directory, dist))
s = import_module('.'.join([project_name,'settings']))
sys.path.pop()
if s.DATABASES['default']['ENGINE']=='django.db.backends.sqlite3':
if s.DATABASES['default']['NAME'] and not os.path.exists(s.DATABASES['default']['NAME']):
os.mkdir(os.path.dirname(s.DATABASES['default']['NAME']))
if s.STATIC_ROOT and os.path.isabs(s.STATIC_ROOT) and not os.path.exists(s.STATIC_ROOT):
os.mkdir(s.STATIC_ROOT)
if s.MEDIA_ROOT and os.path.isabs(s.MEDIA_ROOT) and not os.path.exists(s.MEDIA_ROOT):
os.mkdir(s.MEDIA_ROOT)
if s.TEMPLATE_DIRS:
for t in s.TEMPLATE_DIRS:
if not os.path.exists(t) and os.path.sep in t:
os.mkdir(t)
Example 67
Project: splunk-sdk-python Source File: test_examples.py
def test_analytics(self):
# We have to add the current path to the PYTHONPATH,
# otherwise the import doesn't work quite right
sys.path.append(os.getcwd())
import analytics
# Create a tracker
tracker = analytics.input.AnalyticsTracker(
"sdk-test", self.opts.kwargs, index = "sdk-test")
service = client.connect(**self.opts.kwargs)
# Before we start, we'll clean the index
index = service.indexes["sdk-test"]
index.clean()
tracker.track("test_event", distinct_id="abc123", foo="bar", abc="123")
tracker.track("test_event", distinct_id="123abc", abc="12345")
# Wait until the events get indexed
self.assertEventuallyTrue(lambda: index.refresh()['totalEventCount'] == '2', timeout=200)
# Now, we create a retriever to retrieve the events
retriever = analytics.output.AnalyticsRetriever(
"sdk-test", self.opts.kwargs, index = "sdk-test")
# Assert applications
applications = retriever.applications()
self.assertEquals(len(applications), 1)
self.assertEquals(applications[0]["name"], "sdk-test")
self.assertEquals(applications[0]["count"], 2)
# Assert events
events = retriever.events()
self.assertEqual(len(events), 1)
self.assertEqual(events[0]["name"], "test_event")
self.assertEqual(events[0]["count"], 2)
# Assert properties
expected_properties = {
"abc": 2,
"foo": 1
}
properties = retriever.properties("test_event")
self.assertEqual(len(properties), len(expected_properties))
for prop in properties:
name = prop["name"]
count = prop["count"]
self.assertTrue(name in expected_properties.keys())
self.assertEqual(count, expected_properties[name])
# Assert property values
expected_property_values = {
"123": 1,
"12345": 1
}
values = retriever.property_values("test_event", "abc")
self.assertEqual(len(values), len(expected_property_values))
for value in values:
name = value["name"]
count = value["count"]
self.assertTrue(name in expected_property_values.keys())
self.assertEqual(count, expected_property_values[name])
# Assert event over time
over_time = retriever.events_over_time(
time_range = analytics.output.TimeRange.MONTH)
self.assertEquals(len(over_time), 1)
self.assertEquals(len(over_time["test_event"]), 1)
self.assertEquals(over_time["test_event"][0]["count"], 2)
# Now that we're done, we'll clean the index
index.clean()
Example 68
Project: Prism Source File: __init__.py
def _search_plugins(self, path, is_core):
""" Searches for plugins in a specified folder """
if is_core:
logging.info('Finding core plugins')
else:
logging.info('Finding additional plugins')
sys.path.append(path)
for plugin_id in os.listdir(path):
if not plugin_id.startswith('prism_'):
continue
base_folder = os.path.join(path, plugin_id)
if not os.path.isfile(base_folder):
if not os.path.exists(os.path.join(base_folder, 'plugin.json')):
logging.error('Plugin does not have a plugin.json file. Offender: %s' % plugin_id)
continue
plugin_info = JSONConfig(base_folder, 'plugin.json', auto_save=False)
plugin_info['_id'] = plugin_id
plugin_info['id'] = plugin_info['_id'].split('_', 1)[1]
plugin_info['_is_core'] = is_core
plugin_info['_is_satisfied'] = True
plugin_info['_is_enabled'] = False
# Make the version readable
version = None
for i in plugin_info['version']:
if isinstance(i, int):
if version is None:
version = str(i)
else:
version += '.' + str(i)
else:
version += '-' + i
plugin_info['_version'] = plugin_info['version']
plugin_info['version'] = version
plugin_info['_dependencies'] = list()
self.available_plugins[plugin_id] = plugin_info
Example 69
Project: pyspace Source File: launch.py
def main():
#### Find pySPACE package and import it ####
# Determine path of current file
path = os.path.realpath(__file__)
# Move up to parent directory that contains the pySPACE tree
suffix = []
for i in range(3):
path, tail = os.path.split(path)
suffix.append(tail)
parent_dir = path
# Check proper directory structure
if suffix != ['launch.py', 'run', 'pySPACE']:
raise RuntimeError("Encountered incorrect directory structure. "
"launch.py needs to reside in $PARENT_DIR/pySPACE/run")
# Workaround for eegserver crashing after 255 open ports
# - Now it crashes after 4096 open ports ;-)
import resource
(fd1, fd2) = resource.getrlimit(resource.RLIMIT_NOFILE)
fd1 = 4096 if fd2 == resource.RLIM_INFINITY else fd2-1
resource.setrlimit(resource.RLIMIT_NOFILE, (fd1,fd2))
# ------------------------------------------------------
#########################################
### Parsing of command line arguments
usage = "Usage: %prog [BACKEND_SPECIFICATION] [--config <conf.yaml>] "\
"[--operation <operation.yaml> | --operation_chain <operation_chain.yaml>] "\
"[--profile]"\
" where BACKEND_SPECIFICATION can be --serial, --mcore, --loadl or --mpi"
parser = LaunchParser(usage=usage, epilog=epilog)
# Configuration
parser.add_option("-c", "--configuration",
default="config.yaml",
help="Choose the configuration file, which is looked up in PYSPACE_CONF_DIR",
action="store")
# Backends
parser.add_option("-s", "--serial", action="store_true", default=False,
help="Enables execution on the SerialBackend (one local process)")
parser.add_option("-m", "--mcore", action="store_true", default=False,
help="Enables execution on the MulticoreBackend (one process per CPU core)")
parser.add_option("-l", "--local", action="store_true", default=False,
help="Enables execution on the MulticoreBackend (one process per CPU core)")
parser.add_option("-i", "--mpi", action="store_true", default=False,
help="Enables execution via MPI")
parser.add_option("-L", "--loadl", action="store_true", default=False,
help="Enables execution via LoadLeveler.")
# Operation / operation chain
parser.add_option("-o", "--operation",
help="Chooses the operation that will be executed. The "
"operation specification file is looked up in "
"$SPEC_DIR/operations",
action="store")
parser.add_option("-O", "-C", "--operation_chain",
help="Chooses the operation chain that will be executed. "
"The operation chain specification file is looked up "
"in $SPEC_DIR/operation_chains",
action="store")
# Profiling
parser.add_option("-p", "--profile",
help="Profiles execution.",
action="store_true", default=False,)
(options, args) = parser.parse_args()
# Load configuration file
pySPACE.load_configuration(options.configuration)
if hasattr(pySPACE.configuration, "eeg_acquisition_dir"):
eeg_parent_dir =\
os.sep.join(pySPACE.configuration.eeg_acquisition_dir.split(os.sep)[:-1])
if not hasattr(pySPACE.configuration, "eeg_acquisition_dir"):
pySPACE.configuration.eeg_module_path = eeg_parent_dir
else:
eeg_parent_dir, tail = os.path.split(parent_dir)
eeg_parent_dir = os.path.join(eeg_parent_dir, "eeg_modules")
pySPACE.configuration.eeg_module_path = eeg_parent_dir
sys.path.append(eeg_parent_dir)
# Create backend
if options.serial:
default_backend = create_backend("serial")
elif options.mcore or options.local:
default_backend = create_backend("mcore")
elif options.mpi:
default_backend = create_backend("mpi")
elif options.loadl:
default_backend = create_backend("loadl")
else: # Falling back to serial backend
default_backend = create_backend("serial")
print(" --> Using backend: \n\t\t %s."%str(default_backend))
if not options.operation is None:
# Create operation for the given name
operation = create_operation_from_file(options.operation)
# Store current source code for later inspection
create_source_archive(archive_path=operation.get_output_directory())
if not options.profile:
# Execute the current operation
run_operation(default_backend, operation)
else:
# Execute and profile operation
cProfile.runctx('pySPACE.run_operation(default_backend, operation)',
globals(), locals(),
filename = operation.get_output_directory()\
+ os.sep + "profile.pstat")
elif not options.operation_chain is None:
# Create operation chain for the given name
operation_chain = create_operation_chain(options.operation_chain)
# Store current source code for later inspection
create_source_archive(archive_path=operation_chain.get_output_directory())
if not options.profile:
# Execute the current operation_chain
run_operation_chain(default_backend, operation_chain)
else:
# Execute and profile operation
cProfile.runctx('pySPACE.run_operation_chain(default_backend, operation_chain)',
globals(), locals(),
filename=operation_chain.get_output_directory()\
+ os.sep + "profile.pstat")
else:
parser.error("Neither operation chain nor operation specification file given!")
logging.shutdown()
# Stop logger thread in backend
default_backend._stop_logging()
del default_backend
Example 70
Project: PipelineConstructionSet Source File: sysGlobalMenu.py
def createTool(self):
moBuLogger.info('----------------------------------------')
moBuLogger.info('| Running Art Monkey v:%s |' % _VERSION)
moBuLogger.info('| Build #:%s |' % self.buildNumber)
moBuLogger.info('----------------------------------------')
pcsGlobalToolName = "Art Monkey"
tool = FBCreateUniqueTool(pcsGlobalToolName)
tool.StartSizeX = 610
tool.StartSizeY = 185
# Layout for the controls
x = FBAddRegionParam(5, FBAttachType.kFBAttachLeft, "")
y = FBAddRegionParam(0, FBAttachType.kFBAttachNone, "")
w = FBAddRegionParam(50, FBAttachType.kFBAttachRight, "")
h = FBAddRegionParam(85, FBAttachType.kFBAttachNone, "")
tool.AddRegion("main", "main", x, y, w, h)
# Add grid layout
grid = FBGridLayout()
tool.SetControl("main", grid)
col = 0
logo = FBButton()
logo.SetImageFileNames('%s/logo_small.jpg' % self.pcsImagePath)
logo.Caption = "yeah"
logo.OnClick.Add(self.openToolDocs)
grid.AddRange(logo, 0, 1, col, col)
grid.SetColWidth( col, 70 )
v = FBLabel()
v.Caption = 'v:%s' % _VERSION
v.Style = FBTextStyle.kFBTextStyleItalic
v.Justify = FBTextJustify.kFBTextJustifyCenter
grid.Add(v, 2, col)
col +=1
# create load button
loadB = FBButton()
loadB.Caption = "Load"
loadB.Look = FBButtonLook.kFBLookColorChange
loadB.OnClick.Add(self.KToolsCallback)
# create save button
saveB = FBButton()
saveB.Caption = "Save"
saveB.Look = FBButtonLook.kFBLookColorChange
saveB.OnClick.Add(self.KToolsCallback)
# create saveAs button
saveAsB = FBButton()
saveAsB.Caption = "SaveAs"
saveAsB.Look = FBButtonLook.kFBLookColorChange
saveAsB.OnClick.Add(self.KToolsCallback)
# add buttons to row 1,2,3 column 1
grid.Add(loadB, 0, col)
grid.Add(saveB, 1, col)
grid.Add(saveAsB, 2, col)
# want to fix the width of column 1 and 2 so the buttons are of a normal size
grid.SetColWidth( col, 40 )
col +=1
# create tab
tab = FBTabControl()
# we want the tab to span from row 0 to row 3 and from column 2 to column 2
grid.AddRange(tab, 0, 3, col, col)
# set the spacing between col0 and col1
grid.SetColSpacing(col, 20)
# now assign the rows and columns attributes
# Fixed the height of row 0 and row 2 so the label and the buttons have a normal height
grid.SetRowHeight(0, 20)
grid.SetRowHeight(1, 20)
grid.SetRowHeight(2, 20)
grid.SetRowHeight(3, 70)
# 1. Build a construction dictionary with {folder=[files]}
self.menuDict, ext = self.getMenuDic()
# 1.5 add top menu path for Load, Save, SaveAs
# menuTop = self.menuDict.keys()[0]
if not Path.modulePath(self.mobuMenuPath):
sys.path.append(self.mobuMenuPath)
# 2. Sorted Keys list
sKeys = []
for dr in self.menuDict.iterkeys():
sKeys.append(dr)
sKeys.sort()
for menu in sKeys:
menuName = str(Path(menu).basename())
# skip root and 'old'
if not menuName == 'old' and not menuName == 'menu':
tabLayout = FBGridLayout()
# lyt.default_space = 5
tabLayout.SetRegionTitle("My Title", "Title")
x = FBAddRegionParam(10, FBAttachType.kFBAttachLeft, "")
y = FBAddRegionParam(20, FBAttachType.kFBAttachTop, "")
w = FBAddRegionParam(140, FBAttachType.kFBAttachRight, "")
h = FBAddRegionParam(75, FBAttachType.kFBAttachBottom, "")
tabLayout.AddRegion(menuName, menuName, x, y, w, h)
# must add dir to sys.path so imp can find it
if not Path.modulePath(menu):
sys.path.append(menu)
row = 0
column = 0
colWidth = 0
colMax = {0:0, 1:0, 2:0, 3:0, 4:0}
for script in self.menuDict[menu]:
if not script == '__init__%s' % ext:
toolName = str(Path(script).namebase)
lTool = FBButton()
lTool.Caption = toolName
lTool.Justify = FBTextJustify.kFBTextJustifyCenter
# Make "Fix this!" red
if toolName == 'Fix this!':
lTool.Look = FBButtonLook.kFBLookColorChange
lTool.SetStateColor(FBButtonState.kFBButtonState0, mbCore.Red)
lTool.SetStateColor(FBButtonState.kFBButtonState1, mbCore.Red)
# this callBack is active and all buttons will run this "last" menuModule.run()
lTool.OnClick.Add(self.KToolsCallback)
tabLayout.Add(lTool, row, column)
# make column maximum of all rows
colWidth = len(toolName) * 7 + 2
if colWidth > colMax[column]:
colMax[column] = colWidth
tabLayout.SetColWidth(column, colMax[column])
tabLayout.SetRowHeight(row, 30)
# add button to tabbed sub-layout
tabLayout.Add(lTool, row, column)
# increment columns, rows
column += 1
if column > 3:
row += 1
column = 0
# add layouts to tabControl with name of dir
tab.Add(menuName, tabLayout)
# finish up tab
tab.SetContent(0)
tab.TabPanel.TabStyle = 0 # normal tabs
Example 71
Project: blog Source File: plot_error.py
def gen_data_plot(folder="weights", index=None, show_plot=True,
save_plot=None, save_paths=False, verbose=True):
files = sorted(glob.glob('%s/rnn*' % folder))
files = files[:index] if index is not None else files
# plot the values over time
vals = []
for ii, name in enumerate(files):
if verbose:
print(name)
name = name.split('err')[1]
name = name.split('.npz')[0]
vals.append(float(name))
vals = np.array(vals)
plt.figure(figsize=(10, 3))
ax = plt.subplot2grid((1, 3), (0, 0), colspan=2)
ax.loglog(vals)
ax.loglog(range(len(vals)), np.ones(len(vals)) * min(vals), 'r--')
ax.loglog(range(len(vals)), np.ones(len(vals)) * min(vals), 'r--')
plt.xlim([0, len(files)])
plt.ylim([10**-5, 10])
plt.title('AHF training error')
plt.xlabel('Training iterations')
plt.ylabel('Error')
plt.yscale('log')
# load in the weights and see how well they control the arm
dt = 1e-2
sig_len = 40
# HACK: append system path to have access to the arm code
# NOTE: Change this path to wherever your plant model is kept!
sys.path.append("../../../studywolf_control/studywolf_control/")
# from arms.two_link.arm_python import Arm as Arm
from arms.three_link.arm import Arm as Arm
if verbose:
print('Plant is: %s' % str(Arm))
arm = Arm(dt=dt)
from hessianfree import RNNet
from hessianfree.nonlinearities import (Tanh, Linear)
from train_hf_3link import PlantArm, gen_targets
rec_coeff = [1, 1]
rec_type = "sparse"
eps = 1e-6
num_states = arm.DOF * 2
targets = gen_targets(arm, sig_len=sig_len)
init_state = np.zeros((len(targets), num_states), dtype=np.float32)
init_state[:, :arm.DOF] = arm.init_q # set up the initial joint angles
plant = PlantArm(arm, targets=targets,
init_state=init_state, eps=eps)
index = -1 if index is None else index
W = np.load(files[index])['arr_0']
# make sure this network is the same as the one you trained!
net_size = 96
if '32' in folder:
net_size = 32
rnn = RNNet(shape=[num_states * 2,
net_size,
net_size,
num_states,
num_states],
layers=[Linear(), Tanh(), Tanh(), Linear(), plant],
debug=False,
rec_layers=[1, 2],
conns={0: [1, 2], 1: [2], 2: [3], 3: [4]},
W_rec_params={"coeff": rec_coeff, "init_type": rec_type},
load_weights=W,
use_GPU=False)
rnn.forward(plant, rnn.W)
states = np.asarray(plant.get_vecs()[0][:, :, num_states:])
targets = np.asarray(plant.get_vecs()[1])
def kin(q):
x = np.sum([arm.L[ii] * np.cos(np.sum(q[:, :ii+1], axis=1))
for ii in range(arm.DOF)], axis=0)
y = np.sum([arm.L[ii] * np.sin(np.sum(q[:, :ii+1], axis=1))
for ii in range(arm.DOF)], axis=0)
return x,y
ax = plt.subplot2grid((1, 3), (0, 2))
# plot start point
initx, inity = kin(init_state)
ax.plot(initx, inity, 'x', mew=10)
for jj in range(0, len(targets)):
# plot target
targetx, targety = kin(targets[jj])
ax.plot(targetx, targety, 'rx', mew=1)
# plot path
pathx, pathy = kin(states[jj, :, :])
path = np.hstack([pathx[:, None], pathy[:, None]])
if save_paths is True:
np.savez_compressed('end-effector position%.3i.npz' % int(jj/8),
array1=path)
ax.plot(path[:, 0], path[:, 1])
plt.tight_layout()
# plt.xlim([-.1, .1])
# plt.ylim([.25, .45])
plt.title('Hand trajectory')
plt.xlabel('x')
plt.ylabel('y')
if save_plot is not None:
plt.savefig(save_plot)
if show_plot is True:
plt.show()
plt.close()
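A note on the sys.path.append call in this example: the appended path is relative ("../../../studywolf_control/studywolf_control/"), so the import only works when the script is launched from the directory the author assumed. A minimal sketch that anchors the same path to the script file instead of the current working directory (the studywolf_control layout is this example's own assumption):
import os
import sys

# resolve the plant-model directory relative to this file, not the CWD
here = os.path.dirname(os.path.abspath(__file__))
plant_dir = os.path.join(here, "..", "..", "..",
                         "studywolf_control", "studywolf_control")
sys.path.append(os.path.abspath(plant_dir))
from arms.three_link.arm import Arm  # resolves against the appended path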
0
Example 72
Project: blog Source File: train_hf.py
def test_plant():
"""Example of a network using a dynamic plant as the output layer."""
eps = 1e-6 # value to use for finite differences computations
dt = 1e-2 # size of time step
sig_len = 40 # how many time steps to train over
batch_size = 32 # how many updates to perform with static input
num_batches = 20000 # how many batches to run total
import sys
# NOTE: Change to wherever you keep your arm models
sys.path.append("../../../studywolf_control/studywolf_control/")
from arms.two_link.arm_python import Arm as Arm
print('Plant is: %s' % str(Arm))
arm = Arm(dt=dt)
num_states = arm.DOF * 2 # our states are [positions, velocities]
targets = gen_targets(arm=arm, sig_len=sig_len) # target joint angles
init_state = np.zeros((len(targets), num_states), dtype=np.float32)
init_state[:, :arm.DOF] = arm.init_q # set up the initial joint angles
plant = PlantArm(arm=arm, targets=targets,
init_state=init_state, eps=eps)
# open up weights folder and check for saved weights
import glob
folder = 'weights'
files = sorted(glob.glob('%s/rnn*' % folder))
if len(files) > 0:
# if weights found, load them up and keep going from last trial
W = np.load(files[-1])['arr_0']
print('loading from %s' % files[-1])
last_trial = int(files[-1].split('%s/rnn_weights-trial' %
folder)[1].split('-err')[0])
print('last_trial: %i' % last_trial)
else:
# if no weights found, start fresh with new random seed
W = None
last_trial = -1
seed = np.random.randint(100000000)
print('seed : %i' % seed)
np.random.seed(seed)
# specify the network structure and loss functions
from hessianfree.loss_funcs import SquaredError, SparseL2
net_size = 32
rnn = RNNet(
# specify the number of nodes in each layer
shape=[num_states * 2,
net_size,
net_size,
num_states,
num_states],
# specify the function of the nodes in each layer
layers=[Linear(), Tanh(), Tanh(), Linear(), plant],
# specify the layers that have recurrent connections
rec_layers=[1, 2],
# specify the connections between layers
conns={0: [1, 2], 1: [2], 2: [3], 3: [4]},
# specify the loss function
loss_type=[
# squared error between plant output and targets
SquaredError()],
load_weights=W,
use_GPU=False)
# set up masking so that weights between network output
# and the plant aren't modified in learning, always = 1
offset, W_end, b_end = rnn.offsets[(3, 4)]
rnn.mask = np.zeros(rnn.W.shape, dtype=bool)
rnn.mask[offset:b_end] = True
rnn.W[offset:W_end] = np.eye(num_states, dtype=np.float32).flatten()
for ii in range(last_trial+1, num_batches):
print('=============================================')
print('training batch %i' % ii)
err = rnn.run_epochs(plant, None, max_epochs=batch_size,
optimizer=HessianFree(CG_iter=96,
init_damping=100))
# save the weights to file, track trial and error
err = rnn.best_error
name = '%s/rnn_weights-trial%04i-err%.5f' % (folder, ii, err)
np.savez_compressed(name, rnn.W)
print('=============================================')
print('network: %s' % name)
print('final error: %f' % err)
print('=============================================')
return rnn.best_error
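The trial number above is recovered by splitting the filename twice, which silently breaks if the folder or prefix changes. A hedged sketch that parses the example's own naming convention ('rnn_weights-trial%04i-err%.5f') with a regex instead:
import glob
import re

# matches e.g. weights/rnn_weights-trial0042-err0.01234.npz
WEIGHT_RE = re.compile(r"rnn_weights-trial(\d+)-err(\d+\.\d+)")

files = sorted(glob.glob('weights/rnn*'))
if files:
    match = WEIGHT_RE.search(files[-1])
    last_trial = int(match.group(1))   # 42
    last_err = float(match.group(2))   # 0.01234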
0
Example 73
def collect(self, device, ip, user, password):
"""
This function collects the metrics for one filer.
"""
sys.path.append(self.config['netappsdkpath'])
try:
import NaServer
except ImportError:
self.log.error("Unable to load NetApp SDK from %s" % (
self.config['netappsdkpath']))
return
# Set up the parameters
server = NaServer.NaServer(ip, 1, 3)
server.set_transport_type('HTTPS')
server.set_style('LOGIN')
server.set_admin_user(user, password)
# We're only able to query a single object at a time,
# so we'll loop over the objects.
for na_object in self.METRICS.keys():
# For easy reference later, generate a new dict for this object
LOCALMETRICS = {}
for metric in self.METRICS[na_object]:
metricname, prettyname, multiplier = metric
LOCALMETRICS[metricname] = {}
LOCALMETRICS[metricname]["prettyname"] = prettyname
LOCALMETRICS[metricname]["multiplier"] = multiplier
# Keep track of how much time has passed since we last checked
CollectTime = time.time()
time_delta = None
if na_object in self.LastCollectTime.keys():
time_delta = CollectTime - self.LastCollectTime[na_object]
self.LastCollectTime[na_object] = CollectTime
self.log.debug("Collecting metric of object %s" % na_object)
query = NaServer.NaElement("perf-object-get-instances-iter-start")
query.child_add_string("objectname", na_object)
counters = NaServer.NaElement("counters")
for metric in LOCALMETRICS.keys():
counters.child_add_string("counter", metric)
query.child_add(counters)
res = server.invoke_elem(query)
if(res.results_status() == "failed"):
self.log.error("Connection to filer %s failed; %s" % (
device, res.results_reason()))
return
iter_tag = res.child_get_string("tag")
num_records = 1
max_records = 100
# For some metrics there are dependencies between metrics of
# a single object, so we collect everything up front and do the
# calculations later.
raw = {}
while(num_records != 0):
query = NaServer.NaElement(
"perf-object-get-instances-iter-next")
query.child_add_string("tag", iter_tag)
query.child_add_string("maximum", max_records)
res = server.invoke_elem(query)
if(res.results_status() == "failed"):
print "Connection to filer %s failed; %s" % (
device, res.results_reason())
return
num_records = res.child_get_int("records")
if(num_records > 0):
instances_list = res.child_get("instances")
instances = instances_list.children_get()
for instance in instances:
raw_name = unicodedata.normalize(
'NFKD',
instance.child_get_string("name")).encode(
'ascii', 'ignore')
# Shorten the name for disks as they are very long and
# padded with zeroes, eg:
# 5000C500:3A236B0B:00000000:00000000:00000000:...
if na_object is "disk":
non_zero_blocks = [
block for block in raw_name.split(":")
if block != "00000000"
]
raw_name = "".join(non_zero_blocks)
instance_name = re.sub(r'\W', '_', raw_name)
counters_list = instance.child_get("counters")
counters = counters_list.children_get()
for counter in counters:
metricname = unicodedata.normalize(
'NFKD',
counter.child_get_string("name")).encode(
'ascii', 'ignore')
metricvalue = counter.child_get_string("value")
# We'll need a long complete pathname to not
# confuse self.derivative
pathname = ".".join([self.config["path_prefix"],
device, na_object,
instance_name, metricname])
raw[pathname] = int(metricvalue)
# Do the math
self.log.debug("Processing %i metrics for object %s" % (len(raw),
na_object))
# Since the derivative function both returns the derivative
# and saves a new point, we'll need to store all derivatives
# for local reference.
derivative = {}
for key in raw.keys():
derivative[key] = self.derivative(key, raw[key])
for key in raw.keys():
metricname = key.split(".")[-1]
prettyname = LOCALMETRICS[metricname]["prettyname"]
multiplier = LOCALMETRICS[metricname]["multiplier"]
if metricname in self.DROPMETRICS:
continue
elif metricname in self.DIVIDERS.keys():
self._gen_delta_depend(key, derivative, multiplier,
prettyname, device)
else:
self._gen_delta_per_sec(key, derivative[key], time_delta,
multiplier, prettyname, device)
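One hazard in this collector: collect() runs per filer per polling interval, so the SDK directory is appended to sys.path on every call and the list grows without bound. A minimal idempotent sketch (netappsdkpath is the example's own config key):
import sys

def ensure_on_path(path):
    """Append path to sys.path at most once, however often this is called."""
    if path and path not in sys.path:
        sys.path.append(path)

# usage, before `import NaServer`:
# ensure_on_path(self.config['netappsdkpath'])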
0
Example 74
Project: WAPT Source File: regsetup.py
def SetupCore(searchPaths):
"""Setup the core Python information in the registry.
This function makes no assumptions about the current state of sys.path.
After this function has completed, you should have access to the standard
Python library, and the standard Win32 extensions
"""
import sys
for path in searchPaths:
sys.path.append(path)
import os
import regutil, win32api,win32con
installPath, corePaths = LocatePythonCore(searchPaths)
# Register the core Pythonpath.
print corePaths
regutil.RegisterNamedPath(None, ';'.join(corePaths))
# Register the install path.
hKey = win32api.RegCreateKey(regutil.GetRootKey() , regutil.BuildDefaultPythonKey())
try:
# Core Paths.
win32api.RegSetValue(hKey, "InstallPath", win32con.REG_SZ, installPath)
finally:
win32api.RegCloseKey(hKey)
# Register the win32 core paths.
win32paths = os.path.abspath( os.path.split(win32api.__file__)[0]) + ";" + \
os.path.abspath( os.path.split(LocateFileName("win32con.py;win32con.pyc", sys.path ) )[0] )
# Python has builtin support for finding a "DLLs" directory, but
# not a PCBuild. Having it in the core paths means it is ignored when
# an EXE not in the Python dir is hosting us - so we add it as a named
# value
check = os.path.join(sys.prefix, "PCBuild")
if "64 bit" in sys.version:
check = os.path.join(check, "amd64")
if os.path.isdir(check):
regutil.RegisterNamedPath("PCBuild",check)
0
Example 75
def preprocess(self):
# files in directory
ignore = ['__init__.py']
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
sys.path.append(dname)
for afile in os.listdir(dname + "/preprocessor"):
if afile in ignore:
continue
if ".pyc" in afile:
continue
if len(afile.split(".")) > 2:
print "!" * 50
print "\t[!] Make sure there are no '.' in your preprocessor filename:", afile
print "!" * 50
return False
name = "preprocessor." + afile.strip(".py")
preprocessor_name = __import__( name, fromlist=[''])
if preprocessor_name.enabled is True:
print "[*] Executing preprocessor:", afile.strip(".py")
else:
continue
if preprocessor_name.file_format.lower() in ['macho', 'all']: #'elf', 'macho', 'mach-o']:
print '[*] Running preprocessor', afile.strip(".py"), "against", preprocessor_name.file_format, "formats"
else:
continue
# Allow any preprocessor to request keeping the temp file
if self.keep_temp is False:
self.keep_temp = preprocessor_name.keep_temp
# create tempfile here always
if self.tmp_file == None:
self.tmp_file = tempfile.NamedTemporaryFile()
self.tmp_file.write(open(self.FILE, 'rb').read())
self.tmp_file.seek(0)
print "[*] Creating temp file:", self.tmp_file.name
else:
print "[*] Using existing tempfile from prior preprocessor"
load_name = name + ".preprocessor"
preproc = self.loadthis(load_name)
m = preproc(self)
print "=" * 50
# execute preprocessor
result = m.run()
if result is False:
print "[!] Preprocessor Failure :("
print "=" * 50
# After running push it to BDF.
self.FILE = self.tmp_file.name[:]
# check for support after each modification
if preprocessor_name.recheck_support is True:
issupported = self.support_check()
if issupported is False:
print self.FILE, "is not supported."
return False
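A compact sketch of the same directory-scan-and-import step using importlib, with a safe suffix cut instead of str.strip (the 'preprocessor' package name comes from the example):
import importlib
import os

def discover_preprocessors(dname, ignore=('__init__.py',)):
    """Import every preprocessor module found under dname/preprocessor."""
    modules = []
    for afile in sorted(os.listdir(os.path.join(dname, "preprocessor"))):
        if afile in ignore or not afile.endswith(".py"):
            continue
        name = "preprocessor." + afile[:-3]  # cut the '.py' suffix, nothing else
        modules.append(importlib.import_module(name))
    return modules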
0
Example 76
Project: shiji Source File: shiji_admin.py
def cmd_docs(docs, template):
"Generate API docuementation."
doc_tree = {"versions" : {}}
sys.path.append(curr_dir)
api_module = __import__(docs)
doc_tree["api_name"] = api_module.api_name
for version in api_module.api_versions.keys():
doc_tree["versions"][version] = {"calls" : {}, "error_codes" : {}}
for member in inspect.getmembers(api_module.api_versions[version][1].calls):
if inspect.isclass(member[1]):
if issubclass(member[1], urldispatch.URLMatchJSONResource):
if member[1].routes:
api_call = {}
for sub_member in dir(member[1]):
if sub_member[:7] == "render_":
method_doc = getattr(member[1], sub_member).__doc__
if method_doc and sub_member[7:] != "HEAD" and method_doc[:14] != "Our super-JSON":
api_call[sub_member[7:]] = process_api_desc(getattr(member[1], sub_member).__doc__)
if isinstance(member[1].routes, str):
doc_tree["versions"][version]["calls"][process_route(member[1].routes)] = api_call
elif isinstance(member[1].routes, list):
route_list = ""
for route in member[1].routes:
route_list = route_list + "," + process_route(route)
doc_tree["versions"][version]["calls"][route_list] = api_call
for member in (inspect.getmembers(api_module.api_versions[version][1].errors) + \
inspect.getmembers(webapi)):
if inspect.isclass(member[1]):
if issubclass(member[1], webapi.APIError) and not member[1].__name__=="APIError":
if not member[1].exception_class:
raise Exception("Error Generation API Docs: Error class %s is missing an exception_class." % member[1].__name__)
errcode_contents = { "error_code" : member[1].error_code,
"exception_text" : member[1].exception_text }
doc_tree["versions"][version]["error_codes"][member[1].exception_class] = errcode_contents
if template.lower() == "json":
return json.dumps(doc_tree)
else:
try:
tmpl_doc = template_lookup.get_template("doc_api_%s.html.mako" % template)
return tmpl_doc.render(versions=doc_tree["versions"], year=date.today().year)
except mako.exceptions.TopLevelLookupException:
print "Error. No docuementation template '%s'." % template
sys.exit(TEMPLATE_ERR)
0
Example 77
def main():
"""
allows running the worker without web2py.py, by simply executing: python this.py
"""
parser = optparse.OptionParser()
parser.add_option(
"-w", "--worker_name", dest="worker_name", default=None,
help="start a worker with name")
parser.add_option(
"-b", "--heartbeat", dest="heartbeat", default=10,
type='int', help="heartbeat time in seconds (default 10)")
parser.add_option(
"-L", "--logger_level", dest="logger_level",
default=30,
type='int',
help="set debug output level (0-100, 0 means all, 100 means none;default is 30)")
parser.add_option("-E", "--empty-runs",
dest="max_empty_runs",
type='int',
default=0,
help="max loops with no grabbed tasks permitted (0 for never check)")
parser.add_option(
"-g", "--group_names", dest="group_names",
default='main',
help="comma separated list of groups to be picked by the worker")
parser.add_option(
"-f", "--db_folder", dest="db_folder",
default='/Users/mdipierro/web2py/applications/scheduler/databases',
help="location of the dal database folder")
parser.add_option(
"-u", "--db_uri", dest="db_uri",
default='sqlite://storage.sqlite',
help="database URI string (web2py DAL syntax)")
parser.add_option(
"-t", "--tasks", dest="tasks", default=None,
help="file containing task files, must define" +
"tasks = {'task_name':(lambda: 'output')} or similar set of tasks")
parser.add_option(
"-U", "--utc-time", dest="utc_time", default=False,
help="work with UTC timestamps"
)
(options, args) = parser.parse_args()
if not options.tasks or not options.db_uri:
print USAGE
if options.tasks:
path, filename = os.path.split(options.tasks)
if filename.endswith('.py'):
filename = filename[:-3]
sys.path.append(path)
print 'importing tasks...'
tasks = __import__(filename, globals(), locals(), [], -1).tasks
print 'tasks found: ' + ', '.join(tasks.keys())
else:
tasks = {}
group_names = [x.strip() for x in options.group_names.split(',')]
logging.getLogger().setLevel(options.logger_level)
print 'groups for this worker: ' + ', '.join(group_names)
print 'connecting to database in folder: ' + (options.db_folder or './')
print 'using URI: ' + options.db_uri
db = DAL(options.db_uri, folder=options.db_folder)
print 'instantiating scheduler...'
scheduler = Scheduler(db=db,
worker_name=options.worker_name,
tasks=tasks,
migrate=True,
group_names=group_names,
heartbeat=options.heartbeat,
max_empty_runs=options.max_empty_runs,
utc_time=options.utc_time)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
print 'starting main worker loop...'
scheduler.loop()
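The tasks-file handling above (split the path, append the directory, import, read the module's tasks dict) fits in a small helper; a hedged sketch that also restores sys.path afterwards:
import importlib
import os
import sys

def load_tasks(tasks_file):
    """Import a tasks file given by path and return its `tasks` dict."""
    path, filename = os.path.split(tasks_file)
    if filename.endswith('.py'):
        filename = filename[:-3]
    sys.path.append(path or '.')
    try:
        return importlib.import_module(filename).tasks
    finally:
        sys.path.pop()  # undo the append once the import is done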
0
Example 78
Project: sd-agent Source File: plugins.py
def check(self, agentConfig):
self.logger.debug('getPlugins: start')
plugin_directory = agentConfig.get('plugin_directory', None)
if plugin_directory:
self.logger.info(
'getPlugins: plugin_directory %s', plugin_directory)
if not os.access(plugin_directory, os.R_OK):
self.logger.warning(
'getPlugins: Plugin path %s is set but not readable by ' +
'agent. Skipping plugins.', plugin_directory)
return False
else:
self.logger.debug('getPlugins: plugin_directory not set')
return False
# Have we already imported the plugins?
# Only load the plugins once
if self.plugins is None:
self.logger.debug(
'getPlugins: initial load from %s', plugin_directory)
sys.path.append(plugin_directory)
self.plugins = []
plugins = []
# Loop through all the plugin files
for root, dirs, files in os.walk(plugin_directory):
for name in files:
self.logger.debug('getPlugins: considering: %s', name)
name = name.split('.', 1)
# Only pull in .py files (ignores others, inc .pyc files)
try:
if name[1] == 'py':
self.logger.debug(
'getPlugins: ' + name[0] + '.' + name[1] +
' is a plugin')
plugins.append(name[0])
except IndexError:
continue
# Loop through all the found plugins, import them then create new
# objects
for plugin_name in plugins:
self.logger.debug('getPlugins: loading %s', plugin_name)
plugin_path = os.path.join(
plugin_directory, '%s.py' % plugin_name)
if not os.access(plugin_path, os.R_OK):
self.logger.error(
'getPlugins: Unable to read %s so skipping this '
'plugin.', plugin_path)
continue
try:
# Import the plugin, but only from the plugin directory
# (ensures no conflicts with other module names elsewhere
# in the sys.path)
import imp
imported_plugin = imp.load_source(plugin_name, plugin_path)
self.logger.debug('getPlugins: imported %s', plugin_name)
# Find out the class name and then instantiate it
plugin_class = getattr(imported_plugin, plugin_name, None)
if plugin_class is None:
self.logger.info(
'getPlugins: Unable to locate class %s in %s, '
'skipping', plugin_name, plugin_path)
continue
try:
plugin_obj = plugin_class(
agentConfig, self.logger, self.raw_config)
except TypeError:
try:
plugin_obj = plugin_class(
agentConfig, self.logger)
except TypeError:
# Support older plugins.
plugin_obj = plugin_class()
self.logger.debug('getPlugins: instantiated %s', plugin_name)
# Store in class var so we can execute it again on the
# next cycle
self.plugins.append(plugin_obj)
except Exception:
self.logger.error(
'getPlugins (%s): exception = %s', plugin_name,
traceback.format_exc())
# Now execute the objects previously created
if self.plugins is not None:
self.logger.debug('getPlugins: executing plugins')
# Execute the plugins
output = {}
for plugin in self.plugins:
self.logger.info(
'getPlugins: executing %s', plugin.__class__.__name__)
try:
value = plugin.run()
if value:
output[plugin.__class__.__name__] = value
self.logger.debug(
'getPlugins: %s output: %s',
plugin.__class__.__name__,
output[plugin.__class__.__name__])
self.logger.info(
'getPlugins: executed %s',
plugin.__class__.__name__)
else:
self.logger.info(
'getPlugins: executed %s but returned no data',
plugin.__class__.__name__)
except Exception:
self.logger.error(
'getPlugins: exception = %s', traceback.format_exc())
self.logger.debug('getPlugins: returning')
# Each plugin should output a dictionary so we can convert it to
# JSON later
return output
else:
self.logger.debug('getPlugins: no plugins, returning false')
return False
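imp.load_source, used above for the import-by-path step, is gone in modern Python (the imp module was removed in 3.12). A sketch of the importlib equivalent for the same plugin-loading step:
import importlib.util

def load_plugin(plugin_name, plugin_path):
    """Load a module from an explicit file path (imp.load_source replacement)."""
    spec = importlib.util.spec_from_file_location(plugin_name, plugin_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module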
0
Example 79
Project: minos Source File: process_exit_monitor.py
def handle_event(payload):
'''
Execute the post script when the monitored events happen
'''
pheaders, pdata = childutils.eventdata(payload+'\n')
name_list = pheaders['groupname'].split('--')
if len(name_list) == 3:
service, cluster, job = name_list
else:
return None
childutils.pcomm.stderr(childutils.get_asctime()+' Process %(processname)s '
'in group %(groupname)s exited from state %(from_state)s. '
'Now execute the post script.\n' % pheaders)
supervisor_config_path = '%s/../supervisord.conf' % os.path.dirname(__file__)
if not os.path.exists(supervisor_config_path):
childutils.pcomm.stderr('Cannot find the config file: supervisord.conf.\n')
parser = ConfigParser.SafeConfigParser()
parser.read([supervisor_config_path])
sys.path.append('%s/../deployment' % os.path.dirname(__file__))
from rpcinterface import DEFAULT_APP_ROOT
app_root = parser.get('rpcinterface:deployment', 'app_root', DEFAULT_APP_ROOT)
reg_expr = JOB_INSTANCES_REGEX.match(job)
job = reg_expr.group('job')
if reg_expr.group('instance_id'):
instance_id = reg_expr.group('instance_id')
service_root = '%s/%s/%s/%s/%s' % (app_root, service, cluster, job, instance_id)
else:
service_root = '%s/%s/%s/%s' % (app_root, service, cluster, job)
if not os.path.exists('%s/post.sh' % service_root):
childutils.pcomm.stderr('No post.sh for %s found.\n' % service)
return None
cmd = ['/bin/bash', '%s/post.sh' % service_root]
subprocess.call(cmd)
0
Example 80
def __init__(self, ):
try:
try:
import gui
except ImportError:
sys.path.append(GUI2PY_PATH)
# import controls (fill the registry!)
from gui import Window
# import tools used by the designer
from gui.tools.inspector import InspectorPanel
from gui.tools.propeditor import PropertyEditorPanel
from gui.tools.designer import BasicDesigner
from gui.tools.toolbox import ToolBox
# create the windows and the property editor / inspector
log = sys.stdout
self.propeditor = PropertyEditorPanel(self, log)
self.inspector = InspectorPanel(self, self.propeditor, log)
self._mgr.AddPane(self.propeditor, aui.AuiPaneInfo().Name("propeditor").
Caption("Property Editor").Float().FloatingSize(wx.Size(400, 100)).
FloatingPosition(self.GetStartPosition()).DestroyOnClose(False).PinButton(True).
MinSize((100, 100)).Right().Bottom().MinimizeButton(True))
self._mgr.AddPane(self.inspector, aui.AuiPaneInfo().Name("inspector").
Caption("inspector").Float().FloatingSize(wx.Size(400, 100)).
FloatingPosition(self.GetStartPosition()).DestroyOnClose(False).PinButton(True).
MinSize((100, 100)).Right().Bottom().MinimizeButton(True))
self.toolbox = ToolBox(self)
self._mgr.AddPane(self.toolbox, aui.AuiPaneInfo().Name("toolbox").
ToolbarPane().Left().Position(2))
except ImportError, e:
self.ShowInfoBar(u"cannot import gui2py!: %s" % unicode(e),
flags=wx.ICON_ERROR, key="gui2py")
0
Example 81
Project: pyspider Source File: run.py
@click.group(invoke_without_command=True)
@click.option('-c', '--config', callback=read_config, type=click.File('r'),
help='a json file with default values for subcommands. {"webui": {"port":5001}}')
@click.option('--logging-config', default=os.path.join(os.path.dirname(__file__), "logging.conf"),
help="logging config file for built-in python logging module", show_default=True)
@click.option('--debug', envvar='DEBUG', default=False, is_flag=True, help='debug mode')
@click.option('--queue-maxsize', envvar='QUEUE_MAXSIZE', default=100,
help='maxsize of queue')
@click.option('--taskdb', envvar='TASKDB', callback=connect_db,
help='database url for taskdb, default: sqlite')
@click.option('--projectdb', envvar='PROJECTDB', callback=connect_db,
help='database url for projectdb, default: sqlite')
@click.option('--resultdb', envvar='RESULTDB', callback=connect_db,
help='database url for resultdb, default: sqlite')
@click.option('--message-queue', envvar='AMQP_URL',
help='connection url to message queue, '
'default: builtin multiprocessing.Queue')
@click.option('--amqp-url', help='[deprecated] amqp url for rabbitmq. '
'please use --message-queue instead.')
@click.option('--beanstalk', envvar='BEANSTALK_HOST',
help='[deprecated] beanstalk config for beanstalk queue. '
'please use --message-queue instead.')
@click.option('--phantomjs-proxy', envvar='PHANTOMJS_PROXY', help="phantomjs proxy ip:port")
@click.option('--data-path', default='./data', help='data dir path')
@click.option('--add-sys-path/--not-add-sys-path', default=True, is_flag=True,
help='add current working directory to python lib search path')
@click.version_option(version=pyspider.__version__, prog_name=pyspider.__name__)
@click.pass_context
def cli(ctx, **kwargs):
"""
A powerful spider system in python.
"""
if kwargs['add_sys_path']:
sys.path.append(os.getcwd())
logging.config.fileConfig(kwargs['logging_config'])
# get db from env
for db in ('taskdb', 'projectdb', 'resultdb'):
if kwargs[db] is not None:
continue
if os.environ.get('MYSQL_NAME'):
kwargs[db] = utils.Get(lambda db=db: connect_database(
'sqlalchemy+mysql+%s://%s:%s/%s' % (
db, os.environ['MYSQL_PORT_3306_TCP_ADDR'],
os.environ['MYSQL_PORT_3306_TCP_PORT'], db)))
elif os.environ.get('MONGODB_NAME'):
kwargs[db] = utils.Get(lambda db=db: connect_database(
'mongodb+%s://%s:%s/%s' % (
db, os.environ['MONGODB_PORT_27017_TCP_ADDR'],
os.environ['MONGODB_PORT_27017_TCP_PORT'], db)))
elif ctx.invoked_subcommand == 'bench':
if kwargs['data_path'] == './data':
kwargs['data_path'] += '/bench'
shutil.rmtree(kwargs['data_path'], ignore_errors=True)
os.mkdir(kwargs['data_path'])
if db in ('taskdb', 'resultdb'):
kwargs[db] = utils.Get(lambda db=db: connect_database('sqlite+%s://' % (db)))
else:
kwargs[db] = utils.Get(lambda db=db: connect_database('sqlite+%s:///%s/%s.db' % (
db, kwargs['data_path'], db[:-2])))
else:
if not os.path.exists(kwargs['data_path']):
os.mkdir(kwargs['data_path'])
kwargs[db] = utils.Get(lambda db=db: connect_database('sqlite+%s:///%s/%s.db' % (
db, kwargs['data_path'], db[:-2])))
kwargs['is_%s_default' % db] = True
# create folder for counter.dump
if not os.path.exists(kwargs['data_path']):
os.mkdir(kwargs['data_path'])
# message queue, compatible with old version
if kwargs.get('message_queue'):
pass
elif kwargs.get('amqp_url'):
kwargs['message_queue'] = kwargs['amqp_url']
elif os.environ.get('RABBITMQ_NAME'):
kwargs['message_queue'] = ("amqp://guest:guest@%(RABBITMQ_PORT_5672_TCP_ADDR)s"
":%(RABBITMQ_PORT_5672_TCP_PORT)s/%%2F" % os.environ)
elif kwargs.get('beanstalk'):
kwargs['message_queue'] = "beanstalk://%s/" % kwargs['beanstalk']
for name in ('newtask_queue', 'status_queue', 'scheduler2fetcher',
'fetcher2processor', 'processor2result'):
if kwargs.get('message_queue'):
kwargs[name] = utils.Get(lambda name=name: connect_message_queue(
name, kwargs.get('message_queue'), kwargs['queue_maxsize']))
else:
kwargs[name] = connect_message_queue(name, kwargs.get('message_queue'),
kwargs['queue_maxsize'])
# phantomjs-proxy
if kwargs.get('phantomjs_proxy'):
pass
elif os.environ.get('PHANTOMJS_NAME'):
kwargs['phantomjs_proxy'] = os.environ['PHANTOMJS_PORT_25555_TCP'][len('tcp://'):]
ctx.obj = utils.ObjectDict(ctx.obj or {})
ctx.obj['instances'] = []
ctx.obj.update(kwargs)
if ctx.invoked_subcommand is None and not ctx.obj.get('testing_mode'):
ctx.invoke(all)
return ctx
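A side note on the `lambda db=db: ...` calls above: the default argument pins the loop variable at definition time, whereas a plain `lambda: ... db ...` would see only the final loop value when invoked later. A short demonstration:
dbs = ('taskdb', 'projectdb', 'resultdb')
late = [lambda: db for db in dbs]          # all three close over the same db
bound = [lambda db=db: db for db in dbs]   # each one captures its own db

print([f() for f in late])    # ['resultdb', 'resultdb', 'resultdb']
print([f() for f in bound])   # ['taskdb', 'projectdb', 'resultdb']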
0
Example 82
Project: locality-sensitive-hashing Source File: doctests.py
def loadTestsFromFile(self, filename):
"""Load doctests from the file.
Tests are loaded only if filename's extension matches
configured doctest extension.
"""
if self.extension and anyp(filename.endswith, self.extension):
name = os.path.basename(filename)
dh = open(filename)
try:
doc = dh.read()
finally:
dh.close()
fixture_context = None
globs = {'__file__': filename}
if self.fixtures:
base, ext = os.path.splitext(name)
dirname = os.path.dirname(filename)
sys.path.append(dirname)
fixt_mod = base + self.fixtures
try:
fixture_context = __import__(
fixt_mod, globals(), locals(), ["nop"])
except ImportError, e:
log.debug(
"Could not import %s: %s (%s)", fixt_mod, e, sys.path)
log.debug("Fixture module %s resolved to %s",
fixt_mod, fixture_context)
if hasattr(fixture_context, 'globs'):
globs = fixture_context.globs(globs)
parser = doctest.DocTestParser()
test = parser.get_doctest(
doc, globs=globs, name=name,
filename=filename, lineno=0)
if test.examples:
case = DocFileCase(
test,
optionflags=self.optionflags,
setUp=getattr(fixture_context, 'setup_test', None),
tearDown=getattr(fixture_context, 'teardown_test', None),
result_var=self.doctest_result_var)
if fixture_context:
yield ContextList((case,), context=fixture_context)
else:
yield case
else:
yield False # no tests to load
0
Example 83
Project: mxnet-ssd Source File: evaluate_net.py
def evaluate_net(net, dataset, devkit_path, mean_pixels, data_shape,
model_prefix, epoch, ctx, year=None, sets='test',
batch_size=1, nms_thresh=0.5, force_nms=False):
"""
Evaluate entire dataset, basically a simple wrapper for detections
Parameters:
---------
dataset : str
name of dataset to evaluate
devkit_path : str
root directory of dataset
mean_pixels : tuple of float
(R, G, B) mean pixel values
data_shape : int
resize input data shape
model_prefix : str
load model prefix
epoch : int
load model epoch
ctx : mx.ctx
running context, mx.cpu() or mx.gpu(0)...
year : str or None
evaluate on which year's data
sets : str
evaluation set
batch_size : int
using batch_size for evaluation
nms_thresh : float
non-maximum suppression threshold
force_nms : bool
force suppress different categories
"""
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if dataset == "pascal":
if not year:
year = '2007'
imdb = PascalVoc(sets, year, devkit_path, shuffle=False, is_train=False)
data_iter = DetIter(imdb, batch_size, data_shape, mean_pixels,
rand_samplers=[], rand_mirror=False, is_train=False, shuffle=False)
sys.path.append(os.path.join(cfg.ROOT_DIR, 'symbol'))
net = importlib.import_module("symbol_" + net) \
.get_symbol(imdb.num_classes, nms_thresh, force_nms)
model_prefix += "_" + str(data_shape)
detector = Detector(net, model_prefix, epoch, data_shape, mean_pixels, batch_size, ctx)
logger.info("Start evaluation with {} images, be patient...".format(imdb.num_images))
detections = detector.detect(data_iter)
imdb.evaluate_detections(detections)
else:
raise NotImplementedError, "No support for dataset: " + dataset
0
Example 84
Project: pylons Source File: test_templating.py
def make_app(global_conf, full_stack=True, static_files=True, include_cache_middleware=False, attribsafe=False, **app_conf):
import pylons
import pylons.configuration as configuration
from pylons import url
from pylons.decorators import jsonify
from pylons.middleware import ErrorHandler, StatusCodeRedirect
from pylons.error import handle_mako_error
from pylons.wsgiapp import PylonsApp
root = os.path.dirname(os.path.abspath(__file__))
paths = dict(root=os.path.join(test_root, 'sample_controllers'), controllers=os.path.join(test_root, 'sample_controllers', 'controllers'),
templates=os.path.join(test_root, 'sample_controllers', 'templates'))
sys.path.append(test_root)
config = configuration.PylonsConfig()
config.init_app(global_conf, app_conf, package='sample_controllers', paths=paths)
map = Mapper(directory=config['pylons.paths']['controllers'])
map.connect('/{controller}/{action}')
config['routes.map'] = map
class AppGlobals(object): pass
config['pylons.app_globals'] = AppGlobals()
config['pylons.app_globals'].mako_lookup = TemplateLookup(
directories=paths['templates'], imports=['from markupsafe import escape']
)
if attribsafe:
config['pylons.strict_tmpl_context'] = False
app = PylonsApp(config=config)
app = RoutesMiddleware(app, config['routes.map'], singleton=False)
if include_cache_middleware:
app = CacheMiddleware(app, config)
app = SessionMiddleware(app, config)
if asbool(full_stack):
app = ErrorHandler(app, global_conf, **config['pylons.errorware'])
if asbool(config['debug']):
app = StatusCodeRedirect(app)
else:
app = StatusCodeRedirect(app, [401, 403, 404, 500])
app = RegistryManager(app)
app.config = config
return app
0
Example 85
def load_driver(conf):
'''
Return the driver module from the filename specified in the configuration
file with key configuration.scripts.driver.
'''
driver_file = conf.driver.get('file', None)
driver_module_name = conf.driver.get('module', None)
if driver_file and driver_module_name:
log.error('you should only specify one of file and module for driver '
'configuration')
exit(1)
if driver_module_name is not None:
driver_module = __import__(driver_module_name)
return driver_module.driver
expanded_path = os.path.expanduser(conf.driver.file)
directory, name = os.path.split(expanded_path)
sys.path.append(os.path.dirname(directory))
if hasattr(conf, 'filename'):
conf_directory, _ = os.path.split(conf.filename)
search_dirs = [directory, conf_directory]
else:
search_dirs = [directory]
module_name = os.path.splitext(name)[0]
try:
found_module = imp.find_module(module_name, search_dirs)
_, filename, _ = found_module
log.info('Loading boat driver from {}'.format(color(filename, 37)))
driver_module = imp.load_module('driver_module', *found_module)
log.info('Using \'{}\' as boat driver'.format(
color(type(driver_module.driver).__name__, 33)))
except Exception:
log.exception('Exception raised in boat driver module')
raise
finally:
found_module[0].close()
if not isinstance(driver_module.driver, BaseBoatdDriver):
log.error('Driver module does not instantiate BaseBoatdDriver')
sys.exit(1)
return driver_module.driver
0
Example 86
Project: scoop Source File: __main__.py
@staticmethod
def setupEnvironment(self=None):
"""Set the environment (argv, sys.path and module import) of
scoop.MAIN_MODULE.
"""
# get the module path in the Python path
sys.path.append(os.path.dirname(os.path.abspath(scoop.MAIN_MODULE)))
# Add the user arguments to argv
sys.argv = sys.argv[:1]
if self:
sys.argv += self.args.args
try:
if scoop.IS_ORIGIN:
_ = open(scoop.MAIN_MODULE, 'r')
user_module = None
else:
user_module = importFunction(
"SCOOP_WORKER",
scoop.MAIN_MODULE,
)
except FileNotFoundError as e:
# Could not find file
sys.stderr.write('{0}\nFile: {1}\nIn path: {2}\n'.format(
str(e),
scoop.MAIN_MODULE,
sys.path[-1],
)
)
sys.stderr.flush()
sys.exit(-1)
globs = {}
try:
attrlist = user_module.__all__
except AttributeError:
attrlist = dir(user_module)
for attr in attrlist:
globs[attr] = getattr(user_module, attr)
if self and scoop.IS_ORIGIN:
return {}
elif self:
return globs
return user_module
0
Example 87
def addpackage(sitedir, name, known_paths):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path
and add that to known_paths, or execute it if it starts with 'import '.
"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
with f:
for n, line in enumerate(f):
if line.startswith("#"):
continue
try:
if line.startswith(("import ", "import\t")):
exec line
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
except Exception as err:
print >>sys.stderr, "Error processing line {:d} of {}:\n".format(
n+1, fullname)
for record in traceback.format_exception(*sys.exc_info()):
for line in record.splitlines():
print >>sys.stderr, ' '+line
print >>sys.stderr, "\nRemainder of file ignored"
break
if reset:
known_paths = None
return known_paths
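In miniature, the two behaviors addpackage() applies to each .pth line, sketched with hypothetical paths (comment lines skipped, as above):
import os
import sys

sitedir = "/opt/site-packages"   # hypothetical site directory
pth_lines = ["extras/mylib\n",
             "import sys; sys.path.append('/opt/shared')\n"]
for line in pth_lines:
    if line.startswith(("import ", "import\t")):
        exec(line)               # executable .pth line
    else:
        sys.path.append(os.path.join(sitedir, line.rstrip()))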
0
Example 88
Project: imagrium Source File: test_compile_jy.py
def test_mtime_compile(self):
"""
This test exercises the mtime annotation that is now stored in Jython
compiled files. CPython already stores an mtime in its pyc files. To
exercise this functionality, I am writing a py file, compiling it,
setting the os modified time to a very low value on the source file,
then changing the py file after a small sleep. On CPython, this would
still cause a re-compile. In Jython before this fix it would not.
See http://bugs.jython.org/issue1024
"""
import time
os.mkdir(TESTFN)
try:
mod = "mod1"
source_path = os.path.join(TESTFN, "%s.py" % mod)
if is_jython:
compiled_path = os.path.join(TESTFN, "%s$py.class" % mod)
else:
compiled_path = os.path.join(TESTFN, "%s.pyc" % mod)
fp = open(source_path, "w")
fp.write("def foo(): return 'first'\n")
fp.close()
py_compile.compile(source_path)
#sleep so that the internal mtime is older for the next source write.
time.sleep(1)
fp = open(source_path, "w")
fp.write("def foo(): return 'second'\n")
fp.close()
# make sure the source file's mtime is artificially older than
# the compiled file's, so the recorded mtime no longer matches.
os.utime(source_path, (1,1))
sys.path.append(TESTFN)
import mod1
self.assertEquals(mod1.foo(), 'second')
finally:
shutil.rmtree(TESTFN)
0
Example 89
def setup_environ(settings_mod, original_settings_path=None):
"""
Configures the runtime environment. This can also be used by external
scripts wanting to set up a similar environment to manage.py.
Returns the project directory (assuming the passed settings module is
directly in the project directory).
The "original_settings_path" parameter is optional, but recommended, since
trying to work out the original path from the module can be problematic.
"""
# Add this project to sys.path so that it's importable in the conventional
# way. For example, if this file (manage.py) lives in a directory
# "myproject", this code would add "/path/to/myproject" to sys.path.
if '__init__.py' in settings_mod.__file__:
p = os.path.dirname(settings_mod.__file__)
else:
p = settings_mod.__file__
project_directory, settings_filename = os.path.split(p)
if project_directory == os.curdir or not project_directory:
project_directory = os.getcwd()
project_name = os.path.basename(project_directory)
# Strip filename suffix to get the module name.
settings_name = os.path.splitext(settings_filename)[0]
# Strip $py for Jython compiled files (like settings$py.class)
if settings_name.endswith("$py"):
settings_name = settings_name[:-3]
# Set DJANGO_SETTINGS_MODULE appropriately.
if original_settings_path:
os.environ['DJANGO_SETTINGS_MODULE'] = original_settings_path
else:
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.%s' % (project_name, settings_name)
# Import the project module. We add the parent directory to PYTHONPATH to
# avoid some of the path errors new users can have.
sys.path.append(os.path.join(project_directory, os.pardir))
project_module = import_module(project_name)
sys.path.pop()
return project_directory
0
Example 90
Project: maltrail Source File: update.py
def update_trails(server=None, force=False, offline=False):
"""
Update trails from feeds
"""
trails = {}
duplicates = {}
if server:
print "[i] retrieving trails from provided 'UPDATE_SERVER' server..."
_ = retrieve_content(server)
if not _:
exit("[!] unable to retrieve data from '%s'" % server)
else:
with _fopen(TRAILS_FILE, "w+b") as f:
f.write(_)
trails = load_trails()
trail_files = set()
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, "trails"))):
for filename in filenames:
trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))
if config.CUSTOM_TRAILS_DIR:
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, os.path.expanduser(config.CUSTOM_TRAILS_DIR)))):
for filename in filenames:
trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))
try:
if not os.path.isdir(USERS_DIR):
os.makedirs(USERS_DIR, 0755)
except Exception, ex:
exit("[!] something went wrong during creation of directory '%s' ('%s')" % (USERS_DIR, ex))
_chown(USERS_DIR)
if not trails and (force or not os.path.isfile(TRAILS_FILE) or (time.time() - os.stat(TRAILS_FILE).st_mtime) >= config.UPDATE_PERIOD or os.stat(TRAILS_FILE).st_size == 0 or any(os.stat(_).st_mtime > os.stat(TRAILS_FILE).st_mtime for _ in trail_files)):
print "[i] updating trails (this might take a while)..."
if not offline and (force or config.USE_FEED_UPDATES):
sys.path.append(os.path.abspath(os.path.join(ROOT_DIR, "trails", "feeds")))
filenames = sorted(glob.glob(os.path.join(sys.path[-1], "*.py")))
else:
filenames = []
sys.path.append(os.path.abspath(os.path.join(ROOT_DIR, "trails")))
filenames += [os.path.join(sys.path[-1], "static")]
filenames += [os.path.join(sys.path[-1], "custom")]
filenames = [_ for _ in filenames if "__init__.py" not in _]
for i in xrange(len(filenames)):
filename = filenames[i]
try:
module = __import__(os.path.basename(filename).split(".py")[0])
except (ImportError, SyntaxError), ex:
print "[x] something went wrong during import of feed file '%s' ('%s')" % (filename, ex)
continue
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "fetch":
print(" [o] '%s'%s" % (module.__url__, " " * 20 if len(module.__url__) < 20 else ""))
sys.stdout.write("[?] progress: %d/%d (%d%%)\r" % (i, len(filenames), i * 100 / len(filenames)))
sys.stdout.flush()
try:
results = function()
for item in results.items():
if item[0].startswith("www.") and '/' not in item[0]:
item = [item[0][len("www."):], item[1]]
if item[0] in trails:
if item[0] not in duplicates:
duplicates[item[0]] = set((trails[item[0]][1],))
duplicates[item[0]].add(item[1][1])
if not (item[0] in trails and (any(_ in item[1][0] for _ in LOW_PRIORITY_INFO_KEYWORDS) or trails[item[0]][1] in HIGH_PRIORITY_REFERENCES)) or (item[1][1] in HIGH_PRIORITY_REFERENCES and "history" not in item[1][0]) or any(_ in item[1][0] for _ in HIGH_PRIORITY_INFO_KEYWORDS):
trails[item[0]] = item[1]
if not results and "abuse.ch" not in module.__url__:
print "[x] something went wrong during remote data retrieval ('%s')" % module.__url__
except Exception, ex:
print "[x] something went wrong during processing of feed file '%s' ('%s')" % (filename, ex)
# basic cleanup
for key in trails.keys():
if key not in trails:
continue
if not key or re.search(r"\A(?i)\.?[a-z]+\Z", key) and not any(_ in trails[key][1] for _ in ("custom", "static")):
del trails[key]
continue
if re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", key):
if any(_ in trails[key][0] for _ in ("parking site", "sinkhole")) and key in duplicates:
del duplicates[key]
if trails[key][0] == "malware":
trails[key] = ("potential malware site", trails[key][1])
if trails[key][0] == "ransomware":
trails[key] = ("ransomware (malware)", trails[key][1])
if key.startswith("www.") and '/' not in key:
_ = trails[key]
del trails[key]
key = key[len("www."):]
if key:
trails[key] = _
if '?' in key:
_ = trails[key]
del trails[key]
key = key.split('?')[0]
if key:
trails[key] = _
if '//' in key:
_ = trails[key]
del trails[key]
key = key.replace('//', '/')
trails[key] = _
if key != key.lower():
_ = trails[key]
del trails[key]
key = key.lower()
trails[key] = _
if key in duplicates:
_ = trails[key]
others = sorted(duplicates[key] - set((_[1],)))
if others and " (+" not in _[1]:
trails[key] = (_[0], "%s (+%s)" % (_[1], ','.join(others)))
read_whitelist()
for key in trails.keys():
if check_whitelisted(key) or any(key.startswith(_) for _ in BAD_TRAIL_PREFIXES):
del trails[key]
elif re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", key) and cdn_ip(key):
del trails[key]
else:
try:
key.decode("utf8")
trails[key][0].decode("utf8")
trails[key][1].decode("utf8")
except UnicodeDecodeError:
del trails[key]
try:
if trails:
with _fopen(TRAILS_FILE, "w+b") as f:
writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)
for trail in trails:
writer.writerow((trail, trails[trail][0], trails[trail][1]))
except Exception, ex:
print "[x] something went wrong during trails file write '%s' ('%s')" % (TRAILS_FILE, ex)
print "[i] update finished%s" % (40 * " ")
return trails
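The cascade of key rewrites near the end (strip a leading 'www.', drop query strings, collapse '//', lower-case) can be read as one normalization function; a sketch:
def normalize_trail(key):
    """Mirror the update loop's canonicalization of a single trail key."""
    if key.startswith("www.") and '/' not in key:
        key = key[len("www."):]
    key = key.split('?')[0]       # drop any query string
    key = key.replace('//', '/')  # collapse doubled slashes
    return key.lower()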
0
Example 91
Project: conan Source File: loader.py
def _parse_file(self, conan_file_path):
""" From a given path, obtain the in memory python import module
"""
# If a precompiled file exists, delete it
if os.path.exists(conan_file_path + "c"):
os.unlink(conan_file_path + "c")
# Python 3
pycache = os.path.join(os.path.dirname(conan_file_path), "__pycache__")
if os.path.exists(pycache):
rmdir(pycache)
if not os.path.exists(conan_file_path):
raise NotFoundException("%s not found!" % conan_file_path)
filename = os.path.splitext(os.path.basename(conan_file_path))[0]
try:
current_dir = os.path.dirname(conan_file_path)
sys.path.append(current_dir)
old_modules = list(sys.modules.keys())
loaded = imp.load_source(filename, conan_file_path)
# Put all imported files under a new package name
module_id = uuid.uuid1()
added_modules = set(sys.modules).difference(old_modules)
for added in added_modules:
module = sys.modules[added]
if module:
folder = os.path.dirname(module.__file__)
if folder.startswith(current_dir):
module = sys.modules.pop(added)
sys.modules["%s.%s" % (module_id, added)] = module
except Exception:
import traceback
trace = traceback.format_exc().split('\n')
raise ConanException("Unable to load conanfile in %s\n%s" % (conan_file_path,
'\n'.join(trace[3:])))
finally:
sys.path.pop()
return loaded, filename
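The append-in-try / pop-in-finally pattern above generalizes nicely to a context manager; a minimal sketch:
import sys
from contextlib import contextmanager

@contextmanager
def temp_sys_path(path):
    """Temporarily make `path` importable, then restore sys.path."""
    sys.path.append(path)
    try:
        yield
    finally:
        sys.path.remove(path)

# usage:
# with temp_sys_path(current_dir):
#     loaded = imp.load_source(filename, conan_file_path)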
0
Example 92
def test_local_index(self):
# make sure the local index is used
# when easy_install looks for installed
# packages
new_location = tempfile.mkdtemp()
target = tempfile.mkdtemp()
egg_file = os.path.join(new_location, 'foo-1.0.egg-info')
f = open(egg_file, 'w')
try:
f.write('Name: foo\n')
finally:
f.close()
sys.path.append(target)
old_ppath = os.environ.get('PYTHONPATH')
os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path)
try:
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.install_dir = target
cmd.args = ['foo']
cmd.ensure_finalized()
cmd.local_index.scan([new_location])
res = cmd.easy_install('foo')
actual = os.path.normcase(os.path.realpath(res.location))
expected = os.path.normcase(os.path.realpath(new_location))
self.assertEqual(actual, expected)
finally:
sys.path.remove(target)
for basedir in [new_location, target, ]:
if not os.path.exists(basedir) or not os.path.isdir(basedir):
continue
try:
shutil.rmtree(basedir)
except:
pass
if old_ppath is not None:
os.environ['PYTHONPATH'] = old_ppath
else:
del os.environ['PYTHONPATH']
0
Example 93
Project: pymtl Source File: verilator_sim.py
def TranslationTool( model_inst, lint=False, enable_blackbox=False, verilator_xinit="zeros" ):
"""Translates a PyMTL model into Python-wrapped Verilog.
model_inst: an un-elaborated Model instance
lint: run verilator linter, warnings are fatal
(disables -Wno-lint flag)
enable_blackbox: also generate a .v file with black boxes
"""
model_inst.elaborate()
# Translate the PyMTL module to Verilog. If we've already done
# translation, check whether the source has changed.
model_name = model_inst.class_name
verilog_file = model_name + '.v'
temp_file = model_name + '.v.tmp'
c_wrapper_file = model_name + '_v.cpp'
py_wrapper_file = model_name + '_v.py'
lib_file = 'lib{}_v.so'.format( model_name )
obj_dir = 'obj_dir_' + model_name
blackbox_file = model_name + '_blackbox' + '.v'
vcd_en = True
vcd_file = ''
try:
vcd_en = ( model_inst.vcd_file != '' )
vcd_file = model_inst.vcd_file
except AttributeError:
vcd_en = False
# Write the output to a temporary file
with open( temp_file, 'w+' ) as fd:
verilog.translate( model_inst, fd, verilator_xinit=verilator_xinit )
# write Verilog with black boxes
if enable_blackbox:
with open( blackbox_file, 'w+' ) as fd:
verilog.translate( model_inst, fd, enable_blackbox=True, verilator_xinit=verilator_xinit )
# Check if the temporary file matches an existing file (caching)
cached = False
if ( exists(verilog_file)
and exists(py_wrapper_file)
and exists(lib_file)
and exists(obj_dir) ):
cached = filecmp.cmp( temp_file, verilog_file )
# if not cached:
# os.system( ' diff %s %s'%( temp_file, verilog_file ))
# Rename temp to actual output
os.rename( temp_file, verilog_file )
# Verilate the module only if we've updated the verilog source
if not cached:
#print( "NOT CACHED", verilog_file )
verilog_to_pymtl( model_inst, verilog_file, c_wrapper_file,
lib_file, py_wrapper_file, vcd_en, lint,
verilator_xinit )
#else:
# print( "CACHED", verilog_file )
# Use some trickery to import the verilated version of the model
sys.path.append( os.getcwd() )
__import__( py_wrapper_file[:-3] )
imported_module = sys.modules[ py_wrapper_file[:-3] ]
# Get the model class from the module, instantiate and elaborate it
model_class = imported_module.__dict__[ model_name ]
model_inst = model_class()
if vcd_en:
model_inst.vcd_file = vcd_file
return model_inst
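One caveat with the import trickery at the end: if the wrapper module was already imported earlier in the same process, `__import__` returns the cached module rather than the re-verilated one. A sketch that reloads in that case (the model name is hypothetical; naming follows the example):
import importlib
import os
import sys

py_wrapper_file = "MyModel_v.py"   # wrapper name as built above (hypothetical model)
module_name = py_wrapper_file[:-3] # drop the '.py' suffix

sys.path.append(os.getcwd())
if module_name in sys.modules:
    imported_module = importlib.reload(sys.modules[module_name])
else:
    imported_module = importlib.import_module(module_name)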
0
Example 94
Project: vext Source File: __init__.py
def load_specs():
bad_specs = set()
last_error = None
for fn in spec_files():
logger.debug("load spec: %s", fn)
if fn in bad_specs:
# Don't try and load the same bad spec twice
continue
try:
spec = open_spec(open(fn))
for module in spec['modules']:
logger.debug("allow module: %s", module)
allowed_modules.add(module)
for path_name in spec.get('extra_paths', []):
extra_path = get_extra_path(path_name)
if isdir(extra_path):
os.environ['PATH'] += env_t(os.pathsep + extra_path)
sys.path.append(extra_path)
added_dirs.add(extra_path)
else:
logger.warn("Could not add extra path: {0}".format(extra_path))
sys_sitedirs = getsyssitepackages()
for sys_sitedir in sys_sitedirs:
with fixup_paths():
for pth in [pth for pth in spec['pths'] or [] if pth]:
try:
logger.debug("open pth: %s", pth)
pth_file = join(sys_sitedir, pth)
addpackage(sys_sitedir, pth_file, added_dirs)
init_path() # TODO
except IOError as e:
# Path files are optional..
logging.debug('No pth found at %s', pth_file)
pass
except Exception as e:
bad_specs.add(fn)
err_msg = 'error loading spec %s: %s' % (fn, e)
if last_error != err_msg:
logging.error(err_msg)
last_error = err_msg
if bad_specs:
raise VextError('Error loading spec files: %s' % ', '.join(bad_specs))
0
Example 95
def test_local_index(self):
# make sure the local index is used
# when easy_install looks for installed
# packages
new_location = tempfile.mkdtemp()
target = tempfile.mkdtemp()
egg_file = os.path.join(new_location, 'foo-1.0.egg-info')
f = open(egg_file, 'w')
try:
f.write('Name: foo\n')
finally:
f.close()
sys.path.append(target)
old_ppath = os.environ.get('PYTHONPATH')
os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path)
try:
dist = Distribution()
dist.script_name = 'setup.py'
cmd = easy_install(dist)
cmd.install_dir = target
cmd.args = ['foo']
cmd.ensure_finalized()
cmd.local_index.scan([new_location])
res = cmd.easy_install('foo')
self.assertEqual(os.path.realpath(res.location),
os.path.realpath(new_location))
finally:
sys.path.remove(target)
for basedir in [new_location, target, ]:
if not os.path.exists(basedir) or not os.path.isdir(basedir):
continue
try:
shutil.rmtree(basedir)
except:
pass
if old_ppath is not None:
os.environ['PYTHONPATH'] = old_ppath
else:
del os.environ['PYTHONPATH']
0
Example 96
Project: virt-test Source File: kernelinstall.py
def _kernel_install_koji(self, kernel_koji_spec, kernel_deps_koji_spec,
need_reboot=True):
# Using hardcoded package names (the names are not expected to change)
# we avoid lookup errors due to SSL problems, so let's go with that.
for koji_package in ['koji', 'brewkoji']:
if not self.sm.check_installed(koji_package):
logging.debug("%s missing - trying to install", koji_package)
self.sm.install(koji_package)
sys.path.append(self.bindir)
try:
from staging import utils_koji
except ImportError:
from autotest.client.shared import utils_koji
# First, download packages via koji/brew
c = utils_koji.KojiClient()
deps_rpms = []
k_dep = utils_koji.KojiPkgSpec(text=kernel_deps_koji_spec)
logging.info('Fetching kernel dependencies: %s', kernel_deps_koji_spec)
c.get_pkgs(k_dep, self.bindir)
rpm_file_name_list = c.get_pkg_rpm_file_names(k_dep)
if len(rpm_file_name_list) == 0:
raise error.TestError("No packages on brew/koji match spec %s" %
kernel_deps_koji_spec)
dep_rpm_basename = rpm_file_name_list[0]
deps_rpms.append(os.path.join(self.bindir, dep_rpm_basename))
k = utils_koji.KojiPkgSpec(text=kernel_koji_spec)
logging.info('Fetching kernel: %s', kernel_koji_spec)
c.get_pkgs(k, self.bindir)
rpm_file_name_list = c.get_pkg_rpm_file_names(k)
if len(rpm_file_name_list) == 0:
raise error.TestError("No packages on brew/koji match spec %s" %
kernel_koji_spec)
kernel_rpm_basename = rpm_file_name_list[0]
kernel_rpm_path = os.path.join(self.bindir, kernel_rpm_basename)
# Then install kernel rpm packages.
self._kernel_install_rpm(kernel_rpm_path, deps_rpms, need_reboot)
0
Example 97
Project: pydy Source File: cython_code.py
def compile(self, tmp_dir=None, verbose=False):
"""Returns a function which evaluates the matrices.
Parameters
==========
tmp_dir : string
The path to an existing or non-existing directory where all of
the generated files will be stored.
verbose : boolean
If true the output of the completed compilation steps will be
printed.
"""
base_prefix = self.prefix
if tmp_dir is None:
codedir = tempfile.mkdtemp(".pydy_compile")
else:
codedir = os.path.abspath(tmp_dir)
if not os.path.exists(codedir):
os.makedirs(codedir)
self.prefix = '{}_{}'.format(base_prefix,
CythonMatrixGenerator._module_counter)
workingdir = os.getcwd()
os.chdir(codedir)
try:
sys.path.append(codedir)
self.write()
cmd = [sys.executable, self.prefix + '_setup.py', 'build_ext',
'--inplace']
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if verbose:
print(output.decode())
cython_module = importlib.import_module(self.prefix)
except:
raise Exception('Failed to compile and import Cython module.')
finally:
sys.path.remove(codedir)
CythonMatrixGenerator._module_counter += 1
os.chdir(workingdir)
if tmp_dir is None:
# rmtree fails on Windows with permissions errors, so skip the
# removal on Windows.
try:
shutil.rmtree(codedir)
except OSError:
pass
self.prefix = base_prefix
return getattr(cython_module, 'eval')
0
Example 98
Project: prosodic Source File: Dictionary.py
def __init__(self,lang):
import prosodic
dirself=prosodic.dir_prosodic
libfolder=os.path.join(dirself,'lib')
dictsfolder=os.path.join(dirself,'dicts')
self.config=prosodic.config
self.lang = lang
self.libfolder = libfolder
self.dictsfolder = os.path.join(dictsfolder,self.lang)
sys.path.append(self.dictsfolder)
self.language=""
self.getprep=False
self.booted=False
for filename in glob.glob(os.path.join(self.dictsfolder, self.lang+'*')):
self.language = filename.split(os.path.sep).pop().split(".")[0]
break
if not self.language:
exit('!! language could not be ascertained from files in '+self.dictsfolder+'. Please name your .tsv and/or .py dictionary file(s) using a string which begins with the two characters which serve as the name for the dictionary folder (eg, "en")')
self.unstressedWords=[]
for filename in glob.glob(os.path.join(self.dictsfolder, 'unstressed*')):
file=codecs.open(filename,encoding='utf-8')
for ln in file:
for word in ln.split():
self.unstressedWords.append(word)
file.close()
break
self.maybestressedWords=[]
for filename in glob.glob(os.path.join(self.dictsfolder, 'maybestressed*')):
file=codecs.open(filename,encoding='utf-8')
for ln in file:
for word in ln.split():
self.maybestressedWords.append(word)
file.close()
break
pyfile=os.path.join(self.dictsfolder,self.language+'.py')
if os.path.exists(pyfile):
self.getprep=get_class(self.language+'.get')
self.cachefolder=os.path.join(self.dictsfolder,'_cache')
self.dictentries=None
build=False
## language objects
timestart=time.clock()
if being.persists:
if __name__=='__main__':
print "## booting ontology: " + self.language + " ..."
if not os.path.exists(self.cachefolder):os.mkdir(self.cachefolder)
self.storage = FileStorage(os.path.join(self.cachefolder, 'ontology.zodb'))
self.db = DB(self.storage)
self.conn = self.db.open()
self.dict = self.conn.root()
self.t=transaction
if not len(self.dict.values()):
build=True
else:
self.dict={}
self.refresh()
topickle=self.exists_pickle()
topickle=False
if topickle:
self.boot_pickle(topickle)
else:
build=True
if build:
self.refresh()
self.boot()
if __name__=='__main__':
print self.stats(prefix="\t").replace("[[time]]",str(round((time.clock() - timestart),2)))
0
Example 99
Project: king-phisher Source File: client_rpc.py
def vte_child_routine(config):
"""
This is the method which is executed within the child process spawned
by VTE. It expects additional values to be set in the *config*
object so it can initialize a new :py:class:`.KingPhisherRPCClient`
instance. It will then drop into an interpreter where the user may directly
interact with the rpc object.
:param str config: A JSON encoded client configuration.
"""
config = json_ex.loads(config)
try:
import readline
import rlcompleter # pylint: disable=unused-variable
except ImportError:
pass
else:
readline.parse_and_bind('tab: complete')
for plugins_directory in ('rpc_plugins', 'rpc-plugins'):
plugins_directory = find.find_data_directory(plugins_directory)
if not plugins_directory:
continue
sys.path.append(plugins_directory)
headers = config['rpc_data'].pop('headers')
rpc = KingPhisherRPCClient(**config['rpc_data'])
if rpc.headers is None:
rpc.headers = {}
for name, value in headers.items():
rpc.headers[str(name)] = str(value)
banner = "Python {0} on {1}".format(sys.version, sys.platform)
print(banner) # pylint: disable=superfluous-parens
information = "Campaign Name: '{0}' ID: {1}".format(config['campaign_name'], config['campaign_id'])
print(information) # pylint: disable=superfluous-parens
console_vars = {
'CAMPAIGN_NAME': config['campaign_name'],
'CAMPAIGN_ID': config['campaign_id'],
'os': os,
'rpc': rpc,
'sys': sys
}
export_to_builtins = ['CAMPAIGN_NAME', 'CAMPAIGN_ID', 'rpc']
console = code.InteractiveConsole(console_vars)
for var in export_to_builtins:
console.push("__builtins__['{0}'] = {0}".format(var))
console.interact('The \'rpc\' object holds the connected KingPhisherRPCClient instance')
return
0
Example 100
Project: LTLMoP Source File: specCompiler.py
def _writeLTLFile(self):
    self.LTL2SpecLineNumber = None
    #regionList = [r.name for r in self.parser.proj.rfi.regions]
    regionList = [r.name for r in self.proj.rfi.regions]
    sensorList = deepcopy(self.proj.enabled_sensors)
    robotPropList = self.proj.enabled_actuators + self.proj.all_customs
    text = self.proj.specText
    response = None

    # Create LTL using selected parser
    # TODO: rename decomposition object to something other than 'parser'
    if self.proj.compile_options["parser"] == "slurp":
        # default to no region tags if no simconfig is defined, so we can compile without
        if self.proj.current_config == "":
            region_tags = {}
        else:
            self.hsub = handlerSubsystem.HandlerSubsystem(None, self.proj.project_root)
            config, success = self.hsub.loadConfigFile(self.proj.current_config)
            if success: self.hsub.configs.append(config)
            self.hsub.setExecutingConfig(self.proj.current_config)
            region_tags = self.hsub.executing_config.region_tags

        # Hack: We need to make sure there's only one of these
        global _SLURP_SPEC_GENERATOR

        # Make a new specgenerator and have it process the text
        if not _SLURP_SPEC_GENERATOR:
            # Add SLURP to path for import
            p = os.path.dirname(os.path.abspath(__file__))
            sys.path.append(os.path.join(p, "..", "etc", "SLURP"))
            from ltlbroom.specgeneration import SpecGenerator
            _SLURP_SPEC_GENERATOR = SpecGenerator()

        # Filter out regions it shouldn't know about
        filtered_regions = [region.name for region in self.proj.rfi.regions
                            if not (region.isObstacle or region.name.lower() == "boundary")]
        LTLspec_env, LTLspec_sys, self.proj.internal_props, internal_sensors, results, responses, traceback = \
            _SLURP_SPEC_GENERATOR.generate(text, sensorList, filtered_regions, robotPropList, region_tags)

        oldspec_env = LTLspec_env
        oldspec_sys = LTLspec_sys

        for ln, result in enumerate(results):
            if not result:
                logging.warning("Could not parse the sentence in line {0}".format(ln))

        # Abort compilation if there were any errors
        if not all(results):
            return None, None, responses

        # Add in the sensors so they go into the SMV and spec files
        for s in internal_sensors:
            if s not in sensorList:
                sensorList.append(s)
                self.proj.all_sensors.append(s)
                self.proj.enabled_sensors.append(s)

        # Conjoin all the spec chunks
        LTLspec_env = '\t\t' + ' & \n\t\t'.join(LTLspec_env)
        LTLspec_sys = '\t\t' + ' & \n\t\t'.join(LTLspec_sys)

        if self.proj.compile_options["decompose"]:
            # substitute decomposed region names
            for r in self.proj.rfi.regions:
                if not (r.isObstacle or r.name.lower() == "boundary"):
                    LTLspec_env = re.sub('\\bs\.' + r.name + '\\b', "("+' | '.join(["s."+x for x in self.parser.proj.regionMapping[r.name]])+")", LTLspec_env)
                    LTLspec_env = re.sub('\\be\.' + r.name + '\\b', "("+' | '.join(["e."+x for x in self.parser.proj.regionMapping[r.name]])+")", LTLspec_env)
                    LTLspec_sys = re.sub('\\bs\.' + r.name + '\\b', "("+' | '.join(["s."+x for x in self.parser.proj.regionMapping[r.name]])+")", LTLspec_sys)
                    LTLspec_sys = re.sub('\\be\.' + r.name + '\\b', "("+' | '.join(["e."+x for x in self.parser.proj.regionMapping[r.name]])+")", LTLspec_sys)

        response = responses

    elif self.proj.compile_options["parser"] == "ltl":
        # delete comments
        text = re.sub(r"#.*$", "", text, flags=re.MULTILINE)

        # split into env and sys parts (by looking for a line of just dashes in between)
        LTLspec_env, LTLspec_sys = re.split(r"^\s*-+\s*$", text, maxsplit=1, flags=re.MULTILINE)

        # split into subformulas
        LTLspec_env = re.split(r"(?:[ \t]*[\n\r][ \t]*)+", LTLspec_env)
        LTLspec_sys = re.split(r"(?:[ \t]*[\n\r][ \t]*)+", LTLspec_sys)

        # remove any empty initial entries (HACK?)
        while '' in LTLspec_env:
            LTLspec_env.remove('')
        while '' in LTLspec_sys:
            LTLspec_sys.remove('')

        # automatically conjoin all the subformulas
        LTLspec_env = '\t\t' + ' & \n\t\t'.join(LTLspec_env)
        LTLspec_sys = '\t\t' + ' & \n\t\t'.join(LTLspec_sys)

        if self.proj.compile_options["decompose"]:
            # substitute decomposed region
            for r in self.proj.rfi.regions:
                if not (r.isObstacle or r.name.lower() == "boundary"):
                    LTLspec_env = re.sub('\\b(?:s\.)?' + r.name + '\\b', "("+' | '.join(["s."+x for x in self.parser.proj.regionMapping[r.name]])+")", LTLspec_env)
                    LTLspec_sys = re.sub('\\b(?:s\.)?' + r.name + '\\b', "("+' | '.join(["s."+x for x in self.parser.proj.regionMapping[r.name]])+")", LTLspec_sys)
        else:
            for r in self.proj.rfi.regions:
                if not (r.isObstacle or r.name.lower() == "boundary"):
                    LTLspec_env = re.sub('\\b(?:s\.)?' + r.name + '\\b', "s."+r.name, LTLspec_env)
                    LTLspec_sys = re.sub('\\b(?:s\.)?' + r.name + '\\b', "s."+r.name, LTLspec_sys)

        traceback = []  # HACK: needs to be something other than None

    elif self.proj.compile_options["parser"] == "structured":
        import parseEnglishToLTL

        if self.proj.compile_options["decompose"]:
            # substitute the regions name in specs
            for m in re.finditer(r'near (?P<rA>\w+)', text):
                text = re.sub(r'near (?P<rA>\w+)', "("+' or '.join(["s."+r for r in self.parser.proj.regionMapping['near$'+m.group('rA')+'$'+str(50)]])+")", text)
            for m in re.finditer(r'within (?P<dist>\d+) (from|of) (?P<rA>\w+)', text):
                text = re.sub(r'within ' + m.group('dist')+' (from|of) '+ m.group('rA'), "("+' or '.join(["s."+r for r in self.parser.proj.regionMapping['near$'+m.group('rA')+'$'+m.group('dist')]])+")", text)
            for m in re.finditer(r'between (?P<rA>\w+) and (?P<rB>\w+)', text):
                text = re.sub(r'between ' + m.group('rA')+' and '+ m.group('rB'), "("+' or '.join(["s."+r for r in self.parser.proj.regionMapping['between$'+m.group('rA')+'$and$'+m.group('rB')+"$"]])+")", text)

            # substitute decomposed region
            for r in self.proj.rfi.regions:
                if not (r.isObstacle or r.name.lower() == "boundary"):
                    text = re.sub('\\b' + r.name + '\\b', "("+' | '.join(["s."+x for x in self.parser.proj.regionMapping[r.name]])+")", text)
            regionList = ["s."+x.name for x in self.parser.proj.rfi.regions]
        else:
            for r in self.proj.rfi.regions:
                if not (r.isObstacle or r.name.lower() == "boundary"):
                    text = re.sub('\\b' + r.name + '\\b', "s."+r.name, text)
            regionList = ["s."+x.name for x in self.proj.rfi.regions]

        spec, traceback, failed, self.LTL2SpecLineNumber, self.proj.internal_props = parseEnglishToLTL.writeSpec(text, sensorList, regionList, robotPropList)

        # Abort compilation if there were any errors
        if failed:
            return None, None, None

        LTLspec_env = spec["EnvInit"] + spec["EnvTrans"] + spec["EnvGoals"]
        LTLspec_sys = spec["SysInit"] + spec["SysTrans"] + spec["SysGoals"]

    else:
        logging.error("Parser type '{0}' not currently supported".format(self.proj.compile_options["parser"]))
        return None, None, None

    if self.proj.compile_options["decompose"]:
        regionList = [x.name for x in self.parser.proj.rfi.regions]
    else:
        regionList = [x.name for x in self.proj.rfi.regions]

    if self.proj.compile_options["use_region_bit_encoding"]:
        # Define the number of bits needed to encode the regions
        numBits = int(math.ceil(math.log(len(regionList), 2)))

        # creating the region bit encoding
        bitEncode = bitEncoding(len(regionList), numBits)
        currBitEnc = bitEncode['current']
        nextBitEnc = bitEncode['next']

        # switch to bit encodings for regions
        LTLspec_env = replaceRegionName(LTLspec_env, bitEncode, regionList)
        LTLspec_sys = replaceRegionName(LTLspec_sys, bitEncode, regionList)
        if self.LTL2SpecLineNumber is not None:
            for k in self.LTL2SpecLineNumber.keys():
                new_k = replaceRegionName(k, bitEncode, regionList)
                if new_k != k:
                    self.LTL2SpecLineNumber[new_k] = self.LTL2SpecLineNumber[k]
                    del self.LTL2SpecLineNumber[k]

    if self.proj.compile_options["decompose"]:
        adjData = self.parser.proj.rfi.transitions
    else:
        adjData = self.proj.rfi.transitions

    # Store some data needed for later analysis
    self.spec = {}
    if self.proj.compile_options["decompose"]:
        self.spec['Topo'] = createTopologyFragment(adjData, self.parser.proj.rfi.regions, use_bits=self.proj.compile_options["use_region_bit_encoding"])
    else:
        self.spec['Topo'] = createTopologyFragment(adjData, self.proj.rfi.regions, use_bits=self.proj.compile_options["use_region_bit_encoding"])

    # Substitute any macros that the parsers passed us
    LTLspec_env = self.substituteMacros(LTLspec_env)
    LTLspec_sys = self.substituteMacros(LTLspec_sys)

    # If we are not using bit-encoding, we need to
    # explicitly encode a mutex for regions
    if not self.proj.compile_options["use_region_bit_encoding"]:
        # DNF version (extremely slow for core-finding)
        #mutex = "\n\t&\n\t []({})".format(" | ".join(["({})".format(" & ".join(["s."+r2.name if r is r2 else "!s."+r2.name for r2 in self.parser.proj.rfi.regions])) for r in self.parser.proj.rfi.regions]))

        if self.proj.compile_options["decompose"]:
            region_list = self.parser.proj.rfi.regions
        else:
            region_list = self.proj.rfi.regions

        # Almost-CNF version
        exclusions = []
        for i, r1 in enumerate(region_list):
            for r2 in region_list[i+1:]:
                exclusions.append("!(s.{} & s.{})".format(r1.name, r2.name))
        mutex = "\n&\n\t []({})".format(" & ".join(exclusions))
        LTLspec_sys += mutex

    self.spec.update(self.splitSpecIntoComponents(LTLspec_env, LTLspec_sys))

    # Add in a fragment to make sure that we start in a valid region
    if self.proj.compile_options["decompose"]:
        self.spec['InitRegionSanityCheck'] = createInitialRegionFragment(self.parser.proj.rfi.regions, use_bits=self.proj.compile_options["use_region_bit_encoding"])
    else:
        self.spec['InitRegionSanityCheck'] = createInitialRegionFragment(self.proj.rfi.regions, use_bits=self.proj.compile_options["use_region_bit_encoding"])
    LTLspec_sys += "\n&\n" + self.spec['InitRegionSanityCheck']
    LTLspec_sys += "\n&\n" + self.spec['Topo']

    createLTLfile(self.proj.getFilenamePrefix(), LTLspec_env, LTLspec_sys)

    if self.proj.compile_options["parser"] == "slurp":
        self.reversemapping = {self.postprocessLTL(line, sensorList, robotPropList).strip(): line.strip() for line in oldspec_env + oldspec_sys}
        self.reversemapping[self.spec['Topo'].replace("\n", "").replace("\t", "").lstrip().rstrip("\n\t &")] = "TOPOLOGY"

    #for k,v in self.reversemapping.iteritems():
    #    print "{!r}:{!r}".format(k,v)

    return self.spec, traceback, response
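The sys.path.append call in this example, inside the _SLURP_SPEC_GENERATOR guard, shows the other common form of the idiom: extending the path relative to __file__ so a dependency vendored inside the repository (here SLURP, under etc/SLURP) becomes importable, and doing so only once per process. A stripped-down sketch of that pattern with placeholder names; vendored_module does not exist, substitute a real package:

import os
import sys

_VENDORED = None  # module-level cache so the path is appended at most once

def load_vendored():
    global _VENDORED
    if _VENDORED is None:
        here = os.path.dirname(os.path.abspath(__file__))             # directory of this file
        sys.path.append(os.path.join(here, "..", "etc", "vendored"))  # repo-relative vendor dir
        import vendored_module  # resolves against the entry appended above
        _VENDORED = vendored_module
    return _VENDORED

Caching the imported module in a module-level global mirrors the _SLURP_SPEC_GENERATOR hack above: repeated calls reuse the first import instead of appending the same path entry again.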