Here are examples of the Python API numpy.savetxt, taken from open source projects. By voting up examples, you can indicate which ones are most useful and appropriate.
109 Examples
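Before the project-specific examples, here is a minimal sketch of the call pattern most of them share: build an array, then hand numpy.savetxt a filename (or an open file handle) plus an optional format string, delimiter and header. The array contents and file name below are placeholders rather than data from any of the listed projects.

import numpy as np

# placeholder 2-D array to write
values = np.arange(12.0).reshape(4, 3)

# one row per line, tab-separated, six significant digits;
# the header line is prefixed with '# ' by default
np.savetxt("values.txt", values, fmt="%.6g", delimiter="\t",
           header="col_a col_b col_c")

# round trip: loadtxt skips '#'-prefixed lines, so the header is ignored
restored = np.loadtxt("values.txt", delimiter="\t")
assert restored.shape == (4, 3)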
Example 1 (4 votes)
Project: qutip Source File: dump.py

def update_grad_log(self, grad):
    """add an entry to the grad log"""
    self.grad_log.append(grad)
    if self.write_to_file:
        fname = "{}-fid_err_gradients{}.{}".format(self.fname_base,
                                                   len(self.grad_log),
                                                   self.dump_file_ext)
        fpath = os.path.join(self.dump_dir, fname)
        np.savetxt(fpath, grad, delimiter=self.data_sep)

Example 2 (3 votes)

def to_ascii(self, fname, mode='luminosity_density'):
    if mode == 'luminosity_density':
        np.savetxt(fname, zip(self.wavelength.value, self.luminosity_density_lambda.value))
    elif mode == 'flux':
        np.savetxt(fname, zip(self.wavelength.value, self.flux_lambda.value))
    else:
        raise NotImplementedError('only mode "luminosity_density" and "flux" are implemented')

Example 3 (3 votes)
Project: mtpy Source File: pek1dclasses.py

def write_inmodel(self, wd=None):
    """
    """
    if wd is not None:
        self.working_directory = wd
    if not hasattr(self, 'inmodel'):
        self.build_inmodel()
    np.savetxt(os.path.join(self.working_directory, 'inmodel.dat'),
               self.inmodel,
               fmt=['%5i', '%11.4e', '%11.4e', '%11.4e', '%11.4e'])
    print "written inmodel file to {}".format(self.working_directory)

Example 4 (3 votes)

def entry_point():
    args = parser.parse_args()
    arglib.die_if_path_exists(args.output)
    LagTimes = args.lagtime.split(',')
    MinLagtime = int(LagTimes[0])
    MaxLagtime = int(LagTimes[1])
    # Pass the symmetric flag
    if args.symmetrize in ["None", "none", None]:
        args.symmetrize = None
    impTimes = run(
        MinLagtime, MaxLagtime, args.interval, args.eigvals, args.assignments,
        (not args.notrim), args.symmetrize, args.procs)
    np.savetxt(args.output, impTimes)
    logger.info("Saved output to %s", args.output)

Example 5 (3 votes)

def save_results(out_folder, base_name, y_pred, y_true):
    folder = os.path.join(out_folder, base_name)
    try:
        os.mkdir(folder)
    except:
        pass
    out_file = os.path.join(folder, 'pred.dat')
    np.savetxt(out_file, y_pred)
    with open(os.path.join(folder, 'summ.dat'), 'w') as summ_file:
        print(classification_report(y_true, y_pred), file=summ_file)

Example 6 (3 votes)

def execute(self):
    for i, case in enumerate(self.load_cases.cases):
        fid = file(self.file_base + '%03d.dat' % i, 'w')
        fid.write('# %s\n' % case.case_id)
        lc2d = case._toarray()
        np.savetxt(fid, lc2d[:, :])

Example 7 (3 votes)
Project: dragonn Source File: __main__.py

def main_predict(sequences=None,
                 arch_file=None,
                 weights_file=None,
                 output_file=None):
    # encode fasta
    print("loading sequence data...")
    X = encode_fasta_sequences(sequences)
    # load model
    print("loading model...")
    model = SequenceDNN.load(arch_file, weights_file)
    # predict
    print("getting predictions...")
    predictions = model.predict(X)
    # save predictions
    print("saving predictions to output file...")
    np.savetxt(output_file, predictions)
    print("Done!")

Example 8 (3 votes)
Project: quality-assessment-protocol Source File: dvars.py

def mean_dvars_wrapper(func_file, mask_file, dvars_out_file=None):
    func = load(func_file, mask_file)
    dvars = calc_dvars(func)
    if dvars_out_file:
        np.savetxt(dvars_out_file, dvars, fmt='%.12f')
    mean_d = calc_mean_dvars(dvars)
    return mean_d[0]

Example 9 (3 votes)
Project: gensim Source File: hdpmodel.py

def save_topics(self, doc_count=None):
    """legacy method; use `self.save()` instead"""
    if not self.outputdir:
        logger.error("cannot store topics without having specified an output directory")
    if doc_count is None:
        fname = 'final'
    else:
        fname = 'doc-%i' % doc_count
    fname = '%s/%s.topics' % (self.outputdir, fname)
    logger.info("saving topics to %s" % fname)
    betas = self.m_lambda + self.m_eta
    np.savetxt(fname, betas)

Example 10 (3 votes)
Project: pygmi Source File: tiltdepth.py

def save_depths(self):
    """ Save Depths """
    if self.depths is None:
        return
    ext = "Text File (*.csv)"
    filename = QtGui.QFileDialog.getSaveFileName(self.parent,
                                                 'Save File',
                                                 '.', ext)
    if filename == '':
        return False
    os.chdir(filename.rpartition('/')[0])
    np.savetxt(filename, self.depths, delimiter=',',
               header='x, y, id, depth')

Example 11 (3 votes)

def gen_istate(self, basis_state, initial_state):
    '''Generate a new initial state from the given basis state.'''
    initial_coords = self.basis_coordinates.copy()
    initial_coords[0, 0] = random.randrange(5, 16)
    new_template_args = {'initial_state': initial_state}
    istate_data_ref = self.makepath(self.initial_state_ref_template, new_template_args)
    self.mkdir_p(os.path.dirname(istate_data_ref))
    # Save coordinates of initial state as a text file
    # NOTE: this is ok for this example, but should be optimized for large systems
    np.savetxt(istate_data_ref, initial_coords)
    # Calculate pcoord for generated initial state
    pcoord = self.dist(initial_coords[0, :], initial_coords[1, :])
    initial_state.pcoord = np.array([pcoord], dtype=pcoord_dtype)
    initial_state.istate_status = initial_state.ISTATE_STATUS_PREPARED
    return initial_state

Example 12 (3 votes)
Project: GroundedTranslation Source File: initial_state_features.py

def serialise_to_csv(self, split, hidden_states,
                     batch_start=None, batch_end=None):
    """ Serialise the hidden representations from generate_activations
    into a CSV for t-SNE visualisation."""
    logger.info("Serialising merge state features from %s to csv",
                split)
    fhf_str = "%s-initial_hidden_features" % self.args.run_string
    if self.args.source_vectors is not None:
        fhf_str = "%s-multilingual_initial_hidden_features" % self.args.run_string
    f = open(fhf_str, 'a')
    for h in hidden_states:
        np.savetxt(f, h, delimiter=',', newline=',')
        f.write("\n")
    f.close()

Example 13 (3 votes)

def write_loads(csv_filename, loads, node_id):
    (Cp, Mach, T, U, V, W, p, rhoU) = loads
    #print("loads.keys() = ", sorted(loads.keys()))
    f = open(csv_filename, 'wb')
    dt = 1.0
    t = arange(len(Cp[node_id])) * dt  # broken...
    f.write('time\t')
    savetxt(f, t, delimiter='', newline=',')
    f.write('\n')
    for node_id, Cpi in sorted(iteritems(Cp)):
        f.write("\nnode_id=%i\n" % node_id)
        f.write('Cp[%s],' % node_id)
        savetxt(f, Cpi, delimiter='', newline=',')
        f.write('\np[%s],' % node_id)
        savetxt(f, p[node_id], delimiter='', newline=',')
        f.write('\n\n')
    f.close()

Example 14 (3 votes)
Project: pycog Source File: all.py

def train_seeds(model, start_seed=1, ntrain=5):
    for seed in xrange(start_seed, start_seed+ntrain):
        suffix = '_s{}'.format(seed)
        s = ' --seed {} --suffix {}'.format(seed, suffix)
        tstart = datetime.datetime.now()
        call("python {} {} clean{}"
             .format(join(examplespath, 'do.py'), join(modelspath, model), s))
        call("python {} {} train{} -g{}"
             .format(join(examplespath, 'do.py'), join(modelspath, model), s, gpus))
        tend = datetime.datetime.now()
        # Save training time
        totalmins = int((tend - tstart).total_seconds()/60)
        timefile = join(timespath, model + suffix + '_time.txt')
        np.savetxt(timefile, [totalmins], fmt='%d')

Example 15 (3 votes)
Project: yoink Source File: cmap_app.py

def dump_txt(self):
    data = self.get_data()
    print('dumping to %s.*.txt' % self.path)
    for key, val in data:
        np.savetxt('%s.%s.txt' % (self.path, key), val)
    print('dumped')

Example 16 (3 votes)
Project: klustaviewa Source File: recluster.py

def write_fet(fet, filepath):
    with open(filepath, 'w') as fd:
        # header line: number of features
        fd.write('%i\n' % fet.shape[1])
        # next lines: one feature vector per line
        np.savetxt(fd, fet, fmt="%i")

Example 17 (3 votes)
Project: pyNastran Source File: cart3d.py

def _write_regions(self, outfile, regions, is_binary):
    if is_binary:
        fmt = self._endian + b('i')
        four = pack(fmt, 4)
        outfile.write(four)
        nregions = len(regions)
        fmt = self._endian + b('%ii' % nregions)
        ints = pack(fmt, *regions)
        outfile.write(ints)
        outfile.write(four)
    else:
        fmt = b'%i'
        np.savetxt(outfile, regions, fmt)

Example 18 (3 votes)
Project: keras-molecules Source File: sample_latent.py

def main():
    args = get_arguments()
    model = MoleculeVAE()
    data, data_test, charset = load_dataset(args.data)
    if os.path.isfile(args.model):
        model.load(charset, args.model, latent_rep_size=args.latent_dim)
    else:
        raise ValueError("Model file %s doesn't exist" % args.model)
    if not args.visualize:
        x_latent = model.encoder.predict(data)
        np.savetxt(sys.stdout, x_latent, delimiter='\t')
    else:
        visualize_latent_rep(args, model, data)

Example 19 (3 votes)
Project: simpeg Source File: MeshIO.py

def writeModelUBC(mesh, fileName, model):
    """
    Writes a model associated with a SimPEG TensorMesh
    to a UBC-GIF format model file.

    :param string fileName: File to write to
    :param numpy.ndarray model: The model
    """
    # Reshape model to a matrix
    modelMat = mesh.r(model, 'CC', 'CC', 'M')
    # Transpose the axes
    modelMatT = modelMat.transpose((2, 0, 1))
    # Flip z to positive down
    modelMatTR = Utils.mkvc(modelMatT[::-1, :, :])
    np.savetxt(fileName, modelMatTR.ravel())

Example 20 (3 votes)

def entry_point():
    import matplotlib
    args = parser.parse_args()
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')
    K = run(assignments)
    T = scipy.linalg.matfuncs.expm(K)
    np.savetxt(os.path.join(args.output_dir, "Rate.dat"), K)
    scipy.io.mmwrite(os.path.join(args.output_dir, "tProb.mtx.tl"), T)

Example 21 (3 votes)
Project: thunder Source File: test_series_io.py

def test_from_text_skip(tmpdir):
    k = [[i] for i in range(10)]
    v = [[0, i] for i in range(10)]
    a = [kv[0] + kv[1] for kv in zip(k, v)]
    f = os.path.join(str(tmpdir), 'data.txt')
    savetxt(f, a, fmt='%.02g')
    data = fromtext(f, skip=1)
    assert allclose(data.shape, (10, 2))
    assert data.dtype == 'float64'
    assert allclose(data.toarray(), v)

Example 22 (3 votes)
Project: splocs Source File: inout.py

def save_off(filename, vertices=None, faces=None):
    if vertices is None:
        vertices = []
    if faces is None:
        faces = []
    with open(filename, 'w') as f:
        f.write("OFF\n%d %d 0\n" % (len(vertices), len(faces)))
        if len(vertices) > 1:
            np.savetxt(f, vertices, fmt="%f %f %f")
        if len(faces) > 1:
            for face in faces:
                fmt = " ".join(["%d"] * (len(face) + 1)) + "\n"
                f.write(fmt % ((len(face),) + tuple(map(int, face))))

Example 23 (3 votes)
Project: pele Source File: bench_minimization.py

def bench_pure_cpp(prog, coords):
    print ""
    coords_file = "coords.bench"
    print "calling pure c++ executable {} using subprocess module".format(prog)
    np.savetxt(coords_file, coords.ravel())
    t0 = time.time()
    p = subprocess.call([prog, coords_file])
    t1 = time.time()
    print "pure c++ LJ minimization : time {}".format(t1-t0)

Example 24 (3 votes)

def read_symbols(args):
    with h5py.File(args.h5file, 'r') as h5file:
        value_map = h5file[args.sourcename].attrs['value_map']
        if args.outfilename == '-':
            out_file = sys.stdout
        else:
            out_file = args.outfilename
        value_map.sort(order=('val',))
        numpy.savetxt(out_file, value_map, fmt="%s %d")

Example 25 (3 votes)

def write(file_name, data):
    """
    Write data to txt file.

    Arguments:
        file_name (str): path and file name.
        data (numpy.ndarray): data to be written.
    """
    np.savetxt(file_name, data)

Example 26 (3 votes)
Project: kaggle_otto Source File: rgf.py

def write_into_files(self, prefix, x, y=None):
    if not os.path.exists(self.files_location_data_):
        os.makedirs(self.files_location_data_)
    # Write file with X
    data_location = os.path.join(self.files_location_data_, '%s.data.x' % prefix)
    np.savetxt(data_location, x, delimiter='\t', fmt='%.5f')
    paths = dict(x=data_location, y=[])
    if y is not None:
        for i in range(self.n_classes_):
            labels = map(lambda l: ['+1'] if i == l else ['-1'], y)
            labels_location = os.path.join(self.files_location_data_, '%s.data.y.%d' % (prefix, i))
            np.savetxt(labels_location, labels, delimiter='\t', fmt='%s')
            paths['y'].append(labels_location)
    return paths

Example 27 (3 votes)

def gen_testdata(n=100, k=4):
    # use static data to compare to R
    data = randn(n, k)
    mean = randn(k)
    np.savetxt('test_data', data)
    np.savetxt('test_mean', mean)

Example 28 (3 votes)
Project: InfVocLDA Source File: hybrid.py

def export_intermediate_gamma(self, directory='../output/tmp/'):
    if not directory.endswith('/'):
        directory += "/";
    if self._counter != 0:
        gamma_path = directory + self._gamma_title + str(self._counter) + ".txt";
        numpy.savetxt(gamma_path, self._docuement_topic_distribution);
        #scipy.io.mmwrite(gamma_path, self._docuement_topic_distribution);
    self._docuement_topic_distribution = None;

Example 29 (3 votes)

def main(tseries_fpath, base_folder, k):
    k = int(k)
    idx_fpath = os.path.join(os.path.join(base_folder, '..'), 'train.dat')
    X = ioutil.load_series(tseries_fpath, idx_fpath)
    cent, assign, shift, dists_cent = ksc.inc_ksc(X, k)
    np.savetxt(os.path.join(base_folder, 'cents.dat'), cent, fmt='%.5f')
    np.savetxt(os.path.join(base_folder, 'assign.dat'), assign, fmt='%d')
    np.savetxt(os.path.join(base_folder, 'shift.dat'), shift, fmt='%d')
    np.savetxt(os.path.join(base_folder, 'dists_cent.dat'), dists_cent,
               fmt='%.5f')

Example 30 (3 votes)
Project: thunder Source File: test_series_io.py

def test_from_text(tmpdir, eng):
    v = [[0, i] for i in range(10)]
    f = os.path.join(str(tmpdir), 'data.txt')
    savetxt(f, v, fmt='%.02g')
    data = fromtext(f, engine=eng)
    assert allclose(data.shape, (10, 2))
    assert data.dtype == 'float64'
    assert allclose(data.toarray(), v)

Example 31 (3 votes)

def train(model, seed=None):
    if seed is None:
        seed = ''
    else:
        seed = ' -s {}'.format(seed)
    tstart = datetime.datetime.now()
    call("python {} {} train{} -g{}"
         .format(join(examplespath, 'do.py'), join(modelspath, model), seed, gpus))
    tend = datetime.datetime.now()
    # Save training time
    totalmins = int((tend - tstart).total_seconds()/60)
    timefile = join(timespath, model + '_time.txt')
    np.savetxt(timefile, [totalmins], fmt='%d')

Example 32 (3 votes)

def entry_point():
    args = parser.parse_args()
    arglib.die_if_path_exists(args.output)
    indices = run(args.pdb, args.atom_type)
    np.savetxt(args.output, indices, '%d')
    logger.info('Saved output to %s', args.output)

Example 33 (3 votes)

def save_text(filepath, data, header=None, fmt='%d', delimiter=' '):
    if isinstance(data, basestring):
        with open(filepath, 'w') as f:
            f.write(data)
    else:
        np.savetxt(filepath, data, fmt=fmt, newline='\n', delimiter=delimiter)
    # Write a header.
    if header is not None:
        with open(filepath, 'r') as f:
            contents = f.read()
        contents_updated = str(header) + '\n' + contents
        with open(filepath, 'w') as f:
            f.write(contents_updated)

Example 34 (3 votes)

def save_txt(pc, fname, header=True):
    """ TODO support multi-count fields
    """
    with open(fname, 'w') as f:
        if header:
            header_lst = []
            for field_name, cnt in zip(pc.fields, pc.count):
                if cnt == 1:
                    header_lst.append(field_name)
                else:
                    for c in xrange(cnt):
                        header_lst.append('%s_%04d' % (field_name, c))
            f.write(' '.join(header_lst)+'\n')
        fmtstr = build_ascii_fmtstr(pc)
        np.savetxt(f, pc.pc_data, fmt=fmtstr)

Example 35 (3 votes)

def plot_1d(self, filename=None):
    """plot the 1d insertion representation of the matrix"""
    fig = plt.figure()
    xlim = len(self.one_d)/2
    plt.plot(range(-xlim, xlim+1), self.one_d)
    plt.vlines(-73, 0, max(self.one_d)*1.1, linestyles='dashed')
    plt.vlines(73, 0, max(self.one_d)*1.1, linestyles='dashed')
    plt.xlabel("Position relative to dyad")
    plt.ylabel("Insertion Frequency")
    if filename:
        fig.savefig(filename)
        plt.close(fig)
        # Also save text output!
        filename2 = ".".join(filename.split(".")[:-1]+['txt'])
        np.savetxt(filename2, self.one_d, delimiter="\t")
    else:
        fig.show()

Example 36 (3 votes)
Project: radical.pilot Source File: lsdm.py

def save_distance_matrix(self, comm, args, distance_matrix_thread):
    size = comm.Get_size()  # number of threads
    rank = comm.Get_rank()  # number of the current thread
    if rank == 0:
        try:
            os.remove(args.dmfile)
        except OSError:
            pass
        dmfile = open(args.dmfile, 'a')
        for idx in xrange(size):
            if idx == 0:
                distance_matrix = distance_matrix_thread
            else:
                distance_matrix = comm.recv(source=idx, tag=idx)
            np.savetxt(dmfile, distance_matrix)
        dmfile.close()
    else:
        comm.send(distance_matrix_thread, dest=0, tag=rank)

Example 37 (3 votes)
Project: seisflows Source File: test_optimize.py

def finalize(cls):
    m_new = loadnpy('m_new')
    m_old = loadnpy('m_old')
    if PAR.VERBOSE > 0:
        print '%14.7e %14.7e' % tuple(m_new)
    if cls.status(m_new, m_old):
        print 'Stopping criteria met.\n'
        np.savetxt('niter', [cls.iter], '%d')
        sys.exit(0)
    elif cls.iter >= PAR.END:
        print 'Maximum number of iterations exceeded.\n'
        sys.exit(-1)

Example 38 (3 votes)
Project: Py6S Source File: ground_reflectance.py

@classmethod
def _ArrayToString(cls, array):
    text = StringIO.StringIO()
    np.savetxt(text, array, fmt="%.5f", delimiter=' ')
    s = text.getvalue()
    text.close()
    return s

Example 39 (3 votes)

def _write_txt(self, stream):
    '''
    Save a PLY element to an ASCII-format PLY file. The element may
    contain list properties.
    '''
    for rec in self.data:
        fields = []
        for prop in self.properties:
            fields.extend(prop._to_fields(rec[prop.name]))
        _np.savetxt(stream, [fields], '%.18g', newline='\r\n')

Example 40 (0 votes)
Project: scikit-rf Source File: metas.py

def ns_2_sdatcv(ns, fname, polar=False):
    '''
    write a sdatcv from a skrf.NetworkSet
    '''
    ntwk = ns[0]
    nports = ntwk.nports
    nntwks = len(ns)
    nfreq = len(ntwk)
    freq_hz = ntwk.f.reshape(-1, 1)

    ## make the header and columns information
    # top junk
    top = '\n'.join(['SDATCV',
                     'Ports',
                     '\t'.join(['%i\t' % (k+1) for k in range(nports)])])
    # port impedance info
    z0ri = complex2Scalar(ntwk.z0[0])
    zcol = '\t'.join(['Zr[%i]re\tZr[%i]im' % (k+1, k+1)
                      for k in range(nports)])
    zvals = '\t'.join([str(k) for k in z0ri])
    zhead = '\n'.join([zcol, zvals])
    # s and cov matrix info
    shead = '\t'.join(['S[%i,%i]%s' % (m+1, n+1, k)
                       for m, n in ntwk.port_tuples for k in ['re', 'im']])
    cvhead = '\t'.join(['CV[%i,%i]' % (n+1, m+1)
                        for m in range(2*nports**2) for n in range(2*nports**2)])
    datahead = '\t'.join(['Freq', shead, cvhead])
    header = '\n'.join([top, zhead, datahead])

    ## calculate covariance matrix
    cv = ns.cov()
    ## calculate mean s value, everything so we have a 2D matrix
    mean_ntwk = average(ns, polar=polar)
    s_mean_flat = NetworkSet([mean_ntwk]).scalar_mat().squeeze()
    cv_flat = array([k.flatten('F') for k in cv])
    data = hstack([freq_hz, s_mean_flat, cv_flat])
    savetxt(fname, data, delimiter='\t', header=header, comments='')

Example 41 (0 votes)
Project: pyspace Source File: time_series.py

def store(self, result_dir, s_format="pickle"):
    """ Stores this collection in the directory *result_dir*.

    In contrast to *dump* this method stores the collection
    not in a single file but as a whole directory structure with meta
    information etc. The data sets are stored separately for each run,
    split, train/test combination.

    **Parameters**

      :result_dir:
          The directory in which the collection will be stored.

      :name:
          The prefix of the file names in which the individual data sets are
          stored. The actual file names are determined by appending suffixes
          that encode run, split, train/test information.

          (*optional, default: "time_series"*)

      :s_format:
          The format in which the actual data sets should be stored.
          Possible formats are 'pickle', 'text', 'csv' and 'mat' (matlab)
          format. If s_format is a list, the second element further
          specifies additional options for storing.

          - pickle :
              Standard python format.
          - text :
              In the text format, all time series objects are concatenated
              to a single large table containing only integer values.
          - csv :
              For the csv format comma separated values are taken as default
              or a specified Python format string.
          - mat :
              Scipy's savemat function is used for storing. Thereby the data
              is stored as 3 dimensional array. Also meta data information,
              like sampling frequency and channel names are saved.
              As an additional parameter the orientation of the data arrays
              can be given as 'channelXtime' or 'timeXchannel'

          .. note:: For the text and MATLAB format, markers could be added
                    by using a Marker_To_Mux node before

          (*optional, default: "pickle"*)

    .. todo:: Put marker to the right time point and also write marker channel.
    .. todo:: Shouldn't be 'text' and 'csv' format part of the stream data
              set?!
    """
    name = "time_series"
    # for some storage procedures we need further specifications
    s_type = None
    if type(s_format) == list:
        # file format is first position
        f_format = s_format[0]
        if len(s_format) > 1:
            s_type = s_format[1]
    else:
        f_format = s_format
    if f_format == "text" and s_type is None:
        s_type = "%i"
    elif f_format == "csv" and s_type == "real":
        s_type = "%.18e"
    # Update the meta data
    author = get_author()
    self.update_meta_data({"type": "time_series",
                           "storage_format": s_format,
                           "author": author,
                           "data_pattern": "data_run" + os.sep
                                           + name + "_sp_tt." + f_format})
    # Iterate through splits and runs in this dataset
    for key, time_series in self.data.iteritems():
        # load data, if necessary
        # (due to the lazy loading, the data might be not loaded already)
        if isinstance(time_series, basestring):
            time_series = self.get_data(key[0], key[1], key[2])
        if self.sort_string is not None:
            time_series.sort(key=eval(self.sort_string))
        # Construct result directory
        result_path = result_dir + os.sep + "data" + "_run%s" % key[0]
        if not os.path.exists(result_path):
            os.mkdir(result_path)
        key_str = "_sp%s_%s" % key[1:]
        # Store data depending on the desired format
        if f_format in ["pickle", "cpickle", "cPickle"]:
            result_file = open(os.path.join(result_path,
                                            name+key_str+".pickle"), "w")
            cPickle.dump(time_series, result_file, cPickle.HIGHEST_PROTOCOL)
            result_file.close()
        elif f_format in ["text", "csv"]:
            self.update_meta_data({
                "type": "stream",
                "marker_column": "marker"})
            result_file = open(os.path.join(result_path,
                                            name + key_str + ".csv"), "w")
            csvwriter = csv.writer(result_file)
            channel_names = copy.deepcopy(time_series[0][0].channel_names)
            if f_format == "csv":
                channel_names.append("marker")
            csvwriter.writerow(channel_names)
            for (data, key) in time_series:
                if f_format == "text":
                    numpy.savetxt(result_file, data, delimiter=",", fmt=s_type)
                    if not key is None:
                        result_file.write(str(key))
                        result_file.flush()
                    elif data.marker_name is not None \
                            and len(data.marker_name) > 0:
                        result_file.write(str(data.marker_name))
                        result_file.flush()
                else:
                    first_line = True
                    marker = ""
                    if not key is None:
                        marker = str(key)
                    elif data.marker_name is not None \
                            and len(data.marker_name) > 0:
                        marker = str(data.marker_name)
                    for line in data:
                        l = list(line)
                        l.append(marker)
                        csvwriter.writerow(list(l))
                        if first_line:
                            first_line = False
                            marker = ""
                    result_file.flush()
            result_file.close()
        elif f_format in ["matlab", "mat", "MATLAB"]:
            # todo: handle all the other attributes of ts objects!
            import scipy.io
            result_file_name = os.path.join(result_path,
                                            name + key_str + ".mat")
            # extract a first time series object to get meta data
            ts1 = time_series[0][0]
            # collect all important information in the collection_object
            dataset_dict = {
                "sampling_frequency": ts1.sampling_frequency,
                "channel_names": ts1.channel_names}
            # we have to extract the data and labels separatly
            if 'channelXtime' in s_format:
                dataset_dict["data"] = [data.T for data, _ in time_series]
            else:
                dataset_dict["data"] = [data for data, _ in time_series]
            dataset_dict["labels"] = [label for _, label in time_series]
            # construct numpy 3d array (e.g., channelXtimeXtrials)
            dataset_dict["data"] = numpy.rollaxis(numpy.array(
                dataset_dict["data"]), 0, 3)
            scipy.io.savemat(result_file_name, mdict=dataset_dict)
        elif f_format in ["bp_eeg"]:
            result_file = open(os.path.join(result_path,
                                            name + key_str + ".eeg"), "a+")
            result_file_mrk = open(os.path.join(result_path,
                                                name + key_str + ".vmrk"), "w")
            result_file_mrk.write("Brain Vision Data Exchange Marker File, "
                                  "Version 1.0\n")
            result_file_mrk.write("; Data stored by pySPACE\n")
            result_file_mrk.write("[Common Infos]\n")
            result_file_mrk.write("Codepage=UTF-8\n")
            result_file_mrk.write("DataFile=%s\n" %
                                  str(name + key_str + ".eeg"))
            result_file_mrk.write("\n[Marker Infos]\n")
            markerno = 1
            datapoint = 1
            sf = None
            channel_names = None
            for t in time_series:
                if sf is None:
                    sf = t[0].sampling_frequency
                if channel_names is None:
                    channel_names = t[0].get_channel_names()
                for mrk in t[0].marker_name.keys():
                    for tm in t[0].marker_name[mrk]:
                        result_file_mrk.write(str("Mk%d=Stimulus,%s,%d,1,0\n" %
                                                  (markerno, mrk, datapoint+(tm*sf/1000.0))))
                        markerno += 1
                data_ = t[0].astype(numpy.int16)
                data_.tofile(result_file)
                datapoint += data_.shape[0]
            result_hdr = open(os.path.join(result_path,
                                           name + key_str + ".vhdr"), "w")
            result_hdr.write("Brain Vision Data Exchange Header "
                             "File Version 1.0\n")
            result_hdr.write("; Data stored by pySPACE\n\n")
            result_hdr.write("[Common Infos]\n")
            result_hdr.write("Codepage=UTF-8\n")
            result_hdr.write("DataFile=%s\n" %
                             str(name + key_str + ".eeg"))
            result_hdr.write("MarkerFile=%s\n" %
                             str(name + key_str + ".vmrk"))
            result_hdr.write("DataFormat=BINARY\n")
            result_hdr.write("DataOrientation=MULTIPLEXED\n")
            result_hdr.write("NumberOfChannels=%d\n" % len(channel_names))
            result_hdr.write("SamplingInterval=%d\n\n" % (1000000/sf))
            result_hdr.write("[Binary Infos]\n")
            result_hdr.write("BinaryFormat=INT_16\n\n")
            result_hdr.write("[Channel Infos]\n")
            # TODO: Add Resolutions to time_series
            # 0 = 0.1 [micro]V,
            # 1 = 0.5 [micro]V,
            # 2 = 10 [micro]V,
            # 3 = 152.6 [micro]V (seems to be unused!)
            resolutions_str = [unicode("0.1,%sV" % unicode(u"\u03BC")),
                               unicode("0.5,%sV" % unicode(u"\u03BC")),
                               unicode("10,%sV" % unicode(u"\u03BC")),
                               unicode("152.6,%sV" % unicode(u"\u03BC"))]
            for i in range(len(channel_names)):
                result_hdr.write(unicode("Ch%d=%s,,%s\n" %
                                         (i+1, channel_names[i],
                                          unicode(resolutions_str[0]))).encode('utf-8'))
            result_file.close()
        else:
            NotImplementedError("Using unavailable storage format:%s!"
                                % f_format)
        self.update_meta_data({
            "channel_names": copy.deepcopy(time_series[0][0].channel_names),
            "sampling_frequency": time_series[0][0].sampling_frequency
        })
    # Store meta data
    BaseDataset.store_meta_data(result_dir, self.meta_data)

Example 42 (0 votes)
Project: scipy Source File: mmio.py

def _write(self, stream, a, comment='', field=None, precision=None,
           symmetry=None):
    if isinstance(a, list) or isinstance(a, ndarray) or \
            isinstance(a, tuple) or hasattr(a, '__array__'):
        rep = self.FORMAT_ARRAY
        a = asarray(a)
        if len(a.shape) != 2:
            raise ValueError('Expected 2 dimensional array')
        rows, cols = a.shape
        if field is not None:
            if field == self.FIELD_INTEGER:
                a = a.astype('i')
            elif field == self.FIELD_REAL:
                if a.dtype.char not in 'fd':
                    a = a.astype('d')
            elif field == self.FIELD_COMPLEX:
                if a.dtype.char not in 'FD':
                    a = a.astype('D')
    else:
        if not isspmatrix(a):
            raise ValueError('unknown matrix type: %s' % type(a))
        rep = 'coordinate'
        rows, cols = a.shape
    typecode = a.dtype.char
    if precision is None:
        if typecode in 'fF':
            precision = 8
        else:
            precision = 16
    if field is None:
        kind = a.dtype.kind
        if kind == 'i':
            field = 'integer'
        elif kind == 'f':
            field = 'real'
        elif kind == 'c':
            field = 'complex'
        else:
            raise TypeError('unexpected dtype kind ' + kind)
    if symmetry is None:
        symmetry = self._get_symmetry(a)
    # validate rep, field, and symmetry
    self.__class__._validate_format(rep)
    self.__class__._validate_field(field)
    self.__class__._validate_symmetry(symmetry)
    # write initial header line
    stream.write(asbytes('%%MatrixMarket matrix {0} {1} {2}\n'.format(rep,
                                                                      field, symmetry)))
    # write comments
    for line in comment.split('\n'):
        stream.write(asbytes('%%%s\n' % (line)))
    template = self._field_template(field, precision)
    # write dense format
    if rep == self.FORMAT_ARRAY:
        # write shape spec
        stream.write(asbytes('%i %i\n' % (rows, cols)))
        if field in (self.FIELD_INTEGER, self.FIELD_REAL):
            if symmetry == self.SYMMETRY_GENERAL:
                for j in range(cols):
                    for i in range(rows):
                        stream.write(asbytes(template % a[i, j]))
            else:
                for j in range(cols):
                    for i in range(j, rows):
                        stream.write(asbytes(template % a[i, j]))
        elif field == self.FIELD_COMPLEX:
            if symmetry == self.SYMMETRY_GENERAL:
                for j in range(cols):
                    for i in range(rows):
                        aij = a[i, j]
                        stream.write(asbytes(template % (real(aij),
                                                         imag(aij))))
            else:
                for j in range(cols):
                    for i in range(j, rows):
                        aij = a[i, j]
                        stream.write(asbytes(template % (real(aij),
                                                         imag(aij))))
        elif field == self.FIELD_PATTERN:
            raise ValueError('pattern type inconsisted with dense format')
        else:
            raise TypeError('Unknown field type %s' % field)
    # write sparse format
    else:
        coo = a.tocoo()  # convert to COOrdinate format
        # if symmetry format used, remove values above main diagonal
        if symmetry != self.SYMMETRY_GENERAL:
            lower_triangle_mask = coo.row >= coo.col
            coo = coo_matrix((coo.data[lower_triangle_mask],
                              (coo.row[lower_triangle_mask],
                               coo.col[lower_triangle_mask])),
                             shape=coo.shape)
        # write shape spec
        stream.write(asbytes('%i %i %i\n' % (rows, cols, coo.nnz)))
        # make indices and data array
        if field == self.FIELD_PATTERN:
            IJV = vstack((coo.row, coo.col)).T
        elif field in [self.FIELD_INTEGER, self.FIELD_REAL]:
            IJV = vstack((coo.row, coo.col, coo.data)).T
        elif field == self.FIELD_COMPLEX:
            IJV = vstack((coo.row, coo.col, coo.data.real,
                          coo.data.imag)).T
        else:
            raise TypeError('Unknown field type %s' % field)
        IJV[:, :2] += 1  # change base 0 -> base 1
        # formats for row indices, col indices and data columns
        fmt = ('%i', '%i') + ('%%.%dg' % precision,) * (IJV.shape[1]-2)
        # save to file
        savetxt(stream, IJV, fmt=fmt)

Example 43 (0 votes)
Project: fmriprep Source File: base.py

def create_encoding_file(input_images, in_dict):
    """Creates a valid encoding file for topup"""
    import json
    import nibabel as nb
    import numpy as np
    import os
    if not isinstance(input_images, list):
        input_images = [input_images]
    if not isinstance(in_dict, list):
        in_dict = [in_dict]
    pe_dirs = {'i': 0, 'j': 1, 'k': 2}
    enc_table = []
    for fmap, meta in zip(input_images, in_dict):
        line_values = [0, 0, 0, meta['TotalReadoutTime']]
        line_values[pe_dirs[meta['PhaseEncodingDirection'][0]]] = 1 + (
            -2*(len(meta['PhaseEncodingDirection']) == 2))
        nvols = 1
        if len(nb.load(fmap).shape) > 3:
            nvols = nb.load(fmap).shape[3]
        enc_table += [line_values] * nvols
    np.savetxt(os.path.abspath('parameters.txt'), enc_table,
               fmt=['%0.1f', '%0.1f', '%0.1f', '%0.20f'])
    return os.path.abspath('parameters.txt')

Example 44 (0 votes)

def save(ar, fileName):
    # Used only when verbosity level > 10.
    # NOTE: numpy.savetxt has no 'precision' keyword; fmt='%.8e' is the
    # supported way to control output precision, so this call would fail.
    from numpy import savetxt
    savetxt(fileName, ar, precision=8)

Example 45 (0 votes)
Project: TensorflowProjects Source File: FaceDetectionDataUtils.py

def read_data(data_dir, force=False):
    pickle_file = os.path.join(data_dir, "FaceDetectionData.pickle")
    if force or not os.path.exists(pickle_file):
        train_filename = os.path.join(data_dir, "training.csv")
        data_frame = pd.read_csv(train_filename)
        cols = data_frame.columns[:-1]
        np.savetxt(os.path.join(data_dir, "column_labels.txt"), cols.values, fmt="%s")
        data_frame['Image'] = data_frame['Image'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
        data_frame = data_frame.dropna()
        print "Reading training.csv ..."
        # scale data to a 1x1 image with pixel values 0-1
        train_images = np.vstack(data_frame['Image']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
        train_labels = (data_frame[cols].values - IMAGE_LOCATION_NORM) / float(IMAGE_LOCATION_NORM)
        permutations = np.random.permutation(train_images.shape[0])
        train_images = train_images[permutations]
        train_labels = train_labels[permutations]
        validation_percent = int(train_images.shape[0] * VALIDATION_PERCENT)
        validation_images = train_images[:validation_percent]
        validation_labels = train_labels[:validation_percent]
        train_images = train_images[validation_percent:]
        train_labels = train_labels[validation_percent:]
        print "Reading test.csv ..."
        test_filename = os.path.join(data_dir, "test.csv")
        data_frame = pd.read_csv(test_filename)
        data_frame['Image'] = data_frame['Image'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
        data_frame = data_frame.dropna()
        test_images = np.vstack(data_frame['Image']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
        with open(pickle_file, "wb") as file:
            try:
                print 'Picking ...'
                save = {
                    "train_images": train_images,
                    "train_labels": train_labels,
                    "validation_images": validation_images,
                    "validation_labels": validation_labels,
                    "test_images": test_images,
                }
                pickle.dump(save, file, pickle.HIGHEST_PROTOCOL)
            except:
                print("Unable to pickle file :/")
    with open(pickle_file, "rb") as file:
        save = pickle.load(file)
        train_images = save["train_images"]
        train_labels = save["train_labels"]
        validation_images = save["validation_images"]
        validation_labels = save["validation_labels"]
        test_images = save["test_images"]
    return train_images, train_labels, validation_images, validation_labels, test_images

Example 46 (0 votes)

def main():
    # add some program options
    parser = OptionParser(usage="usage: %prog [options] storage")
    parser.add_option("--write-disconnect",
                      dest="writeDPS", action="store_true",
                      help="generate min.dat and ts.dat to use with disconnectDPS")
    parser.add_option("-m",
                      dest="writeMinima", action="store_true",
                      help="dump minima to screen")
    parser.add_option("-t",
                      dest="writeTS", action="store_true",
                      help="dump transition states to screen")
    parser.add_option("--coords",
                      dest="writeCoords", action="store_true",
                      help="export coordinates files")
    parser.add_option("--xyz",
                      dest="writeXYZ", action="store_true",
                      help="export xyz files")
    (options, args) = parser.parse_args()
    # print help if no input file is given
    if(len(args) != 1):
        parser.print_help()
        exit(-1)
    db = Database(db=args[0])
    if(options.writeMinima):
        print "List of minima:"
        print "---------------"
        for m in db.minima():
            print "%f\t\tid %d" % (m.energy, m._id)
        print "END\n"
    if(options.writeTS):
        print "List of transition states:"
        print "--------------------------"
        for ts in db.transition_states():
            print "%d\t<->\t%d\tid %d\tenergies %f %f %f" % \
                (ts.minimum1._id, ts.minimum2._id, ts._id, ts.minimum1.energy, ts.energy, ts.minimum2.energy)
        print "END\n"
    if(options.writeDPS):
        writeDPS(db)
    if(options.writeCoords):
        GMIN.initialize()
        i = 0
        for m in db.minima():
            i += 1
            filename = "lowest/lowest%03d.cif" % (i)
            print "minimum", i, "energy", m.energy, "to", filename
            GMIN.userpot_dump(filename, m.coords)
            if(not TO_PDB is None):
                os.system(TO_PDB % filename)
            np.savetxt("lowest/coords_%03d.txt" % (i), m.coords)
    if(options.writeXYZ):
        traj = open("lowest/traj.xyz", "w")
        i = 0
        for m in db.minima():
            i += 1
            filename = "lowest/lowest%03d.xyz" % (i)
            print "minimum", i, "energy", m.energy, "to", filename
            export_xyz(open(filename, "w"), m.coords)
            export_xyz(traj, m.coords)
        traj.close()

Example 47 (0 votes)

def writeout(self, f=None):
    """write all the logs and the summary out to file(s)

    Parameters
    ----------
    f : filename or filehandle
        If specified then all summary and object data will go in one file.
        If None is specified then type specific files will be generated
        in the dump_dir.
        If a filehandle is specified then it must be a byte mode file
        as numpy.savetxt is used, and requires this.
    """
    fall = None
    # If specific file given then write everything to it
    if hasattr(f, 'write'):
        if not 'b' in f.mode:
            raise RuntimeError("File stream must be in binary mode")
        # write all to this stream
        fall = f
        fs = f
        closefall = False
        closefs = False
    elif f:
        # Assume f is a filename
        fall = open(f, 'wb')
        fs = fall
        closefs = False
        closefall = True
    else:
        self.create_dump_dir()
        closefall = False
        if self.dump_summary:
            fs = open(self.summary_file, 'wb')
            closefs = True
    if self.dump_summary:
        for ois in self.iter_summary:
            if ois.idx == 0:
                fs.write(asbytes("{}\n{}\n".format(
                    ois.get_header_line(self.summary_sep),
                    ois.get_value_line(self.summary_sep))))
            else:
                fs.write(asbytes("{}\n".format(
                    ois.get_value_line(self.summary_sep))))
        if closefs:
            fs.close()
            logger.info("Optim dump summary saved to {}".format(
                self.summary_file))
    if self.dump_fid_err:
        if fall:
            fall.write(asbytes("Fidelity errors:\n"))
            np.savetxt(fall, self.fid_err_log)
        else:
            np.savetxt(self.fid_err_file, self.fid_err_log)
    if self.dump_grad_norm:
        if fall:
            fall.write(asbytes("gradients norms:\n"))
            np.savetxt(fall, self.grad_norm_log)
        else:
            np.savetxt(self.grad_norm_file, self.grad_norm_log)
    if self.dump_grad:
        g_num = 0
        for grad in self.grad_log:
            g_num += 1
            if fall:
                fall.write(asbytes("gradients (call {}):\n".format(g_num)))
                np.savetxt(fall, grad)
            else:
                fname = "{}-fid_err_gradients{}.{}".format(self.fname_base,
                                                           g_num,
                                                           self.dump_file_ext)
                fpath = os.path.join(self.dump_dir, fname)
                np.savetxt(fpath, grad, delimiter=self.data_sep)
    if closefall:
        fall.close()
        logger.info("Optim dump saved to {}".format(f))
    else:
        if fall:
            logger.info("Optim dump saved to specified stream")
        else:
            logger.info("Optim dump saved to {}".format(self.dump_dir))

Example 48 (0 votes)

def writeout(self, f=None):
    """ write all the objects out to files

    Parameters
    ----------
    f : filename or filehandle
        If specified then all object data will go in one file.
        If None is specified then type specific files will be generated
        in the dump_dir.
        If a filehandle is specified then it must be a byte mode file
        as numpy.savetxt is used, and requires this.
    """
    dump = self.parent
    fall = None
    closefall = True
    closef = False
    # If specific file given then write everything to it
    if hasattr(f, 'write'):
        if not 'b' in f.mode:
            raise RuntimeError("File stream must be in binary mode")
        # write all to this stream
        fall = f
        closefall = False
        f.write(asbytes("EVOLUTION COMPUTATION {}\n".format(self.idx)))
    elif f:
        fall = open(f, 'wb')
    else:
        # otherwise files for each type will be created
        fnbase = "{}-evo{}".format(dump._fname_base, self.idx)
        closefall = False
    # ctrl amps
    if not self.ctrl_amps is None:
        if fall:
            f = fall
            f.write(asbytes("Ctrl amps\n"))
        else:
            fname = "{}-ctrl_amps.{}".format(fnbase,
                                             dump.dump_file_ext)
            f = open(os.path.join(dump.dump_dir, fname), 'wb')
            closef = True
        np.savetxt(f, self.ctrl_amps, fmt='%14.6g',
                   delimiter=dump.data_sep)
        if closef: f.close()
    # dynamics generators
    if not self.dyn_gen is None:
        k = 0
        if fall:
            f = fall
            f.write(asbytes("Dynamics Generators\n"))
        else:
            fname = "{}-dyn_gen.{}".format(fnbase,
                                           dump.dump_file_ext)
            f = open(os.path.join(dump.dump_dir, fname), 'wb')
            closef = True
        for dg in self.dyn_gen:
            f.write(asbytes(
                "dynamics generator for timeslot {}\n".format(k)))
            np.savetxt(f, self.dyn_gen[k], delimiter=dump.data_sep)
            k += 1
        if closef: f.close()
    # Propagators
    if not self.prop is None:
        k = 0
        if fall:
            f = fall
            f.write(asbytes("Propagators\n"))
        else:
            fname = "{}-prop.{}".format(fnbase,
                                        dump.dump_file_ext)
            f = open(os.path.join(dump.dump_dir, fname), 'wb')
            closef = True
        for dg in self.dyn_gen:
            f.write(asbytes("Propagator for timeslot {}\n".format(k)))
            np.savetxt(f, self.prop[k], delimiter=dump.data_sep)
            k += 1
        if closef: f.close()
    # Propagator gradient
    if not self.prop_grad is None:
        k = 0
        if fall:
            f = fall
            f.write(asbytes("Propagator gradients\n"))
        else:
            fname = "{}-prop_grad.{}".format(fnbase,
                                             dump.dump_file_ext)
            f = open(os.path.join(dump.dump_dir, fname), 'wb')
            closef = True
        for k in range(self.prop_grad.shape[0]):
            for j in range(self.prop_grad.shape[1]):
                f.write(asbytes("Propagator gradient for timeslot {} "
                                "control {}\n".format(k, j)))
                np.savetxt(f, self.prop_grad[k, j],
                           delimiter=dump.data_sep)
        if closef: f.close()
    # forward evolution
    if not self.fwd_evo is None:
        k = 0
        if fall:
            f = fall
            f.write(asbytes("Forward evolution\n"))
        else:
            fname = "{}-fwd_evo.{}".format(fnbase,
                                           dump.dump_file_ext)
            f = open(os.path.join(dump.dump_dir, fname), 'wb')
            closef = True
        for dg in self.dyn_gen:
            f.write(asbytes("Evolution from 0 to {}\n".format(k)))
            np.savetxt(f, self.fwd_evo[k], delimiter=dump.data_sep)
            k += 1
        if closef: f.close()
    # onward evolution
    if not self.onwd_evo is None:
        k = 0
        if fall:
            f = fall
            f.write(asbytes("Onward evolution\n"))
        else:
            fname = "{}-onwd_evo.{}".format(fnbase,
                                            dump.dump_file_ext)
            f = open(os.path.join(dump.dump_dir, fname), 'wb')
            closef = True
        for dg in self.dyn_gen:
            f.write(asbytes("Evolution from {} to end\n".format(k)))
            np.savetxt(f, self.fwd_evo[k], delimiter=dump.data_sep)
            k += 1
        if closef: f.close()
    # onto evolution
    if not self.onto_evo is None:
        k = 0
        if fall:
            f = fall
            f.write(asbytes("Onto evolution\n"))
        else:
            fname = "{}-onto_evo.{}".format(fnbase,
                                            dump.dump_file_ext)
            f = open(os.path.join(dump.dump_dir, fname), 'wb')
            closef = True
        for dg in self.dyn_gen:
            f.write(asbytes("Evolution from {} onto target\n".format(k)))
            np.savetxt(f, self.fwd_evo[k], delimiter=dump.data_sep)
            k += 1
        if closef: f.close()
    if closefall:
        fall.close()

Example 49 (0 votes)

def save_txt(self, filename):
    """
    Save the Grid object to text files.

    The latitude, longitude and time sequences are stored in three separate
    text files.

    :arg str filename: The name of the files where Grid object is stored
        (excluding ending).
    """
    # Gather sequences
    lat_seq = self.lat_sequence()
    lon_seq = self.lon_sequence()
    time_seq = self.grid()["time"]
    # Store as text files
    try:
        np.savetxt(filename + "_lat.txt", lat_seq)
        np.savetxt(filename + "_lon.txt", lon_seq)
        np.savetxt(filename + "_time.txt", time_seq)
    except IOError:
        print "An error occurred while saving Grid instance to \
text files", filename

Example 50 (0 votes)

def savetxt(filename, v):
    """Save scalar to text file"""
    np.savetxt(filename, [v], '%11.6e')
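
Several of the examples above (for instance Examples 12, 13, 16, 34, 36, 47 and 48) pass an already open file object to numpy.savetxt, so that hand-written header lines and several arrays can share a single file. A minimal sketch of that pattern follows; the array and file names are placeholders, and some of the Python 2 examples above open the handle in binary mode ('wb'), which older NumPy versions required.

import numpy as np

block_a = np.random.rand(3, 4)
block_b = np.random.rand(2, 4)

# np.savetxt accepts a file handle, so several writes can target one file;
# a text-mode handle is fine on current NumPy/Python 3
with open("combined.txt", "w") as fh:
    fh.write("# block A\n")
    np.savetxt(fh, block_a, fmt="%.4f")
    fh.write("# block B\n")
    np.savetxt(fh, block_b, fmt="%.4f")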