numpy.savetxt

Here are examples of the Python API numpy.savetxt taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

109 Examples
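
Before the project examples, here is a minimal illustrative call (not taken from any project on this page) showing the arguments that recur below:

import numpy as np

a = np.arange(6, dtype=float).reshape(3, 2)
# one line of text per row of the array; columns joined by the delimiter
np.savetxt('a.csv', a, fmt='%.4f', delimiter=',', header='x,y', comments='# ')
# round-trip: loadtxt skips the '# '-prefixed header automatically
b = np.loadtxt('a.csv', delimiter=',')
assert np.allclose(a, b)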

Example 51

Project: automl-phase-2 Source File: util.py
def convert_automl_into_automl_folds(folder, save_folder_root, n_folds=5,
                                     random_state=0, usage='testing'):
    """Convert a dataset in automl format into several folds of automl format"""
    # Load data
    input_dir, basename = os.path.split(folder)
    D = DataManager(basename, input_dir, replace_missing=True, filter_features=True)
    X = D.data['X_train']
    y = D.data['Y_train']
    info = D.info
    if usage is not None:
        info['usage'] = usage
    # Now split into folds and save
    folds = KFold(n=X.shape[0], n_folds=n_folds, shuffle=True, random_state=random_state)
    for (fold, (train_index, test_index)) in enumerate(folds):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        fold_folder = os.path.join(save_folder_root + '_fold_%02d' % (fold + 1), basename)
        mkdir(fold_folder)
        fmt = '%f'
        np.savetxt(os.path.join(fold_folder, basename + '_train.data'), X_train, fmt=fmt, delimiter=' ')
        np.savetxt(os.path.join(fold_folder, basename + '_test.data'), X_test, fmt=fmt, delimiter=' ')
        if info['task'] == 'binary.classification':
            fmt = '%d'
        np.savetxt(os.path.join(fold_folder, basename + '_train.solution'), y_train, fmt=fmt, delimiter=' ')
        np.savetxt(os.path.join(fold_folder, basename + '_test.solution'), y_test, fmt=fmt, delimiter=' ')
        info['train_num'] = X_train.shape[0]
        info['test_num'] = X_test.shape[0]
        with open(os.path.join(fold_folder, basename + '_public.info'), 'w') as info_file:
            for (key, value) in info.items():
                info_file.write('%s = %s\n' % (key, value))
        shutil.copy(os.path.join(folder, basename + '_feat.type'), os.path.join(fold_folder, basename + '_feat.type'))

Example 52

Project: PyEMMA Source File: test_random_access_stride.py
    def test_csv_filereader_random_access(self):
        tmpfiles = [tempfile.mktemp(suffix='.dat') for _ in range(0, len(self.data))]
        try:
            for idx, tmp in enumerate(tmpfiles):
                np.savetxt(tmp, self.data[idx])

            # large enough chunksize
            csv_fr = coor.source(tmpfiles, chunk_size=10)
            out1 = csv_fr.get_output(stride=self.stride)

            # small chunk size
            np_fr = coor.source(tmpfiles, chunk_size=1)
            out2 = np_fr.get_output(stride=self.stride)

            for idx in np.unique(self.stride[:, 0]):
                np.testing.assert_array_almost_equal(self.data[idx][self.stride[self.stride[:, 0] == idx][:, 1]],
                                                     out1[idx])
                np.testing.assert_array_almost_equal(out1[idx], out2[idx])
        finally:
            for tmp in tmpfiles:
                try:
                    os.unlink(tmp)
                except EnvironmentError:
                    pass

Example 53

Project: DronePilot Source File: pyrenn.py
def saveNN(net,filename):
	"""	Save neural network object to file
		
	Args:
		net: 	neural network object
		filename:	path of csv file to save neural network
	
	"""	
	import csv
	import pandas as pd
	
	#create csv writer
	file = open(filename,"w")
	writer = csv.writer(file, lineterminator='\n')

	
	#write network structure nn
	writer.writerow(['nn'])
	writer.writerow(net['nn'])
	
	#write input delays dIn
	writer.writerow(['dIn'])
	writer.writerow(net['delay']['In'])
	
	#write internal delays dIntern
	writer.writerow(['dIntern'])
	if not net['delay']['Intern']:
		writer.writerow(['',''])
	else:
		writer.writerow(net['delay']['Intern'])
		
	#write output delays dOut
	writer.writerow(['dOut'])
	if not net['delay']['Out']:
		writer.writerow(['',''])
	else:
		writer.writerow(net['delay']['Out'])
		
	#write factor for input data normalization normP
	writer.writerow(['normP'])
	writer.writerow(net['normP'])
	
	#write factor for output data normalization normY
	writer.writerow(['normY'])
	writer.writerow(net['normY'])
	
	#write weight vector w
	writer.writerow(['w'])
	file.close()
	
	file = open(filename,"ab")
	np.savetxt(file,net['w'],delimiter=',',fmt='%.55f')
	
	#close file
	file.close()
	
	return

Example 54

Project: gwpy Source File: test_table.py
    def test_read_ascii(self):
        # read table
        table = self.TABLE_CLASS.read(self.TEST_XML_FILE)
        # write to ASCII
        tmpascii = tempfile.mktemp(suffix='.txt')
        numpy.savetxt(tmpascii,
                      list(zip(table.get_peak(), table.get_column('snr'),
                               table.get_column('central_freq'))),
                      fmt=['%s', '%.18e', '%.18e'])
        # read from ASCII
        try:
            table2 = self.TABLE_CLASS.read(
                tmpascii, columns=['time', 'snr', 'central_freq'])
        finally:
            if os.path.isfile(tmpascii):
                os.remove(tmpascii)
        self.assertEqual(len(table), len(table2))
        nptest.assert_array_equal(table.get_peak(), table2.get_peak())
        nptest.assert_array_equal(table.get_column('snr'),
                                  table2.get_column('snr'))
        nptest.assert_array_equal(table.get_column('central_freq'),
                                  table2.get_column('central_freq'))

Example 55

Project: deepTools Source File: computeGCBias.py
def main(args=None):
    args = parse_arguments().parse_args(args)

    if args.extraSampling:
        extra_sampling_file = args.extraSampling.name
        args.extraSampling.close()
    else:
        extra_sampling_file = None

    global global_vars
    global_vars = {}
    global_vars['2bit'] = args.genome
    global_vars['bam'] = args.bamfile
    global_vars['filter_out'] = args.blackListFileName
    global_vars['extra_sampling_file'] = extra_sampling_file

    tbit = py2bit.open(global_vars['2bit'])
    bam = bamHandler.openBam(global_vars['bam'])

    if args.fragmentLength:
        fragment_len_dict = \
            {'median': args.fragmentLength}

    else:
        fragment_len_dict, __ = \
            get_read_and_fragment_length(args.bamfile, None,
                                         numberOfProcessors=args.numberOfProcessors,
                                         verbose=args.verbose)
        if not fragment_len_dict:
            print("\nPlease provide the fragment length used for the "
                  "sample preparation.\n")
            exit(1)

        fragment_len_dict = {'median': int(fragment_len_dict['median'])}

    chrNameBitToBam = tbitToBamChrName(list(tbit.chroms().keys()), bam.references)

    global_vars['genome_size'] = sum(tbit.chroms().values())
    global_vars['total_reads'] = bam.mapped
    global_vars['reads_per_bp'] = \
        float(global_vars['total_reads']) / args.effectiveGenomeSize

    confidence_p_value = float(1) / args.sampleSize

    # chromSizes: list of tuples
    chromSizes = [(bam.references[i], bam.lengths[i])
                  for i in range(len(bam.references))]

    # use poisson distribution to identify peaks that should be discarded.
    # I multiply by 4, because the real distribution of reads
    # varies depending on the GC content
    # and the global number of reads per bp may be too low.
    # empirically, a value at least 4 times as big as the
    # reads_per_bp was found.
    # Similarly for the min value, I divide by 4.
    global_vars['max_reads'] = \
        poisson(4 * global_vars['reads_per_bp'] *
                fragment_len_dict['median']).isf(confidence_p_value)
    # this may be of no use, unless the depth of sequencing is really high
    # as this value is close to 0
    global_vars['min_reads'] = \
        poisson(0.25 * global_vars['reads_per_bp'] *
                fragment_len_dict['median']).ppf(confidence_p_value)

    for key in global_vars:
        print("{}: {}".format(key, global_vars[key]))

    print("computing frequencies")
    # the GC of the genome is sampled each stepSize bp.
    stepSize = max(int(global_vars['genome_size'] / args.sampleSize), 1)
    print("stepSize: {}".format(stepSize))
    data = tabulateGCcontent(fragment_len_dict,
                             chrNameBitToBam, stepSize,
                             chromSizes,
                             numberOfProcessors=args.numberOfProcessors,
                             verbose=args.verbose,
                             region=args.region)

    np.savetxt(args.GCbiasFrequenciesFile.name, data)

    if args.biasPlot:
        reads_per_gc = countReadsPerGC(args.regionSize,
                                       chrNameBitToBam, stepSize * 10,
                                       chromSizes,
                                       numberOfProcessors=args.numberOfProcessors,
                                       verbose=args.verbose,
                                       region=args.region)
        plotGCbias(args.biasPlot, data, reads_per_gc, args.regionSize, image_format=args.plotFileFormat)

Example 56

Project: kaggle_otto Source File: utils.py
def write_blender_data(path, file_name, predictions):
    file_path = os.path.join(path, file_name)
    np.savetxt(file_path, predictions, delimiter=',', fmt='%.5f')

Example 57

Project: klustaviewa Source File: probe_viewer.py
Function: save
    def save(self, filename):
        np.savetxt(filename, self.get_positions(), fmt='%.3f')

Example 58

Project: pdnn Source File: model_io.py
Function: array_2_string
def array_2_string(array):
    str_out = StringIO()
    np.savetxt(str_out, array)
    return str_out.getvalue()
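
Because numpy.savetxt accepts any file-like object, the conversion above can be reversed with numpy.loadtxt. A self-contained round-trip sketch (illustrative, not part of pdnn; string_2_array is a hypothetical helper name):

from io import StringIO

import numpy as np

def array_2_string(array):
    str_out = StringIO()
    np.savetxt(str_out, array)
    return str_out.getvalue()

def string_2_array(s):
    # hypothetical inverse of array_2_string: parse the text back into an ndarray
    return np.loadtxt(StringIO(s))

arr = np.eye(3)
assert np.array_equal(string_2_array(array_2_string(arr)), arr)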

Example 59

Project: msmbuilder-legacy Source File: CalculateMFPTs.py
Function: entry_point
def entry_point():
    args = parser.parse_args()

    T = scipy.io.mmread(args.tProb)
    state = int(args.state)
    print(args.state, state)

    # Check output isn't taken
    if state == -1:
        base_filename = "PairwiseMFPTs.dat"
    else:
        base_filename = "MFPTs_%d.dat" % state

    output_filename = os.path.join(args.output_dir, base_filename)
    arglib.die_if_path_exists(output_filename)

    MFPTs = run(T, state)

    np.savetxt(output_filename, MFPTs)
    logger.info("Saved output to %s" % output_filename)

Example 60

Project: onlineldavb Source File: onlineldavb.py
def main():
    infile = sys.argv[1]
    K = int(sys.argv[2])
    alpha = float(sys.argv[3])
    eta = float(sys.argv[4])
    kappa = float(sys.argv[5])
    S = int(sys.argv[6])

    docs = corpus.corpus()
    docs.read_data(infile)

    vocab = open(sys.argv[7]).readlines()
    model = OnlineLDA(vocab, K, 100000,
                      0.1, 0.01, 1, 0.75)
    for i in range(1000):
        print(i)
        wordids = [d.words for d in docs.docs[(i*S):((i+1)*S)]]
        wordcts = [d.counts for d in docs.docs[(i*S):((i+1)*S)]]
        model.update_lambda(wordids, wordcts)
        n.savetxt('/tmp/lambda%d' % i, model._lambda.T)

Example 61

Project: NucleoATAC Source File: chunkmat2d.py
Function: plot
    def plot(self, filename = None, title = None, lower = None,
             upper = None):
        """Plot 2d ReadMat"""
        if upper is None:
            upper = self.upper
        if lower is None:
            lower = self.lower
        fig = plt.figure()
        plt.imshow(self.get(lower= lower, upper = upper),
                   origin="lower",interpolation='nearest',
                extent=[self.start,self.end-1,lower,upper-1],cmap=cm.get_cmap('Greys'))
        plt.xlabel(self.chrom)
        plt.ylabel("Insert size")
        if title:
            plt.title(title)
        #plt.colorbar(shrink=0.8)
        if filename:
            fig.savefig(filename)
            plt.close(fig)
            #Also save text output!
            filename2 = ".".join(filename.split(".")[:-1]+['txt'])
            np.savetxt(filename2,self.mat,delimiter="\t")
        else:
            fig.show()

Example 62

Project: pycortex Source File: align.py
def autotweak(subject, xfmname):
    """Tweak an alignment using the FLIRT boundary-based alignment (BBR) from FSL.
    Ideally this function should actually use a limited search range, but it doesn't.
    It's probably not very useful.

    Parameters
    ----------
    subject : str
        Subject identifier.
    xfmname : str
        String identifying the transform to be tweaked.
    """
    import shlex
    import shutil
    import tempfile
    import subprocess as sp

    from .database import db
    from .xfm import Transform
    from .options import config

    fsl_prefix = config.get("basic", "fsl_prefix")
    schfile = os.path.join(os.path.split(os.path.abspath(__file__))[0], "bbr.sch")

    magnet = db.get_xfm(subject, xfmname, xfmtype='magnet')
    try:
        cache = tempfile.mkdtemp()
        epifile = magnet.reference.get_filename()
        raw = db.get_anat(subject, type='raw').get_filename()
        bet = db.get_anat(subject, type='brainmask').get_filename()
        wmseg = db.get_anat(subject, type='whitematter').get_filename()
        initmat = magnet.to_fsl(db.get_anat(subject, 'raw').get_filename())
        with open(os.path.join(cache, 'init.mat'), 'w') as fp:
            np.savetxt(fp, initmat, fmt='%f')
        print('Running BBR')
        cmd = '{fslpre}flirt -in {epi} -ref {raw} -dof 6 -cost bbr -wmseg {wmseg} -init {cache}/init.mat -omat {cache}/out.mat -schedule {schfile}'
        cmd = cmd.format(fslpre=fsl_prefix, cache=cache, raw=raw, wmseg=wmseg,
                         epi=epifile, schfile=schfile)
        if sp.call(cmd, shell=True) != 0:
            raise IOError('Error calling BBR flirt')

        x = np.loadtxt(os.path.join(cache, "out.mat"))
        # Pass transform as FROM epi TO anat; transform will be inverted
        # back to anat-to-epi, standard direction for pycortex internal
        # storage by from_fsl
        Transform.from_fsl(x, epifile, raw).save(subject, xfmname+"_auto", 'coord')
        print('Saved transform as (%s, %s)'%(subject, xfmname+'_auto'))
    finally:
        shutil.rmtree(cache)

Example 63

Project: Kaggle_HomeDepot Source File: rgf_utils.py
Function: fit
    def fit(self, X, y):

        # write train data to file
        train_x_fn = "%s/data.x"%self.tmp_dir
        train_y_fn = "%s/data.y"%self.tmp_dir
        np.savetxt(train_x_fn, X, fmt="%.6f", delimiter="\t")
        np.savetxt(train_y_fn, y, fmt="%.6f", delimiter="\t")

        ## write train param to file
        params = [
            "train_x_fn=",train_x_fn,"\n",
            "train_y_fn=",train_y_fn,"\n",
            #"train_w_fn=",weight_train_path,"\n",
            "model_fn_prefix=",self.model_fn_prefix,"\n",
            "reg_L2=", self.param["reg_L2"], "\n",
            "reg_sL2=", self.param["reg_sL2"], "\n",
            #"reg_depth=", 1.01, "\n",
            "algorithm=","RGF","\n",
            "loss=","LS","\n",
            #"opt_interval=", 100, "\n",
            # save model at the end of training
            "test_interval=", self.param["max_leaf_forest"],"\n", 
            "max_leaf_forest=", self.param["max_leaf_forest"],"\n",
            "num_iteration_opt=", self.param["num_iteration_opt"], "\n",
            "num_tree_search=", self.param["num_tree_search"], "\n",
            "min_pop=", self.param["min_pop"], "\n",
            "opt_interval=", self.param["opt_interval"], "\n",
            "opt_stepsize=", self.param["opt_stepsize"], "\n",
            "NormalizeTarget"
        ]
        params = "".join([str(p) for p in params])

        rgf_setting = "%s/rgf_setting"%self.tmp_dir # DOES NOT contain ".inp"
        with open(rgf_setting+".inp", "w") as f:
            f.write(params)

        ## train rgf
        rgf_log = "%s/rgf_log"%self.tmp_dir
        cmd = "perl %s %s train %s >> %s"%(
                config.RGF_CALL_EXE, config.RGF_EXE, rgf_setting, rgf_log)
        os.system(cmd)

        return self

Example 64

Project: pyNCS Source File: experimentTools.py
Function: save_txt
def savetxt(obj, filename):
    np.savetxt(globaldata.directory + filename, obj)

Example 65

Project: Ornithokrites Source File: features.py
    def write_features_to_csv(self, features, file_name):
        csv_header = ','.join(itertools.chain(self.ExtractedFeaturesList)) + '\n'
        np.savetxt(file_name + '.csv', features, delimiter=',', header=csv_header)

Example 66

Project: apasvo Source File: rawfile.py
Function: write
    def write(self, array, **kwargs):
        """Stores an array into the text file.

        The following arguments are taken from the
        documentation of the numpy function 'savetxt'.
        Args:
            fmt: A string format.
                Default value is '%.18e'.
            delimiter: Character separating columns.
                Default: ' '.
            newline: Character separating lines.
                Default: '\n'.
            header: String that will be written at the beginning
                of the file. Default: ''.
            footer: String that will be written at the end of the file.
                Default: ''.
            comments: String that will be prepended to header and footer
                to mark them as comments. Default: '# '.
        """
        return np.savetxt(self.filename, array, **kwargs)
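
The keyword arguments documented above are forwarded verbatim to numpy.savetxt, so the following direct call is equivalent to what the write method produces (an illustrative sketch, not apasvo code; the file name and data are made up):

import numpy as np

data = np.linspace(0.0, 1.0, 5)
np.savetxt('out.txt', data, fmt='%.6f', delimiter=',',
           header='amplitude', comments='# ')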

Example 67

Project: pypcd Source File: pypcd.py
def point_cloud_to_fileobj(pc, fileobj, data_compression=None):
    """ write pointcloud as .pcd to fileobj.
    if data_compression is not None it overrides pc.data.
    """
    metadata = pc.get_metadata()
    if data_compression is not None:
        data_compression = data_compression.lower()
        assert(data_compression in ('ascii', 'binary', 'binary_compressed'))
        metadata['data'] = data_compression

    header = write_header(metadata)
    fileobj.write(header)
    if metadata['data'].lower() == 'ascii':
        fmtstr = build_ascii_fmtstr(pc)
        np.savetxt(fileobj, pc.pc_data, fmt=fmtstr)
    elif metadata['data'].lower() == 'binary':
        fileobj.write(pc.pc_data.tostring('C'))
    elif metadata['data'].lower() == 'binary_compressed':
        # TODO
        # a '_' field is ignored by pcl and breaks compressed point clouds.
        # changing '_' to '_padding' or other name fixes this.
        # admittedly padding shouldn't be compressed in the first place
        # reorder to column-by-column
        uncompressed_lst = []
        for fieldname in pc.pc_data.dtype.names:
            column = np.ascontiguousarray(pc.pc_data[fieldname]).tostring('C')
            uncompressed_lst.append(column)
        uncompressed = ''.join(uncompressed_lst)
        uncompressed_size = len(uncompressed)
        # print("uncompressed_size = %r"%(uncompressed_size))
        buf = lzf.compress(uncompressed)
        if buf is None:
            # compression didn't shrink the file
            # TODO what to do in this case when reading?
            buf = uncompressed
            compressed_size = uncompressed_size
        else:
            compressed_size = len(buf)
        fmt = 'II'
        fileobj.write(struct.pack(fmt, compressed_size, uncompressed_size))
        fileobj.write(buf)
    else:
        raise ValueError('unknown DATA type')

Example 68

Project: msmtools Source File: matrix.py
def write_matrix_dense(filename, A, fmt='%.18e', header='', comments='#'):
    np.savetxt(filename, A, fmt=fmt, header=header, comments=comments)
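
Note that numpy.savetxt prefixes each header line with the comments string, so the defaults of this wrapper produce lines starting with '#'. A short illustrative call (not from msmtools):

import numpy as np

A = np.eye(2)
np.savetxt('A.dat', A, fmt='%.18e', header='transition matrix', comments='#')
# the first line of A.dat now reads: #transition matrix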

Example 69

Project: pyNastran Source File: tecplot.py
    def write_tecplot(self, tecplot_filename, res_types=None, is_points=True, adjust_nids=True):
        """
        Only handles single type writing

        Parameters
        ----------
        tecplot_filename : str
            the path to the output file
        res_types : str; List[str, str, ...]; default=None -> all
            the results that will be written (must be consistent with self.variables)
        is_points : bool; default=True
            write in POINT format vs. BLOCK format
        adjust_nids : bool; default=True
            element_ids are 0-based in binary and must be switched to 1-based in ASCII
        """
        self.log.info('writing tecplot %s' % tecplot_filename)
        with open(tecplot_filename, 'w') as tecplot_file:
            is_results = bool(len(self.results))
            msg = 'TITLE     = "tecplot geometry and solution file"\n'
            msg += 'VARIABLES = "x"\n'
            msg += '"y"\n'
            msg += '"z"\n'
            if res_types is None:
                res_types = self.variables
            elif isinstance(res_types, string_types):
                res_types = [res_types]
            result_indices_to_write = []
            if is_results:
                #msg += '"rho"\n'
                #msg += '"u"\n'
                #msg += '"v"\n'
                #msg += '"w"\n'
                #msg += '"p"\n'
                # msg += 'ZONE T="%s"\n' % r'\"processor 1\"'
                # print('res_types =', res_types)
                # print('vars =', self.variables)
                for ivar, var in enumerate(res_types):
                    if var not in self.variables:
                        raise RuntimeError('var=%r not in variables=%s' % (var, self.variables))
                    result_indices_to_write.append(self.variables.index(var))
                ivars = unique(result_indices_to_write)
                ivars.sort()
                for ivar in ivars:
                    var = self.variables[ivar]
                    msg += '"%s"\n' % var
                # print('ivars =', ivars)
            else:
                assert len(res_types) == 0, len(res_types)
                ivars = []
            msg += 'ZONE '

            etype_elements = [
                ('CHEXA', self.hexa_elements),
                ('CTETRA', self.tet_elements),
                ('CTRIA3', self.tri_elements),
                ('CQUAD4', self.quad_elements),
            ]
            is_points = True
            is_tets = False
            is_hexas = False
            is_tris = False
            is_quads = False

            nnodes = self.nnodes
            nelements = self.nelements
            for etype, elements in etype_elements:
                if etype == 'CHEXA' and len(elements):
                    #print(etype)
                    # is_points = False
                    is_hexas = True
                    nnodes_per_element = 8
                    zone_type = 'FEBrick'
                elif etype == 'CTETRA' and len(elements):
                    #print(etype)
                    # is_points = False
                    is_tets = True
                    nnodes_per_element = 4
                    zone_type = 'FETETRAHEDRON'
                elif etype == 'CTRIA3' and len(elements):
                    #print(etype)
                    # is_points = True
                    is_tris = True
                    nnodes_per_element = 3
                    zone_type = 'FETRIANGLE'
                elif etype == 'CQUAD4' and len(elements):
                    #print(etype)
                    # is_points = True
                    is_quads = True
                    nnodes_per_element = 4
                    zone_type = 'FEQUADRILATERAL'
                else:
                    self.log.info('etype=%r' % etype)
                    self.log.info(elements)
                    continue
                break

            self.log.info('is_points = %s' % is_points)
            if is_points:
                msg += ' n=%i, e=%i, ZONETYPE=%s, DATAPACKING=POINT\n' % (nnodes, nelements, zone_type)
            else:
                msg += ' n=%i, e=%i, ZONETYPE=%s, DATAPACKING=BLOCK\n' % (nnodes, nelements, zone_type)
            tecplot_file.write(msg)

            # xyz
            assert self.nnodes > 0, 'nnodes=%s' % self.nnodes
            nresults = len(ivars)
            if is_points:
                if nresults:
                    res = self.results[:, ivars]
                    try:
                        data = hstack([self.xyz, res])
                    except ValueError:
                        msg = "Can't hstack...\n"
                        msg += 'xyz.shape=%s\n' % str(self.xyz.shape)
                        msg += 'results.shape=%s\n' % str(self.results.shape)
                        raise ValueError(msg)
                    fmt = ' %15.9E' * (3 + nresults)
                else:
                    data = self.xyz
                    fmt = ' %15.9E %15.9E %15.9E'

                if PY3:
                    #vals = self.xyz[:, ivar].ravel()
                    for vals in data:
                        tecplot_file.write(fmt % tuple(vals))
                else:
                    savetxt(tecplot_file, data, fmt=fmt)
            else:
                #nvalues_per_line = 5
                for ivar in range(3):
                    #tecplot_file.write('# ivar=%i\n' % ivar)
                    vals = self.xyz[:, ivar].ravel()
                    msg = ''
                    for ival, val in enumerate(vals):
                        msg += ' %15.9E' % val
                        if (ival + 1) % 3 == 0:
                            tecplot_file.write(msg)
                            msg = '\n'
                    tecplot_file.write(msg.rstrip() + '\n')

                if nresults:
                    # print('nnodes_per_element =', nnodes_per_element)
                    # for ivar in range(nnodes_per_element):
                    for ivar in ivars:
                        #tecplot_file.write('# ivar=%i\n' % ivar)
                        vals = self.results[:, ivar].ravel()
                        msg = ''
                        for ival, val in enumerate(vals):
                            msg += ' %15.9E' % val
                            if (ival + 1) % 5 == 0:
                                tecplot_file.write(msg)
                                msg = '\n'
                        tecplot_file.write(msg.rstrip() + '\n')

            self.log.info('is_hexas=%s is_tets=%s is_quads=%s is_tris=%s' %
                          (is_hexas, is_tets, is_quads, is_tris))
            if is_hexas:
                # elements
                efmt = ' %i %i %i %i %i %i %i %i\n'
                elements = self.hexa_elements
            elif is_tets:
                efmt = ' %i %i %i %i\n'
                elements = self.tet_elements
            elif is_quads:
                efmt = ' %i %i %i %i\n'
                elements = self.quad_elements
            elif is_tris:
                efmt = ' %i %i %i\n'
                elements = self.tri_elements
            else:
                raise RuntimeError()

            if adjust_nids:
                elements += 1
            self.log.info('inode_min = %s' % elements.min())
            self.log.info('inode_max = %s' % elements.max())
            assert elements.min() >= 1, elements.min()
            assert elements.max() <= nnodes, elements.max()
            # assert elements.min() == 1, elements.min()
            # assert elements.max() == nnodes, elements.max()

            for element in elements:
                tecplot_file.write(efmt % tuple(element))

Example 70

Project: flopy Source File: gridgen.py
    def _mfgrid_block(self):
        # Need to adjust offsets and rotation because gridgen rotates around
        # lower left corner, whereas flopy rotates around upper left.
        # gridgen rotation is counter clockwise, whereas flopy rotation is
        # clock wise.  Crazy.
        sr = self.dis.parent.sr
        xll = sr.xul
        yll = sr.yul - sr.yedge[0]
        xllrot, yllrot = sr.rotate(xll, yll, sr.rotation, xorigin=sr.xul,
                                   yorigin=sr.yul)

        s = ''
        s += 'BEGIN MODFLOW_GRID basegrid' + '\n'
        s += '  ROTATION_ANGLE = {}\n'.format(-sr.rotation)
        s += '  X_OFFSET = {}\n'.format(xllrot)
        s += '  Y_OFFSET = {}\n'.format(yllrot)
        s += '  NLAY = {}\n'.format(self.dis.nlay)
        s += '  NROW = {}\n'.format(self.dis.nrow)
        s += '  NCOL = {}\n'.format(self.dis.ncol)

        # delr
        delr = self.dis.delr.array
        if delr.min() == delr.max():
            s += '  DELR = CONSTANT {}\n'.format(delr.min())
        else:
            s += '  DELR = OPEN/CLOSE delr.dat\n'
            fname = os.path.join(self.model_ws, 'delr.dat')
            np.savetxt(fname, delr)

        # delc
        delc = self.dis.delc.array
        if delc.min() == delc.max():
            s += '  DELC = CONSTANT {}\n'.format(delc.min())
        else:
            s += '  DELC = OPEN/CLOSE delc.dat\n'
            fname = os.path.join(self.model_ws, 'delc.dat')
            np.savetxt(fname, delc)

        # top
        top = self.dis.top.array
        if top.min() == top.max():
            s += '  TOP = CONSTANT {}\n'.format(top.min())
        else:
            s += '  TOP = OPEN/CLOSE top.dat\n'
            fname = os.path.join(self.model_ws, 'top.dat')
            np.savetxt(fname, top)

        # bot
        botm = self.dis.botm
        for k in range(self.dis.nlay):
            bot = botm[k].array
            if bot.min() == bot.max():
                s += '  BOTTOM LAYER {} = CONSTANT {}\n'.format(k + 1,
                                                                bot.min())
            else:
                s += '  BOTTOM LAYER {0} = OPEN/CLOSE bot{0}.dat\n'.format(k +
                                                                           1)
                fname = os.path.join(self.model_ws, 'bot{}.dat'.format(k + 1))
                np.savetxt(fname, bot)

        s += 'END MODFLOW_GRID' + '\n'
        return s

Example 71

Project: C-PAC Source File: create_flame_model_files.py
def write_mat_file(design_matrix, output_dir, model_name, \
                       depatsified_EV_names, current_output=None):

    import os
    import numpy as np

    dimx = None
    dimy = None

    if len(design_matrix.shape) == 1:
        dimy = 1
        dimx = design_matrix.shape[0]
    else:
        dimx, dimy = design_matrix.shape


    ppstring = '/PPheights'

    for i in range(0, dimy):

        ppstring += '\t' + '%1.5e' %(1.0)

    ppstring += '\n'


    filename = model_name + ".mat"

    out_file = os.path.join(output_dir, filename)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)


    with open(out_file, 'wt') as f:

        print('/NumWaves\t%d' % dimy, file=f)
        print('/NumPoints\t%d' % dimx, file=f)
        print(ppstring, file=f)

        # print labels for the columns - mainly for double-checking your model
        col_string = '\n'

        for col in depatsified_EV_names:
            col_string = col_string + col + '\t'

        print(col_string, '\n', file=f)

        print('/Matrix', file=f)

        np.savetxt(f, design_matrix, fmt='%1.5e', delimiter='\t')

    return out_file

Example 72

Project: C-PAC Source File: create_flame_model_files.py
def create_grp_file(design_matrix, grp_file_vector, output_dir, model_name):

    import os
    import numpy as np

    dimx = None
    dimy = None

    if len(design_matrix.shape) == 1:
        dimy = 1
        dimx = design_matrix.shape[0]
    else:
        dimx, dimy = design_matrix.shape

    filename = "grouping.grp"

    out_file = os.path.join(output_dir, model_name + ".grp")

    with open(out_file, "wt") as f:

        print('/NumWaves\t1', file=f)
        print('/NumPoints\t%d\n' % dimx, file=f)
        print('/Matrix', file=f)
        np.savetxt(f, grp_file_vector, fmt='%d', delimiter='\t')

    return out_file
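
Both C-PAC helpers rely on numpy.savetxt continuing to write wherever an already open handle left off, which lets hand-written header lines and the matrix share one file. A minimal standalone sketch of the pattern (the file name and data are illustrative):

import numpy as np

mat = np.random.rand(4, 2)
with open('design.mat', 'w') as f:
    f.write('/NumWaves\t%d\n' % mat.shape[1])
    f.write('/NumPoints\t%d\n' % mat.shape[0])
    f.write('/Matrix\n')
    # savetxt appends the matrix body after the header lines
    np.savetxt(f, mat, fmt='%1.5e', delimiter='\t')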

Example 73

Project: hmf Source File: cli_tools.py
Function: write_data
    def _write_data(self,sampler):
        """
        Writes out chains and other data to longer-term readable files (ie ASCII)
        """
        with open(self.full_prefix+"chain",'w') as f:
            np.savetxt(f,sampler.flatchain,header="\t".join(self.keys))

        with open(self.full_prefix+"likelihoods",'w') as f:
            np.savetxt(f,sampler.lnprobability.T)

        # We can write out any blobs that are parameters
        if self.blobs:
            if self.n_dparams:
                numblobs = np.array([[[b[ii] for ii in range(self.n_dparams)] for b in c]
                                     for c in sampler.blobs])

                # Write out numblobs
                sh = numblobs.shape
                numblobs = numblobs.reshape(sh[0] * sh[1], sh[2])
                with open(self.full_prefix + "derived_parameters", "w") as f:
                    np.savetxt(f, numblobs,header="\t".join([self.blobs[ii] for ii in range(self.n_dparams)]))

Example 74

Project: pyksc Source File: classify_theta.py
Function: save_results
def save_results(X, peak_days, sum_views, pts_grid, theta_grid, best_by, all_confs,
        y_true, y_pred, confs, out_folder):

    valid = confs > 0
    correct = y_true == y_pred

    summ_fpath = os.path.join(out_folder, 'summ.dat')
    with open(summ_fpath, 'w') as summ_file:
        print('Params', file=summ_file)
        for cls in sorted(pts_grid):
            print('\t Cls = %d; min_pts = %d; theta = %.3f' \
                    % (cls, pts_grid[cls], theta_grid[cls]), file=summ_file)
        print(file=summ_file)

        print('All', file=summ_file)
        aux_print(X, peak_days, sum_views, best_by, y_true, y_pred, confs, valid, summ_file)
        print(file=summ_file)
        
        print('Correct Only', file=summ_file)
        aux_print(X, peak_days, sum_views, best_by, y_true, y_pred, confs, correct, summ_file)
        print(file=summ_file)
        
        print('Incorrect Only', file=summ_file)
        aux_print(X, peak_days, sum_views, best_by, y_true, y_pred, confs, ~correct, summ_file)
        print(file=summ_file)

        #print(classification_report(y_true[valid], y_pred[valid]), 
        #        file=summ_file)
        print(classification_report(y_true, y_pred), 
                file=summ_file)
        print(file=summ_file)
        print('# invalid %d' % (~valid).sum(), file=summ_file)

    ypred_fpath = os.path.join(out_folder, 'pred.dat')
    np.savetxt(ypred_fpath, y_pred)

    bestby_fpath = os.path.join(out_folder, 'best-by.dat')
    np.savetxt(bestby_fpath, best_by)

    conf_fpath = os.path.join(out_folder, 'conf.dat')
    np.savetxt(conf_fpath, confs)
    
    conf_fpath = os.path.join(out_folder, 'all-conf.dat')
    np.savetxt(conf_fpath, all_confs)

Example 75

Project: ldsc Source File: sumstats.py
def _print_cov(ldscore_reg, ofh, log):
    '''Prints covariance matrix of slopes.'''
    log.log(
        'Printing covariance matrix of the estimates to {F}.'.format(F=ofh))
    np.savetxt(ofh, ldscore_reg.coef_cov)

Example 76

Project: NucleoATAC Source File: chunkmat2d.py
Function: save
    def save(self, filename):
        """Save object in a text file"""
        head = ",".join(map(str,[self.chrom,self.start,self.end,self.lower,self.upper]))
        np.savetxt(filename,self.mat,delimiter="\t", header = head)

Example 77

Project: pycortex Source File: mni.py
def _save_fsl_xfm(filename, xfm):
    np.savetxt(filename, xfm, "%0.10f")

Example 78

Project: ldsc Source File: sumstats.py
def _print_part_delete_values(ldscore_reg, ofh, log):
    '''Prints partitioned block jackknife delete-k values'''
    log.log('Printing partitioned block jackknife delete values to {F}.'.format(F=ofh))
    np.savetxt(ofh, ldscore_reg.part_delete_values)

Example 79

Project: gwpy Source File: ascii.py
Function: write_ascii
def write_ascii(series, fobj, fmt='%.18e', delimiter=' ', newline='\n',
                header='', footer='', comments='# '):
    """Write a `Series` to a file in ASCII format

    Parameters
    ----------
    series : :class:`~gwpy.data.Series`
        data series to write
    fobj : `str`, `file`
        file object, or path to file, to write to

    See also
    --------
    numpy.savetxt : for documentation of keyword arguments
    """
    x = series.xindex.value
    y = series.value
    return savetxt(fobj, list(zip(x, y)), fmt=fmt, delimiter=delimiter,
                   newline=newline, header=header, footer=footer,
                   comments=comments)

Example 80

Project: chainer-libDNN Source File: visualizer.py
    def save_raw_filter(self, dst):
        for i in range(len(self.bitmap)):
            numpy.savetxt(dst + '/%d' % (i + 1) + '.csv', self.bitmap[i], delimiter=',')

Example 81

Project: hyperspy Source File: eels_cl_edge.py
    def fine_structure_coeff_to_txt(self, filename):
        np.savetxt(filename + '.dat', self.fine_structure_coeff.value,
                   fmt="%12.6G")

Example 82

Project: Kaggle_HomeDepot Source File: rgf_utils.py
Function: predict
    def predict(self, X):

        ## write data to file
        valid_x_fn = "%s/data.x"%self.tmp_dir
        valid_y_fn = "%s/data.y"%self.tmp_dir
        np.savetxt(valid_x_fn, X, fmt="%.6f", delimiter="\t")

        ## write predict params to file
        model_fn = self.model_fn_prefix + "-01"
        params = [
            "test_x_fn=", valid_x_fn,"\n",
            "model_fn=", model_fn,"\n",
            "prediction_fn=", valid_y_fn
        ]
        params = "".join([str(p) for p in params])
        
        rgf_setting = "%s/rgf_setting"%self.tmp_dir
        with open(rgf_setting+".inp", "w") as f:
            f.write(params)

        ## predict
        rgf_log = "%s/rgf_log"%self.tmp_dir
        cmd = "perl %s %s predict %s >> %s"%(
                config.RGF_CALL_EXE, config.RGF_EXE, rgf_setting, rgf_log)
        os.system(cmd)

        y_pred = np.loadtxt(valid_y_fn, dtype=float)

        return y_pred

Example 83

Project: pyNCS Source File: mapping.py
Function: save
    def save(self, filename):
        """
        Save the mapping into a file.
        """
        np.savetxt(filename, self.mapping)

Example 84

Project: pyNastran Source File: stl_to_plot3d.py
def stl_to_plot3d_filename(stl_filename, p3d_filename, log=None, ascii=True):
    model = STL(log=log)
    model.read_stl(stl_filename)

    #nodal_normals = model.get_normals_at_nodes(model.elements)

    with open(p3d_filename, 'wb') as p3d:
        nblocks = len(model.elements)
        #nblocks = 10
        p3d.write('%i\n' % nblocks)
        for iblock in range(nblocks):
            p3d.write('2 2 1\n')

        nodes = model.nodes
        elements = model.elements
        if 0:
            for i in [0, 1, 2]:
                for iblock in range(nblocks):
                    (n1, n2, n3) = elements[iblock]
                    p1 = nodes[n1, :]
                    p2 = nodes[n2, :]
                    p3 = nodes[n3, :]
                    p4 = p3
                    xi = [[p1[i], p2[i], p3[i], p4[i]]]
                    savetxt(p3d, xi, fmt='%f')
        else:
            for iblock in range(nblocks):
                for i in [0, 1, 2]:
                    (n1, n2, n3) = elements[iblock]
                    p1 = nodes[n1, :]
                    p2 = nodes[n2, :]
                    p3 = nodes[n3, :]
                    p4 = p3
                    xi = [[p1[i], p2[i], p3[i], p4[i]]]
                    savetxt(p3d, xi, fmt='%f')

Example 85

Project: automl-phase-2 Source File: util.py
def convert_mat_into_automl_folds(filename, save_folder_root, time_budget=300, n_folds=5, input_type='Numerical',
                                  random_state=0, metric='auc_metric', usage='testing', task='binary.classification',
                                  target_type='Binary'):
    """Convert a dataset in .mat format into several folds of automl format"""
    # Load data
    data = scipy.io.loadmat(filename)
    X = data['X']
    y = data['y']
    data_name = os.path.splitext(os.path.split(filename)[-1])[0]
    # Convert data if appropriate
    if task == 'binary.classification':
        y_max = y.max()
        y[y == y_max] = 1
        y[y < y_max] = 0
    # If input_type is 'infer' we now infer input types
    if input_type == 'infer':
        raise Exception('I do not know how to infer input types yet')
    else:
        input_type_list = [input_type] * X.shape[1]
    # Create info dictionary
    # TODO - some of these defaults need to be changed
    info = dict(usage=usage, name=data_name, task=task, target_type=target_type,
                feat_type='Numerical', metric=metric, feat_num=X.shape[1],
                target_num=1, label_num=0, has_categorical=0, has_missing=0, is_sparse=0,
                time_budget=time_budget, valid_num=0)
    # Now split into folds and save
    folds = KFold(n=X.shape[0], n_folds=n_folds, shuffle=True, random_state=random_state)
    for (fold, (train_index, test_index)) in enumerate(folds):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        fold_folder = os.path.join(save_folder_root + '_fold_%02d' % (fold + 1), data_name)
        mkdir(fold_folder)
        fmt = '%f'
        np.savetxt(os.path.join(fold_folder, data_name + '_train.data'), X_train, fmt=fmt, delimiter=' ')
        np.savetxt(os.path.join(fold_folder, data_name + '_test.data'), X_test, fmt=fmt, delimiter=' ')
        if task == 'binary.classification':
            fmt = '%d'
        np.savetxt(os.path.join(fold_folder, data_name + '_train.solution'), y_train, fmt=fmt, delimiter=' ')
        np.savetxt(os.path.join(fold_folder, data_name + '_test.solution'), y_test, fmt=fmt, delimiter=' ')
        info['train_num'] = X_train.shape[0]
        info['test_num'] = X_test.shape[0]
        with open(os.path.join(fold_folder, data_name + '_public.info'), 'w') as info_file:
            for (key, value) in info.items():
                info_file.write('%s = %s\n' % (key, value))
        with open(os.path.join(fold_folder, data_name + '_feat.type'), 'w') as feature_file:
            for feat_type in input_type_list:
                feature_file.write('%s\n' % feat_type)

Example 86

Project: ahkab Source File: csvlib.py
Function: write_csv
def write_csv(filename, data, headers, append=False):
    """Writes data in CVS format to filename.

    The headers have to be ordered according to the data order.

    **Parameters:**

    filename : string
        the path to the file to be written.
        Use 'stdout' to write to stdout

    data : ndarray
        The data to be written. Notice that variables are swept across *rows*,
        time samples are swept along *columns*.
        Or equivalently: ``data[variable_index, sample_number]``

    headers : list of strings
        the signal names, ordered so that ``headers[i]`` corresponds to 
        ``data[i, :]``.

    append : bool, optional
        If False, the file (if it exists) will be rewritten, otherwise
        it will be appended to.

    """

    mode = 'ab' if append else 'wb'
    fp = _get_fp(filename, mode)

    if not data.shape[0] == len(headers):
        print("(W): write_csv(): data and headers don't match. Continuing anyway.")
        print("DATA: " + str(data.shape) + " headers length: " + str(len(headers)))

    headers = SEPARATOR.join(headers) if not append else ""
    np.savetxt(fp, data.T, delimiter=SEPARATOR, header=headers, comments='#')

    _close_fp(fp, filename)
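
The transpose in the final call follows from the documented layout: with variables swept across rows, data.T hands savetxt one sample per output line. A toy illustration (not ahkab code; a ',' separator is assumed here):

import numpy as np

data = np.array([[0.0, 1.0, 2.0],   # variable 0, three samples
                 [9.0, 8.0, 7.0]])  # variable 1
np.savetxt('out.csv', data.T, delimiter=',', header='v0,v1', comments='#')
# each line of out.csv now holds one sample across both variables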

Example 87

Project: ensemble_amazon Source File: amazon_main_logit_2D.py
def printfilcsve(X, filename):

    np.savetxt(filename,X) 

Example 88

Project: SfM_Init Source File: rotsolver.py
def solve_global_rotations(indices, pairwise_rotations, cc=None):
    """
    Solve the multiple rotations averaging problem using Chatterjee and Govindu's 
    L1_IRLS method. The implementation is in Matlab, so this is a wrapper that writes
    the problem to temporary text files and calls Matlab through the command line.

    Input:
        indices:    a list of pairs (i,j)
        pairwise_rotations: a list of rotation matrices Rij
        cc: a list of integers {i}. Only compute global rotations on indices in cc

    Returns:
        ind: a list of indices (vertex numbers)
        R: rotation matrices corresponding to ind, which are approximately 
            consistent with the pairwise rotations, ie Rij ~ Ri * Rj'
    """
    ############
    # Path to rot solver Matlab source dir
    ROT_SOLVER_DIR = os.path.abspath(os.path.join(
        os.path.dirname(__file__),'..','rotsolver'))
    ############

    # get a working temp directory
    tmpdir = tempfile.mkdtemp()

    # write the input files
    ccfile = os.path.join(tmpdir, 'cc.txt')
    np.savetxt(ccfile, cc, fmt='%d')
    egfile = os.path.join(tmpdir, 'eg.txt')
    EGs = []
    for (i,j), Rij in zip(indices, pairwise_rotations):
        # tij isn't used, so just spoof it to 0's
        EGs.append(EG(i, j, Rij, np.zeros((3,1))))
    write_EGs_file(egfile, EGs)

    # call the rot solver
    log = os.path.join(tmpdir, 'log.txt')
    rotfile = os.path.join(tmpdir, 'rots.txt')

    with open(os.devnull, 'wb') as devnul:
        subprocess.call(['matlab', '-nodisplay', '-nojvm', '-logfile', log, '-r', 
            "try; " +
                "rot_driver('{}', '{}', '{}');".format(egfile, rotfile, ccfile) +
            "catch err; " +
                "disp(getReport(err)); " +
            "end; " +
            "exit;" ],
            cwd=ROT_SOLVER_DIR, stdout=devnul, stderr=devnul)

    # if the rot solver wasn't successful (ie, a bug), give a message and an error
    if not os.path.exists(rotfile):
        print('[rotsolver:solve_global_rotations] Error! The Matlab rotsolver did not complete.')
        print('LOG:')
        with open(log, 'r') as fin:
            print(fin.read())
        shutil.rmtree(tmpdir)
        raise Exception("Global Rotations solver was not successful!")

    # read the result and clean up
    ind, R = read_rot_file(rotfile)
    shutil.rmtree(tmpdir)
    return ind, R

Example 89

Project: klustaviewa Source File: recluster.py
Function: write_mask
def write_mask(mask, filename, fmt="%f"):
    with open(filename, 'w') as fd:
        fd.write(str(mask.shape[1])+'\n') # number of features
        np.savetxt(fd, mask, fmt=fmt)

Example 90

Project: Outsmart Source File: outsmart.py
Function: save_state
def save_state(s, filename):
    np.savetxt(filename+'.lab', s.lab, fmt="%d")
    np.savetxt(filename+'.wild', s.wild, fmt="%d")

Example 91

Project: PyEMMA Source File: iterable.py
Function: write_to_csv
    def write_to_csv(self, filename=None, extension='.dat', overwrite=False,
                     stride=1, chunksize=100, **kw):
        """ write all data to csv with numpy.savetxt

        Parameters
        ----------
        filename : str, optional
            filename string, which may contain placeholders {itraj} and {stride}:

            * itraj will be replaced by trajectory index
            * stride is stride argument of this method

            If filename is not given, an attempt is made to obtain the filenames
            from the data source of this iterator.
        extension : str, optional, default='.dat'
            filename extension of created files
        overwrite : bool, optional, default=False
            should existing files be overwritten? If False and a file already exists, this method raises.
        stride : int
            write only every n'th frame
        chunksize: int
            how many frames to process at once
        kw : dict
            named arguments passed into numpy.savetxt (header, separator etc.)

        Example
        -------
        Assume you want to save features calculated by some FeatureReader to ASCII:
        
        >>> import numpy as np, pyemma
        >>> from pyemma.util.files import TemporaryDirectory
        >>> import os
        >>> data = [np.random.random((10,3))] * 3
        >>> reader = pyemma.coordinates.source(data)
        >>> filename = "distances_{itraj}.dat"
        >>> with TemporaryDirectory() as td:
        ...    out = os.path.join(td, filename)
        ...    reader.write_to_csv(out, header='', delimiter=';')
        ...    print(sorted(os.listdir(td)))
        ['distances_0.dat', 'distances_1.dat', 'distances_2.dat']
        """
        import os
        if not filename:
            assert hasattr(self, 'filenames')
            #    raise RuntimeError("could not determine filenames")
            filenames = []
            for f in self.filenames:
                base, _ = os.path.splitext(f)
                filenames.append(base + extension)
        elif isinstance(filename, six.string_types):
            filename = filename.replace('{stride}', str(stride))
            filenames = [filename.replace('{itraj}', str(itraj)) for itraj
                         in range(self.number_of_trajectories())]
        else:
            raise TypeError("filename should be str or None")
        self.logger.debug("write_to_csv, filenames=%s" % filenames)
        # check files before starting to write
        import errno
        for f in filenames:
            try:
                st = os.stat(f)
                raise OSError(errno.EEXIST)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    if overwrite:
                        continue
                elif e.errno == errno.ENOENT:
                    continue
                raise
            else:
                continue
        f = None
        with self.iterator(stride, chunk=chunksize, return_trajindex=False) as it:
            self._progress_register(it.n_chunks, "saving to csv")
            oldtraj = -1
            for X in it:
                if oldtraj != it.current_trajindex:
                    if f is not None:
                        f.close()
                    fn = filenames[it.current_trajindex]
                    self.logger.debug("opening file %s for writing csv." % fn)
                    f = open(fn, 'wb')
                    oldtraj = it.current_trajindex
                np.savetxt(f, X, **kw)
                f.flush()
                self._progress_update(1, 0)
        if f is not None:
            f.close()
        self._progress_force_finish(0)

Example 92

Project: pyNastran Source File: rainflow.py
def rainflow_from_csv(input_csv, casenames, features,
                      write_csvs=True, delimiter=',',
                      xmax=None, legend_alpha=1.0):
    """
    Rainflow counts from csv files.

    This supports multiple features as separate columns.

    Parameters
    ----------
    input_csv : str
        a file as described below
    casenames : str
        allows for case splitting
    features : dict
       key : int; column id to parse
       value : str; name

    xmax : float
        the max value for the x (cycle) axis; helps to change the legend
    delimiter : str; default=','
        the delimiter for the output file (doesn't apply to input)
    legend_alpha : float; default=1.0
        the transparency
        1=solid
        0=transparent

    Returns
    -------
    files : list[str]
        filenames are of the form icase_icase_name.csv

    Input_csv
    ---------
      # name1_stress, name2_stress, ...
      0.00, 0.0 # case 0 - line 1
      20.0, 1.0 # case 1
      50.0, 2.0 # case 2
      etc.
      0.00, 0.0 # case 0
    casenames = (
       # (casename, irow_start, irow_stop)
       ('normal',  0,  62),
       ('impulse', 63, 65),
       etc.
    )
    features = {  # the indices are column numbers
        0 : 'fillet',
        1 : 'groove',
    }
    features = ['feature1', 'feature2']
    so we get:
       feature0_normal_fillet.csv
       feature0_impulse_fillet.csv
       feature1_normal_groove.csv
       feature1_impulse_groove.csv

    We'll also get corresponding png files. of the form:
       fillet.png
       groove.png

    that show our cycling.
    """
    import matplotlib.pyplot as plt
    A = loadtxt(input_csv, delimiter=',', skiprows=1)
    if len(A.shape) == 1:
        A = A.reshape(len(A), 1)
    icase = 0

    for ifeature, feature_name in sorted(iteritems(features)):
        plt.figure(ifeature)
        legend = []
        for case_name, min_index, max_index in casenames:
            csv_out = 'feature%i_%s_%s.csv'  % (ifeature, case_name, feature_name)
            print(csv_out)

            stress_case = A[min_index:max_index, ifeature]
            min_stress, max_stress = rainflow(icase, stress_case)
            if len(min_stress) == 0:
                min_stress = [A[min_index, ifeature]]
                max_stress = [A[max_index - 1, ifeature]]

            B = vstack([min_stress, max_stress]).T
            f = open(csv_out, 'wb')
            f.write('# max stress%smin_stress\n' % delimiter)
            savetxt(f, B, delimiter=delimiter)
            plt.plot(range(min_index, max_index), stress_case)
            legend.append(case_name)
            icase += 1
        # add the legend in the middle of the plot
        leg = plt.legend(legend, fancybox=True)
        # set the alpha value of the legend: it will be translucent
        leg.get_frame().set_alpha(legend_alpha)

        plt.title(feature_name)
        if xmax:
            plt.xlim([0, xmax])
        plt.xlabel('Cycle Number')
        plt.ylabel('Stress (ksi)')
        plt.grid(True)
        plt.savefig('%s.png' % feature_name)

Example 93

Project: FaST-LMM Source File: plotp.py
def recalibrate(dirin,filepattern='*.txt', lrtpermfile=None, pnames=["P-value(50/50)"], rownames=rownames(), nullfit="qq",qmax=0.1, postfix="RECALIBRATED"):
    '''
    Read in each results file, use the null stats in lrtpermfile to re-calibrate the null distribution, 
    and then add a column to each file
    '''
    assert lrtpermfile is not None, "must provide lrtpermfile (output by FastLmmSet.py with lrt)"

    myfiles = getfiles(dirin, filepattern)
        
    ii=0
    for f in myfiles:       
        ii=ii+1
        print(str(ii) + ") " + f)
        pv,rowids,llnull,llalt = extractpvals(f,pnames,rownames)
        lrt = -2*(llnull-llalt)  
        alteqnull = (lrt==0)
        pv_adj = Cv.pv_adj_and_ind(nperm=0, pv_adj=None, nullfit=nullfit, lrt=lrt, lrtperm=None,
                                 alteqnull=alteqnull, alteqnullperm=None, qmax=qmax, 
                                 nullfitfile=lrtpermfile, nlocalperm=0, sort=False)[0]
        outfile = ut.appendtofilename(f,postfix)        
        np.savetxt(outfile, pv_adj)

Example 94

Project: C-PAC Source File: timeseries_analysis.py
def gen_vertices_timeseries(rh_surface_file,
                        lh_surface_file):

    """
    Method to extract timeseries from vertices
    of a freesurfer surface file 
    
    Parameters
    ----------
    rh_surface_file : string (mgz/mgh file)
        right hemisphere FreeSurfer surface file
    lh_surface_file : string (mgz/mgh file)
        left hemisphere FreeSurfer surface file
        
    Returns
    -------
    out_list : string (list of file)
        list of vertices timeseries csv files  
    
    """

    import gradunwarp
    import numpy as np
    import os

    out_list = []
    rh_file = os.path.splitext(
                    os.path.basename(rh_surface_file))[0] + '_rh.csv'
    mghobj1 = gradunwarp.mgh.MGH()

    mghobj1.load(rh_surface_file)
    vol = mghobj1.vol
    (x, y) = vol.shape
#        print "rh shape", x, y

    np.savetxt(rh_file, vol, delimiter='\t')
    out_list.append(rh_file)

    lh_file = os.path.splitext(os.path.basename(lh_surface_file))[0] + '_lh.csv'
    mghobj2 = gradunwarp.mgh.MGH()

    mghobj2.load(lh_surface_file)
    vol = mghobj2.vol
    (x, y) = vol.shape
#        print "lh shape", x, y

    np.savetxt(lh_file,
               vol,
               delimiter=',')
    out_list.append(lh_file)

    return out_list

Example 95

Project: mmdgm Source File: ndict.py
Function: save_text
def savetext(d, name):
    for i in d:
        np.savetxt(open('debug_' + name + '.txt', 'w'), d[i])

Example 96

Project: pyvolve Source File: model.py
    def _assign_matrix(self):
        '''
            Construct the model rate matrix, Q, based on model_type by calling the matrix_builder module. Alternatively, call the method self._assign_hetcodon_model_matrices() if we have a heterogeneous codon model.
            Note that before matrix construction, we sanity check and update, as needed, all provided parameters.
        '''
        
        
        if self.model_type == 'nucleotide':
            self.params = Nucleotide_Sanity(self.model_type, self.params, size = 4)()
            self.matrix = Nucleotide_Matrix(self.model_type, self.params)()
                
                    
        elif self.model_type in self.aa_models:
            self.params = AminoAcid_Sanity(self.model_type, self.params, size = 20)()
            self.matrix = AminoAcid_Matrix(self.model_type, self.params)()
             
             
        elif self.model_type == 'gy' or self.model_type == 'mg':
            self.params = MechCodon_Sanity(self.model_type, self.params, size = 61, hetcodon_model = self.hetcodon_model )()
            self.params["neutral_scaling"] = self.neutral_scaling
            if self.hetcodon_model:
                self._assign_hetcodon_model_matrices()
            else:
                self.matrix = MechCodon_Matrix(self.model_type, self.params )()
        
        
        elif 'ecm' in self.model_type:
            self.params = ECM_Sanity(self.model_type, self.params, size = 61)()
            self.matrix = ECM_Matrix(self.model_type, self.params)()
 
 
        elif self.model_type == 'mutsel':
            self.params = MutSel_Sanity(self.model_type, self.params)()
            self.matrix = MutSel_Matrix(self.model_type, self.params)()
            
            # Need to construct and add frequencies to the model dictionary if the matrix was built with fitness values
            if not self.params["calc_by_freqs"]:
                self._calculate_state_freqs_from_matrix()
        
        
        elif self.model_type == 'custom':
            self._assign_custom_matrix()
            np.savetxt(self._save_custom_matrix_freqs, self.params["state_freqs"]) 

        else:
            raise ValueError("\n\nYou have reached this in error! Please file a bug report, with this error, at https://github.com/sjspielman/pyvolve/issues .")

        # Double check that state frequencies made it in. 
        assert("state_freqs" in self.params), "\n\nYour model has no state frequencies."

Example 97

Project: pyNastran Source File: rainflow.py
Function: main
def main():
    input_csv = 'test.csv'
    n = 700
    n1 = n // 3
    casenames = (
       ('normal', 0, n1),
       ('impulse', n1, n - 1),
    )

    import os
    from numpy import linspace, sin, cos, tan, vstack, savetxt
    x = linspace(0., 3.14*5, num=n)
    y = sin(x) * cos(201 * x)
    z = sin(x) * cos(201 * x) * tan(x)
    A = vstack([y, z])
    savetxt(input_csv, A.T, delimiter=',')
    features = {
        0 : 'fillet',
        1 : 'groove',
    }
    rainflow_from_csv(input_csv, casenames, features,
                      write_csvs=True, delimiter=',',
                      xmax=None, legend_alpha=1.0)
    os.remove(input_csv)

Example 98

Project: stingray Source File: io.py
def _save_ascii_object(object, filename, fmt="%.18e", **kwargs):
    """
    Save an array to a text file.

    Parameters
    ----------
    object : numpy.ndarray
        An array with the data to be saved

    filename : str
        The file name to save to

    fmt : str or sequence of strs, optional
        Use for formatting of columns. See `numpy.savetxt` documentation
        for details.

    Other Parameters
    ----------------
    kwargs : any keyword argument taken by `numpy.savetxt`

    """

    try:
        np.savetxt(filename, object, fmt=fmt, **kwargs)
    except TypeError:
        raise Exception("Formatting of columns not recognized! Use 'fmt' option to "
              "format columns including strings or mixed types!")

    pass
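
As the docstring notes, fmt may be a single format string or one format per column. A short illustrative call (not part of stingray):

import numpy as np

t = np.arange(3, dtype=float)
counts = np.array([10, 12, 9])
# one format per column: time in scientific notation, counts as integers
np.savetxt('lc.txt', np.column_stack((t, counts)), fmt=['%.18e', '%d'])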

Example 99

Project: ldsc Source File: sumstats.py
def _print_delete_values(ldscore_reg, ofh, log):
    '''Prints block jackknife delete-k values'''
    log.log('Printing block jackknife delete values to {F}.'.format(F=ofh))
    np.savetxt(ofh, ldscore_reg.tot_delete_values)

Example 100

Project: mtpy Source File: pek1dclasses.py
Function: write_data_file
    def write_datafile(self, wd = None):
        """
        write data to file
        
        """
        
        if wd is not None:
            self.working_directory = wd
        
        self.build_data()
        
        # define format list for writing data file
        fmt = ['%14.5f']+['%12.5e']*16
        
        # define file name and save data file
        fname_bas = self.edi_object.station.split('_')[0]
        self.datafile = fname_bas+'.dat'
        fname = os.path.join(self.working_directory,self.datafile)

        np.savetxt(fname,self.data,fmt=fmt,header=self.header,comments='')    