numpy.sort

Here are examples of the Python API numpy.sort, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

152 Examples
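
Before the project examples, here is a minimal, self-contained refresher (plain NumPy, no project code assumed) on the call patterns that recur throughout the examples below:

import numpy as np

a = np.array([3.0, 1.0, 2.0])

np.sort(a)            # ascending copy -> [1. 2. 3.]; `a` itself is unchanged
np.sort(a)[::-1]      # descending order, an idiom used in several examples below
np.argsort(a)         # indices that would sort `a` -> [1 2 0]
a.sort()              # in-place variant on the array object

m = np.array([[3, 1], [0, 2]])
np.sort(m, axis=1)    # sort each row independently (axis=-1 is the default)
np.sort(m, axis=None) # flatten first, then sort -> [0 1 2 3]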

Example 101

Project: RHEAS Source File: ncep.py
def _downloadVariable(varname, dbname, dt, bbox=None):
    """Download specific variable from the NCEP Reanalysis dataset."""
    log = logging.getLogger(__name__)
    res = 1.875
    baseurl = "http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis.dailyavgs/surface_gauss"
    if varname == "tmax":
        urls = ["{0}/tmax.2m.gauss.{1}.nc".format(baseurl, dt[0].year)]
        dsvar = ["tmax"]
    elif varname == "tmin":
        urls = ["{0}/tmin.2m.gauss.{1}.nc".format(baseurl, dt[0].year)]
        dsvar = ["tmin"]
    else:
        urls = ["{0}/uwnd.10m.gauss.{1}.nc".format(baseurl, dt[0].year), "{0}/vwnd.10m.gauss.{1}.nc".format(baseurl, dt[0].year)]
        dsvar = ["uwnd", "vwnd"]
    data = None
    for ui, url in enumerate(urls):
        pds = netcdf.Dataset(url)
        lat = pds.variables["lat"][:]
        lon = pds.variables["lon"][:]
        lon[lon > 180] -= 360.0
        i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox)
        t = pds.variables["time"]
        tt = netcdf.num2date(t[:], units=t.units)
        ti = [tj for tj in range(len(tt)) if resetDatetime(tt[tj]) >= dt[0] and resetDatetime(tt[tj]) <= dt[1]]
        if len(ti) > 0:
            lati = np.argsort(lat)[::-1][i1:i2]
            loni = np.argsort(lon)[j1:j2]
            if data is None:
                data = pds.variables[dsvar[ui]][ti, lati, loni]
            else:
                data = np.sqrt(
                    data ** 2.0 + pds.variables[dsvar[ui]][ti, lati, loni] ** 2.0)
            if "temp" in dsvar:
                data -= 273.15
        lat = np.sort(lat)[::-1][i1:i2]
        lon = np.sort(lon)[j1:j2]
    table = "{0}.ncep".format(varname)
    for t in range(len(ti)):
        filename = dbio.writeGeotif(lat, lon, res, data[t, :, :])
        dbio.ingest(dbname, filename, tt[ti[t]], table)
        os.remove(filename)
    for dtt in [dt[0] + timedelta(days=tj) for tj in range((dt[-1]-dt[0]).days + 1)]:
        if dtt not in tt:
            log.warning("NCEP data not available for {0}. Skipping download!".format(
                dtt.strftime("%Y-%m-%d")))
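
A note on the idiom above: np.sort(lat)[::-1] and np.argsort(lat)[::-1] are used together so the coordinate arrays and the row/column indices of the data grid stay in the same north-to-south, west-to-east order. A minimal sketch of just that pairing with made-up coordinates (datasets.spatialSubset, dbio and the NCEP URLs are project-specific and omitted):

import numpy as np

lat = np.array([-30.0, 60.0, 0.0, 30.0])   # hypothetical grid latitudes
lon = np.array([350.0, 10.0, 180.0])       # hypothetical grid longitudes, 0..360
lon[lon > 180] -= 360.0                    # wrap to -180..180 as in the example

lat_desc = np.sort(lat)[::-1]   # coordinate values, north to south
lon_asc = np.sort(lon)          # coordinate values, west to east
lati = np.argsort(lat)[::-1]    # row indices in the same descending order
loni = np.argsort(lon)          # column indices in ascending order

grid = np.arange(lat.size * lon.size).reshape(lat.size, lon.size)
reordered = grid[np.ix_(lati, loni)]   # data rows/columns now match lat_desc/lon_asc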

Example 102

Project: nupic.research Source File: capacity_experiment.py
Function: runtestphase
def runTestPhase(experiment, inputSequences, seqLabels, sequenceCount,
                 sequenceLength, consoleVerbosity):
  print "\nRunning Test Phase..."
  unionSdrs = []
  for i in xrange(sequenceCount):

    # Extract next sequence
    begin = i + i * sequenceLength
    end = i + 1 + (i + 1) * sequenceLength
    seq = inputSequences[begin: end]
    lblSeq = seqLabels[begin: end]

    # Present sequence (minus reset element)
    experiment.runNetworkOnSequences(seq[:-1],
                                    lblSeq[:-1],
                                    tmLearn=False,
                                    upLearn=False,
                                    verbosity=consoleVerbosity,
                                    progressInterval=_SHOW_PROGRESS_INTERVAL)

    # Record Union SDR at end of sequence
    seqUnionSdr = experiment.up.getUnionSDR()
    unionSdrs.append(numpy.sort(seqUnionSdr))

    # Run reset element
    experiment.runNetworkOnSequences(seq[-1:],
                                    lblSeq[-1:],
                                    tmLearn=False,
                                    upLearn=False,
                                    verbosity=consoleVerbosity,
                                    progressInterval=_SHOW_PROGRESS_INTERVAL)

  return unionSdrs
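
numpy.sort here normalizes each union SDR before it is stored, so later comparisons do not depend on the order in which active cells were reported. A toy illustration, assuming getUnionSDR() returns an array of active cell indices (its actual return type is not shown in this excerpt):

import numpy as np

sdr_a = np.array([40, 3, 17])   # hypothetical active-cell indices, arbitrary order
sdr_b = np.array([17, 40, 3])   # same cells, reported in a different order

np.array_equal(sdr_a, sdr_b)                    # False
np.array_equal(np.sort(sdr_a), np.sort(sdr_b))  # True: sorting makes the comparison order-free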

Example 103

Project: plfit Source File: plfit.py
    def plfit(self, nosmall=True, finite=False, quiet=False, silent=False,
            usefortran=False, usecy=False, xmin=None, verbose=False,
            discrete=None, discrete_approx=True, discrete_n_alpha=1000):
        """
        A Python implementation of the Matlab code http://www.santafe.edu/~aaronc/powerlaws/plfit.m
        from http://www.santafe.edu/~aaronc/powerlaws/

        See A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions
        in empirical data" SIAM Review, 51, 661-703 (2009). (arXiv:0706.1062)
        http://arxiv.org/abs/0706.1062

        There are 3 implementations of xmin estimation.  The fortran version is fastest, the C (cython)
        version is ~10% slower, and the python version is ~3x slower than the fortran version.
        Also, the cython code suffers ~2% numerical error relative to the fortran and python for unknown
        reasons.

        There is also a discrete version implemented in python - it is different from the continuous version!

        *discrete* [ bool | None ]
            If *discrete* is None, the code will try to determine whether the
            data set is discrete or continuous based on the uniqueness of the
            data; if your data set is continuous but you have any non-unique
            data points (e.g., flagged "bad" data), the "automatic"
            determination will fail.  If *discrete* is True or False, the
            discrete or continuous fitter will be used, respectively.

        *xmin* [ float / int ]
            If you specify xmin, the fitter will only determine alpha assuming
            the given xmin; the rest of the code (and most of the complexity)
            is determining an estimate for xmin and alpha.

        *nosmall* [ bool (True) ]
            When on, the code rejects low s/n points.  WARNING: This option,
            which is on by default, may result in different answers than the
            original Matlab code and the "powerlaw" python package

        *finite* [ bool (False) ]
            There is a 'finite-size bias' to the estimator.  The "alpha" the code measures
            is "alpha-hat" s.t. ᾶ = (nα-1)/(n-1), or α = (1 + ᾶ (n-1)) / n

        *quiet* [ bool (False) ]
            If False, delivers messages about what fitter is used and the fit results

        *verbose* [ bool (False) ]
            Deliver descriptive messages about the fit parameters (only if *quiet*==False)

        *silent* [ bool (False) ]
            If True, will print NO messages
        """
        x = self.data
        if any(x < 0):
            raise ValueError("Power law distributions are only valid for "
                             "positive data.  Remove negative values before "
                             "fitting.")
        z = np.sort(x)

        # xmins = the unique values of x that can be used as the threshold for
        # the power law fit
        # argxmins = the index of each of these possible thresholds
        xmins,argxmins = np.unique(z,return_index=True)
        self._nunique = len(xmins)

        if self._nunique == len(x) and discrete is None:
            if verbose:
                print("Using CONTINUOUS fitter because there are no repeated "
                      "values.")
            discrete = False
        elif self._nunique < len(x) and discrete is None:
            if verbose:
                print("Using DISCRETE fitter because there are repeated "
                      "values.")
            discrete = True

        t = time.time()
        if xmin is None:
            if discrete:
                self.discrete_best_alpha(approximate=discrete_approx,
                                         n_alpha=discrete_n_alpha,
                                         verbose=verbose,
                                         finite=finite)
                return self._xmin,self._alpha
            elif usefortran and fortranOK:
                kstest_values,alpha_values = fplfit.plfit(z, 0)
                if not quiet:
                    print(("FORTRAN plfit executed in %f seconds" % (time.time()-t)))
            elif usecy and cyOK:
                kstest_values,alpha_values = cplfit.plfit_loop(z,
                                                               nosmall=False,
                                                               zunique=xmins,
                                                               argunique=argxmins)
                if not quiet:
                    print(("CYTHON plfit executed in %f seconds" % (time.time()-t)))
            else:
                # python (numpy) version
                f_alpha = alpha_gen(z)
                f_kstest = kstest_gen(z)
                alpha_values = np.asarray(list(map(f_alpha,xmins)),
                                          dtype='float')
                kstest_values = np.asarray(list(map(f_kstest,xmins)),
                                           dtype='float')
                if not quiet:
                    print(("PYTHON plfit executed in %f seconds" % (time.time()-t)))

            if not quiet:
                if usefortran and not fortranOK:
                    raise ImportError("fortran fplfit did not load")
                if usecy and not cyOK:
                    raise ImportError("cython cplfit did not load")

            # For each alpha, the number of included data points is
            # total data length - first index of xmin
            # No +1 is needed: xmin is included.
            sigma = (alpha_values-1)/np.sqrt(len(z)-argxmins)
            # I had changed it to this, but I think this is wrong.
            # sigma = (alpha_values-1)/np.sqrt(len(z)-np.arange(len(z)))

            if nosmall:
                # test to make sure the number of data points is high enough
                # to provide a reasonable s/n on the computed alpha
                goodvals = sigma<0.1
                nmax = argmin(goodvals)
                if nmax <= 0:
                    nmax = len(xmins) - 1
                    if not silent:
                        print("Not enough data left after flagging "
                              "low S/N points.  "
                              "Using all data.")
            else:
                # -1 to weed out the very last data point; it cannot be correct
                # (can't have a power law with 1 data point).
                nmax = len(xmins)-1

            best_ks_index = argmin(kstest_values[:nmax])
            xmin  = xmins[best_ks_index]

            self._alpha_values = alpha_values
            self._xmin_kstest = kstest_values
            if scipyOK:
                # CHECK THIS
                self._ks_prob_all = np.array([scipy.stats.ksone.sf(D_stat,
                                                                   len(kstest_values)-ii)
                                              for ii,D_stat in
                                              enumerate(kstest_values)])
            self._sigma = sigma

            # sanity check
            n = np.count_nonzero(z>=xmin)
            alpha = 1. + float(n)/sum(log(z[z>=xmin]/xmin))
            try:
                np.testing.assert_almost_equal(alpha, alpha_values[best_ks_index],
                                               decimal=5)
            except AssertionError:
                raise AssertionError("The alpha value computed was not self-"
                                     "consistent.  This should not happen.")

        z     = z[z>=xmin]
        n     = len(z)
        alpha = 1. + float(n) / sum(log(z/xmin))
        if finite:
            alpha = alpha*(n-1.)/n+1./n
        if n < 50 and not finite and not silent:
            print(('(PLFIT) Warning: finite-size bias may be present. n=%i' % n))

        ks = max(abs( np.arange(n)/float(n) - (1-(xmin/z)**(alpha-1)) ))
        # Parallels Eqn 3.5 in Clauset et al 2009, but zeta(alpha, xmin) =
        # (alpha-1)/xmin.  Really is Eqn B3 in paper.
        L = n*log((alpha-1)/xmin) - alpha*sum(log(z/xmin))
        #requires another map... Larr = arange(len(unique(x))) * log((alpha_values-1)/unique(x)) - alpha_values*sum
        self._likelihood = L
        self._xmin = xmin
        self._xmins = xmins
        self._alpha= alpha
        self._alphaerr = (alpha-1)/np.sqrt(n)

        # this ks statistic may not have the same value as min(dat) because of unique()
        self._ks = ks

        if scipyOK:
            self._ks_prob = scipy.stats.ksone.sf(ks, n)

        self._ngtx = n
        if n == 1:
            if not silent:
                print("Failure: only 1 point kept.  Probably not a power-law distribution.")
            self._alpha = alpha = 0
            self._alphaerr = 0
            self._likelihood = L = 0
            self._ks = 0
            self._ks_prob = 0
            self._xmin = xmin
            return xmin,0
        if np.isnan(L) or np.isnan(xmin) or np.isnan(alpha):
            raise ValueError("plfit failed; returned a nan")

        if not quiet:
            if verbose: print("The lowest value included in the power-law fit, ", end=' ')
            print("xmin: %g" % xmin, end=' ')
            if verbose: print("\nThe number of values above xmin, ", end=' ')
            print("n(>xmin): %i" % n, end=' ')
            if verbose: print("\nThe derived power-law alpha (p(x)~x^-alpha) with MLE-derived error, ", end=' ')
            print("alpha: %g +/- %g  " % (alpha,self._alphaerr), end=' ')
            if verbose: print("\nThe log of the Likelihood (the maximized parameter; you minimized the negative log likelihood), ", end=' ')
            print("Log-Likelihood: %g  " % L, end=' ')
            if verbose: print("\nThe KS-test statistic between the best-fit power-law and the data, ", end=' ')
            print("ks: %g" % (ks), end=' ')
            if scipyOK:
                if verbose: print(" occurs with probability  ", end=' ')
                print("p(ks): %g" % (self._ks_prob))
            else:
                print()

        return xmin,alpha
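
The key np.sort usage above: the data are sorted once, and np.unique(z, return_index=True) then yields both the candidate xmin thresholds and, because z is sorted, the index at which each threshold first appears, which is exactly the number of points excluded below it. A compact sketch of that relationship on toy data (not the fitter itself):

import numpy as np

x = np.array([5.0, 1.0, 2.0, 2.0, 3.0, 1.0])
z = np.sort(x)                                   # [1. 1. 2. 2. 3. 5.]
xmins, argxmins = np.unique(z, return_index=True)

for xmin, first in zip(xmins, argxmins):
    n_tail = len(z) - first                      # points kept when fitting above this xmin
    assert n_tail == np.count_nonzero(z >= xmin)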

Example 104

Project: mne-python Source File: xdawn.py
Function: fit
    def fit(self, epochs, y=None):
        """Fit Xdawn from epochs.

        Parameters
        ----------
        epochs : Epochs object
            An instance of Epoch on which Xdawn filters will be fitted.
        y : ndarray | None (default None)
            If None, uses epochs.events[:, 2].

        Returns
        -------
        self : Xdawn instance
            The Xdawn instance.
        """
        # Check data
        if not isinstance(epochs, _BaseEpochs):
            raise ValueError('epochs must be an Epochs object.')
        X = epochs.get_data()
        X = X[:, _pick_data_channels(epochs.info), :]
        y = epochs.events[:, 2] if y is None else y
        self.event_id_ = epochs.event_id

        # Check that no baseline was applied with correct overlap
        correct_overlap = self.correct_overlap
        if correct_overlap == 'auto':
            # Events are overlapped if the minimal inter-stimulus
            # interval is smaller than the time window.
            isi = np.diff(np.sort(epochs.events[:, 0]))
            window = int((epochs.tmax - epochs.tmin) * epochs.info['sfreq'])
            correct_overlap = isi.min() < window

        if epochs.baseline and correct_overlap:
            raise ValueError('Cannot apply correct_overlap if epochs'
                             ' were baselined.')

        events, tmin, sfreq = None, 0., 1.
        if correct_overlap:
            events = epochs.events
            tmin = epochs.tmin
            sfreq = epochs.info['sfreq']
        self.correct_overlap_ = correct_overlap

        # Note: In this original version of Xdawn we compute and keep all
        # components. The selection comes at transform().
        n_components = X.shape[1]

        # Main fitting function
        filters, patterns, evokeds = _fit_xdawn(
            X, y,  n_components=n_components, reg=self.reg,
            signal_cov=self.signal_cov, events=events, tmin=tmin, sfreq=sfreq)

        # Re-order filters and patterns according to event_id
        filters = filters.reshape(-1, n_components, filters.shape[-1])
        patterns = patterns.reshape(-1, n_components, patterns.shape[-1])
        self.filters_, self.patterns_, self.evokeds_ = dict(), dict(), dict()
        idx = np.argsort([value for _, value in iteritems(epochs.event_id)])
        for eid, this_filter, this_pattern, this_evo in zip(
                epochs.event_id, filters[idx], patterns[idx], evokeds[idx]):
            self.filters_[eid] = this_filter.T
            self.patterns_[eid] = this_pattern.T
            n_events = len(epochs[eid])
            evoked = EvokedArray(this_evo, epochs.info, tmin=epochs.tmin,
                                 comment=eid, nave=n_events)
            self.evokeds_[eid] = evoked
        return self
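
The overlap check above relies on sorting the event onsets before np.diff, so the differences are true inter-stimulus intervals regardless of how the events table happens to be ordered. A sketch with invented sample indices (the Epochs machinery is not reproduced):

import numpy as np

onsets = np.array([800, 100, 450, 470])   # hypothetical event onsets, in samples
isi = np.diff(np.sort(onsets))            # [350  20 330]
window = 100                              # hypothetical epoch length in samples

overlapping = isi.min() < window          # True here: two events are only 20 samples apart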

Example 105

Project: msmtools Source File: correlations.py
def time_correlations_direct(P, pi, obs1, obs2=None, times=[1]):
    r"""Compute time-correlations of obs1, or time-cross-correlation with obs2.

    The time-correlation at time=k is computed by the matrix-vector expression:
    cor(k) = obs1' diag(pi) P^k obs2


    Parameters
    ----------
    P : ndarray, shape=(n, n) or scipy.sparse matrix
        Transition matrix
    obs1 : ndarray, shape=(n)
        Vector representing observable 1 on discrete states
    obs2 : ndarray, shape=(n)
        Vector representing observable 2 on discrete states. If not given,
        the autocorrelation of obs1 will be computed
    pi : ndarray, shape=(n)
        stationary distribution vector. Will be computed if not given
    times : array-like, shape(n_t)
        Vector of time points at which the (auto)correlation will be evaluated

    Returns
    -------
    correlations : ndarray, shape=(n_t,)
        Correlation values at the given times.

    """
    n_t = len(times)
    times = np.sort(times)  # sort it to use caching of previously computed correlations
    f = np.zeros(n_t)

    # maximum time > number of rows?
    use_diagonalization = False
    if times[-1] > P.shape[0]:
        use_diagonalization = True
        R, D, L = rdl_decomposition(P)
        # discard imaginary part, if all elements i=0
        if not np.any(np.iscomplex(R)):
            R = np.real(R)
        if not np.any(np.iscomplex(D)):
            D = np.real(D)
        if not np.any(np.iscomplex(L)):
            L = np.real(L)
        rdl = (R, D, L)

    if use_diagonalization:
        for i in range(n_t):
            f[i] = time_correlation_by_diagonalization(P, pi, obs1, obs2, times[i], rdl)
    else:
        start_values = None
        for i in range(n_t):
            f[i], start_values = \
                time_correlation_direct_by_mtx_vec_prod(P, pi, obs1, obs2,
                                                        times[i], start_values, True)
    return f
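
The times are sorted purely for the caching mentioned in the comment: with ascending times, each correlation can reuse the propagated vector from the previous, smaller time instead of recomputing P^k from scratch. A minimal sketch of that idea for the dense, direct case (toy matrices, not the library routine):

import numpy as np

P = np.array([[0.9, 0.1], [0.2, 0.8]])    # toy transition matrix
pi = np.array([2.0 / 3.0, 1.0 / 3.0])     # its stationary distribution
obs = np.array([1.0, -1.0])
times = np.sort(np.array([8, 2, 4]))      # ascending, so each step reuses the last one

corr = np.empty(len(times))
vec = obs.copy()
last_t = 0
for i, t in enumerate(times):
    for _ in range(t - last_t):           # advance P^k obs only by the missing steps
        vec = P.dot(vec)
    last_t = t
    corr[i] = (obs * pi).dot(vec)         # cor(t) = obs' diag(pi) P^t obs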

Example 106

Project: pyNCS Source File: experimentTools.py
def mksavedir(pre='Results/', exp_dir=None):
    """
    Creates a results directory in the subdirectory 'pre'. The directory name is given by ###__dd-mm-yyyy, where ### is the next unused 3-digit number
    """

    if pre[-1] != '/':
        pre = pre + '/'

    if not os.path.exists(pre):
        os.makedirs(pre)
    prelist = np.sort(fnmatch.filter(os.listdir(pre), '[0-9][0-9][0-9]__*'))

    if exp_dir == None:
        if len(prelist) == 0:
            expDirN = "001"
        else:
            expDirN = "%03d" % (
                int((prelist[len(prelist) - 1].split("__"))[0]) + 1)

        direct = time.strftime(
            pre + "/" + expDirN + "__" + "%d-%m-%Y", time.localtime())
        assert not os.path.exists(direct)

    elif isinstance(exp_dir, str):
        direct = pre + exp_dir
        if os.path.exists(direct):
            print("Warning: overwriting directory {0}".format(direct))
            rmtree(direct)

    else:
        raise TypeError('exp_dir should be a string')

    os.mkdir(direct)

    globaldata.directory = direct + str('/')

    fh = file(
        globaldata.directory + time.strftime("%H:%M:%S", time.localtime()), 'w')
    fh.close()

    print("Created experiment directory {0}".format(globaldata.directory))
    return globaldata.directory

Example 107

Project: dipy Source File: sphere.py
def unique_sets(sets, return_inverse=False):
    """Remove duplicate sets.

    Parameters
    ----------
    sets : array (N, k)
        N sets of size k.
    return_inverse : bool
        If True, also returns the indices of unique_sets that can be used
        to reconstruct `sets` (the original ordering of each set may not be
        preserved).

    Returns
    -------
    unique_sets : array
        Unique sets.
    inverse : array (N,)
        The indices to reconstruct `sets` from `unique_sets`.

    """
    sets = np.sort(sets, 1)
    order = np.lexsort(sets.T)
    sets = sets[order]
    flag = np.ones(len(sets), 'bool')
    flag[1:] = (sets[1:] != sets[:-1]).any(-1)
    uniqsets = sets[flag]
    if return_inverse:
        inverse = np.empty_like(order)
        inverse[order] = np.arange(len(order))
        index = flag.cumsum() - 1
        return uniqsets, index[inverse]
    else:
        return uniqsets
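
The combination used above, np.sort along axis 1 to make each set order-free followed by np.lexsort to bring identical sets next to each other, is worth seeing in isolation (toy sets rather than sphere data):

import numpy as np

sets = np.array([[2, 0, 1],
                 [1, 2, 0],
                 [3, 1, 2]])
sets = np.sort(sets, 1)            # each row ascending: rows 0 and 1 become identical
order = np.lexsort(sets.T)         # sort the rows themselves
sets = sets[order]

flag = np.ones(len(sets), dtype=bool)
flag[1:] = (sets[1:] != sets[:-1]).any(-1)   # True where a row differs from the previous one
unique_rows = sets[flag]                     # [[0 1 2], [1 2 3]]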

Example 108

Project: hyperopt Source File: test_tpe.py
Function: work
    def work(self):
        self.worked = True
        kwargs = dict(
                weights=self.weights,
                mus=self.mus,
                sigmas=self.sigmas,
                low=self.low,
                high=self.high,
                q=self.q,
                )
        samples = GMM1(rng=self.rng,
                size=(self.n_samples,),
                **kwargs)
        samples = np.sort(samples)
        edges = samples[::self.samples_per_bin]
        #print samples

        pdf = np.exp(GMM1_lpdf(edges[:-1], **kwargs))
        dx = edges[1:] - edges[:-1]
        y = 1 / dx / len(dx)

        if self.show:
            plt.scatter(edges[:-1], y)
            plt.plot(edges[:-1], pdf)
            plt.show()
        err = (pdf - y) ** 2
        print np.max(err)
        print np.mean(err)
        print np.median(err)
        if not self.show:
            assert np.max(err) < .1
            assert np.mean(err) < .01
            assert np.median(err) < .01

Example 109

Project: OSCAAR Source File: dataBank.py
Function: parseinit
    def parseInit(self, initParFilePath=None):
        """
        Parses `init.par`, a plain text file that contains all of the running parameters
        that control the `differentialPhotometry.py` script. `init.par` is written by
        the OSCAAR GUI or can be edited directly by the user.

        Parameters
        ----------
        initParFilePath : str
            Optional full path to the init.par file to use for the data


        """

        if initParFilePath is None:
            init = open(os.path.join(
                os.path.dirname(os.path.abspath(oscaar.__file__)),
                'init.par'), 'r').read().splitlines()
        else:
            if os.path.exists(initParFilePath):
                init = open(os.path.abspath(initParFilePath), 'r').read().splitlines()
            else:
                raise ValueError, (
                    "PAR file {0} cannot be found.".format(initParFilePath))

        for line in init:
            if len(line.split()) > 1:
                inline = line.split(':', 1)
                name = inline[0].strip()
                value = str(inline[1].strip())
                list = [("Path to Master-Flat Frame", "flatPath"),
                        ("Path to Regions File", "regPaths"),
                        ("Ingress", "ingress"), ("Egress", "egress"),
                        ("Radius", "apertureRadius"), ("Tracking Zoom", "trackingZoom"),
                        ("CCD Gain", "ccdGain"), ("Plot Tracking", "trackPlots"),
                        ("Plot Photometry", "photPlots"), ("Smoothing Constant", "smoothConst"),
                        ("Output Path","outputPath"), ("Path to Dark Frames", "darksPath"),
                        ("Path to Data Images", "imagesPaths"), ("Exposure Time Keyword", "timeKeyword")]

                for string,save in list:
                    if string == name:
                        #if name == "Smoothing Constant" or name == "Radius" or name == "Tracking Zoom" or name == "CCD Gain":
                        if name == "Smoothing Constant" or name == "Tracking Zoom" or name == "CCD Gain":
                            self.dict[save] = float(value)
                        elif name == "Ingress" or name == "Egress":
                            self.dict[save] = mathMethods.ut2jd(value)
                        elif name == "Plot Photometry" or name == "Plot Tracking":
                            if value == "on":
                                self.dict[save] = True
                            else:
                                self.dict[save] = False
                        elif name == "Path to Dark Frames" or name == "Path to Data Images":
                            value = inline[1].strip()
                            if len(glob(value)) > 0:
                                self.dict[save] = np.sort(glob(value))
                            elif value == "":
                                self.dict[save] = ""
                            else:
                                tempArr = []
                                for path in str(inline[1]).split(','):
                                    path = path.strip()
                                    path = os.path.join(oscaarpathplus,os.path.abspath(path))
                                    tempArr.append(path)
                                self.dict[save] = np.sort(tempArr)

                        elif name == "Radius":
                            if len(value.split(',')) == 3:
                                ## If multiple aperture radii are requested by dictating the range, enumerate the range:
                                apertureRadiusMin, apertureRadiusMax, apertureRadiusStep = map(float,value.split(','))

                                if (apertureRadiusMax-apertureRadiusMin) % apertureRadiusStep == 0:
                                    apertureRadii = np.arange(apertureRadiusMin, apertureRadiusMax+apertureRadiusStep, apertureRadiusStep)
                                else:
                                    apertureRadii = np.arange(apertureRadiusMin, apertureRadiusMax, apertureRadiusStep)

                                self.dict[save] = apertureRadii
                            elif len(value.split(',')) == 1:
                                ## If only one aperture radius is requested, make a list with only that one element
                                self.dict[save] = [float(value)]
                            else:
                                self.dict[save] = [float(i) for i in value.split(',')]

                        elif name == "Output Path":
                            self.outputPath = os.path.join(oscaarpathplus,os.path.abspath(value))
                        else:
                            self.dict[save] = value
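
In the path-handling branch above, np.sort simply gives the image and dark-frame paths a deterministic order, since glob() returns them in arbitrary filesystem order. The same idea in isolation (the pattern below is only an example):

import numpy as np
from glob import glob

paths = np.sort(glob("observations/*.fits"))   # hypothetical pattern; sorted, reproducible order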

Example 110

Project: ilastik-0.5 Source File: activeLearning.py
def computeEnsembleMargin2D(pmap):
    pmap_sort = numpy.sort(pmap, axis=len(pmap.shape)-1)
    res = pmap_sort[:,:,-1] - pmap_sort[:,:,-2]
    return 1-res
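
Sorting along the last axis is what makes the margin computation a one-liner: after np.sort, the two largest class probabilities always sit in the last two positions. A toy version for a 2x2 probability map with three classes:

import numpy as np

pmap = np.array([[[0.1, 0.7, 0.2], [0.3, 0.3, 0.4]],
                 [[0.5, 0.4, 0.1], [0.2, 0.2, 0.6]]])
pmap_sort = np.sort(pmap, axis=-1)                   # ascending per pixel
margin = pmap_sort[:, :, -1] - pmap_sort[:, :, -2]   # best minus second-best probability
uncertainty = 1 - margin                             # large where the classifier is undecided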

Example 111

Project: trimesh Source File: intersections.py
def mesh_plane(mesh, 
               plane_normal,
               plane_origin):
    '''
    Find the intersections between a mesh and a plane,
    returning a set of line segments on that plane.

    Arguments
    ---------
    mesh:          Trimesh object
    plane_normal:  (3,) float, plane normal
    plane_origin:  (3,) float, plane origin

    Returns
    ----------
    (m, 2, 3) float, list of 3D line segments
    '''

    def triangle_cases(signs):
        '''
        Figure out which faces correspond to which intersection case from 
        the signs of the dot product of each vertex.
        Does this by bit-banging each row of signs into an 8-bit integer.

        code : signs      : intersects
        0    : [-1 -1 -1] : No
        2    : [-1 -1  0] : No
        4    : [-1 -1  1] : Yes; 2 on one side, 1 on the other
        6    : [-1  0  0] : Yes; one edge fully on plane
        8    : [-1  0  1] : Yes; one vertex on plane, 2 on different sides
        12   : [-1  1  1] : Yes; 2 on one side, 1 on the other
        14   : [0 0 0]    : No (on plane fully)
        16   : [0 0 1]    : Yes; one edge fully on plane
        20   : [0 1 1]    : No
        28   : [1 1 1]    : No

        Arguments
        ----------
        signs: (n,3) int, all values are -1,0, or 1
               Each row contains the dot product of all three vertices
               in a face with respect to the plane
        
        Returns
        ---------
        basic:      (n,) bool, which faces are in the basic intersection case
        one_vertex: (n,) bool, which faces are in the one vertex case
        one_edge:   (n,) bool, which faces are in the one edge case
        '''

        signs_sorted = np.sort(signs, axis=1)
        coded = np.zeros(len(signs_sorted), dtype=np.int8) + 14
        for i in range(3):
            coded += signs_sorted[:,i] << 3-i

        # one edge fully on the plane
        # note that we are only accepting *one* of the on- edge cases,
        # where the other vertex has a positive dot product (16) instead
        # of both on- edge cases ([6,16])
        # this is so that for regions that are co-planar with the section plane
        # we don't end up with an invalid boundary
        key = np.zeros(29, dtype=np.bool)
        key[16] = True
        one_edge = key[coded]

        # one vertex on plane, other two on different sides
        key[:] = False
        key[8] = True
        one_vertex = key[coded]

        # one vertex on one side of the plane, two on the other
        key[:]      = False
        key[[4,12]] = True
        basic = key[coded]

        return basic, one_vertex, one_edge

    def handle_on_vertex(signs, faces, vertices):
        # case where one vertex is on plane, two are on different sides
        vertex_plane = faces[signs == 0]
        edge_thru    = faces[signs != 0].reshape((-1,2))
        point_intersect, valid  = plane_lines(plane_origin, 
                                              plane_normal, 
                                              vertices[edge_thru.T],
                                              line_segments = False)
        lines = np.column_stack((vertices[vertex_plane[valid]],
                                 point_intersect)).reshape((-1,2,3))
        return lines

    def handle_on_edge(signs, faces, vertices):
        # case where two vertices are on the plane and one is off
        edges  = faces[signs == 0].reshape((-1,2))
        points = vertices[edges]
        return points

    def handle_basic(signs, faces, vertices):
        #case where one vertex is on one side and two are on the other
        unique_element = unique_value_in_row(signs, unique = [-1,1])
        edges = np.column_stack((faces[unique_element],
                                 faces[np.roll(unique_element, 1, axis=1)],
                                 faces[unique_element],
                                 faces[np.roll(unique_element, 2, axis=1)])).reshape((-1,2))
        intersections, valid  = plane_lines(plane_origin, 
                                            plane_normal, 
                                            vertices[edges.T],
                                            line_segments = False)
        # since the data has been pre-culled, any invalid intersections at all
        # mean the culling was done incorrectly and things have gone badly wrong
        assert valid.all()
        return intersections.reshape((-1,2,3))

    plane_normal = np.asanyarray(plane_normal)
    plane_origin = np.asanyarray(plane_origin)
    if plane_origin.shape != (3,) or plane_normal.shape != (3,):
        raise ValueError('Plane origin and normal must be (3,)!')
        
    # dot product of each vertex with the plane normal, indexed by face
    # so for each face the dot product of each vertex is a row
    # shape is the same as mesh.faces (n,3)
    dots = np.dot(plane_normal, (mesh.vertices-plane_origin).T)[mesh.faces]

    # sign of the dot product is -1, 0, or 1
    # shape is the same as mesh.faces (n,3)
    signs = np.zeros(mesh.faces.shape, dtype=np.int8)
    signs[dots < -tol.merge] = -1
    signs[dots >  tol.merge] =  1

    # figure out which triangles are in the cross section,
    # and which of the three intersection cases they are in
    cases = triangle_cases(signs)
    # handlers for each case
    handlers = (handle_basic, 
                handle_on_vertex, 
                handle_on_edge)    

    lines = np.vstack([h(signs[c],
                         mesh.faces[c],
                         mesh.vertices) for c, h in zip(cases, handlers)])

    log.debug('mesh_cross_section found %i intersections', len(lines))

    return lines
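
Inside triangle_cases, np.sort(signs, axis=1) makes the 8-bit case code independent of the order in which a face lists its vertices: any permutation of, say, (-1, -1, 1) collapses to the same sorted row and therefore the same code. A stripped-down illustration of just the encoding step:

import numpy as np

signs = np.array([[1, -1, -1],
                  [-1, 1, -1],
                  [0, 1, -1]], dtype=np.int8)
signs_sorted = np.sort(signs, axis=1)        # rows 0 and 1 both become [-1 -1  1]

coded = np.zeros(len(signs_sorted), dtype=np.int8) + 14
for i in range(3):
    coded += signs_sorted[:, i] << (3 - i)   # rows 0 and 1 -> code 4, row 2 -> code 8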

Example 112

Project: PyEMMA Source File: feature_reader.py
Function: get_itraj_random_accessible
    def _get_itraj_random_accessible(self, itrajs, frames, dims):
        itrajs = self._get_indices(itrajs, self._source.ntraj)
        frames = self._get_indices(frames, min(self._source.trajectory_lengths(1, 0)[itrajs]))
        dims = self._get_indices(dims, self._source.ndim)

        ntrajs = len(itrajs)
        nframes = len(frames)
        ndims = len(dims)

        frames_orig = frames.argsort().argsort()
        frames_sorted = np.sort(frames)

        itraj_orig = itrajs.argsort().argsort()
        itraj_sorted = np.sort(itrajs)
        itrajs_unique, itrajs_count = np.unique(itraj_sorted, return_counts=True)

        if max(dims) > self._source.ndim:
            raise IndexError("Data only has %s dimensions, wanted to slice by dimension %s."
                             % (self._source.ndim, max(dims)))

        ra_indices = np.empty((len(itrajs_unique) * nframes, 2), dtype=int)
        for idx, itraj in enumerate(itrajs_unique):
            ra_indices[idx * nframes: (idx + 1) * nframes, 0] = itraj * np.ones(nframes, dtype=int)
            ra_indices[idx * nframes: (idx + 1) * nframes, 1] = frames_sorted

        data = np.empty((ntrajs, nframes, ndims))

        count = 0
        for X in self._source.iterator(stride=ra_indices, lag=0, chunk=0, return_trajindex=False):
            for _ in range(0, itrajs_count[itraj_orig[count]]):
                data[itraj_orig[count], :, :] = X[frames_orig][:, dims]
                count += 1

        return data
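
The double argsort above (frames.argsort().argsort()) computes the rank of each requested frame, which is exactly what is needed to put data that was read in sorted order back into the caller's original order. A small standalone demonstration (the PyEMMA reader/iterator machinery is omitted):

import numpy as np

frames = np.array([7, 2, 9, 2])     # requested frame indices, in user order
order = frames.argsort().argsort()  # rank of each request in the sorted order
frames_sorted = np.sort(frames)     # what would actually be read, in ascending order

data_sorted = frames_sorted * 10.0  # stand-in for data read per sorted frame
data_user_order = data_sorted[order]   # back to the caller's order: [70. 20. 90. 20.]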

Example 113

Project: evoMPS Source File: tdvp_common.py
def restore_RCF_l(A, lm1, Gm1, sanity_checks=False):
    """Transforms a single A[n] to obtain diagonal l[n].

    Applied after restore_RCF_r(), this completes the full canonical form
    of sub-section 3.1, theorem 1 of arXiv:quant-ph/0608197v2.

    This function must be called for each n in turn, starting at 1,
    passing the gauge transformation matrix from the previous step
    as an argument.

    Finds a G[n] such that orthonormalization is fulfilled for n.

    The diagonal entries of l[n] are sorted in
    ascending order (for example l[n] = diag([0, 0, 0.1, 0.2, ...])).

    Parameters
    ----------
    A : ndarray
        The parameter tensor for the nth site A[n].
    lm1 : ndarray or object with array interface
        The matrix l[n - 1].
    Gm1 : ndarray
        The gauge transform matrix for site n obtained in the previous step (for n - 1).
    sanity_checks : bool (False)
        Whether to perform additional sanity checks.
        
    Returns
    -------
    l : ndarray or simple_diag_matrix
        The new, diagonal matrix l[n]. 
    G : ndarray
        The gauge transformation matrix for site n.
    G_i : ndarray
        Inverse of G.
    """
    if Gm1 is None:
        x = lm1
    else:
        x = Gm1.conj().T.dot(lm1.dot(Gm1))

    M = eps_l_noop(x, A, A)
    ev, EV = la.eigh(M) #wraps lapack routines, which return eigenvalues in ascending order
    
    if sanity_checks:
        assert np.all(ev == np.sort(ev)), "unexpected eigenvalue ordering"
    
    l = mm.simple_diag_matrix(ev, dtype=A.dtype)
    G_i = EV

    if Gm1 is None:
        Gm1 = EV.conj().T #for left uniform case
        lm1 = l #for sanity check

    for s in xrange(A.shape[0]):
        A[s] = Gm1.dot(A[s].dot(G_i))

    if sanity_checks:
        l_ = eps_l_noop(lm1, A, A)
        if not sp.allclose(l_, l, atol=1E-12, rtol=1E-12):
            log.warning("Sanity Fail in restore_RCF_l!: l is bad!")
            log.warning(la.norm(l_ - l))

    G = EV.conj().T

    return l, G, G_i

Example 114

Project: RHEAS Source File: decorators.py
def netcdf(fetch):
    """Decorator for fetching NetCDF files (local or from Opendap servers)."""
    @wraps(fetch)
    def wrapper(*args, **kwargs):
        url, varname, bbox, dt = fetch(*args, **kwargs)
        ds = netcdf4.Dataset(url)
        for var in ds.variables:
            if var.lower().startswith("lon") or var.lower() == "x":
                lonvar = var
            if var.lower().startswith("lat") or var.lower() == "y":
                latvar = var
            if var.lower().startswith("time") or var.lower() == "t":
                timevar = var
        lat = ds.variables[latvar][:]
        lon = ds.variables[lonvar][:]
        lon[lon > 180] -= 360
        res = abs(lat[0]-lat[1])  # assume rectangular grid
        i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox)
        t = ds.variables[timevar]
        tt = netcdf4.num2date(t[:], units=t.units)
        ti = [tj for tj in range(len(tt)) if resetDatetime(tt[tj]) >= dt[0] and resetDatetime(tt[tj]) <= dt[1]]
        if len(ti) > 0:
            lati = np.argsort(lat)[::-1][i1:i2]
            loni = np.argsort(lon)[j1:j2]
            data = ds.variables[varname][ti, lati, loni]
            dt = tt[ti]
        else:
            data = None
            dt = None
        lat = np.sort(lat)[::-1][i1:i2]
        lon = np.sort(lon)[j1:j2]
        return data, lat, lon, dt
    return wrapper

Example 115

Project: fusedwind Source File: interface.py
def fused_autodoc(cls):
    """Decorator to automatically docuement the inputs and outputs of an Assembly / Component

    """

    clsname = cls.__name__
    if not cls.__doc__:
        cls.__doc__ = '**TODO**: fill in this doc\n\n'

    inputs = [k for k, v in cls.__class_traits__.iteritems() if v.iotype == 'in'
              and k not in Component.__class_traits__
              and k not in Assembly.__class_traits__
              and k.find('_items') == -1]
    outputs = [k for k, v in cls.__class_traits__.iteritems() if v.iotype == 'out'
              and k not in Component.__class_traits__
              and k not in Assembly.__class_traits__
              and k.find('_items') == -1]

    variables = [k for k, v in cls.__class_traits__.iteritems()
                    if k not in VariableTree.__class_traits__
                    and k.find('_items') == -1]
    variables = sort(variables)
    #print inputs
    el = '\n    '
    def addl(x=''):
        cls.__doc__+=x+el
    addl()
    addl()

    if issubclass(cls, Component):
        if len(inputs) > 0:
            addl('Parameters')
            addl('----------')
            addl(el.join([i + ':    ' + cls.__class_traits__[i].trait_type.__class__.__name__ +
                          ', default=' + cls.__class_traits__[i].default.__str__() +
                          ', [%s]'%(cls.__class_traits__[i].units) +
                          el+'   ' + cls.__class_traits__[i].desc.__str__()+'.'+el
                          for i in inputs]))
            addl('')

    if issubclass(cls, VariableTree):
        if len(variables) > 0:
            addl('Parameters')
            addl('----------')
            addl(el.join([i + ':    ' + cls.__class_traits__[i].trait_type.__class__.__name__ +
                              ', default=' + cls.__class_traits__[i].default.__str__() +
                              ', [%s]'%(cls.__class_traits__[i].units) +
                              el+'   ' + cls.__class_traits__[i].desc.__str__()+'.'+el
                              for i in variables]))
            addl('')

    if issubclass(cls, Component) and len(outputs) > 0:
        addl('Returns')
        addl('-------')
        addl(el.join([i + ':    ' + cls.__class_traits__[i].trait_type.__class__.__name__ +
                          ', [%s]'%(cls.__class_traits__[i].units) +
                          el+'   ' + cls.__class_traits__[i].desc.__str__()+'.'+el
                          for i in outputs]))

    # Check if the class has some base:
    if hasattr(cls, '_fused_base'):
        addl('')
        addl('Notes')
        addl('-----')
        addl('``%s``'%(clsname) + ' implements the following interfaces: ' + ', '.join(['``%s``'%(c.__name__) for c in cls._fused_base]))
        addl('')

    return cls

Example 116

Project: Barak Source File: stats.py
def find_conf_levels(a, pvals=[0.683, 0.955, 0.997]):
    """ Find the threshold value in an array that give confidence
    levels for an array of probabilities.

    Parameters
    ----------
    a : ndarray
      Array of probabilities. Can have more than one dimension.
    pvals : list of floats, shape (N,)
      Confidence levels to find the values for (must be between 0 and
      1). Defaults correspond to 1, 2 and 3 sigma. Must be smallest to
      largest.

    Returns
    -------
    athresh : list of shape (N,)
      The threshold values of `a` giving the confidence ranges in
      pvals.
    """
    assert all(0 <= float(p) <= 1 for p in pvals)
    assert np.allclose(np.sort(pvals), pvals)
    a = np.asarray(a).ravel()

    assert not np.isnan(a).any()
    assert not np.isinf(a).any()

    tot = a.sum()
    conflevels = [p * tot for p in pvals]
    asorted = np.sort(a)

    out = []
    i = -2
    for iconf, clevel in enumerate(conflevels):
        while asorted[i:].sum() < clevel:
            i -= 1
            if i == -len(a):
                i += 1
                break
        #print (i, asorted[i], asorted[i:].sum(), clevel, pvals[iconf])
        out.append(asorted[i])

    return out
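
find_conf_levels walks up the sorted array from the small end until the remaining tail holds the requested probability mass. Ignoring ties, the same threshold can be read off from a descending sort and a cumulative sum; a condensed sketch of the idea with toy probabilities and one confidence level:

import numpy as np

a = np.array([0.05, 0.4, 0.1, 0.3, 0.15])     # toy probability map, sums to 1
p = 0.683                                     # ~1-sigma confidence mass

desc = np.sort(a)[::-1]                       # largest probabilities first
cum = np.cumsum(desc)
threshold = desc[np.searchsorted(cum, p)]     # 0.3: the region a >= 0.3 holds ~70% of the mass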

Example 117

Project: kaggle_diabetic_retinopathy Source File: generators.py
def load_image_and_process(im, im_dst, dim_dst, output_shape=(80, 80),
                           prefix_path='data/train_ds5_crop/',
                           transfo_params=None,
                           rand_values=None):
    im = Image.open(prefix_path + im + '.jpeg', mode='r')

    sort_dim = list(np.sort(im.size))

    dim_dst[0] = sort_dim[1] / 700.0
    dim_dst[1] = sort_dim[0] / 700.0

    im_new = im

    # Dict to keep track of random values.
    chosen_values = {}

    if transfo_params.get('extra_width_crop', False):
        w, h = im_new.size

        if w / float(h) >= 1.3:
            cols_thres = np.where(
                np.max(
                    np.max(
                        np.asarray(im_new),
                        axis=2),
                    axis=0) > 35)[0]

            # Extra cond compared to orig crop.
            if len(cols_thres) > output_shape[0] // 2:
                min_x, max_x = cols_thres[0], cols_thres[-1]
            else:
                min_x, max_x = 0, -1

            im_new = im_new.crop((min_x, 0,
                                  max_x, h))

    if transfo_params.get('crop_height', False):
        w, h = im_new.size

        if w > 1 and 0.98 <= h / float(w) <= 1.02:
            # "Normal" without height crop, do height crop.
            im_new = im_new.crop((0, int(0.05 * h),
                                  w, int(0.95 * h)))

    if transfo_params.get('crop', False) and not \
            transfo_params.get('crop_after_rotation', False):
        if rand_values:
            do_crop = rand_values['do_crop']
        else:
            do_crop = transfo_params['crop_prob'] > np.random.rand()
        chosen_values['do_crop'] = do_crop

        if do_crop:
            out_w, out_h = im_new.size
            w_dev = int(transfo_params['crop_w'] * out_w)
            h_dev = int(transfo_params['crop_h'] * out_h)

            # If values are supplied.
            if rand_values:
                w0, w1 = rand_values['w0'], rand_values['w1']
                h0, h1 = rand_values['h0'], rand_values['h1']
            else:
                w0 = np.random.randint(0, w_dev + 1)
                w1 = np.random.randint(0, w_dev + 1)
                h0 = np.random.randint(0, h_dev + 1)
                h1 = np.random.randint(0, h_dev + 1)

            # Add params to dict.
            chosen_values['w0'] = w0
            chosen_values['w1'] = w1
            chosen_values['h0'] = h0
            chosen_values['h1'] = h1

            im_new = im_new.crop((0 + w0, 0 + h0,
                                  out_w - w1, out_h - h1))

    # if transfo_params.get('new_gen', False):
    #     im_new = im_new.crop(im_new.getbbox())
    # im_new = im_new.resize(map(lambda x: x*2, output_shape),
    # resample=Image.BICUBIC)

    if transfo_params.get('shear', False):
        # http://stackoverflow.com/questions/14177744/how-does-
        # perspective-transformation-work-in-pil
        if transfo_params['shear_prob'] > np.random.rand():
            # print 'shear'
            # TODO: No chosen values because shear not really used.
            shear_min, shear_max = transfo_params['shear_range']
            m = shear_min + np.random.rand() * (shear_max - shear_min)
            out_w, out_h = im_new.size
            xshift = abs(m) * out_w
            new_width = out_w + int(round(xshift))
            im_new = im_new.transform((new_width, out_h), Image.AFFINE,
                                      (1, m, -xshift if m > 0 else 0, 0, 1, 0),
                                      Image.BICUBIC)

    if transfo_params.get('rotation_before_resize', False):
        if rand_values:
            rotation_param = rand_values['rotation_param']
        else:
            rotation_param = np.random.randint(
                transfo_params['rotation_range'][0],
                transfo_params['rotation_range'][1])
        chosen_values['rotation_param'] = rotation_param

        im_new = im_new.rotate(rotation_param, resample=Image.BILINEAR,
                               expand=transfo_params.get('rotation_expand',
                                                         False))
        if transfo_params.get('rotation_expand',
                              False):
            im_new = im_new.crop(im_new.getbbox())

    if transfo_params.get('crop_after_rotation', False):
        if rand_values:
            do_crop = rand_values['do_crop']
        else:
            do_crop = transfo_params['crop_prob'] > np.random.rand()
        chosen_values['do_crop'] = do_crop

        if do_crop:
            out_w, out_h = im_new.size
            w_dev = int(transfo_params['crop_w'] * out_w)
            h_dev = int(transfo_params['crop_h'] * out_h)

            # If values are supplied.
            if rand_values:
                w0, w1 = rand_values['w0'], rand_values['w1']
                h0, h1 = rand_values['h0'], rand_values['h1']
            else:
                w0 = np.random.randint(0, w_dev + 1)
                w1 = np.random.randint(0, w_dev + 1)
                h0 = np.random.randint(0, h_dev + 1)
                h1 = np.random.randint(0, h_dev + 1)

            # Add params to dict.
            chosen_values['w0'] = w0
            chosen_values['w1'] = w1
            chosen_values['h0'] = h0
            chosen_values['h1'] = h1

            im_new = im_new.crop((0 + w0, 0 + h0,
                                  out_w - w1, out_h - h1))

    # im_new = im_new.thumbnail(output_shape, resample=Image.BILINEAR)
    if transfo_params.get('keep_aspect_ratio', False):
        im_new = make_thumb(im_new, size=output_shape,
                           pad=transfo_params['resize_pad'])
    else:
        im_new = im_new.resize(output_shape, resample=Image.BILINEAR)
    # im_new = im_new.resize(output_shape, resample=Image.BICUBIC)
    # im_new = im_new.resize(map(lambda x: int(x * 1.2), output_shape),
    # resample=Image.BICUBIC)
    # im_new = im_new.crop(im_new.getbbox())

    if transfo_params.get('rotation', False) \
            and not transfo_params.get('rotation_before_resize', False):
        if rand_values:
            rotation_param = rand_values['rotation_param']
        else:
            rotation_param = np.random.randint(
                transfo_params['rotation_range'][0],
                transfo_params['rotation_range'][1])
        chosen_values['rotation_param'] = rotation_param

        im_new = im_new.rotate(rotation_param, resample=Image.BILINEAR,
                               expand=transfo_params.get('rotation_expand',
                                                         False))
        if transfo_params.get('rotation_expand',
                              False):
            im_new = im_new.crop(im_new.getbbox())

    # im_new = im_new.resize(output_shape, resample=Image.BICUBIC)
    if transfo_params.get('contrast', False):
        contrast_min, contrast_max = transfo_params['contrast_range']
        if rand_values:
            contrast_param = rand_values['contrast_param']
        else:
            contrast_param = np.random.uniform(contrast_min, contrast_max)
        chosen_values['contrast_param'] = contrast_param

        im_new = ImageEnhance.Contrast(im_new).enhance(contrast_param)

    if transfo_params.get('brightness', False):
        brightness_min, brightness_max = transfo_params['brightness_range']
        if rand_values:
            brightness_param = rand_values['brightness_param']
        else:
            brightness_param = np.random.uniform(brightness_min,
                                                 brightness_max)
        chosen_values['brightness_param'] = brightness_param

        im_new = ImageEnhance.Brightness(im_new).enhance(brightness_param)

    if transfo_params.get('color', False):
        color_min, color_max = transfo_params['color_range']
        if rand_values:
            color_param = rand_values['color_param']
        else:
            color_param = np.random.uniform(color_min, color_max)
        chosen_values['color_param'] = color_param

        im_new = ImageEnhance.Color(im_new).enhance(color_param)

    if transfo_params.get('flip', False):
        if rand_values:
            do_flip = rand_values['do_flip']
        else:
            do_flip = transfo_params['flip_prob'] > np.random.rand()

        chosen_values['do_flip'] = do_flip

        if do_flip:
            im_new = im_new.transpose(Image.FLIP_LEFT_RIGHT)

    if output_shape[0] < 200 and False:
        # Otherwise too slow.
        # TODO: Disabled for now
        if 'rotation' in transfo_params and transfo_params['rotation']:
            if rand_values:
                rotation_param = rand_values['rotation_param2']
            else:
                rotation_param = np.random.randint(
                    transfo_params['rotation_range'][0],
                    transfo_params['rotation_range'][1])

            im_new = im_new.rotate(rotation_param, resample=Image.BILINEAR,
                                   expand=False)
            # im_new = im_new.crop(im_new.getbbox())
            chosen_values['rotation_param2'] = rotation_param

    if transfo_params.get('zoom', False):
        if rand_values:
            do_zoom = rand_values['do_zoom']
        else:
            do_zoom = transfo_params['zoom_prob'] > np.random.rand()
        chosen_values['do_zoom'] = do_zoom

        if do_zoom:
            zoom_min, zoom_max = transfo_params['zoom_range']
            out_w, out_h = im_new.size
            if rand_values:
                w_dev = rand_values['w_dev']
            else:
                w_dev = int(np.random.uniform(zoom_min, zoom_max) / 2 * out_w)
            chosen_values['w_dev'] = w_dev

            im_new = im_new.crop((0 + w_dev,
                                  0 + w_dev,
                                  out_w - w_dev,
                                  out_h - w_dev))

    # im_new = im_new.resize(output_shape, resample=Image.BILINEAR)
    if im_new.size != output_shape:
        im_new = im_new.resize(output_shape, resample=Image.BILINEAR)

    im_new = np.asarray(im_new).astype('float32') / 255
    im_dst[:] = np.rollaxis(im_new.astype('float32'), 2, 0)

    im.close()
    del im, im_new

    return chosen_values

Example 118

Project: nipy Source File: mask.py
Function: compute_mask
def compute_mask(mean_volume, reference_volume=None, m=0.2, M=0.9,
                        cc=True, opening=2, exclude_zeros=False):
    """
    Compute a mask file from fMRI data in 3D or 4D ndarrays.

    Compute and write the mask of an image based on the grey level
    This is based on a heuristic proposed by T. Nichols:
    find the least dense point of the histogram, between fractions
    m and M of the total image histogram.

    In case of failure, it is usually advisable to increase m.

    Parameters
    ----------
    mean_volume : 3D ndarray
        mean EPI image, used to compute the threshold for the mask.
    reference_volume: 3D ndarray, optional
        reference volume used to compute the mask. If none is given, the
        mean volume is used.
    m : float, optional
        lower fraction of the histogram to be discarded.
    M: float, optional
        upper fraction of the histogram to be discarded.
    cc: boolean, optional
        if cc is True, only the largest connected component is kept.
    opening: int, optional
        if opening is larger than 0, a morphological opening is performed,
        to keep only large structures. This step is useful to remove parts of
        the skull that might have been included.
    exclude_zeros: boolean, optional
        Consider zeros as missing values for the computation of the
        threshold. This option is useful if the images have been
        resliced with a large padding of zeros.

    Returns
    -------
    mask : 3D boolean ndarray
        The brain mask
    """
    if reference_volume is None:
        reference_volume = mean_volume
    sorted_input = np.sort(mean_volume.reshape(-1))
    if exclude_zeros:
        sorted_input = sorted_input[sorted_input != 0]
    limiteinf = int(np.floor(m * len(sorted_input)))
    limitesup = int(np.floor(M * len(sorted_input)))

    delta = sorted_input[limiteinf + 1:limitesup + 1] \
            - sorted_input[limiteinf:limitesup]
    ia = delta.argmax()
    threshold = 0.5 * (sorted_input[ia + limiteinf]
                        + sorted_input[ia + limiteinf + 1])

    mask = (reference_volume >= threshold)

    if cc:
        mask = largest_cc(mask)

    if opening > 0:
        mask = ndimage.binary_opening(mask.astype(np.int),
                                        iterations=opening)
    return mask.astype(bool)
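
The np.sort call above carries the whole heuristic: on the sorted grey levels, the "least dense point of the histogram" is the largest gap between consecutive values inside the [m, M] fraction window, and the threshold is the midpoint of that gap. A compact sketch of that step alone, using random toy intensities:

import numpy as np

rng = np.random.RandomState(0)
values = np.concatenate([rng.normal(0.0, 1.0, 500),    # "background" intensities
                         rng.normal(10.0, 1.0, 200)])  # "brain" intensities
sorted_input = np.sort(values)

m, M = 0.2, 0.9
lo = int(np.floor(m * len(sorted_input)))
hi = int(np.floor(M * len(sorted_input)))

delta = sorted_input[lo + 1:hi + 1] - sorted_input[lo:hi]   # gaps between neighbours
ia = delta.argmax()
threshold = 0.5 * (sorted_input[ia + lo] + sorted_input[ia + lo + 1])
mask = values >= threshold                                  # selects the high-intensity mode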

Example 119

Project: chaco Source File: scatter_1d.py
def _create_plot_component():

    # Create some data
    numpts = 50
    x = sort(random(numpts))
    y = random(numpts)

    # Create a plot data object and give it this data
    pd = ArrayPlotData()
    pd.set_data("index", x)
    pd.set_data("value", y)

    # Create the plot
    plot = Plot(pd, use_backbuffer=True, auto_grid=False)

    plot.plot_1d(
        'index',
        type='line_scatter_1d',
        orientation='h',
        color='lightgrey',
        line_style='dot',
    )

    plot.plot_1d(
        'index',
        type='scatter_1d',
        orientation='h',
        marker='plus',
        alignment='bottom'
    )

    plot.plot_1d(
        'value',
        type='line_scatter_1d',
        orientation='v',
        color='lightgrey',
        line_style='dot',
    )

    plot.plot_1d(
        'value',
        type='scatter_1d',
        orientation='v',
        marker='plus',
        alignment='left'
    )

    plot.plot(("index", "value"),
              type="scatter",
              marker="square",
              index_sort="ascending",
              color="orange",
              marker_size=3, #randint(1,5, numpts),
              bgcolor="white",
              use_backbuffer=True)


    # Tweak some of the plot properties
    plot.title = "1D Scatter Plots"
    plot.line_width = 0.5
    plot.padding = 50

    # Attach some tools to the plot
    plot.tools.append(PanTool(plot, constrain_key="shift"))
    zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
    plot.overlays.append(zoom)

    return plot

Example 120

Project: faces Source File: eigenfaces.py
def create_face_bundle(directory, images_list):
    """ Creates FaceBundle """
    images = validate_directory(images_list)

    img = images[0]
    width, height = img.size
    pixels = width * height
    numimgs = len(images)

    # Create a 2d array, each row holds pixvalues of a single image
    facet_matrix = zeros((numimgs, pixels))
    for i in xrange(numimgs):
        pixels = asfarray(images[i].getdata())
        facet_matrix[i] = pixels / max(pixels)

    # Create average values, one for each column (ie pixel)
    avg = average(facet_matrix, axis=0)
    if MAKE_AVERAGE:
        # Create average image in current directory just for fun of viewing
        make_image(avg, AVERAGE_FILE_NAME, (width, height))

    # Subtract avg val from each orig val to get adjusted faces (phi of T&P)
    adjfaces = facet_matrix - avg
    adjfaces_transpose = adjfaces.transpose()
    L = dot(adjfaces, adjfaces_transpose)

    if USE_EIGH:
        evals1, evects1 = eigh(L)
    else:
        evects1, evals1, vt = svd(L, 0)
    reversed_evalue_order = evals1.argsort()[::-1]
    evects = evects1[:,reversed_evalue_order]
    evals = sort(evals1)[::-1]

    # Rows in eigen_space are eigenfaces
    eigen_space = dot(adjfaces_transpose, evects).transpose()
    # Normalize rows of eigen_space
    for i in xrange(numimgs):
        ui = eigen_space[i]
        ui.shape = (height, width)
        eigen_space[i] = eigen_space[i] / trace(dot(ui.transpose(), ui))

    bundle = FaceBundle(directory, images_list, width, height, adjfaces,
                        eigen_space, avg, evals)
    #create_eigenimages(bundle, eigen_space) # create eigenface images
    return bundle
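
Example 120 depends on keeping eigenvalues and eigenvectors consistent while reordering: the eigenvector columns are permuted with argsort and the eigenvalues with sort, both in descending order. A minimal sketch of that pairing on a small symmetric matrix (distinct eigenvalues assumed, so both reorderings agree):

import numpy as np

A = np.array([[2.0, 1.0, 0.0],
              [1.0, 3.0, 1.0],
              [0.0, 1.0, 2.0]])

evals, evects = np.linalg.eigh(A)      # eigh returns ascending eigenvalues
order = evals.argsort()[::-1]          # indices for descending order
evects_desc = evects[:, order]         # reorder eigenvectors to match
evals_desc = np.sort(evals)[::-1]      # same descending order for the values

# Column i of evects_desc belongs to evals_desc[i]
for lam, v in zip(evals_desc, evects_desc.T):
    assert np.allclose(A @ v, lam * v)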

Example 121

Project: regreg Source File: fused_lasso.py
def difference_transform(X, order=1, sorted=False,
                         transform=False):
    """
    Compute the divided difference matrix for X
    after sorting X.

    Parameters
    ----------

    X: np.array, np.float, ndim=1

    order: int
        What order of difference should we compute?

    sorted: bool
        Is X sorted?

    transform: bool
        If True, return a linear_transform rather
        than a sparse matrix.

    Returns
    -------

    D: np.array, ndim=2, shape=(n-order,order)
        Matrix of divided differences of sorted X.

    """
    if not sorted:
        X = np.sort(X)
    X = np.asarray(X)
    n = X.shape[0]
    Dfinal = np.identity(n)
    for j in range(1, order+1):
        D = (-np.identity(n-j+1)+np.diag(np.ones(n-j),k=1))[:-1]
        steps = X[j:]-X[:-j]
        inv_steps = np.zeros(steps.shape)
        inv_steps[steps != 0] = 1. / steps[steps != 0]
        D = np.dot(np.diag(inv_steps), D)
        Dfinal = np.dot(D, Dfinal)
    if not transform:
        return sparse.csr_matrix(Dfinal)
    return astransform(Dfinal)
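
As a quick check of the divided-difference construction in Example 121, the order-1 operator built from a sorted grid should map function values to the slopes between consecutive points. The sketch below reproduces only that first-order case and does not import the regreg helper:

import numpy as np

x = np.sort(np.array([0.0, 2.0, 3.0, 7.0]))   # sorting made explicit
n = x.shape[0]

# Order-1 divided differences: (f[i+1] - f[i]) / (x[i+1] - x[i])
D = (-np.identity(n) + np.diag(np.ones(n - 1), k=1))[:-1]
D = np.diag(1.0 / np.diff(x)) @ D

f = x ** 2                                    # sample a smooth function
print(D @ f)                                  # slopes: [2., 5., 10.]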

Example 122

Project: klustaviewa Source File: test_viewdata.py
Function: set_up
def setup():
    # Create files.
    prm = {'waveforms_nsamples': 10, 'nchannels': nchannels,
           'nfeatures_per_channel': 1,
           'sample_rate': 20000.,
           'duration': 10.}
    prb = {0:
        {
            'channels': range(nchannels),
            'graph': [(i, i+1) for i in range(nchannels-1)],
            'geometry': {i: [0., i] for i in range(nchannels)},
        }
    }
    create_files('myexperiment', dir=DIRPATH, prm=prm, prb=prb)
    
    # Open the files.
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    
    # Add data.
    add_recording(files, 
                  sample_rate=prm['sample_rate'],
                  start_time=10., 
                  start_sample=200000.,
                  bit_depth=16,
                  band_high=100.,
                  band_low=500.,
                  nchannels=nchannels,)
    add_event_type(files, 'myevents')
    add_cluster_group(files, name='Noise')
    add_cluster_group(files, name='MUA')
    add_cluster_group(files, name='Good')
    add_cluster_group(files, name='Unsorted')
    add_cluster(files, cluster_group=0, color=1)
    add_cluster(files, cluster_group=1, color=2)
    
    exp = Experiment(files=files)
    chgrp = exp.channel_groups[0]
    nspikes = 1000
    chgrp.spikes.time_samples.append(np.sort(rndint(nspikes)))
    chgrp.spikes.clusters.main.append(np.random.randint(size=nspikes, low=0, high=2))
    chgrp.spikes.features_masks.append(rnd(nspikes, nchannels, 2))
    chgrp.spikes.features_masks[..., 1] = chgrp.spikes.features_masks[..., 1] < .5
    chgrp.spikes.waveforms_raw.append(rndint(nspikes, 10, nchannels))
    chgrp.spikes.waveforms_filtered.append(rndint(nspikes, 10, nchannels))
    
    # Close the files
    close_files(files)

Example 123

Project: chaco Source File: zoomable_colorbar.py
def _create_plot_component():

    # Create some data
    numpts = 1000
    x = sort(random(numpts))
    y = random(numpts)
    color = exp(-(x**2 + y**2))

    # Create a plot data obect and give it this data
    pd = ArrayPlotData()
    pd.set_data("index", x)
    pd.set_data("value", y)
    pd.set_data("color", color)

    # Create the plot
    plot = Plot(pd)
    plot.plot(("index", "value", "color"),
              type="cmap_scatter",
              name="my_plot",
              color_mapper=gist_earth,
              marker = "square",
              fill_alpha = 0.5,
              marker_size = 8,
              outline_color = "black",
              border_visible = True,
              bgcolor = "white")

    # Tweak some of the plot properties
    plot.title = "Colormapped Scatter Plot with Pan/Zoom Color Bar"
    plot.padding = 50
    plot.x_grid.visible = False
    plot.y_grid.visible = False
    plot.x_axis.font = "modern 16"
    plot.y_axis.font = "modern 16"

    # Add pan and zoom to the plot
    plot.tools.append(PanTool(plot, constrain_key="shift"))
    zoom = ZoomTool(plot)
    plot.overlays.append(zoom)

    # Create the colorbar, handing in the appropriate range and colormap
    colorbar = ColorBar(index_mapper=LinearMapper(range=plot.color_mapper.range),
                        color_mapper=plot.color_mapper,
                        orientation='v',
                        resizable='v',
                        width=30,
                        padding=20)
    colorbar.plot = plot
    colorbar.padding_top = plot.padding_top
    colorbar.padding_bottom = plot.padding_bottom

    # Add pan and zoom tools to the colorbar
    colorbar.tools.append(PanTool(colorbar, constrain_direction="y", constrain=True))
    zoom_overlay = ZoomTool(colorbar, axis="index", tool_mode="range",
                            always_on=True, drag_button="right")
    colorbar.overlays.append(zoom_overlay)

    # Create a container to position the plot and the colorbar side-by-side
    container = HPlotContainer(plot, colorbar, use_backbuffer=True, bgcolor="lightgray")

    return container

Example 124

Project: msmtools Source File: correlations.py
def time_relaxations_direct(P, p0, obs, times=[1]):
    r"""Compute time-relaxations of obs with respect of given initial distribution.

    relaxation(k) = p0 P^k obs

    Parameters
    ----------
    P : ndarray, shape=(n, n) or scipy.sparse matrix
        Transition matrix
    p0 : ndarray, shape=(n)
        initial distribution
    obs : ndarray, shape=(n)
        Vector representing observable on discrete states.
    times : array-like, shape(n_t)
        Vector of time points at which the (auto)correlation will be evaluated

    Returns
    -------
    relaxations : ndarray, shape(n_t)
    """
    n_t = len(times)
    times = np.sort(times)

    # maximum time > number of rows?
    use_diagonalization = False
    if times[-1] > P.shape[0]:
        use_diagonalization = True
        R, D, L = rdl_decomposition(P)
        # discard imaginary part, if all elements i=0
        if not np.any(np.iscomplex(R)):
            R = np.real(R)
        if not np.any(np.iscomplex(D)):
            D = np.real(D)
        if not np.any(np.iscomplex(L)):
            L = np.real(L)
        rdl = (R, D, L)

    f = np.empty(n_t, dtype=D.dtype if use_diagonalization else float)

    if use_diagonalization:
        for i in range(n_t):
            f[i] = time_relaxation_direct_by_diagonalization(P, p0, obs, times[i], rdl)
    else:
        start_values = None
        for i in range(n_t):
            f[i], start_values = time_relaxation_direct_by_mtx_vec_prod(P, p0, obs, times[i], start_values, True)
    return f
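
Example 124 sorts the requested time points so each evaluation can build on the previous one instead of re-propagating from scratch (and so the diagonalization shortcut is decided by the largest time). A toy sketch of that reuse, with the propagation written out directly rather than through the msmtools helpers:

import numpy as np

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])               # toy transition matrix
p0 = np.array([1.0, 0.0])                # initial distribution
obs = np.array([0.0, 1.0])               # observable on the two states
times = np.sort(np.array([5, 1, 3]))     # sorted so each step extends the last

relaxations = np.empty(len(times))
state = p0.copy()
k_done = 0
for i, k in enumerate(times):
    for _ in range(k - k_done):          # only advance by the missing steps
        state = state @ P
    k_done = k
    relaxations[i] = state @ obs         # relaxation(k) = p0 P^k obs

print(dict(zip(times.tolist(), relaxations.tolist())))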

Example 125

Project: nifty Source File: powerspectrum.py
def bin_power_indices(pindex,kindex,rho,log=False,nbin=None,binbounds=None):
    """
        Returns the (re)binned power indices associated with the Fourier grid.

        Parameters
        ----------
        pindex : ndarray
            Index of the Fourier grid points in a numpy.ndarray ordered
            following the zerocentered flag (default=None).
        kindex : ndarray
            Array of all k-vector lengths (default=None).
        rho : ndarray
            Degeneracy of the Fourier grid, indicating how many k-vectors in
            Fourier space have the same length (default=None).
        log : bool
            Flag specifying if the binning is performed on logarithmic scale
            (default: False).
        nbin : integer
            Number of used bins (default: None).
        binbounds : {list, array}
            Array-like inner boundaries of the used bins (default: None).

        Returns
        -------
        pindex, kindex, rho : ndarrays
            The (re)binned power indices.

    """
    ## boundaries
    if(binbounds is not None):
        binbounds = np.sort(binbounds)
    ## equal binning
    else:
        if(log is None):
            log = False
        if(log):
            k = np.r_[0,np.log(kindex[1:])]
        else:
            k = kindex
        dk = np.max(k[2:]-k[1:-1]) ## minimal dk
        if(nbin is None):
            nbin = int((k[-1]-0.5*(k[2]+k[1]))/dk-0.5) ## maximal nbin
        else:
            nbin = min(int(nbin),int((k[-1]-0.5*(k[2]+k[1]))/dk+2.5))
            dk = (k[-1]-0.5*(k[2]+k[1]))/(nbin-2.5)
        binbounds = np.r_[0.5*(3*k[1]-k[2]),0.5*(k[1]+k[2])+dk*np.arange(nbin-2)]
        if(log):
            binbounds = np.exp(binbounds)
    ## reordering
    reorder = np.searchsorted(binbounds,kindex)
    rho_ = np.zeros(len(binbounds)+1,dtype=rho.dtype)
    kindex_ = np.empty(len(binbounds)+1,dtype=kindex.dtype)
    for ii in range(len(reorder)):
        if(rho_[reorder[ii]]==0):
            kindex_[reorder[ii]] = kindex[ii]
            rho_[reorder[ii]] += rho[ii]
        else:
            kindex_[reorder[ii]] = (kindex_[reorder[ii]]*rho_[reorder[ii]]+kindex[ii]*rho[ii])/(rho_[reorder[ii]]+rho[ii])
            rho_[reorder[ii]] += rho[ii]

    return reorder[pindex],kindex_,rho_
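
The core move in Example 125 is the np.sort / np.searchsorted pairing: once the bin boundaries are sorted, a single vectorized call assigns every k-value to a bin. A small, self-contained sketch of that binning step (the rho-weighted averaging is left out, and the values here are made up):

import numpy as np

kindex = np.array([0.0, 1.0, 1.4, 2.0, 2.8, 4.0])   # k-vector lengths
binbounds = np.sort(np.array([3.0, 1.5]))           # boundaries, possibly unsorted on input

# searchsorted needs sorted boundaries; bin b collects k in (bound[b-1], bound[b]]
reorder = np.searchsorted(binbounds, kindex)
print(reorder)       # [0 0 0 1 1 2] -> rebinned index for every k value

# how many original k-shells fall into each rebinned shell
counts = np.bincount(reorder, minlength=len(binbounds) + 1)
print(counts)        # [3 2 1]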

Example 126

Project: faststats Source File: hist.py
Function: bayesian_blocks
def bayesian_blocks(t):
    """Bayesian Blocks Implementation

    By Jake Vanderplas.  License: BSD
    Based on algorithm outlined in http://adsabs.harvard.edu/abs/2012arXiv1207.5578S

    Parameters
    ----------
    t : ndarray, length N
        data to be histogrammed

    Returns
    -------
    bins : ndarray
        array containing the (N+1) bin edges

    Notes
    -----
    This is an incomplete implementation: it may fail for some
    datasets.  Alternate fitness functions and prior forms can
    be found in the paper listed above.
    """
    # copy and sort the array
    t = np.sort(t)
    N = t.size

    # create length-(N + 1) array of cell edges
    edges = np.concatenate([t[:1], 0.5 * (t[1:] + t[:-1]), t[-1:]])
    block_length = t[-1] - edges

    # arrays needed for the iteration
    nn_vec = np.ones(N)
    best = np.zeros(N, dtype=float)
    last = np.zeros(N, dtype=int)

    #-----------------------------------------------------------------
    # Start with first data cell; add one cell at each iteration
    #-----------------------------------------------------------------
    for K in range(N):
        # Compute the width and count of the final bin for all possible
        # locations of the K^th changepoint
        width = block_length[:K + 1] - block_length[K + 1]
        count_vec = np.cumsum(nn_vec[:K + 1][::-1])[::-1]

        # evaluate fitness function for these possibilities
        fit_vec = count_vec * (np.log(count_vec) - np.log(width))
        fit_vec -= 4  # 4 comes from the prior on the number of changepoints
        fit_vec[1:] += best[:K]

        # find the max of the fitness: this is the K^th changepoint
        i_max = np.argmax(fit_vec)
        last[K] = i_max
        best[K] = fit_vec[i_max]

    #-----------------------------------------------------------------
    # Recover changepoints by iteratively peeling off the last block
    #-----------------------------------------------------------------
    change_points = np.zeros(N, dtype=int)
    i_cp = N
    ind = N
    while True:
        i_cp -= 1
        change_points[i_cp] = ind
        if ind == 0:
            break
        ind = last[ind - 1]
    change_points = change_points[i_cp:]

    return edges[change_points]
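
The opening lines of Example 126 are the part that depends on np.sort: the data must be sorted before the Voronoi-style cell edges (midpoints between neighbouring points) make sense. A tiny self-contained sketch of just that edge construction:

import numpy as np

t = np.array([3.0, 1.0, 2.0, 10.0])
t = np.sort(t)                    # sorting is required before taking midpoints

# cell edges: first point, midpoints between neighbours, last point
edges = np.concatenate([t[:1], 0.5 * (t[1:] + t[:-1]), t[-1:]])
print(edges)                      # [ 1.   1.5  2.5  6.5 10. ]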

Example 127

Project: chumpy Source File: reordering.py
Function: reorder
    def reorder(self, a): return np.sort(a, self.axis, self.kind, self.order)
    def unique_reorder_id(self): return None

Example 128

Project: evoMPS Source File: tdvp_common.py
def herm_fac_with_inv(A, lower=False, zero_tol=1E-15, return_rank=False, 
                      calc_inv=True, force_evd=False, 
                      sanity_checks=False, sc_data=''):
    """Factorizes a Hermitian matrix using either Cholesky or eigenvalue decomposition.
    
    Decomposes a Hermitian A as A = X*X or, if lower == True, A = XX*.
    
    Tries Cholesky first by default, then falls back to EVD if the matrix is 
    not positive-definite. If Cholesky decomposition is used, X is upper (or lower)
    triangular. For the EVD decomposition, the inverse becomes a pseudo-inverse
    and all eigenvalues below the zero-tolerance are set to zero.
    
    Parameters
    ----------
    A : ndarray
        The Hermitian matrix to be factorized.
    lower : bool
        Refers to Cholesky factorization. If True, factorize as A = XX*, otherwise as A = X*X
    zero_tol : float
        Tolerance for detection of zeros in EVD case.
    return_rank : bool
        Whether to return the rank of A. The detected rank is affected by zero_tol.
    calc_inv : bool
        Whether to calculate (and return) the inverse of the factor.
    force_evd : bool
        Whether to force eigenvalue instead of Cholesky decomposition.
    sanity_checks : bool
        Whether to perform some basic sanity checks.
    """    
    if not force_evd:
        try:
            x = la.cholesky(A, lower=lower)
            if calc_inv:
                xi = mm.invtr(x, lower=lower)
            else:
                xi = None
            
            nonzeros = A.shape[0]
        except sp.linalg.LinAlgError: #this usually means a is not pos. def.
            force_evd = True
            
    if force_evd:
        ev, EV = la.eigh(A, turbo=True) #wraps lapack routines, which return eigenvalues in ascending order
        
        if sanity_checks:
            assert np.all(ev == np.sort(ev)), "Sanity fail in herm_fac_with_inv(): Unexpected eigenvalue ordering"
            
            if ev.min() < -zero_tol:
                log.warning("Sanity fail in herm_fac_with_inv(): Discarding negative eigenvalues! %s %s",
                            ev.min(), sc_data)
        
        nonzeros = np.count_nonzero(ev > zero_tol) 

        ev_sq = sp.zeros_like(ev, dtype=A.dtype)
        ev_sq[-nonzeros:] = sp.sqrt(ev[-nonzeros:])
        ev_sq = mm.simple_diag_matrix(ev_sq, dtype=A.dtype)
        
        if calc_inv:
            #Replace almost-zero values with zero and perform a pseudo-inverse
            ev_sq_i = sp.zeros_like(ev, dtype=A.dtype)
            ev_sq_i[-nonzeros:] = 1. / ev_sq[-nonzeros:]
            
            ev_sq_i = mm.simple_diag_matrix(ev_sq_i, dtype=A.dtype)        
                   
        xi = None
        if lower:
            x = ev_sq.dot_left(EV)
            if calc_inv:
                xi = ev_sq_i.dot(EV.conj().T)
        else:
            x = ev_sq.dot(EV.conj().T)
            if calc_inv:
                xi = ev_sq_i.dot_left(EV)
            
    if sanity_checks:
        if not sp.allclose(A, A.conj().T, atol=1E-13, rtol=1E-13):
            log.warning("Sanity fail in herm_fac_with_inv(): A is not Hermitian! %s %s",
                        la.norm(A - A.conj().T), sc_data)
        
        eye = sp.zeros((A.shape[0]), dtype=A.dtype)
        eye[-nonzeros:] = 1
        eye = mm.simple_diag_matrix(eye)
        
        if lower:
            if calc_inv:
                if not sp.allclose(xi.dot(x), eye, atol=1E-13, rtol=1E-13):
                    log.warning("Sanity fail in herm_fac_with_inv(): Bad left inverse! %s %s",
                                la.norm(xi.dot(x) - eye), sc_data)
                                
                if not sp.allclose(xi.dot(A).dot(xi.conj().T), eye, atol=1E-13, rtol=1E-13):
                    log.warning("Sanity fail in herm_fac_with_inv(): Bad A inverse! %s %s",
                                la.norm(xi.conj().T.dot(A).dot(xi) - eye), sc_data)
    
            if not sp.allclose(x.dot(x.conj().T), A, atol=1E-13, rtol=1E-13):
                log.warning("Sanity fail in herm_fac_with_inv(): Bad decomp! %s %s",
                            la.norm(x.dot(x.conj().T) - A), sc_data)
        else:
            if calc_inv:
                if not sp.allclose(x.dot(xi), eye, atol=1E-13, rtol=1E-13):
                    log.warning("Sanity fail in herm_fac_with_inv(): Bad right inverse! %s %s",
                                la.norm(x.dot(xi) - eye), sc_data)
                if not sp.allclose(xi.conj().T.dot(A).dot(xi), eye, atol=1E-13, rtol=1E-13):
                    log.warning("Sanity fail in herm_fac_with_inv(): Bad A inverse! %s %s",
                                la.norm(xi.conj().T.dot(A).dot(xi) - eye), sc_data)

    
            if not sp.allclose(x.conj().T.dot(x), A, atol=1E-13, rtol=1E-13):
                log.warning("Sanity fail in herm_fac_with_inv(): Bad decomp! %s %s",
                            la.norm(x.conj().T.dot(x) - A), sc_data)
                    
    if calc_inv:
        if return_rank:
            return x, xi, nonzeros
        else:
            return x, xi
    else:
        if return_rank:
            return x, nonzeros
        else:
            return x
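
In Example 128, np.sort appears only in a sanity check: scipy's eigh is documented to return eigenvalues in ascending order, and the assertion np.all(ev == np.sort(ev)) verifies that before the code indexes the spectrum from the tail. A standalone sketch of the check and the tail indexing it protects:

import numpy as np
from scipy import linalg as la

rng = np.random.default_rng(1)
B = rng.standard_normal((5, 3))
A = B @ B.T                    # symmetric positive semi-definite, rank 3

ev, EV = la.eigh(A)
assert np.all(ev == np.sort(ev)), "eigh should return ascending eigenvalues"

zero_tol = 1e-12
nonzeros = np.count_nonzero(ev > zero_tol)
# Because ev is ascending, the nonzero eigenvalues are exactly the last `nonzeros`
print(nonzeros, ev[-nonzeros:])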

Example 129

Project: trimesh Source File: util.py
def submesh(mesh, 
            faces_sequence, 
            only_watertight = False, 
            append          = False):
    '''
    Return a subset of a mesh.

    Arguments
    ----------
    mesh: Trimesh object
    faces_sequence: sequence of face indices from mesh
    only_watertight: only return submeshes which are watertight. 
    append: return a single mesh which has the faces specified appended.
            if this flag is set, only_watertight is ignored

    Returns
    ---------
    if append: Trimesh object
    else:      list of Trimesh objects
    '''
    # evaluate generators so we can escape early
    faces_sequence = list(faces_sequence)

    if len(faces_sequence) == 0: return []

    # check to make sure we're not doing a whole bunch of work
    # to deliver a subset which ends up as the whole mesh
    if len(faces_sequence[0]) == len(mesh.faces):
        all_faces = np.array_equal(np.sort(faces_sequence),
                                   np.arange(len(faces_sequence)))
        if all_faces:
            log.debug('Subset of entire mesh requested, returning copy of original')
            return mesh.copy()

    # avoid nuking the cache on the original mesh
    original_faces    = mesh.faces.view(np.ndarray)
    original_vertices = mesh.vertices.view(np.ndarray)

    faces    = deque()
    vertices = deque()
    normals  = deque()
    visuals  = deque()

    # for reindexing faces
    mask = np.arange(len(original_vertices))    

    for faces_index in faces_sequence:        
        # sanitize indices in case they are coming in as a set or tuple
        faces_index   = np.array(list(faces_index))
        if len(faces_index) == 0:
            continue
        faces_current = original_faces[faces_index]
        unique = np.unique(faces_current.reshape(-1))
        
        # redefine face indices from zero
        mask[unique] = np.arange(len(unique))

        normals.append(mesh.face_normals[faces_index])
        faces.append(mask[faces_current])
        vertices.append(original_vertices[unique])
        visuals.extend(mesh.visual.subsets([faces_index]))
    # we use type(mesh) rather than importing Trimesh from base
    # as this causes a circular import
    trimesh_type = type_named(mesh, 'Trimesh')
    if append:
        visuals = np.array(visuals)
        vertices, faces = append_faces(vertices, faces)
        appended = trimesh_type(vertices = vertices,
                                faces = faces,
                                face_normals = np.vstack(normals),
                                visual = visuals[0].union(visuals[1:]),
                                process = False)
        return appended
    result = [trimesh_type(vertices     = v, 
                           faces        = f, 
                           face_normals = n,
                           visual       = c,
                           metadata     = mesh.metadata,
                           process = False) for v,f,n,c in zip(vertices, 
                                                               faces, 
                                                               normals,
                                                               visuals)]
    result = np.array(result)
    if len(result) > 0 and only_watertight:
        watertight = np.array([i.fill_holes() and len(i.faces) > 4 for i in result])
        result     = result[watertight]
    return result

Example 130

Project: Psignifit-3.x Source File: psignipriors.py
def default_width ( x, method="moments" ):
    """Default prior for the width of a psychometric function

    :Parameters:
        *x* :
            array of stimulus intensities used in the experiment
        *method* :
            method to determine the range spanned by the prior. This could either be
            'moments' which implies that the maximum width is mean+standard deviation of the
            gamma distribution and the minimum width is set to mean-standard deviation.
            Alternatively, method can be a sequence of quantiles, which implies
            that the quantiles are matched with the minimum and maximum width.
    """
    xx = np.sort(x)
    wmin = np.min(np.diff(xx))
    wmax = 2*(xx[-1]-xx[0])
    if method=='moments':
        wr = wmin/wmax
        k  = ((1+wr)/(1-wr))**2
        th = wmax/(k+np.sqrt(k))
    elif isinstance ( method, (tuple,list) ):
        def error ( prm ):
            e = stats.gamma.cdf ( [wmin,wmax], prm[0], scale=prm[1] ) - np.array(method)
            e = np.sum(e**2)
            if np.isnan ( e ):
                return 10000.
            else:
                return e
        k,th = optimize.fmin ( error, [1.,4.] )

    return "Gamma(%g,%g)" % (k,th),wmin,wmax

Example 131

Project: horizont Source File: lda.py
    def score(self, X, R, random_state=None):
        """
        Calculate marginal probability of observations in X given Phi.

        Returns a list with estimates for each document separately, mimicking
        the behavior of scikit-learn. Uses Buntine's left-to-right sequential sampler.

        Parameters
        ----------
        X : array, [n_samples, n_features]
            The document-term matrix of documents for evaluation.

        R : int
            The number of particles to use for the estimation.

        Returns
        -------
        logprobs : array of length n_samples
            Estimate of marginal log probability for each row of X.
        """
        if random_state is None:
            random_state = sklearn.utils.check_random_state(self.random_state)
            rands = self._rands
        else:
            random_state = sklearn.utils.check_random_state(random_state)
            # get rands in a known state
            rands = np.sort(self._rands)
            random_state.shuffle(rands)

        N, V = X.shape
        Phi, alpha = self.phi_, self.alpha
        np.testing.assert_equal(V, Phi.shape[1])

        # calculate marginal probability for each document separately
        logprobs = []
        multiprocessing = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
        if multiprocessing:
            with futures.ProcessPoolExecutor() as ex:
                futs = []
                for x in X:
                    # for consistency one needs to pass a copy of rands
                    # the executor takes time to pickle; it's not instantaneous
                    futs.append(ex.submit(horizont._lda._score_doc, x, Phi, alpha, R, rands.copy()))
                    random_state.shuffle(rands)
                for i, fut in enumerate(futs):
                    logprobs.append(fut.result())
        else:
            for x in X:
                logprobs.append(horizont._lda._score_doc(x, Phi, alpha, R, rands))
                random_state.shuffle(rands)
        return logprobs

Example 132

Project: evoMPS Source File: tdvp_common.py
def restore_LCF_r(A, r, Gi, sanity_checks=False):
    if Gi is None:
        x = r
    else:
        x = Gi.dot(r.dot(Gi.conj().T))

    M = eps_r_noop(x, A, A)
    ev, EV = la.eigh(M) #wraps lapack routines, which return eigenvalues in ascending order
    
    if sanity_checks:
        assert np.all(ev == np.sort(ev)), "unexpected eigenvalue ordering"
    
    rm1 = mm.simple_diag_matrix(ev, dtype=A.dtype)
    Gm1 = EV.conj().T

    if Gi is None:
        Gi = EV #for left uniform case
        r = rm1 #for sanity check

    for s in xrange(A.shape[0]):
        A[s] = Gm1.dot(A[s].dot(Gi))

    if sanity_checks:
        rm1_ = eps_r_noop(r, A, A)
        if not sp.allclose(rm1_, rm1, atol=1E-12, rtol=1E-12):
            log.warning("Sanity Fail in restore_LCF_r!: r is bad!")
            log.warning(la.norm(rm1_ - rm1))

    Gm1_i = EV

    return rm1, Gm1, Gm1_i

Example 133

Project: RHEAS Source File: merra.py
def _downloadVariable(varname, dbname, dt, bbox):
    """Download specific variable from the MERRA Reanalysis dataset."""
    # FIXME: Grid is not rectangular, but 0.5 x 0.625 degrees
    log = logging.getLogger(__name__)
    res = 0.5
    try:
        url = "http://goldsmr4.sci.gsfc.nasa.gov:80/dods/M2T1NXSLV"
        ds = netcdf.Dataset(url)
        lat = ds.variables["lat"][:]
        lon = ds.variables["lon"][:]
        lon[lon > 180] -= 360.0
        i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox)
        data = np.zeros((i2-i1, j2-j1))
        lati = np.argsort(lat)[::-1][i1:i2]
        loni = np.argsort(lon)[j1:j2]
        t = ds.variables["time"]
        tt = netcdf.num2date(t[:], units=t.units)
        ti = np.where(tt == dt)[0][0]
        if varname == "tmax":
            hdata = ds.variables["t2m"][ti:ti+24, lati, loni]
            data = np.amax(hdata, axis=0) - 273.15
        elif varname == "tmin":
            hdata = ds.variables["t2m"][ti:ti+24, lati, loni]
            data = np.amin(hdata, axis=0) - 273.15
        elif varname in ["wind"]:
            hdata = np.sqrt(ds.variables["u10m"][ti:ti+24, lati, loni]**2 + ds.variables["v10m"][ti:ti+24, lati, loni]**2)
            data = np.mean(hdata, axis=0)
        lat = np.sort(lat)[::-1][i1:i2]
        lon = np.sort(lon)[j1:j2]
        filename = dbio.writeGeotif(lat, lon, res, data)
        table = "{0}.merra".format(varname)
        dbio.ingest(dbname, filename, dt, table)
        log.info("Imported {0} in {1}".format(tt[ti].strftime("%Y-%m-%d"), table))
        os.remove(filename)
    except:
        log.warning("Cannot import MERRA dataset for {0}!".format(dt.strftime("%Y-%m-%d")))

Example 134

Project: chaco Source File: cmap_scatter.py
def _create_plot_component():

    # Create some data
    numpts = 1000
    x = sort(random(numpts))
    y = random(numpts)
    color = exp(-(x**2 + y**2))

    # Create a plot data obect and give it this data
    pd = ArrayPlotData()
    pd.set_data("index", x)
    pd.set_data("value", y)
    pd.set_data("color", color)

    # Create the plot
    plot = Plot(pd)
    plot.plot(("index", "value", "color"),
              type="cmap_scatter",
              name="my_plot",
              color_mapper=jet,
              marker = "square",
              fill_alpha = 0.5,
              marker_size = 6,
              outline_color = "black",
              border_visible = True,
              bgcolor = "white")

    # Tweak some of the plot properties
    plot.title = "Colormapped Scatter Plot with Range-selectable Data Points"
    plot.padding = 50
    plot.x_grid.visible = False
    plot.y_grid.visible = False
    plot.x_axis.font = "modern 16"
    plot.y_axis.font = "modern 16"

    # Right now, some of the tools are a little invasive, and we need the
    # actual ColormappedScatterPlot object to give to them
    cmap_renderer = plot.plots["my_plot"][0]

    # Attach some tools to the plot
    plot.tools.append(PanTool(plot, constrain_key="shift"))
    zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
    plot.overlays.append(zoom)
    selection = ColormappedSelectionOverlay(cmap_renderer, fade_alpha=0.35,
                                            selection_type="mask")
    cmap_renderer.overlays.append(selection)

    # Create the colorbar, handing in the appropriate range and colormap
    colorbar = create_colorbar(plot.color_mapper)
    colorbar.plot = cmap_renderer
    colorbar.padding_top = plot.padding_top
    colorbar.padding_bottom = plot.padding_bottom

    # Create a container to position the plot and the colorbar side-by-side
    container = HPlotContainer(use_backbuffer = True)
    container.add(plot)
    container.add(colorbar)
    container.bgcolor = "lightgray"
    return container

Example 135

Project: spinalcordtoolbox Source File: msct_smooth.py
def non_parametric(x,y,f = 0.25,iter = 3):
    """lowess(x, y, f=2./3., iter=3) -> yest

    Lowess smoother: Robust locally weighted regression.
    The lowess function fits a nonparametric regression curve to a scatterplot.
    The arrays x and y contain an equal number of elements; each pair
    (x[i], y[i]) defines a data point in the scatterplot. The function returns
    the estimated (smooth) values of y.

    The smoothing span is given by f. A larger value for f will result in a
    smoother curve. The number of robustifying iterations is given by iter. The
    function will run faster with a smaller number of iterations.
    https://gist.github.com/agramfort/850437 """
    from math import ceil
    from scipy import linalg
    from numpy import sort, abs, zeros, ones, array, sum, median, clip
    
    n = len(x)
    r = int(ceil(f*n))
    h = [sort(abs(x - x[i]))[r] for i in range(n)]
    w = clip(abs((x[:,None] - x[None,:]) / h), 0.0, 1.0)
    w = (1 - w**3)**3
    yest = zeros(n)
    delta = ones(n)
    for iteration in range(iter):
        for i in range(n):
            weights = delta * w[:,i]
            b = array([sum(weights*y), sum(weights*y*x)])
            A = array([[sum(weights), sum(weights*x)],
                   [sum(weights*x), sum(weights*x*x)]])
            beta = linalg.solve(A, b)
            yest[i] = beta[0] + beta[1]*x[i]

        residuals = y - yest
        s = median(abs(residuals))
        delta = clip(residuals / (6.0 * s), -1, 1)
        delta = (1 - delta**2)**2

    return yest
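
In Example 135, np.sort does one job: for each point x[i] the bandwidth h[i] is the distance to its r-th nearest neighbour, found by sorting the absolute distances and reading off the r-th entry. A small sketch of just that bandwidth step:

import numpy as np
from math import ceil

x = np.linspace(0.0, 1.0, 11)
f = 0.25                                 # smoothing span as a fraction of the points
r = int(ceil(f * len(x)))                # neighbour rank used as the bandwidth

# h[i] = distance from x[i] to its r-th nearest neighbour
h = np.array([np.sort(np.abs(x - xi))[r] for xi in x])
print(h)                                 # larger near the edges, smaller in the middle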

Example 136

Project: neupy Source File: art.py
    def train(self, input_data):
        input_data = format_data(input_data)

        if input_data.ndim != 2:
            raise ValueError("Input value must be 2 dimensional, got "
                             "{0}".format(input_data.ndim))

        data_size = input_data.shape[1]
        n_clusters = self.n_clusters
        step = self.step
        rho = self.rho

        if list(sort(unique(input_data))) != [0, 1]:
            raise ValueError("ART1 Network works only with binary matrix, "
                             "all matix must contains only 0 and 1")

        if not hasattr(self, 'weight_21'):
            self.weight_21 = ones((data_size, n_clusters))

        if not hasattr(self, 'weight_12'):
            self.weight_12 = step / (step + n_clusters - 1) * self.weight_21.T

        weight_21 = self.weight_21
        weight_12 = self.weight_12

        if data_size != weight_21.shape[0]:
            raise ValueError(
                "Data dimension is invalid. Get {} columns data set. "
                "Must be - {} columns".format(
                    data_size, weight_21.shape[0]
                )
            )

        classes = zeros(input_data.shape[0])

        # Train network
        for i, p in enumerate(input_data):
            disabled_neurons = []
            reseted_values = []
            reset = True

            while reset:
                output1 = p
                input2 = dot(weight_12, output1.T)

                output2 = zeros(input2.size)
                input2[disabled_neurons] = -inf
                winner_index = input2.argmax()
                output2[winner_index] = 1

                expectation = dot(weight_21, output2)
                output1 = logical_and(p, expectation).astype(int)

                reset_value = dot(output1.T, output1) / dot(p.T, p)
                reset = reset_value < rho

                if reset:
                    disabled_neurons.append(winner_index)
                    reseted_values.append((reset_value, winner_index))

                if len(disabled_neurons) >= n_clusters:
                    # Got this case only if we test all possible clusters
                    reset = False
                    winner_index = None

                if not reset:
                    if winner_index is not None:
                        weight_12[winner_index, :] = (step * output1) / (
                            step + dot(output1.T, output1) - 1
                        )
                        weight_21[:, winner_index] = output1
                    else:
                        # Get result with the best `rho`
                        winner_index = max(reseted_values)[1]

                    classes[i] = winner_index

        return classes

Example 137

Project: brainx Source File: test_modularity.py
def test_apply_module_split():
    """Test the GraphPartition operation that splits modules so that it returns
    a change in modularity that reflects the difference between the modularity
    of the new and old partitions.
    Also test that the module that was split now contains the correct nodes,
    the correct modularity update, the correct energy, and that no empty modules
    result from it."""

    # nnod_mod, av_degrees, nmods
    networks = [ [3, [2], [2, 3, 4]],
                 [4, [2, 3], [2, 4, 6]],
                 [8, [4, 6], [4, 6, 8]] ]

    for nnod_mod, av_degrees, nmods in networks:
        for nmod in nmods:
            nnod = nnod_mod*nmod
            for av_degree in av_degrees:

                g = mod.random_modular_graph(nnod, nmod, av_degree)

                # Make a "correct" partition for the graph
                ## part = mod.perfect_partition(nmod,nnod/nmod)

                # Make a random partition for the graph
                part_rand = mod.rand_partition(g, nnod//2)

                #List of modules in the partition that have two or more nodes
                r_mod = []
                for m, nodes in part_rand.items():
                    if len(nodes)>2:
                        r_mod.append(m)

                # Note: The above can be written in this more compact, if
                # slightly less intuitively clear, list comprehension:
                # r_mod = [ m for m, nodes in part_rand.items() if
                #          len(nodes)>2 ]

                #Module that we are splitting
                for m in r_mod:

                    graph_partition = mod.GraphPartition(g,part_rand)

                    #index of nodes within the original module (before split)
                    n_init = list(graph_partition.index[m])

                    #calculate modularity before splitting
                    mod_init = graph_partition.modularity()

                    # assign nodes to two groups
                    n1_orig,n2_orig = graph_partition.determine_node_split(m)

                    # make sure neither of these is empty
                    nt.assert_true(len(n1_orig)>= 1)
                    nt.assert_true(len(n2_orig)>= 1)

                    #make sure that there are no common nodes between the two
                    node_intersection = set.intersection(n1_orig,n2_orig)
                    nt.assert_equal(node_intersection,set([]))

                    #make sure that sum of the two node sets equals the
                    #original set
                    node_union = set.union(n1_orig,n2_orig)
                    npt.assert_equal(np.sort(list(node_union)),np.sort(n_init))

                    # split modules
                    split_modules,e1,a1,delta_energy_meas,type,m,n1,n2 = \
                               graph_partition.compute_module_split(m,n1_orig,n2_orig)

                    #note: n1 and n2 are output from this function (as well as
                    #inputs) because the function is called from within another
                    #(rand_mod) function but then output to the simulated script, so
                    #the node split needs to be passed along.

                    #as a simple confirmation, can make sure they match
                    npt.assert_equal(n1_orig,n1)
                    npt.assert_equal(n2_orig,n2)

                    #split_modules should be a dictionary with two modules
                    #(0,1) that contain the node sets n1 and n2 respectively.
                    #test this.
                    npt.assert_equal(split_modules[0],n1)
                    npt.assert_equal(split_modules[1],n2)

                    #make a new graph partition equal to the old one and apply
                    #the module split to it (graph_part2)
                    graph_part2 = copy.deepcopy(graph_partition)
                    graph_part2.apply_module_split(m,n1,n2,split_modules,e1,a1)

                    #make a third graph partition using only the original graph
                    #and the partition from graph_part2
                    graph_part3 = mod.GraphPartition(g,graph_part2.index)

                    #index of nodes within the modules after splitting
                    n1_new = list(graph_part2.index[m])
                    n2_new = list(graph_part2.index[len(graph_part2)-1])
                    n_all = n1_new + n2_new

                    # recalculate modularity after splitting
                    mod_new = graph_part2.modularity()
                    mod_new_3 = graph_part3.modularity()

                    # difference between new and old modularity
                    delta_energy_true = -(mod_new - mod_init)

                    # Test that the measured change in energy by splitting a
                    # module is equal to the function output from module_split
                    npt.assert_almost_equal(delta_energy_meas,
                                                  delta_energy_true)

                    # Test that the nodes in the split modules are equal to the
                    # original nodes of the module
                    nt.assert_equal(sorted(list(n1)), sorted(n1_new))
                    nt.assert_equal(sorted(list(n2)), sorted(n2_new))

                    n_init.sort()
                    n_all.sort()
                    # Test that the initial list of nodes in the module are
                    # equal to the nodes in m1 and m2 (split modules)
                    npt.assert_equal(n_init,n_all)

                    # Test that the computed modularity found when
                    # apply_module_split is used is equal to the modularity you
                    # would find if using that partition and that graph
                    npt.assert_almost_equal(mod_new,mod_new_3)

                    # Check that there are no empty modules in the final
                    # partition
                    for m in graph_part2.index:
                        nt.assert_true(len(graph_part2.index[m]) > 0)

Example 138

Project: Dcluster Source File: rhodelta.py
def rhodelta(dist, xxdist, ND, N, percent = 2.0):
    '''
    Input file format: 3 columns, separated by ' '
    Return (rho,delta,ordrho)
    '''
    print('Calculating rho and delta...')
    print('average percentage of neighbours (hard coded): %5.6f'%(percent) )

    position = round(N*percent/100)
    sda=np.sort(xxdist)
    dc=sda[position]
    print('Computing Rho with gaussian kernel of radius: %12.6f\n'%(dc))

    rho = np.zeros(ND)
    # Gaussian kernel
    for i in range(ND-1):
        for j in range((i+1),ND):
            rho[i] = rho[i] + np.exp(-(dist[i,j]/dc)*(dist[i,j]/dc))
            rho[j] = rho[j] + np.exp(-(dist[i,j]/dc)*(dist[i,j]/dc))

    maxd = dist.max()
    ordrho = (-rho).argsort()
    delta = np.zeros(ND)
    nneigh = np.zeros(ND)
    delta[ordrho[0]] = -1
    nneigh[ordrho[0]] = 0

    for ii in range(1,ND):
        delta[ordrho[ii]] = maxd
        for jj in range(ii):
            if dist[ordrho[ii],ordrho[jj]]<delta[ordrho[ii]]:
                delta[ordrho[ii]] = dist[ordrho[ii],ordrho[jj]]
                nneigh[ordrho[ii]] = ordrho[jj]

    delta[ordrho[0]] = delta.max()
    print('Generated file:DECISION GRAPH')
    print('column 1:Density')
    print('column 2:Delta\n')
    dg = np.array([rho,delta]).T
    np.savetxt('DECISION_GRAPH.txt',dg,fmt='%.4f')
    return((rho,delta,ordrho,dc,nneigh))
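
Example 138 picks the kernel radius dc as roughly a low percentile of the sorted pairwise distances. The sketch below isolates that choice on synthetic points; np.partition gives the same order statistic without a full sort, which can matter for large distance vectors:

import numpy as np
from scipy.spatial.distance import pdist

rng = np.random.default_rng(7)
points = rng.standard_normal((100, 2))

xxdist = pdist(points)                  # condensed vector of pairwise distances
percent = 2.0
position = int(round(len(xxdist) * percent / 100))

dc = np.sort(xxdist)[position]          # ~2nd percentile of all pairwise distances
dc_alt = np.partition(xxdist, position)[position]   # same value, no full sort
assert dc == dc_alt
print(dc)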

Example 139

Project: nipy Source File: scripting.py
def space_time_realign(input, tr, slice_order='descending', slice_dim=2,
                       slice_dir=1, apply=True, make_figure=False,
                       out_name=None):
    """
    This is a scripting interface to `nipy.algorithms.registration.SpaceTimeRealign` 

    Parameters
    ----------
    input : str or list
        A full path to a file-name (4D nifti time-series), or to a directory
        containing 4D nifti time-series, or a list of full-paths to files.
    tr : float
        The repetition time
    slice_order : str (optional)
        This is the order of slice-times in the acquisition. This is used as a
        key into the ``SLICETIME_FUNCTIONS`` dictionary from
        :mod:`nipy.algorithms.slicetiming.timefuncs`. Default: 'descending'.
    slice_dim : int (optional) 
        Denotes the axis in `images` that is the slice axis.  In a 4D image,
        this will often be axis = 2 (default).
    slice_dir : int (optional)
        1 if the slices were acquired slice 0 first (default), slice -1 last,
        or -1 if acquired slice -1 first, slice 0 last.
    apply : bool (optional)
        Whether to apply the transformation and produce an output. Default:
        True. 
    make_figure : bool (optional)
        Whether to generate a .png figure with the parameters across scans.
    out_name : bool (optional)
        Specify an output location (full path) for the files that are
        generated. Default: generate files in the path of the inputs (with an
        `_mc` suffix added to the file-names).

    Returns
    -------
    transforms : ndarray
        An (n_times_points,) shaped array containing
        `nipy.algorithms.registration.affine.Rigid` class instances for each time
        point in the time-series. These can be used as affine transforms by
        referring to their `.as_affine` attribute.
    """
    if make_figure:
        if not HAVE_MPL:
            e_s ="You need to have matplotlib installed to run this function"
            e_s += " with `make_figure` set to `True`"
            raise RuntimeError(e_s)

    # If we got only a single file, we motion correct that one:
    if op.isfile(input):
        if not (input.endswith('.nii') or input.endswith('.nii.gz')):
            e_s = "Input needs to be a nifti file ('.nii' or '.nii.gz'"
            raise ValueError(e_s)
        fnames = [input]
        input = nib.load(input)
    # If this is a full-path to a directory containing files, it's still a
    # string: 
    elif isinstance(input, str):
        list_of_files = os.listdir(input)
        fnames = [op.join(input, f) for f in np.sort(list_of_files)
                  if (f.endswith('.nii') or f.endswith('.nii.gz')) ]
        input = [nib.load(x) for x in fnames]
    # Assume that it's a list of full-paths to files:
    else:
        input = [nib.load(x) for x in input]

    slice_times = timefuncs[slice_order]
    slice_info = [slice_dim,
                  slice_dir]

    reggy = SpaceTimeRealign(input,
                             tr,
                             slice_times,
                             slice_info)

    reggy.estimate(align_runs=True)

    # We now have the transformation parameters in here:
    transforms = np.squeeze(np.array(reggy._transforms))
    rot = np.array([t.rotation for t in transforms])
    trans = np.array([t.translation for t in transforms])

    if apply:
        new_reggy = reggy.resample(align_runs=True)
        for run_idx, new_im in enumerate(new_reggy):
            # Fix output TR - it was probably lost in the image realign step
            assert new_im.affine.shape == (5, 5)
            new_im.affine[:] = new_im.affine.dot(np.diag([1, 1, 1, tr, 1]))
            # Save it out to a '.nii.gz' file:
            froot, ext, trail_ext = splitext_addext(fnames[run_idx])
            path, fname = op.split(froot)
            # We retain the file-name adding '_mc' regardless of where it's
            # saved
            new_path = path if out_name is None else out_name
            save_image(new_im, op.join(new_path, fname + '_mc.nii.gz'))

    if make_figure:
        # Delay MPL plotting import to latest moment to avoid errors trying
        # import the default MPL backend (such as tkinter, which may not be
        # installed). See: https://github.com/nipy/nipy/issues/414
        import matplotlib.pyplot as plt
        figure, ax = plt.subplots(2)
        figure.set_size_inches([8, 6])
        ax[0].plot(rot)
        ax[0].set_xlabel('Time (TR)')
        ax[0].set_ylabel('Rotation (radians)')
        ax[1].plot(trans)
        ax[1].set_xlabel('Time (TR)')
        ax[1].set_ylabel('Translation (mm)')
        figure.savefig(op.join(os.path.split(fnames[0])[0],
                                    'mc_params.png'))

    return transforms

Example 140

Project: krypy Source File: test_utils.py
def run_angles(F, G, ip_B, compute_vectors):
    if compute_vectors:
        theta, U, V = krypy.utils.angles(F, G, ip_B=ip_B,
                                         compute_vectors=compute_vectors)
    else:
        theta = krypy.utils.angles(F, G, ip_B=ip_B,
                                   compute_vectors=compute_vectors)

    # check shape of theta
    assert(theta.shape == (max(F.shape[1], G.shape[1]),))
    # check if theta is sorted
    assert(((theta - numpy.sort(theta)) == 0).all())
    # check that 0 <= theta <= pi/2
    assert((theta >= 0).all())
    assert((theta <= numpy.pi/2).all())
    # check pi/2 angles if dimensions differ
    n = abs(F.shape[1] - G.shape[1])
    if n > 0:
        # == 0 is safe here
        assert((numpy.abs(theta[-n:] - numpy.pi/2) == 0).all())
    # check 0 angles if F==G
    if F is G:
        assert(numpy.linalg.norm(theta) <= 1e-15)

    if compute_vectors:
        # check shapes of U and V
        assert(U.shape == F.shape)
        assert(V.shape == G.shape)
        # check that inner_product(U,V) = diag(cos(theta))
        UV = krypy.utils.inner(U, V, ip_B=ip_B)
        assert(numpy.linalg.norm(UV
               - numpy.diag(numpy.cos(theta))[:F.shape[1], :G.shape[1]])
               <= 1e-14)

Example 141

Project: numba Source File: test_sort.py
def np_sort_usecase(val):
    return np.sort(val)

Example 142

Project: corner.py Source File: corner.py
def hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,
           ax=None, color=None, plot_datapoints=True, plot_density=True,
           plot_contours=True, no_fill_contours=False, fill_contours=False,
           contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
           **kwargs):
    """
    Plot a 2-D histogram of samples.

    Parameters
    ----------
    x : array_like[nsamples,]
       The samples.

    y : array_like[nsamples,]
       The samples.

    levels : array_like
        The contour levels to draw.

    ax : matplotlib.Axes
        A axes instance on which to add the 2-D histogram.

    plot_datapoints : bool
        Draw the individual data points.

    plot_density : bool
        Draw the density colormap.

    plot_contours : bool
        Draw the contours.

    no_fill_contours : bool
        Add no filling at all to the contours (unlike setting
        ``fill_contours=False``, which still adds a white fill at the densest
        points).

    fill_contours : bool
        Fill the contours.

    contour_kwargs : dict
        Any additional keyword arguments to pass to the `contour` method.

    contourf_kwargs : dict
        Any additional keyword arguments to pass to the `contourf` method.

    data_kwargs : dict
        Any additional keyword arguments to pass to the `plot` method when
        adding the individual data points.

    """
    if ax is None:
        ax = pl.gca()

    # Set the default range based on the data range if not provided.
    if range is None:
        if "extent" in kwargs:
            logging.warn("Deprecated keyword argument 'extent'. "
                         "Use 'range' instead.")
            range = kwargs["extent"]
        else:
            range = [[x.min(), x.max()], [y.min(), y.max()]]

    # Set up the default plotting arguments.
    if color is None:
        color = "k"

    # Choose the default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)

    # This is the color map for the density plot, over-plotted to indicate the
    # density of the points near the center.
    density_cmap = LinearSegmentedColormap.from_list(
        "density_cmap", [color, (1, 1, 1, 0)])

    # This color map is used to hide the points at the high density areas.
    white_cmap = LinearSegmentedColormap.from_list(
        "white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)

    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    rgba_color = colorConverter.to_rgba(color)
    contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
    for i, l in enumerate(levels):
        contour_cmap[i][-1] *= float(i) / (len(levels)+1)

    # We'll make the 2D histogram to directly estimate the density.
    try:
        H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
                                 range=list(map(np.sort, range)),
                                 weights=weights)
    except ValueError:
        raise ValueError("It looks like at least one of your sample columns "
                         "have no dynamic range. You could try using the "
                         "'range' argument.")

    if smooth is not None:
        if gaussian_filter is None:
            raise ImportError("Please install scipy for smoothing")
        H = gaussian_filter(H, smooth)

    # Compute the density levels.
    Hflat = H.flatten()
    inds = np.argsort(Hflat)[::-1]
    Hflat = Hflat[inds]
    sm = np.cumsum(Hflat)
    sm /= sm[-1]
    V = np.empty(len(levels))
    for i, v0 in enumerate(levels):
        try:
            V[i] = Hflat[sm <= v0][-1]
        except:
            V[i] = Hflat[0]
    V.sort()
    m = np.diff(V) == 0
    if np.any(m):
        logging.warning("Too few points to create valid contours")
    while np.any(m):
        V[np.where(m)[0][0]] *= 1.0 - 1e-4
        m = np.diff(V) == 0
    V.sort()

    # Compute the bin centers.
    X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])

    # Extend the array for the sake of the contours at the plot edges.
    H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
    H2[2:-2, 2:-2] = H
    H2[2:-2, 1] = H[:, 0]
    H2[2:-2, -2] = H[:, -1]
    H2[1, 2:-2] = H[0]
    H2[-2, 2:-2] = H[-1]
    H2[1, 1] = H[0, 0]
    H2[1, -2] = H[0, -1]
    H2[-2, 1] = H[-1, 0]
    H2[-2, -2] = H[-1, -1]
    X2 = np.concatenate([
        X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
        X1,
        X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
    ])
    Y2 = np.concatenate([
        Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
        Y1,
        Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
    ])

    if plot_datapoints:
        if data_kwargs is None:
            data_kwargs = dict()
        data_kwargs["color"] = data_kwargs.get("color", color)
        data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
        data_kwargs["mec"] = data_kwargs.get("mec", "none")
        data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
        ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)

    # Plot the base fill to hide the densest data points.
    if (plot_contours or plot_density) and not no_fill_contours:
        ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],
                    cmap=white_cmap, antialiased=False)

    if plot_contours and fill_contours:
        if contourf_kwargs is None:
            contourf_kwargs = dict()
        contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
        contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
                                                             False)
        ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),
                    **contourf_kwargs)

    # Plot the density map. This can't be plotted at the same time as the
    # contour fills.
    elif plot_density:
        ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)

    # Plot the contour edge colors.
    if plot_contours:
        if contour_kwargs is None:
            contour_kwargs = dict()
        contour_kwargs["colors"] = contour_kwargs.get("colors", color)
        ax.contour(X2, Y2, H2.T, V, **contour_kwargs)

    ax.set_xlim(range[0])
    ax.set_ylim(range[1])

Example 143

Project: nupic.fluent Source File: language_encoder.py
  def encodeRandomly(self, text):
    """Return a random bitmap representation of the sample."""
    random.seed(text)
    return numpy.sort(random.sample(xrange(self.n), self.w))
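
Example 143 builds a sparse bitmap by sampling w distinct indices out of n and sorting them so downstream code can assume ordered indices. A sketch of the same idea with the modern numpy random API; the hashlib-based seed derivation is an assumption added for reproducibility and is not part of the original:

import hashlib
import numpy as np

def encode_randomly(text, n=2048, w=40):
    # Derive a stable seed from the text, then draw w distinct indices out of n
    seed = int.from_bytes(hashlib.sha256(text.encode()).digest()[:4], "little")
    rng = np.random.default_rng(seed)
    return np.sort(rng.choice(n, size=w, replace=False))

bitmap = encode_randomly("hello")
print(bitmap.shape)    # (40,)
print(bitmap[:5])      # sorted, strictly increasing indices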

Example 144

Project: plfit Source File: plfit.py
Function: plot_pdf
    def plotpdf(self, x=None, xmin=None, alpha=None, nbins=50, dolog=True,
                dnds=False, drawstyle='steps-post', histcolor='k', plcolor='r',
                **kwargs):
        """
        Plots PDF and powerlaw.

        kwargs is passed to pylab.hist and pylab.plot
        """
        if not(x): x=self.data
        if not(xmin): xmin=self._xmin
        if not(alpha): alpha=self._alpha

        x=np.sort(x)
        n=len(x)

        pylab.gca().set_xscale('log')
        pylab.gca().set_yscale('log')

        if dnds:
            hb = pylab.histogram(x,bins=np.logspace(log10(min(x)),log10(max(x)),nbins))
            h = hb[0]
            b = hb[1]
            db = hb[1][1:]-hb[1][:-1]
            h = h/db
            pylab.plot(b[:-1],h,drawstyle=drawstyle,color=histcolor,**kwargs)
            #alpha -= 1
        elif dolog:
            hb = pylab.hist(x, bins=np.logspace(log10(min(x)), log10(max(x)),
                                                nbins), log=True, fill=False,
                            edgecolor=histcolor, **kwargs)
            alpha -= 1
            h,b=hb[0],hb[1]
        else:
            hb = pylab.hist(x,bins=np.linspace((min(x)),(max(x)),nbins),fill=False,edgecolor=histcolor,**kwargs)
            h,b=hb[0],hb[1]
        # plotting points are at the center of each bin
        b = (b[1:]+b[:-1])/2.0

        q = x[x>=xmin]
        px = (alpha-1)/xmin * (q/xmin)**(-alpha)

        # Normalize by the median ratio between the histogram and the power-law
        # The normalization is semi-arbitrary; an average is probably just as valid
        plotloc = (b>xmin)*(h>0)
        norm = np.median( h[plotloc] / ((alpha-1)/xmin * (b[plotloc]/xmin)**(-alpha))  )
        px = px*norm

        plotx = pylab.linspace(q.min(),q.max(),1000)
        ploty = (alpha-1)/xmin * (plotx/xmin)**(-alpha) * norm

        #pylab.loglog(q,px,'r',**kwargs)
        pylab.plot(plotx,ploty,color=plcolor,**kwargs)

        axlims = pylab.axis()
        pylab.vlines(xmin,axlims[2],max(px),colors=plcolor,linestyle='dashed')

        if dolog and min(x) <= 0:
            lolim = 0.1
        else:
            lolim = min(x)
        pylab.gca().set_xlim(lolim, max(x))

Example 145

Project: ilastik-0.5 Source File: activeLearning.py
def computeEnsembleMargin(pmap):
    pmap_sort = numpy.sort(pmap, axis=len(pmap.shape)-1)
    res = pmap_sort[:,:,:,:,-1] - pmap_sort[:,:,:,:,-2]
    return 1-res
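
The 5-D indexing above is specific to ilastik's volume layout, but the margin idea works on any per-class probability array: np.sort along the last axis puts the two largest probabilities at the end. A small 2-D sketch:

import numpy as np

# Rows are samples, columns are class probabilities.
pmap = np.array([[0.70, 0.20, 0.10],
                 [0.40, 0.35, 0.25]])

pmap_sorted = np.sort(pmap, axis=-1)              # ascending along the class axis
margin = pmap_sorted[..., -1] - pmap_sorted[..., -2]
uncertainty = 1 - margin                          # high when the top two classes are close
print(uncertainty)                                # [0.5  0.95]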

Example 146

Project: OpenModes Source File: gmsh.py
def read_mesh(filename, returned_elements=("edges", "triangles")):
    """Read a gmsh binary mesh file

    Parameters
    ----------
    filename : string
        the full name of the gmsh mesh file
    returned_elements : tuple, optional
        A tuple of strings specifying which types of elements are desired

    Returns
    -------
    raw_mesh : dict
        Containing the following
        nodes : ndarray
            all nodes referred to by this geometric entity
        triangles : ndarray
            the indices of nodes belonging to each triangle
        edges : ndarray
            the indices of nodes belonging to each edge

    Node references use zero-based indexing, following the Python convention.

    Currently assumes gmsh binary format 2.2, little endian
    as defined in http://www.geuz.org/gmsh/doc/texinfo/#MSH-binary-file-format

    """

    wanted_element_types = set(ELEMENT_NAME_MAPPING[n] for n in
                               returned_elements)

    physical_names = None  # may not exist in file

    with open(filename, "rb") as file_handle:
        header = "Nothing"
        while True:
            header = file_handle.readline().decode('ascii').strip()
            if header == "":
                # should be at the end of the file
                break
            elif header == "$MeshFormat":
                check_format(file_handle)
            elif header == "$Nodes":
                nodes = read_nodes(file_handle)
                if len(nodes) == 0:
                    raise MeshError("No nodes in mesh")
            elif header == "$Elements":
                object_nodes, object_elements = read_elements(file_handle,
                                                          wanted_element_types)
            elif header == "$PhysicalNames":
                physical_names = read_physical_names(file_handle)
            else:
                raise MeshError("Unkown header type " + header)

            end_header = "$End"+header[1:]
            if file_handle.readline().decode('ascii').strip() != end_header:
                raise MeshError("Header %s with no matching %s" % (header, end_header))

    return_vals = []

    # Go through each entity, and work out which nodes belong to it. Nodes are
    # renumbered, so elements are updated to reflect new numbering
    for obj_nums, obj_nodes, obj_elements in zip(object_nodes.keys(),
            object_nodes.values(), object_elements.values()):

        # renumber the nodes
        orig_nodes = np.sort(list(obj_nodes))
        new_nodes = np.zeros(len(nodes), int)
        for node_count, node in enumerate(orig_nodes):
            new_nodes[node] = node_count

        this_part = {'nodes': nodes[orig_nodes]}

        # let the elements know about the renumbered nodes
        for elem_name in returned_elements:
            returned_type = ELEMENT_NAME_MAPPING[elem_name]
            if len(obj_elements[returned_type]) > 0:
                this_part[elem_name] = new_nodes[
                                        np.array(obj_elements[returned_type])]

        # add the physical name if it exists
        try:
            this_part["physical_name"] = physical_names[obj_nums]
        except (TypeError, KeyError):
            pass

        return_vals.append(this_part)

    return tuple(return_vals)
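
The node renumbering loop above can also be expressed without the explicit Python loop: because the unique node list is sorted, np.searchsorted maps each global node number to its new contiguous index. A toy sketch (node numbers chosen arbitrarily):

import numpy as np

# Three triangles referencing a sparse set of global node numbers.
triangles = np.array([[10, 42, 7],
                      [42, 99, 10],
                      [7, 99, 42]])

orig_nodes = np.sort(np.unique(triangles))        # [ 7 10 42 99]

# orig_nodes is sorted, so searchsorted gives each node's 0-based new index.
renumbered = np.searchsorted(orig_nodes, triangles)
print(renumbered)
# [[1 2 0]
#  [2 3 1]
#  [0 3 2]]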

Example 147

Project: chaco Source File: color_mapper.py
    def _make_mapping_array(self, n, data):
        """Creates an N-element 1-D lookup table

        The *data* parameter is a list of x,y0,y1 mapping correspondences (which
        can be lists or tuples), where all the items are values between 0 and 1,
        inclusive. The items in the mapping are:

        * x: a value being mapped
        * y0: the value of y for values of x less than or equal to the given x value.
        * y1: the value of y for values of x greater than the given x value.

        The two values of y allow for discontinuous mapping functions (for
        example, as might be found in a sawtooth function)

        The list must start with x=0, end with x=1, and
        all values of x must be in increasing order. Values between
        the given mapping points are determined by simple linear interpolation.

        The function returns an array "result" where result[x*(N-1)]
        gives the closest value for values of x between 0 and 1.
        """

        try:
            adata = array(data)
        except Exception:
            raise TypeError("data must be convertible to an array")
        shape = adata.shape
        if len(shape) != 2 or shape[1] != 3:
            raise ValueError("data must be nx3 format")

        x  = adata[:,0]
        y0 = adata[:,1]
        y1 = adata[:,2]

        if x[0] != 0. or x[-1] != 1.0:
            raise ValueError(
                "data mapping points must start with x=0. and end with x=1")
        if sometrue(sort(x)-x):
            raise ValueError(
                "data mapping points must have x in increasing order")
        # begin generation of lookup table
        x = x * (n-1)
        lut = zeros((n,), float32)
        xind = arange(float32(n), dtype=float32)
        ind = searchsorted(x, xind)[1:-1]

        lut[1:-1] = ( divide(xind[1:-1] - take(x,ind-1),
                             take(x,ind)-take(x,ind-1) )
                      *(take(y0,ind)-take(y1,ind-1)) + take(y1,ind-1))
        lut[0] = y1[0]
        lut[-1] = y0[-1]

        # ensure that the lut is confined to values between 0 and 1 by clipping it
        lut = lut.clip(0, 1)
        return lut
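
A short illustration of the (x, y0, y1) format the docstring describes and of the sort-based monotonicity check (the data here is made up): if sorting the x column changes anything, the breakpoints were not in increasing order.

import numpy as np

# (x, y0, y1) breakpoints for one colour channel, with a jump at x = 0.5.
data = np.array([(0.0, 0.0, 0.0),
                 (0.5, 1.0, 0.2),
                 (1.0, 1.0, 1.0)])

x = data[:, 0]
# Equivalent of "sometrue(sort(x) - x)" above.
assert not np.any(np.sort(x) - x), "x breakpoints must be increasing"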

Example 148

Project: chaco Source File: cmap_scatter.py
def create_plot():

    # Create some data
    numpts = 200
    x = sort(random(numpts))
    y = random(numpts)
    color = exp(-(x**2 + y**2))

    # Create a plot data object and give it this data
    pd = ArrayPlotData()
    pd.set_data("index", x)
    pd.set_data("value", y)
    pd.set_data("color", color)

    # Create the plot
    plot = Plot(pd)
    plot.plot(("index", "value", "color"),
              type="cmap_scatter",
              name="my_plot",
              color_mapper=jet,
              marker = "square",
              fill_alpha = 0.5,
              marker_size = 6,
              outline_color = "black",
              border_visible = True,
              bgcolor = "white")

    # Tweak some of the plot properties
    plot.title = "Colormapped Scatter Plot"
    plot.padding = 50
    plot.x_grid.visible = False
    plot.y_grid.visible = False
    plot.x_axis.font = "modern 16"
    plot.y_axis.font = "modern 16"

    # Set colors
    #plot.title_color = "white"
    #for axis in plot.x_axis, plot.y_axis:
    #    axis.set(title_color="white", tick_label_color="white")

    # Right now, some of the tools are a little invasive, and we need the
    # actual ColormappedScatterPlot object to give to them
    cmap_renderer = plot.plots["my_plot"][0]

    # Attach some tools to the plot
    plot.tools.append(PanTool(plot, constrain_key="shift"))
    zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
    plot.overlays.append(zoom)
    selection = ColormappedSelectionOverlay(cmap_renderer, fade_alpha=0.35,
                                            selection_type="mask")
    cmap_renderer.overlays.append(selection)
    plot.tools.append(MoveTool(plot, drag_button="right"))
    return plot

Example 149

Project: nifty Source File: powerspectrum.py
def nklength(kdict):
    return np.sort(list(set(kdict.flatten())))
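
For a plain ndarray the same result is available directly from np.unique, which both deduplicates and sorts; a quick check (the sample kdict is made up):

import numpy as np

kdict = np.array([[3.0, 1.0, 2.0],
                  [1.0, 3.0, 0.0]])
assert np.array_equal(np.sort(list(set(kdict.flatten()))), np.unique(kdict))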

Example 150

Project: hagfish Source File: hagfishUtils.py
Function: quant
def quant(x, w):
    xs = np.sort(x)
    return xs[int(len(xs) * w)]
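
quant picks the element at fractional rank w of the sorted data; note that this index-based convention differs from NumPy's interpolating quantile, and w = 1.0 would index past the end. A small sketch:

import numpy as np

def quant(x, w):
    xs = np.sort(x)
    return xs[int(len(xs) * w)]

x = np.array([5.0, 1.0, 9.0, 3.0, 7.0])
print(quant(x, 0.4))          # 5.0: the element 40% of the way into the sorted data
print(np.quantile(x, 0.4))    # 4.2: linear interpolation, a different convention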