numpy.logical_or

Here are examples of the Python API numpy.logical_or, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

43 Examples
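
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how numpy.logical_or behaves: it computes the element-wise OR of two arrays interpreted as truth values, accepts an out argument for in-place results, and can be chained (for example with functools.reduce) when more than two masks need to be combined.

import functools
import numpy as np

a = np.array([True, False, True, False])
b = np.array([True, True, False, False])

# Element-wise OR of two boolean arrays.
print(np.logical_or(a, b))                    # [ True  True  True False]

# Non-boolean inputs are interpreted as truth values (0 is False).
print(np.logical_or([0, 1, 2], [0, 0, 3]))    # [False  True  True]

# The result can be written into an existing array via `out`.
np.logical_or(a, b, out=a)

# Combining more than two masks: note that np.logical_or(x, y, z) would
# treat z as `out`, so chain the calls or reduce over a list instead.
masks = [a, b, np.array([False, False, False, True])]
print(functools.reduce(np.logical_or, masks))  # [ True  True  True  True]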

Example 1

Project: scikit-learn Source File: multiclass.py
Function: fit_ovo_binary
def _fit_ovo_binary(estimator, X, y, i, j):
    """Fit a single binary estimator (one-vs-one)."""
    cond = np.logical_or(y == i, y == j)
    y = y[cond]
    y_binary = np.empty(y.shape, np.int)
    y_binary[y == i] = 0
    y_binary[y == j] = 1
    indcond = np.arange(X.shape[0])[cond]
    return _fit_binary(estimator,
                       _safe_split(estimator, X, None, indices=indcond)[0],
                       y_binary, classes=[i, j]), indcond

Example 2

Project: scikit-learn Source File: multiclass.py
def _partial_fit_ovo_binary(estimator, X, y, i, j):
    """Partially fit a single binary estimator(one-vs-one)."""

    cond = np.logical_or(y == i, y == j)
    y = y[cond]
    y_binary = np.zeros_like(y)
    y_binary[y == j] = 1
    return _partial_fit_binary(estimator, X[cond], y_binary)

Example 3

Project: simpeg Source File: Optimization.py
Function: active_set
    @Utils.count
    def activeSet(self, x):
        """activeSet(x)

            If we are on a bound

        """
        return np.logical_or(x == self.lower, x == self.upper)

Example 4

Project: simpeg Source File: Optimization.py
Function: active_set
    @Utils.count
    def activeSet(self, x):
        """activeSet(x)

            If we are on a bound

        """
        return np.logical_or(x <= self.lower, x >= self.upper)

Example 5

Project: paramz Source File: transformations.py
Function: initialize
    def initialize(self, f):
        if np.any(np.logical_or(f < self.lower, f > self.upper)):
            print("Warning: changing parameters to satisfy constraints")
        #return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f)
        #FIXME: Max, zeros_like right?
        return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(np.zeros_like(f)), f)

Example 6

Project: statsmodels Source File: data.py
def _nan_rows(*arrs):
    """
    Returns a boolean array which is True where any of the rows in any
    of the _2d_ arrays in arrs are NaNs. Inputs can be any mixture of Series,
    DataFrames or array-like.
    """
    if len(arrs) == 1:
        arrs += ([[False]],)

    def _nan_row_maybe_two_inputs(x, y):
        # check for dtype bc dataframe has dtypes
        x_is_boolean_array = hasattr(x, 'dtype') and x.dtype == bool and x
        return np.logical_or(_asarray_2d_null_rows(x),
                             (x_is_boolean_array | _asarray_2d_null_rows(y)))
    return reduce(_nan_row_maybe_two_inputs, arrs).squeeze()
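
Example 6 folds np.logical_or over the null-row masks of several inputs, so a row is flagged if it contains a NaN in any of them. A minimal sketch of the same pattern on plain 2-D arrays (nan_rows below is a hypothetical stand-in for statsmodels' _asarray_2d_null_rows):

import functools
import numpy as np

def nan_rows(arr):
    # True for rows that contain at least one NaN (hypothetical helper).
    return np.isnan(np.asarray(arr, dtype=float)).any(axis=1)

x = np.array([[1.0, 2.0], [np.nan, 3.0], [4.0, 5.0]])
y = np.array([[1.0, 1.0], [2.0, 2.0], [np.nan, 3.0]])

# A row is bad if it is a NaN row in *any* of the inputs.
bad = functools.reduce(np.logical_or, (nan_rows(a) for a in (x, y)))
print(bad)   # [False  True  True]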

Example 7

Project: Cura Source File: LayerPolygon.py
Function: build_cache
    def buildCache(self):
        # For the line mesh we do not draw Infill or Jumps. Therefore those lines are filtered out.
        self._build_cache_line_mesh_mask = numpy.logical_not(numpy.logical_or(self._jump_mask, self._types == LayerPolygon.InfillType ))
        mesh_line_count = numpy.sum(self._build_cache_line_mesh_mask)
        self._index_begin = 0
        self._index_end = mesh_line_count
        
        self._build_cache_needed_points = numpy.ones((len(self._types), 2), dtype=numpy.bool)
        # Only if the type of line segment changes do we need to add an extra vertex to change colors
        self._build_cache_needed_points[1:, 0][:, numpy.newaxis] = self._types[1:] != self._types[:-1]
        # Mark points as unneeded if they are of types we don't want in the line mesh according to the calculated mask
        numpy.logical_and(self._build_cache_needed_points, self._build_cache_line_mesh_mask, self._build_cache_needed_points )
        
        self._vertex_begin = 0
        self._vertex_end = numpy.sum( self._build_cache_needed_points )

Example 8

Project: pyphi Source File: validate.py
Function: connectivity_matrix
def connectivity_matrix(cm):
    """Validate the given connectivity matrix."""
    # Special case for empty matrices.
    if cm.size == 0:
        return True
    if (cm.ndim != 2):
        raise ValueError("Connectivity matrix must be 2-dimensional.")
    if cm.shape[0] != cm.shape[1]:
        raise ValueError("Connectivity matrix must be square.")
    if not np.all(np.logical_or(cm == 1, cm == 0)):
        raise ValueError("Connectivity matrix must contain only binary "
                         "values.")
    return True
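
Example 8 checks that every entry of the connectivity matrix is 0 or 1 by OR-ing two equality masks and testing the result with np.all. The check in isolation looks like the sketch below; np.isin(cm, (0, 1)).all() would be an equivalent one-liner:

import numpy as np

cm = np.array([[0, 1], [1, 0]])
print(np.all(np.logical_or(cm == 1, cm == 0)))          # True

cm_bad = np.array([[0, 2], [1, 0]])
print(np.all(np.logical_or(cm_bad == 1, cm_bad == 0)))  # False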

Example 9

Project: qiime Source File: otu_significance.py
def is_computable_float(v):
    '''Return float if v can be converted to float excluding nan and inf.

    Parameters
    ----------
    v : variable
        Value to be converted to float if possible.

    Returns
    -------
    If v can be converted to a float that is not nan or inf, return v.
    Otherwise return False.
    '''
    tmp = float(v)
    if not logical_or(isnan(tmp), isinf(tmp)):
        return tmp 
    else:
        return False

Example 10

Project: sklearn-pmml Source File: utils.py
    @staticmethod
    def field_in_list(field, values):
        mv = pmml.MapValues(outputColumn='output', defaultValue=0)
        mv.append(pmml.FieldColumnPair(field=field, column='input'))
        it = pmml.InlineTable()
        for v in values:
            it.append(pmml_row(input=v, output=1))
        mv.append(it)
        return {
            DerivedFeatureTransformations.TRANSFORMATION: mv,
            DerivedFeatureTransformations.FUNCTION: lambda df: reduce(np.logical_or, [df[field] == _ for _ in values])
        }
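
Example 10 builds a membership test by reducing np.logical_or over one equality mask per allowed value. A brief sketch of that reduction on a plain array; np.isin gives the same mask in a single call:

import functools
import numpy as np

field = np.array(["a", "b", "c", "b"])
values = ["a", "b"]

mask = functools.reduce(np.logical_or, [field == v for v in values])
print(mask)                    # [ True  True False  True]
print(np.isin(field, values))  # same result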

Example 11

Project: laspy Source File: lasvalidate.py
Function: bb
    def bb(self,inFile, xval, yval, zval):
        X_invalid = np.logical_or((inFile.header.min[0] > xval), (inFile.header.max[0] < xval))
        Y_invalid = np.logical_or((inFile.header.min[1] > yval), (inFile.header.max[1] < yval))
        Z_invalid = np.logical_or((inFile.header.min[2] > zval), (inFile.header.max[2] < zval))

        # np.logical_or takes only two array operands (a third positional
        # argument is treated as `out`), so chain the calls to combine all three:
        bad_indices = np.where(
            np.logical_or(np.logical_or(X_invalid, Y_invalid), Z_invalid))
        return bad_indices

Example 12

Project: mcmerge Source File: carve.py
def make_mask(shape, edge, width, seed):
    """ Make a mask representing a valley out of a countour edge specification """
    
    straights, concave, convex = get_features(edge)
    components = vec.uniques(itertools.chain.from_iterable(vec.decompose(v) for v in itertools.chain(straights, concave, convex)))
    return numpy.logical_or(
        make_mask_straights(shape, width, seed, components, straights),
        make_mask_corners(shape, width, seed, components, concave, convex)
    )

Example 13

Project: mcedit2 Source File: __init__.py
Function: box_mask
    def box_mask(self, box):
        masks = [s.box_mask(box) for s in self.selections]
        masks = [m for m in masks if m is not None]
        if not len(masks):
            return None

        m = masks.pop()

        while len(masks):
            numpy.logical_or(m, masks.pop(), m)

        return m
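
Example 13 unions an arbitrary number of selection masks in place by passing the running result as the third (out) argument of numpy.logical_or, which avoids allocating a new array for each union. A short sketch of the same idea, assuming a list of equally shaped boolean masks:

import numpy as np

masks = [np.array([True, False, False]),
         np.array([False, True, False]),
         np.array([False, False, False])]

combined = masks[0].copy()
for m in masks[1:]:
    # Write the union back into `combined` instead of creating a temporary.
    np.logical_or(combined, m, out=combined)
print(combined)   # [ True  True False]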

Example 14

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: multiclass.py
Function: fit_ovo_binary
def _fit_ovo_binary(estimator, X, y, i, j):
    """Fit a single binary estimator (one-vs-one)."""
    cond = np.logical_or(y == i, y == j)
    y = y[cond]
    y_binary = np.empty(y.shape, np.int)
    y_binary[y == i] = 0
    y_binary[y == j] = 1
    ind = np.arange(X.shape[0])
    return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])

Example 15

Project: artview Source File: display_select_region.py
    def saveRadar(self):
        '''Open a dialog box to save radar file.'''
        dirIn, fname = os.path.split(self.Vradar.value.filename)
        filename = QtGui.QFileDialog.getSaveFileName(
            self, 'Save Radar File', dirIn)
        filename = str(filename)
        if filename == '' or self.Vradar.value is None:
            print("Vradar is None!")
        else:
            radar = self.Vradar.value
            if self.Vgatefilter.value is not None:
                radar = radar.extract_sweeps(range(radar.nsweeps))
                for field in radar.fields.keys():
                    radar.fields[field]['data'].mask = np.logical_or(
                        self.Vgatefilter.value._gate_excluded,
                        radar.fields[field]['data'].mask)
            #self.Vradar.update(True)
            pyart.io.write_cfradial(filename, radar)
            print("Saved %s" % (filename))

Example 16

Project: mpop Source File: fy3_virr.py
def load_virr(satscene, options):
    """Read the VIRR hdf5 file"""

    if "filename" not in options:
        raise IOError("No 1km virr filename given, cannot load")

    values = {"orbit": satscene.orbit,
              "satname": satscene.satname,
              "instrument": satscene.instrument_name,
              "satellite": satscene.fullname
              }

    filename = \
        os.path.join(satscene.time_slot.strftime(options["dir"]) % values,
                     satscene.time_slot.strftime(
                         options["filename"])
                     % values)

    LOGGER.debug("Filename= %s", filename)

    datasets = ['EV_Emissive',
                'EV_RefSB']

    calibrate = options['calibrate']
    LOGGER.debug("Calibrate = " + str(calibrate))

    h5f = h5py.File(filename, 'r')

    # Get geolocation information
    lons = h5f['Longitude'][:]
    lats = h5f['Latitude'][:]
    # Mask out unrealistic values:
    mask = np.logical_or(lats > 90., lons > 90.)
    lons = np.ma.masked_array(lons, mask=mask)
    lats = np.ma.masked_array(lats, mask=mask)
    sunz = h5f['SolarZenith'][:]
    slope = h5f['SolarZenith'].attrs['Slope'][0]
    intercept = h5f['SolarZenith'].attrs['Intercept'][0]
    sunz = sunz * slope + intercept
    sunz = np.where(np.greater(sunz, 85.0), 85.0, sunz)

    # Get the calibration information
    # Emissive radiance coefficients:
    emis_offs = h5f['Emissive_Radiance_Offsets'][:]
    emis_scales = h5f['Emissive_Radiance_Scales'][:]

    # Central wave number (unit =  cm-1) for the three IR bands
    # It is ordered according to decreasing wave number (increasing wavelength):
    # 3.7 micron, 10.8 micron, 12 micron
    emiss_centroid_wn = h5f.attrs['Emmisive_Centroid_Wave_Number']

    # VIS/NIR calibration stuff:
    refsb_cal_coeff = h5f.attrs['RefSB_Cal_Coefficients']
    visnir_scales = refsb_cal_coeff[0::2]
    visnir_offs = refsb_cal_coeff[1::2]

    refsb_effective_wl = h5f.attrs['RefSB_Effective_Wavelength']

    # Read the band data:
    for dset in datasets:
        band_data = h5f[dset]
        valid_range = band_data.attrs['valid_range']
        LOGGER.debug("valid-range = " + str(valid_range))
        fillvalue = band_data.attrs['_FillValue']
        band_names = band_data.attrs['band_name'].split(',')
        slope = band_data.attrs['Slope']
        intercept = band_data.attrs['Intercept']
        units = band_data.attrs['units']
        long_name = band_data.attrs['long_name']

        LOGGER.debug('band names = ' + str(band_names))

        for (i, band) in enumerate(band_names):
            if band not in satscene.channels_to_load:
                continue

            LOGGER.debug("Reading channel %s, i=%d", band, i)
            data = band_data[i]

            bandmask = np.logical_or(np.less(data, valid_range[0]),
                                     np.greater(data, valid_range[1]))

            if calibrate:
                if dset in ['EV_Emissive']:
                    data = (np.array([emis_offs[:, i]]).transpose() +
                            data * np.array([emis_scales[:, i]]).transpose())
                    # Radiance to Tb conversion.
                    # Pyspectral wants SI units,
                    # but radiance data are in mW/m^2/str/cm^-1 and wavenumbers are in cm^-1
                    # Therefore multply wavenumber by 100 and radiances by
                    # 10^-5
                    data = rad2temp(emiss_centroid_wn[i] * 100., data * 1e-5)
                    LOGGER.debug("IR data calibrated")

                if dset in ['EV_RefSB']:
                    data = (visnir_offs[i] +
                            data * visnir_scales[i]) / np.cos(np.deg2rad(sunz))

            satscene[band] = np.ma.masked_array(data,
                                                mask=bandmask,
                                                copy=False)

    from pyresample import geometry
    satscene.area = geometry.SwathDefinition(lons=lons, lats=lats)

    h5f.close()

Example 17

Project: PYPOWER Source File: printpf.py
def printpf(baseMVA, bus=None, gen=None, branch=None, f=None, success=None,
            et=None, fd=None, ppopt=None):
    """Prints power flow results.

    Prints power flow and optimal power flow results to C{fd} (a file
    descriptor which defaults to C{stdout}), with the details of what
    gets printed controlled by the optional C{ppopt} argument, which is a
    PYPOWER options vector (see L{ppoption} for details).

    The data can either be supplied in a single C{results} dict, or
    in the individual arguments: C{baseMVA}, C{bus}, C{gen}, C{branch}, C{f},
    C{success} and C{et}, where C{f} is the OPF objective function value,
    C{success} is C{True} if the solution converged and C{False} otherwise,
    and C{et} is the elapsed time for the computation in seconds. If C{f} is
    given, it is assumed that the output is from an OPF run, otherwise it is
    assumed to be a simple power flow run.

    Examples::
        ppopt = ppoption(OUT_GEN=1, OUT_BUS=0, OUT_BRANCH=0)
        fd = open(fname, 'w+b')
        results = runopf(ppc)
        printpf(results)
        printpf(results, fd)
        printpf(results, fd, ppopt)
        printpf(baseMVA, bus, gen, branch, f, success, et)
        printpf(baseMVA, bus, gen, branch, f, success, et, fd)
        printpf(baseMVA, bus, gen, branch, f, success, et, fd, ppopt)
        fd.close()

    @author: Ray Zimmerman (PSERC Cornell)
    """
    ##----- initialization -----
    ## default arguments
    if isinstance(baseMVA, dict):
        have_results_struct = 1
        results = baseMVA
        if gen is None:
            ppopt = ppoption()   ## use default options
        else:
            ppopt = gen
        if (ppopt['OUT_ALL'] == 0):
            return     ## nothin' to see here, bail out now
        if bus is None:
            fd = stdout         ## print to stdout by default
        else:
            fd = bus
        baseMVA, bus, gen, branch, success, et = \
            results["baseMVA"], results["bus"], results["gen"], \
            results["branch"], results["success"], results["et"]
        if 'f' in results:
            f = results["f"]
        else:
            f = None
    else:
        have_results_struct = 0
        if ppopt is None:
            ppopt = ppoption()   ## use default options
            if fd is None:
                fd = stdout         ## print to stdout by default
        if ppopt['OUT_ALL'] == 0:
            return     ## nothin' to see here, bail out now

    isOPF = f is not None    ## FALSE -> only simple PF data, TRUE -> OPF data

    ## options
    isDC            = ppopt['PF_DC']        ## use DC formulation?
    OUT_ALL         = ppopt['OUT_ALL']
    OUT_ANY         = OUT_ALL == 1     ## set to true if any pretty output is to be generated
    OUT_SYS_SUM     = (OUT_ALL == 1) or ((OUT_ALL == -1) and ppopt['OUT_SYS_SUM'])
    OUT_AREA_SUM    = (OUT_ALL == 1) or ((OUT_ALL == -1) and ppopt['OUT_AREA_SUM'])
    OUT_BUS         = (OUT_ALL == 1) or ((OUT_ALL == -1) and ppopt['OUT_BUS'])
    OUT_BRANCH      = (OUT_ALL == 1) or ((OUT_ALL == -1) and ppopt['OUT_BRANCH'])
    OUT_GEN         = (OUT_ALL == 1) or ((OUT_ALL == -1) and ppopt['OUT_GEN'])
    OUT_ANY         = OUT_ANY | ((OUT_ALL == -1) and
                        (OUT_SYS_SUM or OUT_AREA_SUM or OUT_BUS or
                         OUT_BRANCH or OUT_GEN))

    if OUT_ALL == -1:
        OUT_ALL_LIM = ppopt['OUT_ALL_LIM']
    elif OUT_ALL == 1:
        OUT_ALL_LIM = 2
    else:
        OUT_ALL_LIM = 0

    OUT_ANY         = OUT_ANY or (OUT_ALL_LIM >= 1)
    if OUT_ALL_LIM == -1:
        OUT_V_LIM       = ppopt['OUT_V_LIM']
        OUT_LINE_LIM    = ppopt['OUT_LINE_LIM']
        OUT_PG_LIM      = ppopt['OUT_PG_LIM']
        OUT_QG_LIM      = ppopt['OUT_QG_LIM']
    else:
        OUT_V_LIM       = OUT_ALL_LIM
        OUT_LINE_LIM    = OUT_ALL_LIM
        OUT_PG_LIM      = OUT_ALL_LIM
        OUT_QG_LIM      = OUT_ALL_LIM

    OUT_ANY         = OUT_ANY or ((OUT_ALL_LIM == -1) and (OUT_V_LIM or OUT_LINE_LIM or OUT_PG_LIM or OUT_QG_LIM))
    ptol = 1e-4        ## tolerance for displaying shadow prices

    ## create map of external bus numbers to bus indices
    i2e = bus[:, BUS_I].astype(int)
    e2i = zeros(max(i2e) + 1, int)
    e2i[i2e] = arange(bus.shape[0])

    ## sizes of things
    nb = bus.shape[0]      ## number of buses
    nl = branch.shape[0]   ## number of branches
    ng = gen.shape[0]      ## number of generators

    ## zero out some data to make printout consistent for DC case
    if isDC:
        bus[:, r_[QD, BS]]          = zeros((nb, 2))
        gen[:, r_[QG, QMAX, QMIN]]  = zeros((ng, 3))
        branch[:, r_[BR_R, BR_B]]   = zeros((nl, 2))

    ## parameters
    ties = find(bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] !=
                   bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA])
                            ## area inter-ties
    tap = ones(nl)                           ## default tap ratio = 1 for lines
    xfmr = find(branch[:, TAP])           ## indices of transformers
    tap[xfmr] = branch[xfmr, TAP]            ## include transformer tap ratios
    tap = tap * exp(1j * pi / 180 * branch[:, SHIFT]) ## add phase shifters
    nzld = find((bus[:, PD] != 0.0) | (bus[:, QD] != 0.0))
    sorted_areas = sort(bus[:, BUS_AREA])
    ## area numbers
    s_areas = sorted_areas[r_[1, find(diff(sorted_areas)) + 1]]
    nzsh = find((bus[:, GS] != 0.0) | (bus[:, BS] != 0.0))
    allg = find( ~isload(gen) )
    ong  = find( (gen[:, GEN_STATUS] > 0) & ~isload(gen) )
    onld = find( (gen[:, GEN_STATUS] > 0) &  isload(gen) )
    V = bus[:, VM] * exp(-1j * pi / 180 * bus[:, VA])
    out = find(branch[:, BR_STATUS] == 0)        ## out-of-service branches
    nout = len(out)
    if isDC:
        loss = zeros(nl)
    else:
        loss = baseMVA * abs(V[e2i[ branch[:, F_BUS].astype(int) ]] / tap -
                             V[e2i[ branch[:, T_BUS].astype(int) ]])**2 / \
                    (branch[:, BR_R] - 1j * branch[:, BR_X])

    fchg = abs(V[e2i[ branch[:, F_BUS].astype(int) ]] / tap)**2 * branch[:, BR_B] * baseMVA / 2
    tchg = abs(V[e2i[ branch[:, T_BUS].astype(int) ]]      )**2 * branch[:, BR_B] * baseMVA / 2
    loss[out] = zeros(nout)
    fchg[out] = zeros(nout)
    tchg[out] = zeros(nout)

    ##----- print the stuff -----
    if OUT_ANY:
        ## convergence & elapsed time
        if success:
            fd.write('\nConverged in %.2f seconds' % et)
        else:
            fd.write('\nDid not converge (%.2f seconds)\n' % et)

        ## objective function value
        if isOPF:
            fd.write('\nObjective Function Value = %.2f $/hr' % f)

    if OUT_SYS_SUM:
        fd.write('\n================================================================================')
        fd.write('\n|     System Summary                                                           |')
        fd.write('\n================================================================================')
        fd.write('\n\nHow many?                How much?              P (MW)            Q (MVAr)')
        fd.write('\n---------------------    -------------------  -------------  -----------------')
        fd.write('\nBuses         %6d     Total Gen Capacity   %7.1f       %7.1f to %.1f' % (nb, sum(gen[allg, PMAX]), sum(gen[allg, QMIN]), sum(gen[allg, QMAX])))
        fd.write('\nGenerators     %5d     On-line Capacity     %7.1f       %7.1f to %.1f' % (len(allg), sum(gen[ong, PMAX]), sum(gen[ong, QMIN]), sum(gen[ong, QMAX])))
        fd.write('\nCommitted Gens %5d     Generation (actual)  %7.1f           %7.1f' % (len(ong), sum(gen[ong, PG]), sum(gen[ong, QG])))
        fd.write('\nLoads          %5d     Load                 %7.1f           %7.1f' % (len(nzld)+len(onld), sum(bus[nzld, PD])-sum(gen[onld, PG]), sum(bus[nzld, QD])-sum(gen[onld, QG])))
        fd.write('\n  Fixed        %5d       Fixed              %7.1f           %7.1f' % (len(nzld), sum(bus[nzld, PD]), sum(bus[nzld, QD])))
        fd.write('\n  Dispatchable %5d       Dispatchable       %7.1f of %-7.1f%7.1f' % (len(onld), -sum(gen[onld, PG]), -sum(gen[onld, PMIN]), -sum(gen[onld, QG])))
        fd.write('\nShunts         %5d     Shunt (inj)          %7.1f           %7.1f' % (len(nzsh),
            -sum(bus[nzsh, VM]**2 * bus[nzsh, GS]), sum(bus[nzsh, VM]**2 * bus[nzsh, BS]) ))
        fd.write('\nBranches       %5d     Losses (I^2 * Z)     %8.2f          %8.2f' % (nl, sum(loss.real), sum(loss.imag) ))
        fd.write('\nTransformers   %5d     Branch Charging (inj)     -            %7.1f' % (len(xfmr), sum(fchg) + sum(tchg) ))
        fd.write('\nInter-ties     %5d     Total Inter-tie Flow %7.1f           %7.1f' % (len(ties), sum(abs(branch[ties, PF]-branch[ties, PT])) / 2, sum(abs(branch[ties, QF]-branch[ties, QT])) / 2))
        fd.write('\nAreas          %5d' % len(s_areas))
        fd.write('\n')
        fd.write('\n                          Minimum                      Maximum')
        fd.write('\n                 -------------------------  --------------------------------')
        minv = min(bus[:, VM])
        mini = argmin(bus[:, VM])
        maxv = max(bus[:, VM])
        maxi = argmax(bus[:, VM])
        fd.write('\nVoltage Magnitude %7.3f p.u. @ bus %-4d     %7.3f p.u. @ bus %-4d' % (minv, bus[mini, BUS_I], maxv, bus[maxi, BUS_I]))
        minv = min(bus[:, VA])
        mini = argmin(bus[:, VA])
        maxv = max(bus[:, VA])
        maxi = argmax(bus[:, VA])
        fd.write('\nVoltage Angle   %8.2f deg   @ bus %-4d   %8.2f deg   @ bus %-4d' % (minv, bus[mini, BUS_I], maxv, bus[maxi, BUS_I]))
        if not isDC:
            maxv = max(loss.real)
            maxi = argmax(loss.real)
            fd.write('\nP Losses (I^2*R)             -              %8.2f MW    @ line %d-%d' % (maxv, branch[maxi, F_BUS], branch[maxi, T_BUS]))
            maxv = max(loss.imag)
            maxi = argmax(loss.imag)
            fd.write('\nQ Losses (I^2*X)             -              %8.2f MVAr  @ line %d-%d' % (maxv, branch[maxi, F_BUS], branch[maxi, T_BUS]))
        if isOPF:
            minv = min(bus[:, LAM_P])
            mini = argmin(bus[:, LAM_P])
            maxv = max(bus[:, LAM_P])
            maxi = argmax(bus[:, LAM_P])
            fd.write('\nLambda P        %8.2f $/MWh @ bus %-4d   %8.2f $/MWh @ bus %-4d' % (minv, bus[mini, BUS_I], maxv, bus[maxi, BUS_I]))
            minv = min(bus[:, LAM_Q])
            mini = argmin(bus[:, LAM_Q])
            maxv = max(bus[:, LAM_Q])
            maxi = argmax(bus[:, LAM_Q])
            fd.write('\nLambda Q        %8.2f $/MWh @ bus %-4d   %8.2f $/MWh @ bus %-4d' % (minv, bus[mini, BUS_I], maxv, bus[maxi, BUS_I]))
        fd.write('\n')

    if OUT_AREA_SUM:
        fd.write('\n================================================================================')
        fd.write('\n|     Area Summary                                                             |')
        fd.write('\n================================================================================')
        fd.write('\nArea  # of      # of Gens        # of Loads         # of    # of   # of   # of')
        fd.write('\n Num  Buses   Total  Online   Total  Fixed  Disp    Shunt   Brchs  Xfmrs   Ties')
        fd.write('\n----  -----   -----  ------   -----  -----  -----   -----   -----  -----  -----')
        for i in range(len(s_areas)):
            a = s_areas[i]
            ib = find(bus[:, BUS_AREA] == a)
            ig = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & ~isload(gen))
            igon = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & (gen[:, GEN_STATUS] > 0) & ~isload(gen))
            ildon = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & (gen[:, GEN_STATUS] > 0) & isload(gen))
            inzld = find((bus[:, BUS_AREA] == a) & logical_or(bus[:, PD], bus[:, QD]))
            inzsh = find((bus[:, BUS_AREA] == a) & logical_or(bus[:, GS], bus[:, BS]))
            ibrch = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] == a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] == a))
            in_tie = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] == a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] != a))
            out_tie = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] != a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] == a))
            if not any(xfmr + 1):
                nxfmr = 0
            else:
                nxfmr = len(find((bus[e2i[branch[xfmr, F_BUS].astype(int)], BUS_AREA] == a) & (bus[e2i[branch[xfmr, T_BUS].astype(int)], BUS_AREA] == a)))
            fd.write('\n%3d  %6d   %5d  %5d   %5d  %5d  %5d   %5d   %5d  %5d  %5d' %
                (a, len(ib), len(ig), len(igon), \
                len(inzld)+len(ildon), len(inzld), len(ildon), \
                len(inzsh), len(ibrch), nxfmr, len(in_tie)+len(out_tie)))

        fd.write('\n----  -----   -----  ------   -----  -----  -----   -----   -----  -----  -----')
        fd.write('\nTot: %6d   %5d  %5d   %5d  %5d  %5d   %5d   %5d  %5d  %5d' %
            (nb, len(allg), len(ong), len(nzld)+len(onld),
            len(nzld), len(onld), len(nzsh), nl, len(xfmr), len(ties)))
        fd.write('\n')
        fd.write('\nArea      Total Gen Capacity           On-line Gen Capacity         Generation')
        fd.write('\n Num     MW           MVAr            MW           MVAr             MW    MVAr')
        fd.write('\n----   ------  ------------------   ------  ------------------    ------  ------')
        for i in range(len(s_areas)):
            a = s_areas[i]
            ig = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & ~isload(gen))
            igon = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & (gen[:, GEN_STATUS] > 0) & ~isload(gen))
            fd.write('\n%3d   %7.1f  %7.1f to %-7.1f  %7.1f  %7.1f to %-7.1f   %7.1f %7.1f' %
                (a, sum(gen[ig, PMAX]), sum(gen[ig, QMIN]), sum(gen[ig, QMAX]),
                sum(gen[igon, PMAX]), sum(gen[igon, QMIN]), sum(gen[igon, QMAX]),
                sum(gen[igon, PG]), sum(gen[igon, QG]) ))

        fd.write('\n----   ------  ------------------   ------  ------------------    ------  ------')
        fd.write('\nTot:  %7.1f  %7.1f to %-7.1f  %7.1f  %7.1f to %-7.1f   %7.1f %7.1f' %
                (sum(gen[allg, PMAX]), sum(gen[allg, QMIN]), sum(gen[allg, QMAX]),
                sum(gen[ong, PMAX]), sum(gen[ong, QMIN]), sum(gen[ong, QMAX]),
                sum(gen[ong, PG]), sum(gen[ong, QG]) ))
        fd.write('\n')
        fd.write('\nArea    Disp Load Cap       Disp Load         Fixed Load        Total Load')
        fd.write('\n Num      MW     MVAr       MW     MVAr       MW     MVAr       MW     MVAr')
        fd.write('\n----    ------  ------    ------  ------    ------  ------    ------  ------')
        Qlim = (gen[:, QMIN] == 0) * gen[:, QMAX] + (gen[:, QMAX] == 0) * gen[:, QMIN]
        for i in range(len(s_areas)):
            a = s_areas[i]
            ildon = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & (gen[:, GEN_STATUS] > 0) & isload(gen))
            inzld = find((bus[:, BUS_AREA] == a) & logical_or(bus[:, PD], bus[:, QD]))
            fd.write('\n%3d    %7.1f %7.1f   %7.1f %7.1f   %7.1f %7.1f   %7.1f %7.1f' %
                (a, -sum(gen[ildon, PMIN]),
                -sum(Qlim[ildon]),
                -sum(gen[ildon, PG]), -sum(gen[ildon, QG]),
                sum(bus[inzld, PD]), sum(bus[inzld, QD]),
                -sum(gen[ildon, PG]) + sum(bus[inzld, PD]),
                -sum(gen[ildon, QG]) + sum(bus[inzld, QD]) ))

        fd.write('\n----    ------  ------    ------  ------    ------  ------    ------  ------')
        fd.write('\nTot:   %7.1f %7.1f   %7.1f %7.1f   %7.1f %7.1f   %7.1f %7.1f' %
                (-sum(gen[onld, PMIN]),
                -sum(Qlim[onld]),
                -sum(gen[onld, PG]), -sum(gen[onld, QG]),
                sum(bus[nzld, PD]), sum(bus[nzld, QD]),
                -sum(gen[onld, PG]) + sum(bus[nzld, PD]),
                -sum(gen[onld, QG]) + sum(bus[nzld, QD])) )
        fd.write('\n')
        fd.write('\nArea      Shunt Inj        Branch      Series Losses      Net Export')
        fd.write('\n Num      MW     MVAr     Charging      MW     MVAr       MW     MVAr')
        fd.write('\n----    ------  ------    --------    ------  ------    ------  ------')
        for i in range(len(s_areas)):
            a = s_areas[i]
            inzsh   = find((bus[:, BUS_AREA] == a) & logical_or(bus[:, GS], bus[:, BS]))
            ibrch   = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] == a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] == a) & branch[:, BR_STATUS].astype(bool))
            in_tie  = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] != a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] == a) & branch[:, BR_STATUS].astype(bool))
            out_tie = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] == a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] != a) & branch[:, BR_STATUS].astype(bool))
            fd.write('\n%3d    %7.1f %7.1f    %7.1f    %7.2f %7.2f   %7.1f %7.1f' %
                (a, -sum(bus[inzsh, VM]**2 * bus[inzsh, GS]),
                 sum(bus[inzsh, VM]**2 * bus[inzsh, BS]),
                 sum(fchg[ibrch]) + sum(tchg[ibrch]) + sum(fchg[out_tie]) + sum(tchg[in_tie]),
                 sum(real(loss[ibrch])) + sum(real(loss[r_[in_tie, out_tie]])) / 2,
                 sum(imag(loss[ibrch])) + sum(imag(loss[r_[in_tie, out_tie]])) / 2,
                 sum(branch[in_tie, PT])+sum(branch[out_tie, PF]) - sum(real(loss[r_[in_tie, out_tie]])) / 2,
                 sum(branch[in_tie, QT])+sum(branch[out_tie, QF]) - sum(imag(loss[r_[in_tie, out_tie]])) / 2  ))

        fd.write('\n----    ------  ------    --------    ------  ------    ------  ------')
        fd.write('\nTot:   %7.1f %7.1f    %7.1f    %7.2f %7.2f       -       -' %
            (-sum(bus[nzsh, VM]**2 * bus[nzsh, GS]),
             sum(bus[nzsh, VM]**2 * bus[nzsh, BS]),
             sum(fchg) + sum(tchg), sum(real(loss)), sum(imag(loss)) ))
        fd.write('\n')

    ## generator data
    if OUT_GEN:
        if isOPF:
            genlamP = bus[e2i[gen[:, GEN_BUS].astype(int)], LAM_P]
            genlamQ = bus[e2i[gen[:, GEN_BUS].astype(int)], LAM_Q]

        fd.write('\n================================================================================')
        fd.write('\n|     Generator Data                                                           |')
        fd.write('\n================================================================================')
        fd.write('\n Gen   Bus   Status     Pg        Qg   ')
        if isOPF: fd.write('   Lambda ($/MVA-hr)')
        fd.write('\n  #     #              (MW)     (MVAr) ')
        if isOPF: fd.write('     P         Q    ')
        fd.write('\n----  -----  ------  --------  --------')
        if isOPF: fd.write('  --------  --------')
        for k in range(len(ong)):
            i = ong[k]
            fd.write('\n%3d %6d     %2d ' % (i, gen[i, GEN_BUS], gen[i, GEN_STATUS]))
            if (gen[i, GEN_STATUS] > 0) & logical_or(gen[i, PG], gen[i, QG]):
                fd.write('%10.2f%10.2f' % (gen[i, PG], gen[i, QG]))
            else:
                fd.write('       -         -  ')
            if isOPF: fd.write('%10.2f%10.2f' % (genlamP[i], genlamQ[i]))

        fd.write('\n                     --------  --------')
        fd.write('\n            Total: %9.2f%10.2f' % (sum(gen[ong, PG]), sum(gen[ong, QG])))
        fd.write('\n')
        if any(onld + 1):
            fd.write('\n================================================================================')
            fd.write('\n|     Dispatchable Load Data                                                   |')
            fd.write('\n================================================================================')
            fd.write('\n Gen   Bus   Status     Pd        Qd   ')
            if isOPF: fd.write('   Lambda ($/MVA-hr)')
            fd.write('\n  #     #              (MW)     (MVAr) ')
            if isOPF: fd.write('     P         Q    ')
            fd.write('\n----  -----  ------  --------  --------')
            if isOPF: fd.write('  --------  --------')
            for k in range(len(onld)):
                i = onld[k]
                fd.write('\n%3d %6d     %2d ' % (i, gen[i, GEN_BUS], gen[i, GEN_STATUS]))
                if (gen[i, GEN_STATUS] > 0) & logical_or(gen[i, PG], gen[i, QG]):
                    fd.write('%10.2f%10.2f' % (-gen[i, PG], -gen[i, QG]))
                else:
                    fd.write('       -         -  ')

                if isOPF: fd.write('%10.2f%10.2f' % (genlamP[i], genlamQ[i]))
            fd.write('\n                     --------  --------')
            fd.write('\n            Total: %9.2f%10.2f' % (-sum(gen[onld, PG]), -sum(gen[onld, QG])))
            fd.write('\n')

    ## bus data
    if OUT_BUS:
        fd.write('\n================================================================================')
        fd.write('\n|     Bus Data                                                                 |')
        fd.write('\n================================================================================')
        fd.write('\n Bus      Voltage          Generation             Load        ')
        if isOPF: fd.write('  Lambda($/MVA-hr)')
        fd.write('\n  #   Mag(pu) Ang(deg)   P (MW)   Q (MVAr)   P (MW)   Q (MVAr)')
        if isOPF: fd.write('     P        Q   ')
        fd.write('\n----- ------- --------  --------  --------  --------  --------')
        if isOPF: fd.write('  -------  -------')
        for i in range(nb):
            fd.write('\n%5d%7.3f%9.3f' % tuple(bus[i, [BUS_I, VM, VA]]))
            if bus[i, BUS_TYPE] == REF:
                fd.write('*')
            else:
                fd.write(' ')
            g  = find((gen[:, GEN_STATUS] > 0) & (gen[:, GEN_BUS] == bus[i, BUS_I]) &
                        ~isload(gen))
            ld = find((gen[:, GEN_STATUS] > 0) & (gen[:, GEN_BUS] == bus[i, BUS_I]) &
                        isload(gen))
            if any(g + 1):
                fd.write('%9.2f%10.2f' % (sum(gen[g, PG]), sum(gen[g, QG])))
            else:
                fd.write('      -         -  ')

            if logical_or(bus[i, PD], bus[i, QD]) | any(ld + 1):
                if any(ld + 1):
                    fd.write('%10.2f*%9.2f*' % (bus[i, PD] - sum(gen[ld, PG]),
                                                bus[i, QD] - sum(gen[ld, QG])))
                else:
                    fd.write('%10.2f%10.2f ' % tuple(bus[i, [PD, QD]]))
            else:
                fd.write('       -         -   ')
            if isOPF:
                fd.write('%9.3f' % bus[i, LAM_P])
                if abs(bus[i, LAM_Q]) > ptol:
                    fd.write('%8.3f' % bus[i, LAM_Q])
                else:
                    fd.write('     -')
        fd.write('\n                        --------  --------  --------  --------')
        fd.write('\n               Total: %9.2f %9.2f %9.2f %9.2f' %
            (sum(gen[ong, PG]), sum(gen[ong, QG]),
             sum(bus[nzld, PD]) - sum(gen[onld, PG]),
             sum(bus[nzld, QD]) - sum(gen[onld, QG])))
        fd.write('\n')

    ## branch data
    if OUT_BRANCH:
        fd.write('\n================================================================================')
        fd.write('\n|     Branch Data                                                              |')
        fd.write('\n================================================================================')
        fd.write('\nBrnch   From   To    From Bus Injection   To Bus Injection     Loss (I^2 * Z)  ')
        fd.write('\n  #     Bus    Bus    P (MW)   Q (MVAr)   P (MW)   Q (MVAr)   P (MW)   Q (MVAr)')
        fd.write('\n-----  -----  -----  --------  --------  --------  --------  --------  --------')
        for i in range(nl):
            fd.write('\n%4d%7d%7d%10.2f%10.2f%10.2f%10.2f%10.3f%10.2f' %
                (i, branch[i, F_BUS], branch[i, T_BUS],
                     branch[i, PF], branch[i, QF], branch[i, PT], branch[i, QT],
                     loss[i].real, loss[i].imag))
        fd.write('\n                                                             --------  --------')
        fd.write('\n                                                    Total:%10.3f%10.2f' %
                (sum(real(loss)), sum(imag(loss))))
        fd.write('\n')

    ##-----  constraint data  -----
    if isOPF:
        ctol = ppopt['OPF_VIOLATION']   ## constraint violation tolerance
        ## voltage constraints
        if (not isDC) & (OUT_V_LIM == 2 | (OUT_V_LIM == 1 &
                             (any(bus[:, VM] < bus[:, VMIN] + ctol) |
                              any(bus[:, VM] > bus[:, VMAX] - ctol) |
                              any(bus[:, MU_VMIN] > ptol) |
                              any(bus[:, MU_VMAX] > ptol)))):
            fd.write('\n================================================================================')
            fd.write('\n|     Voltage Constraints                                                      |')
            fd.write('\n================================================================================')
            fd.write('\nBus #  Vmin mu    Vmin    |V|   Vmax    Vmax mu')
            fd.write('\n-----  --------   -----  -----  -----   --------')
            for i in range(nb):
                if (OUT_V_LIM == 2) | (OUT_V_LIM == 1 &
                             ((bus[i, VM] < bus[i, VMIN] + ctol) |
                              (bus[i, VM] > bus[i, VMAX] - ctol) |
                              (bus[i, MU_VMIN] > ptol) |
                              (bus[i, MU_VMAX] > ptol))):
                    fd.write('\n%5d' % bus[i, BUS_I])
                    if ((bus[i, VM] < bus[i, VMIN] + ctol) |
                            (bus[i, MU_VMIN] > ptol)):
                        fd.write('%10.3f' % bus[i, MU_VMIN])
                    else:
                        fd.write('      -   ')

                    fd.write('%8.3f%7.3f%7.3f' % tuple(bus[i, [VMIN, VM, VMAX]]))
                    if (bus[i, VM] > bus[i, VMAX] - ctol) | (bus[i, MU_VMAX] > ptol):
                        fd.write('%10.3f' % bus[i, MU_VMAX])
                    else:
                        fd.write('      -    ')
            fd.write('\n')

        ## generator P constraints
        if (OUT_PG_LIM == 2) | \
                ((OUT_PG_LIM == 1) & (any(gen[ong, PG] < gen[ong, PMIN] + ctol) |
                                      any(gen[ong, PG] > gen[ong, PMAX] - ctol) |
                                      any(gen[ong, MU_PMIN] > ptol) |
                                      any(gen[ong, MU_PMAX] > ptol))) | \
                ((not isDC) & ((OUT_QG_LIM == 2) |
                ((OUT_QG_LIM == 1) & (any(gen[ong, QG] < gen[ong, QMIN] + ctol) |
                                      any(gen[ong, QG] > gen[ong, QMAX] - ctol) |
                                      any(gen[ong, MU_QMIN] > ptol) |
                                      any(gen[ong, MU_QMAX] > ptol))))):
            fd.write('\n================================================================================')
            fd.write('\n|     Generation Constraints                                                   |')
            fd.write('\n================================================================================')

        if (OUT_PG_LIM == 2) | ((OUT_PG_LIM == 1) &
                                 (any(gen[ong, PG] < gen[ong, PMIN] + ctol) |
                                  any(gen[ong, PG] > gen[ong, PMAX] - ctol) |
                                  any(gen[ong, MU_PMIN] > ptol) |
                                  any(gen[ong, MU_PMAX] > ptol))):
            fd.write('\n Gen   Bus                Active Power Limits')
            fd.write('\n  #     #    Pmin mu    Pmin       Pg       Pmax    Pmax mu')
            fd.write('\n----  -----  -------  --------  --------  --------  -------')
            for k in range(len(ong)):
                i = ong[k]
                if (OUT_PG_LIM == 2) | ((OUT_PG_LIM == 1) &
                            ((gen[i, PG] < gen[i, PMIN] + ctol) |
                             (gen[i, PG] > gen[i, PMAX] - ctol) |
                             (gen[i, MU_PMIN] > ptol) | (gen[i, MU_PMAX] > ptol))):
                    fd.write('\n%4d%6d ' % (i, gen[i, GEN_BUS]))
                    if (gen[i, PG] < gen[i, PMIN] + ctol) | (gen[i, MU_PMIN] > ptol):
                        fd.write('%8.3f' % gen[i, MU_PMIN])
                    else:
                        fd.write('     -  ')
                    if gen[i, PG]:
                        fd.write('%10.2f%10.2f%10.2f' % tuple(gen[i, [PMIN, PG, PMAX]]))
                    else:
                        fd.write('%10.2f       -  %10.2f' % tuple(gen[i, [PMIN, PMAX]]))
                    if (gen[i, PG] > gen[i, PMAX] - ctol) | (gen[i, MU_PMAX] > ptol):
                        fd.write('%9.3f' % gen[i, MU_PMAX])
                    else:
                        fd.write('      -  ')
            fd.write('\n')

        ## generator Q constraints
        if (not isDC) & ((OUT_QG_LIM == 2) | ((OUT_QG_LIM == 1) &
                                 (any(gen[ong, QG] < gen[ong, QMIN] + ctol) |
                                  any(gen[ong, QG] > gen[ong, QMAX] - ctol) |
                                  any(gen[ong, MU_QMIN] > ptol) |
                                  any(gen[ong, MU_QMAX] > ptol)))):
            fd.write('\nGen  Bus              Reactive Power Limits')
            fd.write('\n #    #   Qmin mu    Qmin       Qg       Qmax    Qmax mu')
            fd.write('\n---  ---  -------  --------  --------  --------  -------')
            for k in range(len(ong)):
                i = ong[k]
                if (OUT_QG_LIM == 2) | ((OUT_QG_LIM == 1) &
                            ((gen[i, QG] < gen[i, QMIN] + ctol) |
                             (gen[i, QG] > gen[i, QMAX] - ctol) |
                             (gen[i, MU_QMIN] > ptol) |
                             (gen[i, MU_QMAX] > ptol))):
                    fd.write('\n%3d%5d' % (i, gen[i, GEN_BUS]))
                    if (gen[i, QG] < gen[i, QMIN] + ctol) | (gen[i, MU_QMIN] > ptol):
                        fd.write('%8.3f' % gen[i, MU_QMIN])
                    else:
                        fd.write('     -  ')
                    if gen[i, QG]:
                        fd.write('%10.2f%10.2f%10.2f' % tuple(gen[i, [QMIN, QG, QMAX]]))
                    else:
                        fd.write('%10.2f       -  %10.2f' % tuple(gen[i, [QMIN, QMAX]]))

                    if (gen[i, QG] > gen[i, QMAX] - ctol) | (gen[i, MU_QMAX] > ptol):
                        fd.write('%9.3f' % gen[i, MU_QMAX])
                    else:
                        fd.write('      -  ')
            fd.write('\n')

        ## dispatchable load P constraints
        if (OUT_PG_LIM == 2) | (OUT_QG_LIM == 2) | \
                ((OUT_PG_LIM == 1) & (any(gen[onld, PG] < gen[onld, PMIN] + ctol) |
                                      any(gen[onld, PG] > gen[onld, PMAX] - ctol) |
                                      any(gen[onld, MU_PMIN] > ptol) |
                                      any(gen[onld, MU_PMAX] > ptol))) | \
                ((OUT_QG_LIM == 1) & (any(gen[onld, QG] < gen[onld, QMIN] + ctol) |
                                      any(gen[onld, QG] > gen[onld, QMAX] - ctol) |
                                      any(gen[onld, MU_QMIN] > ptol) |
                                      any(gen[onld, MU_QMAX] > ptol))):
            fd.write('\n================================================================================')
            fd.write('\n|     Dispatchable Load Constraints                                            |')
            fd.write('\n================================================================================')
        if (OUT_PG_LIM == 2) | ((OUT_PG_LIM == 1) &
                                 (any(gen[onld, PG] < gen[onld, PMIN] + ctol) |
                                  any(gen[onld, PG] > gen[onld, PMAX] - ctol) |
                                  any(gen[onld, MU_PMIN] > ptol) |
                                  any(gen[onld, MU_PMAX] > ptol))):
            fd.write('\nGen  Bus               Active Power Limits')
            fd.write('\n #    #   Pmin mu    Pmin       Pg       Pmax    Pmax mu')
            fd.write('\n---  ---  -------  --------  --------  --------  -------')
            for k in range(len(onld)):
                i = onld[k]
                if (OUT_PG_LIM == 2) | ((OUT_PG_LIM == 1) &
                            ((gen[i, PG] < gen[i, PMIN] + ctol) |
                             (gen[i, PG] > gen[i, PMAX] - ctol) |
                             (gen[i, MU_PMIN] > ptol) |
                             (gen[i, MU_PMAX] > ptol))):
                    fd.write('\n%3d%5d' % (i, gen[i, GEN_BUS]))
                    if (gen[i, PG] < gen[i, PMIN] + ctol) | (gen[i, MU_PMIN] > ptol):
                        fd.write('%8.3f' % gen[i, MU_PMIN])
                    else:
                        fd.write('     -  ')
                    if gen[i, PG]:
                        fd.write('%10.2f%10.2f%10.2f' % tuple(gen[i, [PMIN, PG, PMAX]]))
                    else:
                        fd.write('%10.2f       -  %10.2f' % tuple(gen[i, [PMIN, PMAX]]))

                    if (gen[i, PG] > gen[i, PMAX] - ctol) | (gen[i, MU_PMAX] > ptol):
                        fd.write('%9.3f' % gen[i, MU_PMAX])
                    else:
                        fd.write('      -  ')
            fd.write('\n')

        ## dispatchable load Q constraints
        if (not isDC) & ((OUT_QG_LIM == 2) | ((OUT_QG_LIM == 1) &
                                 (any(gen[onld, QG] < gen[onld, QMIN] + ctol) |
                                  any(gen[onld, QG] > gen[onld, QMAX] - ctol) |
                                  any(gen[onld, MU_QMIN] > ptol) |
                                  any(gen[onld, MU_QMAX] > ptol)))):
            fd.write('\nGen  Bus              Reactive Power Limits')
            fd.write('\n #    #   Qmin mu    Qmin       Qg       Qmax    Qmax mu')
            fd.write('\n---  ---  -------  --------  --------  --------  -------')
            for k in range(len(onld)):
                i = onld[k]
                if (OUT_QG_LIM == 2) | ((OUT_QG_LIM == 1) &
                            ((gen[i, QG] < gen[i, QMIN] + ctol) |
                             (gen[i, QG] > gen[i, QMAX] - ctol) |
                             (gen[i, MU_QMIN] > ptol) |
                             (gen[i, MU_QMAX] > ptol))):
                    fd.write('\n%3d%5d' % (i, gen[i, GEN_BUS]))
                    if (gen[i, QG] < gen[i, QMIN] + ctol) | (gen[i, MU_QMIN] > ptol):
                        fd.write('%8.3f' % gen[i, MU_QMIN])
                    else:
                        fd.write('     -  ')

                    if gen[i, QG]:
                        fd.write('%10.2f%10.2f%10.2f' % tuple(gen[i, [QMIN, QG, QMAX]]))
                    else:
                        fd.write('%10.2f       -  %10.2f' % tuple(gen[i, [QMIN, QMAX]]))

                    if (gen[i, QG] > gen[i, QMAX] - ctol) | (gen[i, MU_QMAX] > ptol):
                        fd.write('%9.3f' % gen[i, MU_QMAX])
                    else:
                        fd.write('      -  ')
            fd.write('\n')

        ## line flow constraints
        if (ppopt['OPF_FLOW_LIM'] == 1) | isDC:  ## P limit
            Ff = branch[:, PF]
            Ft = branch[:, PT]
            strg = '\n  #     Bus    Pf  mu     Pf      |Pmax|      Pt      Pt  mu   Bus'
        elif ppopt['OPF_FLOW_LIM'] == 2:   ## |I| limit
            Ff = abs( (branch[:, PF] + 1j * branch[:, QF]) / V[e2i[branch[:, F_BUS].astype(int)]] )
            Ft = abs( (branch[:, PT] + 1j * branch[:, QT]) / V[e2i[branch[:, T_BUS].astype(int)]] )
            strg = '\n  #     Bus   |If| mu    |If|     |Imax|     |It|    |It| mu   Bus'
        else:                ## |S| limit
            Ff = abs(branch[:, PF] + 1j * branch[:, QF])
            Ft = abs(branch[:, PT] + 1j * branch[:, QT])
            strg = '\n  #     Bus   |Sf| mu    |Sf|     |Smax|     |St|    |St| mu   Bus'

        if (OUT_LINE_LIM == 2) | ((OUT_LINE_LIM == 1) &
                            (any((branch[:, RATE_A] != 0) & (abs(Ff) > branch[:, RATE_A] - ctol)) |
                             any((branch[:, RATE_A] != 0) & (abs(Ft) > branch[:, RATE_A] - ctol)) |
                             any(branch[:, MU_SF] > ptol) |
                             any(branch[:, MU_ST] > ptol))):
            fd.write('\n================================================================================')
            fd.write('\n|     Branch Flow Constraints                                                  |')
            fd.write('\n================================================================================')
            fd.write('\nBrnch   From     "From" End        Limit       "To" End        To')
            fd.write(strg)
            fd.write('\n-----  -----  -------  --------  --------  --------  -------  -----')
            for i in range(nl):
                if (OUT_LINE_LIM == 2) | ((OUT_LINE_LIM == 1) &
                       (((branch[i, RATE_A] != 0) & (abs(Ff[i]) > branch[i, RATE_A] - ctol)) |
                        ((branch[i, RATE_A] != 0) & (abs(Ft[i]) > branch[i, RATE_A] - ctol)) |
                        (branch[i, MU_SF] > ptol) | (branch[i, MU_ST] > ptol))):
                    fd.write('\n%4d%7d' % (i, branch[i, F_BUS]))
                    if (Ff[i] > branch[i, RATE_A] - ctol) | (branch[i, MU_SF] > ptol):
                        fd.write('%10.3f' % branch[i, MU_SF])
                    else:
                        fd.write('      -   ')

                    fd.write('%9.2f%10.2f%10.2f' %
                        (Ff[i], branch[i, RATE_A], Ft[i]))
                    if (Ft[i] > branch[i, RATE_A] - ctol) | (branch[i, MU_ST] > ptol):
                        fd.write('%10.3f' % branch[i, MU_ST])
                    else:
                        fd.write('      -   ')
                    fd.write('%6d' % branch[i, T_BUS])
            fd.write('\n')

    ## execute userfcn callbacks for 'printpf' stage
    if have_results_struct and 'userfcn' in results:
        if not isOPF:  ## turn off option for all constraints if it isn't an OPF
            ppopt = ppoption(ppopt, 'OUT_ALL_LIM', 0)
        run_userfcn(results["userfcn"], 'printpf', results, fd, ppopt)

Example 18

Project: scikit-beam Source File: arithmetic.py
def logical_nor(x1, x2, out=None):
    """Compute truth value of NOT (x1 OR x2)) element wise.

    This function enables the computation of the LOGICAL_NOR of two image or
    volume data sets. This function enables easy isolation of all data points
    NOT INCLUDED IN EITHER OF THE SOURCE DATA SETS. This function can be used
    for data comparison, material isolation, noise removal, or mask
    application/generation.

    Parameters
    ----------
    x1, x2 : array-like
        Input arrays. `x1` and `x2` must be of the same shape.

    out : array-like, optional
        Boolean result with the same shape as `x1` and `x2` of the logical
        operation on corresponding elements of `x1` and `x2`.

    Returns
    -------
    output : {ndarray, bool}
        Boolean result with the same shape as `x1` and `x2` of the logical
        NOR operation on corresponding elements of `x1` and `x2`.

    Example
    -------
    >>> x1 = [[0,0,1,0,0], [2,1,1,1,2], [2,0,1,0,2]]
    >>> x2 = [[0,0,0,0,0], [2,1,1,1,2], [0,0,0,0,0]]
    >>> logical_nor(x1, x2)
    array([[ True,  True, False,  True,  True],
           [False, False, False, False, False],
           [False,  True, False,  True, False]], dtype=bool)
    """
    return logical_not(logical_or(x1, x2, out), out)

Example 19

Project: scikit-learn Source File: test_naive_bayes.py
def test_check_accuracy_on_digits():
    # Non regression test to make sure that any further refactoring / optim
    # of the NB models do not harm the performance on a slightly non-linearly
    # separable dataset
    digits = load_digits()
    X, y = digits.data, digits.target
    binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
    X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]

    # Multinomial NB
    scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
    assert_greater(scores.mean(), 0.86)

    scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.94)

    # Bernoulli NB
    scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
    assert_greater(scores.mean(), 0.83)

    scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.92)

    # Gaussian NB
    scores = cross_val_score(GaussianNB(), X, y, cv=10)
    assert_greater(scores.mean(), 0.77)

    scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.86)

Example 20

Project: scipy Source File: coo.py
Function: set_diag
    def _setdiag(self, values, k):
        M, N = self.shape
        if values.ndim and not len(values):
            return
        idx_dtype = self.row.dtype

        # Determine which triples to keep and where to put the new ones.
        full_keep = self.col - self.row != k
        if k < 0:
            max_index = min(M+k, N)
            if values.ndim:
                max_index = min(max_index, len(values))
            keep = np.logical_or(full_keep, self.col >= max_index)
            new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
            new_col = np.arange(max_index, dtype=idx_dtype)
        else:
            max_index = min(M, N-k)
            if values.ndim:
                max_index = min(max_index, len(values))
            keep = np.logical_or(full_keep, self.row >= max_index)
            new_row = np.arange(max_index, dtype=idx_dtype)
            new_col = np.arange(k, k + max_index, dtype=idx_dtype)

        # Define the array of data consisting of the entries to be added.
        if values.ndim:
            new_data = values[:max_index]
        else:
            new_data = np.empty(max_index, dtype=self.dtype)
            new_data[:] = values

        # Update the internal structure.
        self.row = np.concatenate((self.row[keep], new_row))
        self.col = np.concatenate((self.col[keep], new_col))
        self.data = np.concatenate((self.data[keep], new_data))
        self.has_canonical_format = False

Example 21

Project: TensorflowProjects Source File: AnalogyDataLoader.py
    def get_set_from_pairs(self, pairs, set_option):
        idxes = choice(range(len(pairs)), self.batch_size)

        cur_pairs = pairs[idxes]
        cur_pairs_idx1 = cur_pairs[:, 0]
        cur_pairs_idx2 = cur_pairs[:, 1]

        default_angle1 = choice(self.angle, self.batch_size)
        default_scale1 = choice(self.scale, self.batch_size)
        default_xpos1 = choice(self.xpos, self.batch_size)
        default_ypos1 = choice(self.ypos, self.batch_size)

        default_angle2 = choice(self.angle, self.batch_size)
        default_scale2 = choice(self.scale, self.batch_size)
        default_xpos2 = choice(self.xpos, self.batch_size)
        default_ypos2 = choice(self.ypos, self.batch_size)

        angle1 = default_angle1
        angle2 = default_angle1
        angle3 = default_angle2
        angle4 = default_angle2
        scale1 = default_scale1
        scale2 = default_scale1
        scale3 = default_scale2
        scale4 = default_scale2

        xpos1 = default_xpos1
        xpos2 = default_xpos1
        xpos3 = default_xpos2
        xpos4 = default_xpos2
        ypos1 = default_ypos1
        ypos2 = default_ypos1
        ypos3 = default_ypos2
        ypos4 = default_ypos2

        if set_option != None:
            to_change = set_option
        else:
            to_change = choice(self.options)

        if to_change == "rotate":
            offset = choice(range(-2, 3), self.batch_size)

            angle1 = choice(self.angle, self.batch_size)
            angle2 = angle1 + offset
            angle2[angle2 < 0] += self.angle
            angle2[angle2 >= self.angle] -= self.angle

            angle3 = choice(range(self.angle), self.batch_size)
            angle4 = angle3 + offset
            angle4[angle4 < 0] += self.angle
            angle4[angle4 >= self.angle] -= self.angle
        elif to_change == "scale":
            offset = choice(range(-1, 2), self.batch_size)

            scale1 = choice(self.scale, self.batch_size)
            scale2 = scale1 + offset

            bound_idx = np.logical_or(scale2 < 0, scale2 >= self.scale)
            offset[bound_idx] *= -1
            scale2[bound_idx] = scale1[bound_idx] + offset[bound_idx]

            scale3 = choice(range(self.scale), self.batch_size)
            under_idx = np.logical_and(scale3 == 0, offset == -1)
            upper_idx = np.logical_and(scale3 == self.scale - 1, offset == 1)
            scale3[under_idx] = choice(range(1, self.scale), np.sum(under_idx))
            scale3[upper_idx] = choice(range(0, self.scale - 1), np.sum(upper_idx))
            scale4 = scale3 + offset
        elif to_change == "xpos":
            offset = choice(range(-1, 2), self.batch_size)

            xpos1 = choice(self.xpos, self.batch_size)
            xpos2 = xpos1 + offset

            bound_idx = np.logical_or(xpos2 < 0, xpos2 >= self.xpos)
            offset[bound_idx] *= -1
            xpos2[bound_idx] = xpos1[bound_idx] + offset[bound_idx]

            xpos3 = choice(range(self.xpos), self.batch_size)
            under_idx = np.logical_and(xpos3 == 0, offset == -1)
            upper_idx = np.logical_and(xpos3 == self.xpos - 1, offset == 1)
            xpos3[under_idx] = choice(range(1, self.xpos), np.sum(under_idx))
            xpos3[upper_idx] = choice(range(0, self.xpos - 1), np.sum(upper_idx))
            xpos4 = xpos3 + offset
        elif to_change == "ypos":
            offset = choice(range(-1, 2), self.batch_size)

            ypos1 = choice(self.ypos, self.batch_size)
            ypos2 = ypos1 + offset

            bound_idx = np.logical_or(ypos2 < 0, ypos2 >= self.ypos)
            offset[bound_idx] *= -1
            ypos2[bound_idx] = ypos1[bound_idx] + offset[bound_idx]

            ypos3 = choice(range(self.ypos), self.batch_size)
            under_idx = np.logical_and(ypos3 == 0, offset == -1)
            upper_idx = np.logical_and(ypos3 == self.ypos - 1, offset == 1)
            ypos3[under_idx] = choice(range(1, self.ypos), np.sum(under_idx))
            ypos3[upper_idx] = choice(range(0, self.ypos - 1), np.sum(upper_idx))
            ypos4 = ypos3 + offset
        else:
            raise Exception(" [!] Wrong option %s" % to_change)

        color1, shape1 = np.unravel_index(cur_pairs_idx1, [self.color, self.shape])
        color2, shape2 = np.unravel_index(cur_pairs_idx2, [self.color, self.shape])

        shape = self.data_shape[3:]
        idx1 = np.ravel_multi_index([color1, shape1, scale1, angle1, xpos1, ypos1], shape)
        idx2 = np.ravel_multi_index([color1, shape1, scale2, angle2, xpos2, ypos2], shape)
        idx3 = np.ravel_multi_index([color2, shape2, scale3, angle3, xpos3, ypos3], shape)
        idx4 = np.ravel_multi_index([color2, shape2, scale4, angle4, xpos4, ypos4], shape)

        a = np.rollaxis(self.data[:, :, :, idx1], 3)
        b = np.rollaxis(self.data[:, :, :, idx2], 3)
        c = np.rollaxis(self.data[:, :, :, idx3], 3)
        d = np.rollaxis(self.data[:, :, :, idx4], 3)

        return a, b, c, d

Example 22

Project: bayespy Source File: stochastic.py
Function: set_mask
    def _set_mask(self, mask):
        self.mask = np.logical_or(mask, self.observed)
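
A minimal sketch of the same idea with plain boolean arrays in place of bayespy's node attributes: an element ends up masked if the caller masks it or it is already observed.

import numpy as np

observed = np.array([True, False, False, True])
user_mask = np.array([False, False, True, True])
combined = np.logical_or(user_mask, observed)  # union of the two masks
print(combined)  # [ True False  True  True]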

Example 23

Project: python-gsw Source File: conversions.py
@match_args_return
def CT_from_t(SA, t, p):
    """
    Calculates Conservative Temperature of seawater from in situ
    temperature.

    Parameters
    ----------
    SA : array_like
         Absolute salinity [g kg :sup:`-1`]
    t : array_like
        in situ temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        pressure [dbar]

    Returns
    -------
    TODO

    Examples
    --------
    TODO

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
       of seawater - 2010: Calculation and use of thermodynamic properties.
       Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
       UNESCO (English), 196 pp. See section 3.3.
    """

    # Find values that are out of range, set them to NaN.
    invalid = np.logical_and(p < 100, np.logical_or(t > 80, t < -12))
    t[invalid] = np.ma.masked

    invalid = np.logical_and(p >= 100, np.logical_or(t > 40, t < -12))
    t[invalid] = np.ma.masked

    pt0 = pt0_from_t(SA, t, p)
    CT = CT_from_pt(SA, pt0)

    return CT
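
The range check above combines a pressure condition with a temperature window and masks the offenders. A standalone sketch with made-up values, using only the shallow-water limits shown in the function:

import numpy as np

t = np.ma.array([-20., 5., 90., 25.])
p = np.array([10., 50., 20., 500.])

# shallow water (p < 100 dbar): temperatures outside (-12, 80) degC are invalid
invalid = np.logical_and(p < 100, np.logical_or(t > 80, t < -12))
t[invalid] = np.ma.masked
print(t)  # [-- 5.0 -- 25.0]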

Example 24

Project: smop Source File: core.py
def logical_or(a,b):
    return numpy.logical_or(a,b)

Example 25

Project: visual-analogy-tensorflow Source File: loader.py
  def get_set_from_pairs(self, pairs, set_option):
    idxes = choice(range(len(pairs)), self.batch_size)

    cur_pairs = pairs[idxes]
    cur_pairs_idx1 = cur_pairs[:,0]
    cur_pairs_idx2 = cur_pairs[:,1]

    default_angle1 = choice(self.angle, self.batch_size)
    default_scale1 = choice(self.scale, self.batch_size)
    default_xpos1 = choice(self.xpos, self.batch_size)
    default_ypos1 = choice(self.ypos, self.batch_size)

    default_angle2 = choice(self.angle, self.batch_size)
    default_scale2 = choice(self.scale, self.batch_size)
    default_xpos2 = choice(self.xpos, self.batch_size)
    default_ypos2 = choice(self.ypos, self.batch_size)

    angle1 = default_angle1
    angle2 = default_angle1
    angle3 = default_angle2
    angle4 = default_angle2
    scale1 = default_scale1
    scale2 = default_scale1
    scale3 = default_scale2
    scale4 = default_scale2

    xpos1 = default_xpos1
    xpos2 = default_xpos1
    xpos3 = default_xpos2
    xpos4 = default_xpos2
    ypos1 = default_ypos1
    ypos2 = default_ypos1
    ypos3 = default_ypos2
    ypos4 = default_ypos2

    if set_option != None:
      to_change = set_option
    else:
      to_change = choice(self.options)

    if to_change == "rotate":
      offset = choice(range(-2, 3), self.batch_size)

      angle1 = choice(self.angle, self.batch_size)
      angle2 = angle1 + offset
      angle2[angle2 < 0] += self.angle
      angle2[angle2 >= self.angle] -= self.angle

      angle3 = choice(range(self.angle), self.batch_size)
      angle4 = angle3 + offset
      angle4[angle4 < 0] += self.angle
      angle4[angle4 >= self.angle] -= self.angle
    elif to_change == "scale":
      offset = choice(range(-1, 2), self.batch_size)

      scale1 = choice(self.scale, self.batch_size)
      scale2 = scale1 + offset

      bound_idx = np.logical_or(scale2 < 0, scale2 >= self.scale)
      offset[bound_idx] *= -1
      scale2[bound_idx] = scale1[bound_idx] + offset[bound_idx]

      scale3 = choice(range(self.scale), self.batch_size)
      under_idx = np.logical_and(scale3 == 0, offset == -1)
      upper_idx = np.logical_and(scale3 == self.scale - 1, offset == 1) 
      scale3[under_idx] = choice(range(1, self.scale), np.sum(under_idx))
      scale3[upper_idx] = choice(range(0, self.scale - 1), np.sum(upper_idx))
      scale4 = scale3 + offset
    elif to_change == "xpos":
      offset = choice(range(-1, 2), self.batch_size)

      xpos1 = choice(self.xpos, self.batch_size)
      xpos2 = xpos1 + offset

      bound_idx = np.logical_or(xpos2 < 0, xpos2 >= self.xpos)
      offset[bound_idx] *= -1
      xpos2[bound_idx] = xpos1[bound_idx] + offset[bound_idx]

      xpos3 = choice(range(self.xpos), self.batch_size)
      under_idx = np.logical_and(xpos3 == 0, offset == -1)
      upper_idx = np.logical_and(xpos3 == self.xpos - 1, offset == 1) 
      xpos3[under_idx] = choice(range(1, self.xpos), np.sum(under_idx))
      xpos3[upper_idx] = choice(range(0, self.xpos - 1), np.sum(upper_idx))
      xpos4 = xpos3 + offset
    elif to_change == "ypos":
      offset = choice(range(-1, 2), self.batch_size)

      ypos1 = choice(self.ypos, self.batch_size)
      ypos2 = ypos1 + offset

      bound_idx = np.logical_or(ypos2 < 0, ypos2 >= self.ypos)
      offset[bound_idx] *= -1
      ypos2[bound_idx] = ypos1[bound_idx] + offset[bound_idx]

      ypos3 = choice(range(self.ypos), self.batch_size)
      under_idx = np.logical_and(ypos3 == 0, offset == -1)
      upper_idx = np.logical_and(ypos3 == self.ypos - 1, offset == 1) 
      ypos3[under_idx] = choice(range(1, self.ypos), np.sum(under_idx))
      ypos3[upper_idx] = choice(range(0, self.ypos - 1), np.sum(upper_idx))
      ypos4 = ypos3 + offset
    else:
      raise Exception(" [!] Wrong option %s" % to_change)
    
    color1, shape1 = np.unravel_index(cur_pairs_idx1, [self.color, self.shape])
    color2, shape2 = np.unravel_index(cur_pairs_idx2, [self.color, self.shape])

    shape = self.data_shape[3:]
    idx1 =  np.ravel_multi_index([color1, shape1, scale1, angle1, xpos1, ypos1], shape)
    idx2 =  np.ravel_multi_index([color1, shape1, scale2, angle2, xpos2, ypos2], shape)
    idx3 =  np.ravel_multi_index([color2, shape2, scale3, angle3, xpos3, ypos3], shape)
    idx4 =  np.ravel_multi_index([color2, shape2, scale4, angle4, xpos4, ypos4], shape)

    a = np.rollaxis(self.data[:,:,:,idx1], 3)
    b = np.rollaxis(self.data[:,:,:,idx2], 3)
    c = np.rollaxis(self.data[:,:,:,idx3], 3)
    d = np.rollaxis(self.data[:,:,:,idx4], 3)

    if False: # only used for debugging
      t = strftime("%Y-%m-%d %H:%M:%S", gmtime())
      self._get_image(a, "test/%s_1.png" % t)
      self._get_image(b, "test/%s_2.png" % t)
      self._get_image(c, "test/%s_3.png" % t)
      self._get_image(d, "test/%s_4.png" % t)

    return a, b, c, d

Example 26

Project: yatsm Source File: roi.py
Function: extract_roi
def extract_roi(raster, vector, feature_prop=None, all_touched=False):
    """ Yield pixel data from ``src`` for ROIs in ``features``

    Args:
        raster (rasterio.RasterReader): The ``rasterio`` dataset used to
            extract training data values from
        vector (list[dict]): A list of features from a polygon vector file as
            GeoJSON-like
        feature_prop (str): The name of the attribute from ``features``
            containing the ROI labels
        all_touched (bool): Rasterization option that decides if all pixels
            touching the ROI should be included, or just pixels from within
            the ROI

    Returns:
        tuple (np.ndarray, np.ndarray, np.ndarray, np.ndarray): A tuple
            containing an array of ROI data from ``src`` (``band x n``), the
            ROI data label (``n``), and the X and Y coordinates of each data
            point (``n`` and ``n`` sized)

    """
    if not feature_prop:
        feature_prop = list(vector[0]['properties'].keys())[0]

    for feat in vector:
        geom = geom_shape(feat['geometry'])
        label = feat['properties'][feature_prop]
        bounds = tuple(geom.bounds)

        window = raster.window(*bounds, boundless=True)
        data = raster.read(window=window, boundless=True)
        shape = data.shape
        transform = raster.window_transform(window)

        roi = rasterize(
            [(feat['geometry'], 1)],
            out_shape=shape[1:],
            transform=transform,
            fill=0,
            all_touched=all_touched
        )

        mask = roi == 0
        if raster.nodata:
            mask = np.logical_or((data == raster.nodata).any(axis=0), mask)

        masked = np.ma.MaskedArray(
            data,
            mask=np.ones_like(data) * mask
        )

        ys, xs = np.where(~mask)
        coord_xs, coord_ys = transform * (xs, ys)

        masked = masked.compressed()
        npix = masked.size / shape[0]
        masked = masked.reshape((shape[0], npix))

        label = np.repeat(label, coord_ys.size)

        yield (masked, label, coord_xs, coord_ys, )
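
A rough sketch of the masking step above, assuming a small 2-band window and a nodata value of -9999 (both hypothetical): a pixel is dropped if it lies outside the rasterized ROI or carries nodata in any band.

import numpy as np

data = np.random.rand(2, 3, 3)
data[:, 0, 0] = -9999          # nodata in both bands at the top-left pixel
roi = np.array([[1, 1, 0],
                [1, 1, 0],
                [0, 0, 0]])

mask = roi == 0
mask = np.logical_or((data == -9999).any(axis=0), mask)
print(mask)  # top-left pixel is masked even though it lies inside the ROI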

Example 27

Project: pandas-qt Source File: DataSearch.py
    def freeSearch(self, searchString):
        """Execute a free text search for all columns in the dataframe.

        Args:
            searchString (str): Any string which may be contained in any column.

        Returns:
            list: A list containing all indexes with filtered data. Matches will
                be `True`, the remaining items will be `False`. If the dataFrame
                is empty, an empty list will be returned.

        """

        if not self._dataFrame.empty:
            # set question to the indexes of data and set everything to false.
            question = self._dataFrame.index == -9999
            for column in self._dataFrame.columns:
                dfColumn = self._dataFrame[column]
                dfColumn = dfColumn.apply(unicode)

                question2 = dfColumn.str.contains(searchString, flags=re.IGNORECASE, regex=True, na=False)
                question = np.logical_or(question, question2)

            return question
        else:
            return []
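
A condensed sketch of the accumulation above on a throwaway DataFrame (note that unicode is Python 2; str is the Python 3 equivalent): the running question starts all False and each column's matches are OR-ed in.

import re
import numpy as np
import pandas as pd

df = pd.DataFrame({'name': ['Alice', 'Bob'], 'city': ['Berlin', 'Boston']})
question = df.index == -9999  # all False to start
for column in df.columns:
    col = df[column].apply(str)
    hit = col.str.contains('bo', flags=re.IGNORECASE, regex=True, na=False)
    question = np.logical_or(question, hit)
print(np.asarray(question))  # [False  True]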

Example 28

Project: pandas-qt Source File: DataSearch.py
Function: index_search
    def indexSearch(self, indexes):
        """Filters the data by a list of indexes.
        
        Args:
            indexes (list of int): List of index numbers to return.

        Returns:
            list: A list containing all indexes with filtered data. Matches will
                be `True`, the remaining items will be `False`. If the dataFrame
                is empty, an empty list will be returned.

        """
        if not self._dataFrame.empty:
            filter0 = self._dataFrame.index == -9999
            for index in indexes:
                filter1 = self._dataFrame.index == index
                filter0 = np.logical_or(filter0, filter1)

            return filter0
        else:
            return []

Example 29

Project: kaggle-heart Source File: nn_hough.py
def _multi_logical_or(*args):
    res = args[0]
    for arg in args[1:]:
        res = np.logical_or(res, arg)
    return res
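
numpy can perform the same fold directly with the ufunc's reduce method; a quick check (not from the project) that the two approaches agree:

import numpy as np

a = np.array([True, False, False])
b = np.array([False, True, False])
c = np.array([False, False, False])

# chaining np.logical_or pairwise is equivalent to reducing over the stack
print(np.logical_or.reduce([a, b, c]))  # [ True  True False]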

Example 30

Project: geopandas Source File: geoseries.py
Function: is_null
    def isnull(self):
        """Null values in a GeoSeries are represented by empty geometric objects"""
        non_geo_null = super(GeoSeries, self).isnull()
        val = self.apply(_is_empty)
        return np.logical_or(non_geo_null, val)

Example 31

Project: hyperspy Source File: bayesian_blocks.py
Function: validate_input
    def validate_input(self, t, x, sigma):
        if not np.all(np.logical_or(x == 0, x == 1)):
            raise ValueError("Regular events must have only 0 and 1 in x")
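
The check accepts an array only if every element is exactly 0 or 1; a small sketch of both outcomes:

import numpy as np

x = np.array([0, 1, 1, 0])
print(np.all(np.logical_or(x == 0, x == 1)))  # True

x = np.array([0, 1, 2])
print(np.all(np.logical_or(x == 0, x == 1)))  # False -> validate_input would raise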

Example 32

Project: SCW Source File: datasets.py
def load_mnist():
    def pick(X, y):
        indices = np.logical_or(y==0, y==1)
        X = X.todense()
        X = X[indices]
        y = y[indices]
        y = utils.overwrite_labels(y)
        return X, y

    n_features = 784
    training_path = join(dataset_dir, "mnist")
    test_path = join(dataset_dir, "mnist.t")

    if not exists(training_path) or not exists(test_path):
        download_mnist()

    X, y = datasets.load_svmlight_file(training_path, n_features=n_features)
    training = pick(X, y)

    X, y = datasets.load_svmlight_file(test_path, n_features=n_features)
    test = pick(X, y)
    return training, test
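
The pick helper reduces the multi-class labels to the 0-vs-1 subset. A self-contained sketch with toy data in place of the MNIST files:

import numpy as np

X = np.arange(10).reshape(5, 2)
y = np.array([0, 3, 1, 7, 1])

indices = np.logical_or(y == 0, y == 1)  # keep only classes 0 and 1
print(X[indices])  # rows 0, 2 and 4
print(y[indices])  # [0 1 1]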

Example 33

Project: veusz Source File: helpers.py
def generateValidDatasetParts(datasets, breakds=True):
    """Generator to return array of valid parts of datasets.

    if breakds is True:
      Yields new datasets between rows which are invalid
    else:
      Yields single, filtered dataset
    """

    # find NaNs and INFs in input dataset
    invalid = datasets[0].invalidDataPoints()
    minlen = invalid.shape[0]
    for ds in datasets[1:]:
        if isinstance(ds, DatasetBase) and not ds.empty():
            nextinvalid = ds.invalidDataPoints()
            minlen = min(nextinvalid.shape[0], minlen)
            invalid = N.logical_or(invalid[:minlen], nextinvalid[:minlen])

    if breakds:
        # return multiple datasets, breaking at invalid values

        # get indexes of invalid points
        indexes = invalid.nonzero()[0].tolist()

        # no bad points: optimisation
        if not indexes:
            yield datasets
            return

        # add on shortest length of datasets
        indexes.append(minlen)

        lastindex = 0
        for index in indexes:
            if index != lastindex:
                retn = []
                for ds in datasets:
                    if ds is not None and (
                            not isinstance(ds, DatasetBase) or
                            not ds.empty()):
                        retn.append(ds[lastindex:index])
                    else:
                        retn.append(None)
                yield retn
            lastindex = index+1

    else:
        # in this mode we return single datasets where the invalid
        # values are masked out

        if not N.any(invalid):
            yield datasets
            return

        valid = N.logical_not(invalid)
        retn = []
        for ds in datasets:
            if ds is None:
                retn.append(None)
            else:
                retn.append(ds[valid])
        yield retn
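
The invalid mask above is the union of the non-finite points of every dataset; a stripped-down sketch with plain arrays in place of veusz Dataset objects:

import numpy as np

a = np.array([1.0, np.nan, 3.0, 4.0])
b = np.array([5.0, 6.0, np.inf, 8.0])

# a row is invalid if any dataset is non-finite there
invalid = np.logical_or(~np.isfinite(a), ~np.isfinite(b))
print(invalid)  # [False  True  True False]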

Example 34

Project: veusz Source File: pickable.py
    def pickIndex(self, oldindex, direction, bounds):
        info = PickInfo(self.widget, labels=self.labels)

        if self.widget.settings.hide:
            return info

        if self.xvals is None or self.yvals is None:
            return info

        if oldindex.index is None:
            # no explicit index, so find the closest location to the previous
            # independent variable value
            i = N.logical_not( N.logical_or(
                    self.xvals < oldindex.ivar, self.xvals > oldindex.ivar) )

            # and pick the next
            if oldindex.sign == 1:
                i = max(N.nonzero(i)[0])
            else:
                i = min(N.nonzero(i)[0])
        else:
            i = oldindex.index

        if direction == 'right':
            incr = oldindex.sign
        elif direction == 'left':
            incr = -oldindex.sign
        else:
            assert direction == 'right' or direction == 'left'

        i += incr

        # skip points that are outside of the bounds or are not finite
        while (i >= 0 and i < len(self.xscreen) and
                ( not N.isfinite(self.xscreen[i]) or
                  not N.isfinite(self.yscreen[i]) or
                  (self.xscreen[i] < bounds[0] or self.xscreen[i] > bounds[2] or
                   self.yscreen[i] < bounds[1] or self.yscreen[i] > bounds[3]) )):
            i += incr

        if i < 0 or i >= len(self.xscreen):
            return info

        info.screenpos = self.xscreen[i], self.yscreen[i]
        info.coords = self.xvals[i], self.yvals[i]
        info.index = Index(self.xvals[i], i, oldindex.sign)

        return info

Example 35

Project: research2epub Source File: findspaces.py
@cache
def find_borders(pages):
	lrtb = [0, 0, 0, 0]
	v0 = 0
	plt.figure(figsize=(10, 5))
	region = indices[0] < 0 # empty region
	for dim in 0, 1, 2, 3:
		last_v = None
		last_flat_i = 0
		print 'constraint for %d: ' % dim + ('indices[%d] >= shape[%d] - i' % (dim/2, dim/2) if dim % 2 == 1 else 'indices[%d] <= i' % (dim/2))
		values = []
		for i in range(shape[0] / 2):
			assert i < shape[0] / 3
			region_add = indices[dim/2] >= shape[dim/2] - i if dim % 2 == 1 else indices[dim/2] <= i
			region_next = numpy.logical_or(region, region_add)
			v = similar(pages, region_next)
			print i, v, v - v0, region_next.sum() * 1. / region_next.size
			values.append([i, v - v0])
			if last_v is None or last_v == v:
				last_flat_i = i
			last_v = v
			if v - v0 > 0.001: # cut at 10% tops
				print 'in dim %d, stopping at %d; reverting back to %d' % (dim, i, last_flat_i)
				i = last_flat_i
				lrtb[dim] = i
				region_add = indices[dim/2] >= shape[dim/2] - i if dim % 2 == 1 else indices[dim/2] <= i
				region = numpy.logical_or(region, region_add)
				v0 = similar(pages, region)
				break
		values = numpy.array(values)
		plt.plot(values[:,0], values[:,1], 'x-', label='dim=%d' % dim)
	plt.legend(loc='best', ncol=2)
	if debug:
		plt.savefig('borders.pdf', bbox_inches='tight')
	plt.close()
	return lrtb, region

Example 36

Project: landlab Source File: route_flow_dn.py
    def route_flow(self, **kwds):
        """Route surface-water flow over a landscape.

        Routes surface-water flow by (1) assigning to each node a single
        drainage direction, and then (2) adding up the number of nodes that
        contribute flow to each node on the grid (including the node itself).

        Stores as ModelGrid fields:

        -  Node array of receivers (nodes that receive flow), or ITS OWN ID if
           there is no receiver: *'flow__receiver_node'*
        -  Node array of drainage areas: *'drainage_area'*
        -  Node array of discharges: *'surface_water__discharge'*
        -  Node array of steepest downhill slopes:
           *'topographic__steepest_slope'*
        -  Node array containing downstream-to-upstream ordered list of node
           IDs: *'flow__upstream_node_order'*
        -  Node array containing ID of link that leads from each node to its
           receiver, or BAD_INDEX_VALUE if no link:
           *'flow__link_to_receiver_node'*
        -  Boolean node array of all local lows: *'flow__sink_flag'*

        Returns
        -------
        ModelGrid
            The modified grid object

        Examples
        --------
        >>> import numpy as np
        >>> from landlab import RasterModelGrid
        >>> from landlab.components.flow_routing import FlowRouter
        >>> mg = RasterModelGrid((5, 4), spacing=(1, 1))
        >>> elev = np.array([0.,  0.,  0., 0.,
        ...                  0., 21., 10., 0.,
        ...                  0., 31., 20., 0.,
        ...                  0., 32., 30., 0.,
        ...                  0.,  0.,  0., 0.])
        >>> _ = mg.add_field('node','topographic__elevation', elev)
        >>> mg.set_closed_boundaries_at_grid_edges(True, True, True, False)
        >>> fr = FlowRouter(mg)
        >>> mg = fr.route_flow()
        >>> mg.at_node['flow__receiver_node'] # doctest: +NORMALIZE_WHITESPACE
        array([  0,  1,  2,  3,
                 4,  1,  2,  7,
                 8,  6,  6, 11,
                12, 10, 10, 15,
                16, 17, 18, 19])
        >>> mg.at_node['drainage_area'] # doctest: +NORMALIZE_WHITESPACE
        array([ 0.,  1.,  5.,  0.,
                0.,  1.,  5.,  0.,
                0.,  1.,  3.,  0.,
                0.,  1.,  1.,  0.,
                0.,  0.,  0.,  0.])

        Now let's change the cell area (100.) and the runoff rates:

        >>> mg = RasterModelGrid((5, 4), spacing=(10., 10))

        Put the data back into the new grid.

        >>> _ = mg.add_field('node','topographic__elevation', elev)
        >>> mg.set_closed_boundaries_at_grid_edges(True, True, True, False)
        >>> fr = FlowRouter(mg)
        >>> runoff_rate = np.arange(mg.number_of_nodes)
        >>> _ = mg.add_field('node', 'water__unit_flux_in', runoff_rate,
        ...                  noclobber=False)
        >>> mg = fr.route_flow()
        >>> mg.at_node['surface_water__discharge'] # doctest: +NORMALIZE_WHITESPACE
        array([    0.,   500.,  5200.,     0.,
                   0.,   500.,  5200.,     0.,
                   0.,   900.,  3700.,     0.,
                   0.,  1300.,  1400.,     0.,
                   0.,     0.,     0.,     0.])

        """
        # this retained for back compatibility - method now set in __init__.
        if 'method' in kwds:
            warnings.warn("'method' should be set at initialization now. " +
                          "Please update your code.", DeprecationWarning)
            # raise NameError
            if kwds['method'] not in ('D8', 'D4'):
                raise ValueError('method not understood ({method})'.format(
                    method=kwds['method']))
            else:
                self.method = kwds['method']
            if not self._is_raster:
                self.method = None

        if self._bc_set_code != self.grid.bc_set_code:
            self.updated_boundary_conditions()
            self._bc_set_code = self.grid.bc_set_code

        # We assume that elevations are provided in a field called
        # 'topographic__elevation'
        elevs = self._grid['node']['topographic__elevation']

        node_cell_area = self._grid.cell_area_at_node.copy()
        node_cell_area[self._grid.closed_boundary_nodes] = 0.
        # closed cells can't contribute

        # Calculate the downhill-positive slopes at the d8 active links
        if self.method == 'D8':
            link_slope = - self._grid._calculate_gradients_at_d8_active_links(
                elevs)
        else:
            link_slope = - self._grid.calc_grad_of_active_link(
                elevs)

        # Find the baselevel nodes
        (baselevel_nodes, ) = numpy.where(
            numpy.logical_or(self._grid.status_at_node == FIXED_VALUE_BOUNDARY,
                             self._grid.status_at_node == FIXED_GRADIENT_BOUNDARY))

        # Calculate flow directions
        if self.method == 'D4':
            num_d4_active = self._grid.number_of_active_links  # only d4
            receiver, steepest_slope, sink, recvr_link = \
                flow_direction_DN.flow_directions(elevs, self._active_links,
                                         self._activelink_tail[:num_d4_active],
                                         self._activelink_head[:num_d4_active],
                                         link_slope,
                                         grid=self._grid,
                                         baselevel_nodes=baselevel_nodes)
        else:  # Voronoi or D8
            receiver, steepest_slope, sink, recvr_link = \
                flow_direction_DN.flow_directions(elevs, self._active_links,
                                     self._activelink_tail,
                                     self._activelink_head, link_slope,
                                     grid=self._grid,
                                     baselevel_nodes=baselevel_nodes)

        # TODO: either need a way to calculate and return the *length* of the
        # flow links, OR the caller has to handle the raster / non-raster case.

        # Calculate drainage area, discharge, and ...
        a, q, s = flow_accum_bw.flow_accumulation(
            receiver, sink, node_cell_area=node_cell_area,
            runoff_rate=self._grid.at_node['water__unit_flux_in'])

        # added DEJH March 2014:
        # store the generated data in the grid
        self._grid['node']['drainage_area'][:] = a
        self._grid['node']['flow__receiver_node'][:] = receiver
        self._grid['node']['topographic__steepest_slope'][:] = steepest_slope
        self._grid['node']['surface_water__discharge'][:] = q
        self._grid['node']['flow__upstream_node_order'][:] = s
        self._grid['node']['flow__link_to_receiver_node'][:] = recvr_link
        self._grid['node']['flow__sink_flag'][:] = numpy.zeros_like(receiver,
                                                                    dtype=bool)
        self._grid['node']['flow__sink_flag'][sink] = True

        return self._grid

Example 37

Project: TheCannon Source File: apogee.py
Function: get_starmask
def get_starmask(ids, labels, aspcapflag, paramflag):
    """ Identifies which APOGEE objects have unreliable physical parameters,
    as laid out in Holzman et al 2015 and on the APOGEE DR12 website

    Parameters
    ----------
    data: np array
        all APOGEE DR12 IDs and labels

    Returns
    -------
    bad: np array
        mask where 1 corresponds to a star with unreliable parameters
    """
    # teff outside range (4000,6000) K and logg < 0
    teff = labels[0,:]
    bad_teff = np.logical_or(teff < 4000, teff > 6000)
    logg = labels[1,:]
    bad_logg = logg < 0
    cuts = bad_teff | bad_logg

    # STAR_WARN flag set (TEFF, LOGG, CHI2, COLORTE, ROTATION, SN)
    # M_H_WARN, ALPHAFE_WARN not included in the above, so do them separately
    star_warn = np.bitwise_and(aspcapflag, 2**7) != 0
    star_bad = np.bitwise_and(aspcapflag, 2**23) != 0
    feh_warn = np.bitwise_and(aspcapflag, 2**3) != 0
    alpha_warn = np.bitwise_and(aspcapflag, 2**4) != 0
    aspcapflag_bad = star_warn | star_bad | feh_warn | alpha_warn

    # separate element flags
    teff_flag = paramflag[:,0] != 0
    logg_flag = paramflag[:,1] != 0
    feh_flag = paramflag[:,3] != 0
    alpha_flag = paramflag[:,4] != 0
    paramflag_bad = teff_flag | logg_flag | feh_flag | alpha_flag

    return cuts | aspcapflag_bad | paramflag_bad 
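
A minimal sketch of the temperature/gravity cut above with made-up stellar parameters (the bitmask checks are omitted):

import numpy as np

teff = np.array([3500., 4500., 5800., 6500.])
logg = np.array([4.4, -0.5, 4.0, 4.2])

bad_teff = np.logical_or(teff < 4000, teff > 6000)
bad_logg = logg < 0
print(bad_teff | bad_logg)  # [ True  True False  True]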

Example 38

Project: TheCannon Source File: lamost.py
Function: get_starmask
def get_starmask(ids, labels, aspcapflag, paramflag):
    """ Identifies which APOGEE objects have unreliable physical parameters,
    as laid out in Holzman et al 2015 and on the APOGEE DR12 website

    Parameters
    ----------
    data: np array
        all APOGEE DR12 IDs and labels

    Returns
    -------
    bad: np array
        mask where 1 corresponds to a star with unreliable parameters
    """
    # teff outside range (4000,6000) K and logg < 0
    teff = labels[0,:]
    bad_teff = np.logical_or(teff < 4000, teff > 6000)
    logg = labels[1,:]
    bad_logg = logg < 0
    cuts = bad_teff | bad_logg

    # STAR_WARN flag set (TEFF, LOGG, CHI2, COLORTE, ROTATION, SN)
    # M_H_WARN, ALPHAFE_WARN not included in the above, so do them separately
    star_warn = np.bitwise_and(aspcapflag, 2**7) != 0
    star_bad = np.bitwise_and(aspcapflag, 2**23) != 0
    mh_warn = np.bitwise_and(aspcapflag, 2**3) != 0
    alpha_warn = np.bitwise_and(aspcapflag, 2**4) != 0
    aspcapflag_bad = star_warn | star_bad | mh_warn | alpha_warn

    # separate element flags
    teff_flag = paramflag[:,0] != 0
    logg_flag = paramflag[:,1] != 0
    mh_flag = paramflag[:,3] != 0
    alpha_flag = paramflag[:,4] != 0
    paramflag_bad = teff_flag | logg_flag | mh_flag | alpha_flag

    return cuts | aspcapflag_bad | paramflag_bad 

Example 39

Project: flopy Source File: util_list.py
    def check_kij(self):
        names = self.dtype.names
        if ('k' not in names) or ('i' not in names) or ('j' not in names):
            warnings.warn("MfList.check_kij(): index fieldnames \'k,i,j\' " +
                          "not found in self.dtype names: " + str(names))
            return
        nr, nc, nl, nper = self.model.get_nrow_ncol_nlay_nper()
        if (nl == 0):
            warnings.warn("MfList.check_kij(): unable to get dis info from " +
                          "model")
            return
        for kper in list(self.data.keys()):
            out_idx = []
            data = self[kper]
            if (data is not None):
                k = data['k']
                k_idx = np.where(np.logical_or(k < 0, k >= nl))
                if (k_idx[0].shape[0] > 0):
                    out_idx.extend(list(k_idx[0]))
                i = data['i']
                i_idx = np.where(np.logical_or(i < 0, i >= nr))
                if (i_idx[0].shape[0] > 0):
                    out_idx.extend(list(i_idx[0]))
                j = data['j']
                j_idx = np.where(np.logical_or(j < 0, j >= nc))
                if (j_idx[0].shape[0]):
                    out_idx.extend(list(j_idx[0]))

                if (len(out_idx) > 0):
                    warn_str = "MfList.check_kij(): warning the following " + \
                               "indices are out of bounds in kper " + \
                               str(kper) + ':\n'
                    for idx in out_idx:
                        d = data[idx]
                        warn_str += " {0:9d} {1:9d} {2:9d}\n".format(
                            d['k'] + 1, d['i'] + 1, d['j'] + 1)
                    warnings.warn(warn_str)
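
The per-field bounds test above flags any index below 0 or at/above the layer, row or column count. A toy check on the layer index alone (nl is hypothetical):

import numpy as np

k = np.array([0, 2, -1, 5])
nl = 3  # hypothetical number of layers

k_idx = np.where(np.logical_or(k < 0, k >= nl))
print(k_idx[0])  # [2 3] -> these entries would be reported as out of bounds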

Example 40

Project: openfisca-core Source File: measure_performances.py
    def function(self, simulation, period):
        period = period.start.period('year').offset('first-of')
        depcom = simulation.calculate('depcom', period)
        return period, np.logical_or(startswith(depcom, '97'), startswith(depcom, '98'))

Example 41

Project: openfisca-core Source File: test_countries.py
    def function(self, simulation, period):
        period = period.start.period(u'year').offset('first-of')
        depcom = simulation.calculate('depcom', period)

        return period, np.logical_or(startswith(depcom, '97'), startswith(depcom, '98'))

Example 42

Project: pygmi Source File: equation_editor.py
Function: settings
    def settings(self):
        """ Settings """
        self.combobox.clear()
        self.combobox.addItem('all data')

        self.bandsall = []

        if 'Cluster' in self.indata:
            intype = 'Cluster'
        elif 'Raster' in self.indata:
            intype = 'Raster'
        else:
            self.parent.showprocesslog('No raster data')
            return

        indata = dataprep.merge(self.indata[intype])

        mask = indata[-1].data.mask
        for i in indata:
            mask = np.logical_or(mask, i.data.mask)
        for i in indata:
            i.data.mask = mask

        self.localdict = {}
        j = 0
        for i in indata:
            j += 1
            self.localdict['i'+str(j)] = i.data
            self.bands[i.dataid] = 'i'+str(j)
            self.bandsall.append(i.data)
            self.combobox.addItem(i.dataid)
        self.localdict['iall'] = np.ma.array(self.bandsall)

        self.bandsall = np.ma.array(self.bandsall)

        temp = self.exec_()

        if temp == 0:
            return

        self.equation = self.textbrowser.toPlainText()

        if self.equation == '':
            return

        neweq = self.eq_fix(indata)

        try:
            findat = ne.evaluate(neweq, self.localdict)
        except:
            QtGui.QMessageBox.warning(
                self.parent, 'Error',
                ' Nothing processed! Your equation most likely had an error.',
                QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
            return

        outdata = []

        if np.size(findat) == 1:
            QtGui.QMessageBox.warning(
                self.parent, 'Warning',
                ' Nothing processed! Your equation outputs a single ' +
                'value instead of a minimum of one band.',
                QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
            return
        elif len(findat.shape) == 2:
            findat[np.isnan(findat)] = indata[0].nullvalue
            mask = indata[0].data.mask
            findat[mask] = indata[0].nullvalue

            outdata = [copy.copy(indata[0])]
            outdata[0].data = np.ma.masked_equal(findat, indata[0].nullvalue)
            outdata[0].dataid = 'equation output'
        else:
            for i, findati in enumerate(findat):
                findat[i][np.isnan(findati)] = indata[i].nullvalue
                mask = indata[i].data.mask
                findat[i][mask] = indata[i].nullvalue
                outdata.append(copy.copy(indata[i]))
                outdata[-1].data = np.ma.masked_equal(findati,
                                                      indata[i].nullvalue)

        # This is needed to get rid of bad, unmasked values etc.
        for i, _ in enumerate(outdata):
            mask = np.logical_or(mask, np.isinf(outdata[i].data))
            mask = np.logical_or(mask, np.isnan(outdata[i].data))
            outdata[i].data.mask = mask
            outdata[i].data.fill_value = indata[i].data.fill_value

        self.outdata[intype] = outdata

        return True

Example 43

Project: pygmi Source File: graphs.py
    def update_hexbin(self, data1, data2):
        """
        Update the hexbin plot

        Parameters
        ----------
        data1 : PyGMI raster Data
            raster dataset to be used
        data2 : PyGMI raster Data
            raster dataset to be used
        """
        self.figure.clear()
        self.axes = self.figure.add_subplot(111)
        x = data1.copy()
        y = data2.copy()
        msk = np.logical_or(x.mask, y.mask)
        x.mask = msk
        y.mask = msk
        x = x.compressed()
        y = y.compressed()

        xmin = x.min()
        xmax = x.max()
        ymin = y.min()
        ymax = y.max()

        hbin = self.axes.hexbin(x, y, bins='log')
        self.axes.axis([xmin, xmax, ymin, ymax])
        self.axes.set_title('Hexbin Plot')
        cbar = self.figure.colorbar(hbin)
        cbar.set_label('log10(N)')

        self.figure.tight_layout()
        self.figure.canvas.draw()
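
A small sketch of the mask merge above with two toy masked arrays: both inputs end up sharing the union of their masks, so compressed() returns aligned pairs.

import numpy as np

x = np.ma.array([1., 2., 3., 4.], mask=[True, False, False, False])
y = np.ma.array([5., 6., 7., 8.], mask=[False, False, True, False])

msk = np.logical_or(x.mask, y.mask)
x.mask = msk
y.mask = msk
print(x.compressed(), y.compressed())  # [2. 4.] [6. 8.]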