numpy.ma.masked_where

Here are examples of the Python API numpy.ma.masked_where taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
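
Before the examples, a minimal sketch of the call itself: numpy.ma.masked_where(condition, a) returns a masked array in which the elements of a are masked wherever condition is True. The array and threshold below are illustrative only, not drawn from any of the projects listed.

    import numpy as np

    a = np.arange(4)
    masked = np.ma.masked_where(a <= 2, a)
    print(masked)       # [-- -- -- 3]
    print(masked.mask)  # [ True  True  True False]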

107 Examples

3 Source : test_shape_base.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_axis_insertion_ma(self):
        def f1to2(x):
            """produces an assymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            res = x[::-1] * x[1:,None]
            return np.ma.masked_where(res%5==0, res)
        a = np.arange(6*3).reshape((6, 3))
        res = apply_along_axis(f1to2, 0, a)
        assert_(isinstance(res, np.ma.masked_array))
        assert_equal(res.ndim, 3)
        assert_array_equal(res[:,:,0].mask, f1to2(a[:,0]).mask)
        assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask)
        assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask)

    def test_tuple_func1d(self):

3 Source : test_old_ma.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_minmax(self):
        a = arange(1, 13).reshape(3, 4)
        amask = masked_where(a < 5, a)
        assert_equal(amask.max(), a.max())
        assert_equal(amask.min(), 5)
        assert_((amask.max(0) == a.max(0)).all())
        assert_((amask.min(0) == [5, 6, 7, 8]).all())
        assert_(amask.max(1)[0].mask)
        assert_(amask.min(1)[0].mask)

    def test_nonzero(self):

3 Source : test_regression.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_mem_masked_where(self):
        # Ticket #62
        from numpy.ma import masked_where, MaskType
        a = np.zeros((1, 1))
        b = np.zeros(a.shape, MaskType)
        c = masked_where(b, a)
        a-c

    def test_masked_array_multiply(self):

3 Source : test_shape_base.py
with MIT License
from alvarobartt

    def test_axis_insertion_ma(self):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            res = x[::-1] * x[1:,None]
            return np.ma.masked_where(res%5==0, res)
        a = np.arange(6*3).reshape((6, 3))
        res = apply_along_axis(f1to2, 0, a)
        assert_(isinstance(res, np.ma.masked_array))
        assert_equal(res.ndim, 3)
        assert_array_equal(res[:,:,0].mask, f1to2(a[:,0]).mask)
        assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask)
        assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask)

    def test_tuple_func1d(self):

3 Source : extractProduct.py
with GNU General Public License v3.0
from aria-tools

    def __init__(self, data_array, prod_key, outname, verbose=None):
        # Pass inputs
        self.data_array = data_array
        self.prod_key = prod_key
        self.outname = outname
        self.verbose = verbose
        self.data_array_band=data_array.GetRasterBand(1).ReadAsArray()
        #mask by nodata value
        self.data_array_band=np.ma.masked_where(self.data_array_band ==
                self.data_array.GetRasterBand(1).GetNoDataValue(),
                self.data_array_band)

        logger.setLevel(logging.DEBUG) if self.verbose else ''

        # Run class
        self.__run__()

    def __truncateArray__(self, data_array_band, Xmask, Ymask):

3 Source : ops_math.py
with GNU Lesser General Public License v3.0
from brenthuisman

	def smudge(self,mskval,frac=1.):
		'''assume mskval must be ignored'''
		tmp = np.ma.masked_where(self.imdata == mskval, self.imdata)
		self.imdata[self.imdata != mskval] = tmp.mean()

	def tolowpass(self,threshold):

3 Source : test_stats.py
with MIT License
from buds-lab

    def test_masked_3d_array(self):
        ma = np.ma.masked_where(self.array_3d > 16, self.array_3d)
        gstd_actual = stats.gstd(ma, axis=2)
        gstd_desired = stats.gstd(self.array_3d, axis=2)
        mask = [[0, 0, 0], [0, 1, 1]]
        assert_allclose(gstd_actual, gstd_desired)
        assert_equal(gstd_actual.mask, mask)


def test_binomtest():

3 Source : test_matrix.py
with MIT License
from buds-lab

    def test_mask_input(self):
        kws = self.default_kws.copy()

        mask = self.x_norm > 0
        kws['mask'] = mask
        p = mat._HeatMapper(self.x_norm, **kws)
        plot_data = np.ma.masked_where(mask, self.x_norm)

        npt.assert_array_equal(p.plot_data, plot_data)

    def test_default_vlims(self):

3 Source : util_misc.py
with MIT License
from CIRADA-Tools

def nanmedian(arr, **kwargs):
    """
    Returns median ignoring NaNs.
    """
    
    return ma.median( ma.masked_where(arr!=arr, arr), **kwargs )


#-----------------------------------------------------------------------------#
def nanmean(arr, **kwargs):

3 Source : util_misc.py
with MIT License
from CIRADA-Tools

def nanmean(arr, **kwargs):
    """
    Returns mean ignoring NaNs.
    """
    
    return ma.mean( ma.masked_where(arr!=arr, arr), **kwargs )

#-----------------------------------------------------------------------------#
def nanstd(arr, **kwargs):

3 Source : util_misc.py
with MIT License
from CIRADA-Tools

def nanstd(arr, **kwargs):
    """
    Returns standard deviation ignoring NaNs.
    """
    
    return ma.std( ma.masked_where(arr!=arr, arr), **kwargs )

#-----------------------------------------------------------------------------#
def extrap(x, xp, yp):
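
The arr != arr condition used by the three helpers above relies on the IEEE 754 rule that NaN is the only value that compares unequal to itself, so the mask lands exactly on the NaN entries. A quick sketch with a made-up sample array:

    import numpy as np
    import numpy.ma as ma

    arr = np.array([1.0, np.nan, 3.0])
    print(ma.median(ma.masked_where(arr != arr, arr)))  # 2.0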

3 Source : test_divergences.py
with BSD 3-Clause "New" or "Revised" License
from coarse-graining

def test_js_divergence_2():
    # Tests the calculation of JS divergence for two random distributions with
    # zeros using masked arrays

    # This is the same test as test_js_divergence_1, just done using numpy
    # operations rather than loops
    dist1_masked = np.ma.masked_where(dist1 == 0, dist1)
    dist2_masked = np.ma.masked_where(dist2 == 0, dist2)
    elementwise_mean = 0.5 * (dist1_masked + dist2_masked)
    summand = 0.5 * (dist1_masked * np.ma.log(dist1_masked / elementwise_mean))
    summand += 0.5 * \
        ((dist2_masked * np.ma.log(dist2_masked / elementwise_mean)))
    manual_div = np.ma.sum(summand)

    cgnet_div = js_divergence(dist1, dist2)
    np.testing.assert_allclose(manual_div, cgnet_div)


def test_full_discrete_distr_intersection():

3 Source : test_matrix.py
with Apache License 2.0
from dashanji

    def test_mask_input(self, dtype):
        kws = self.default_kws.copy()

        mask = self.x_norm > 0
        kws['mask'] = mask
        data = self.x_norm.astype(dtype)
        p = mat._HeatMapper(data, **kws)
        plot_data = np.ma.masked_where(mask, data)

        npt.assert_array_equal(p.plot_data, plot_data)

    def test_mask_limits(self):

3 Source : test_hist.py
with BSD 3-Clause "New" or "Revised" License
from earthlab

def test_hist_masked_array(image_array_2bands):
    """ Test that a masked 2 band array plots properly"""
    masked_arr = np.ma.masked_where(
        image_array_2bands == 6, image_array_2bands
    )
    nbins = 6
    f, ax = ep.hist(masked_arr, bins=nbins)
    for a in ax:
        assert len(a.patches) == 6
    assert len(f.axes) == 2
    plt.close(f)


def test_hist_1band_masked_array(image_array_single_band):

3 Source : test_hist.py
with BSD 3-Clause "New" or "Revised" License
from earthlab

def test_hist_1band_masked_array(image_array_single_band):
    """Ensure that a masked single band arr plots & the number of bins is
    correct"""
    masked_arr = np.ma.masked_where(
        image_array_single_band == 4, image_array_single_band
    )
    nbins = 3
    f, ax = ep.hist(masked_arr, title=["TITLE HERE"], bins=nbins)
    assert len(f.axes) == 1
    assert len(ax.patches) == nbins
    plt.close(f)


def test_hist_1band_range(image_array_single_band):

3 Source : test_hist.py
with BSD 3-Clause "New" or "Revised" License
from earthlab

def test_hist_fully_masked_array(image_array_single_band):
    masked_arr = np.ma.masked_where(
        image_array_single_band >= 0, image_array_single_band
    )
    with pytest.raises(ValueError, match="is not finite"):
        f, ax = ep.hist(masked_arr)
        plt.close(f)


def test_hist_partially_masked_array(image_array_3bands):

3 Source : test_mask.py
with BSD 3-Clause "New" or "Revised" License
from earthlab

def test_masked_arr_provided(im, im_mask):
    """If a masked array is provided, mask returns masked array."""

    masked_input = np.ma.masked_where(im_mask == 0, im)
    out = mask_pixels(masked_input, im_mask, vals=[1])
    expected_in_mask = np.array([[True, True, False], [False, False, False]])
    expected_out_mask = np.array([[True, True, True], [True, False, False]])

    assert np.array_equal(expected_in_mask, masked_input.mask)
    assert np.array_equal(expected_out_mask, out.mask)


def test_boolean_mask(im, im_mask):

3 Source : test_plot_rgb.py
with BSD 3-Clause "New" or "Revised" License
from earthlab

def test_masked_im(rgb_image):
    """Test that a masked image will be plotted using an alpha channel.
    Thus it should return an array that has a 4th dimension representing
    the alpha channel."""

    im, _ = rgb_image
    im_ma = ma.masked_where(im > 140, im)

    ax = plot_rgb(im_ma)
    im_plot = ax.get_images()[0].get_array()
    assert im_plot.shape[2] == 4
    plt.close()


def test_ticks_off(rgb_image):

3 Source : _gridprop_import_grdecl.py
with GNU Lesser General Public License v3.0
from equinor

def import_grdecl_prop(pfile, name, grid):
    """Read a GRDECL ASCII property record"""
    result = dict()
    result["ncol"] = grid.ncol
    result["nrow"] = grid.nrow
    result["nlay"] = grid.nlay
    result["name"] = name
    result["filesrc"] = pfile
    actnumv = grid.get_actnum().values

    result["values"] = ma.masked_where(
        actnumv == 0, read_grdecl_3d_property(pfile.file, name, grid.dimensions, float)
    )
    return result

3 Source : xsection.py
with GNU Lesser General Public License v3.0
from equinor

    def _plot_well_traj(self, ax, zv, hv, welltrajcolor, linewidth):
        """Plot the trajectory as a black line."""
        zv_copy = ma.masked_where(zv < self._zmin, zv)
        hv_copy = ma.masked_where(zv < self._zmin, hv)

        ax.plot(hv_copy, zv_copy, linewidth=linewidth, c=welltrajcolor)

    @staticmethod

3 Source : cdl_upscale.py
with MIT License
from facebookresearch

def upscaled_cdl_postprocess(in_file, out_dir, out_file, threshold=0.0):
    fh_in = Dataset(in_file, 'r')

    cdl_fraction_1 = fh_in.variables['cdl_fraction_1'][:]
    kept_cdls = ma.masked_where(cdl_fraction_1 < threshold, fh_in.variables['cdl_1'][:])
    cdl_id, cdl_count = np.unique(kept_cdls.compressed(), return_counts=True)
    for i, c in zip(cdl_id, cdl_count):
        print(cdl_values_to_crops[int(i)], c)


if __name__ == "__main__":

3 Source : test_regression.py
with MIT License
from ktraunmueller

    def test_mem_masked_where(self,level=rlevel):
        """Ticket #62"""
        from numpy.ma import masked_where, MaskType
        a = np.zeros((1, 1))
        b = np.zeros(a.shape, MaskType)
        c = masked_where(b, a)
        a-c

    def test_masked_array_multiply(self,level=rlevel):

3 Source : pfp_plot.py
with BSD 3-Clause "New" or "Revised" License
from OzFlux

def get_yarray(ds,ThisOne):
    yarray = numpy.ma.masked_where(abs(ds.series[ThisOne]['Data']-float(c.missing_value)) < c.eps,
                                        ds.series[ThisOne]['Data'])
    nRecs = numpy.ma.size(yarray)
    nNotM = numpy.ma.count(yarray)
    nMskd = numpy.ma.count_masked(yarray)
    if numpy.ma.count(yarray)==0:
        yarray = numpy.ma.zeros(numpy.size(yarray))
    return yarray,nRecs,nNotM,nMskd

def get_yaxislimitsfromcf(cf, nFig, maxkey, minkey, nSer, YArray):

3 Source : pfp_ts.py
with BSD 3-Clause "New" or "Revised" License
from OzFlux

def TransformAlternate(TList,DateTime,Series,ts=30):
    # Apply polynomial transform to data series being used as replacement data for gap filling
    si = pfp_utils.GetDateIndex(DateTime,TList[0],ts=ts,default=0,match='exact')
    ei = pfp_utils.GetDateIndex(DateTime,TList[1],ts=ts,default=-1,match='exact')
    Series = numpy.ma.masked_where(abs(Series-float(c.missing_value)) < c.eps, Series)
    Series[si:ei] = pfp_utils.polyval(TList[2],Series[si:ei])
    Series = numpy.ma.filled(Series,float(c.missing_value))

3 Source : test_old_ma.py
with Apache License 2.0
from pierreant

    def test_minmax(self):
        a = arange(1, 13).reshape(3, 4)
        amask = masked_where(a < 5, a)
        self.assertEqual(amask.max(), a.max())
        self.assertEqual(amask.min(), 5)
        self.assertTrue((amask.max(0) == a.max(0)).all())
        self.assertTrue((amask.min(0) == [5, 6, 7, 8]).all())
        self.assertTrue(amask.max(1)[0].mask)
        self.assertTrue(amask.min(1)[0].mask)

    def test_nonzero(self):

3 Source : test_regression.py
with Apache License 2.0
from pierreant

    def test_mem_masked_where(self,level=rlevel):
        # Ticket #62
        from numpy.ma import masked_where, MaskType
        a = np.zeros((1, 1))
        b = np.zeros(a.shape, MaskType)
        c = masked_where(b, a)
        a-c

    def test_masked_array_multiply(self,level=rlevel):

3 Source : LST.py
with MIT License
from richieBao

    def NDVI(self,RED,NIR):
        RED=np.ma.masked_where(NIR+RED==0,RED)
        NDVI=(NIR-RED)/(NIR+RED)
        NDVI=NDVI.filled(-999)
#        print(NDVI)
        print("!"+"_min:%f,max:%f"%(NDVI.min(),NDVI.max()))
        return NDVI
    
# 计算 Land Surface Emissivity (LSE):  
    def LSE(self,NDVI):

3 Source : datadict.py
with MIT License
from toolsforexperiments

    def mask_invalid(self: T) -> T:
        """
        Mask all invalid data in all values.
        :return: copy of the dataset with invalid entries (nan/None) masked.
        """
        ret = self.copy()
        for d, _ in self.data_items():
            arr = self.data_vals(d)
            vals = np.ma.masked_where(num.is_invalid(arr), arr, copy=True)
            try:
                vals.fill_value = np.nan
            except TypeError:
                vals.fill_value = -9999
            ret[d]['values'] = vals

        return ret


class DataDict(DataDictBase):

3 Source : insects.py
with MIT License
from tukiains

def _get_smoothed_v(obs, sigma=(5, 5)):
    smoothed_v = gaussian_filter(obs.v, sigma)
    smoothed_v = ma.masked_where(obs.v.mask, smoothed_v)
    return smoothed_v


def _calc_prob_from_ldr(prob):

3 Source : drizzle.py
with MIT License
from tukiains

def _append_data(drizzle_data, results):
    """Save retrieved fields to the drizzle_data object."""
    for key, value in results.items():
        value = ma.masked_where(value == 0, value)
        drizzle_data.append_data(value, key)


DRIZZLE_ATTRIBUTES = {

3 Source : test_droplet.py
with MIT License
from tukiains

def test_intepolate_lwp():
    class Obs:
        def __init__(self):
            self.time = np.arange(11)
            self.lwp_orig = np.linspace(1, 11, 11)
            self.lwp = ma.masked_where(self.lwp_orig % 2 == 0, self.lwp_orig)
    obs = Obs()
    lwp_interpolated = droplet.interpolate_lwp(obs)
    assert_array_equal(obs.lwp_orig, lwp_interpolated)


@pytest.mark.parametrize("is_freezing, top_above, result", [

2 Source : pfp_plot.py
with BSD 3-Clause "New" or "Revised" License
from OzFlux

def plot_fcvsustar(ds):
    """
    Purpose:
     Plots Fc versus u* for each year and for each season
     (summer=DJF, autumn=MAM, winter=JJA, spring=SON) in
     each year.
    """
    site_name = ds.globalattributes["site_name"]
    nrecs = int(ds.globalattributes["nc_nrecs"])
    ts = int(ds.globalattributes["time_step"])
    ldt = pfp_utils.GetVariable(ds, "DateTime")
    nbins = 20
    # plot each year
    plt.ion()
    start_year = ldt["Data"][0].year
    end_year = ldt["Data"][-1].year
    logger.info(" Doing annual Fc versus u* plots")
    for year in range(start_year, end_year+1):
        # get the start and end datetimes
        start = datetime.datetime(year, 1, 1, 0, 30, 0)
        end = datetime.datetime(year+1, 1, 1, 0, 0, 0)
        # get the variables from the data structure
        Fc = pfp_utils.GetVariable(ds, "Fc", start=start, end=end)
        Fsd = pfp_utils.GetVariable(ds, "Fsd", start=start, end=end)
        ustar = pfp_utils.GetVariable(ds, "ustar", start=start, end=end)
        # get the observations and night time filters
        obs = (Fc["Flag"] == 0) & (ustar["Flag"] == 0)
        night = (Fsd["Data"] <= 10)
        obs_night_filter = obs & night
        # mask anything that is not an observation and at night
        ustar["Data"] = numpy.ma.masked_where(obs_night_filter == False, ustar["Data"])
        Fc["Data"] = numpy.ma.masked_where(obs_night_filter == False, Fc["Data"])
        # get mask when either ustar or Fc masked
        mask = numpy.ma.mask_or(numpy.ma.getmaskarray(ustar["Data"]), numpy.ma.getmaskarray(Fc["Data"]))
        # apply mask
        ustar["Data"] = numpy.ma.masked_where(mask == True, ustar["Data"])
        Fc["Data"] = numpy.ma.masked_where(mask == True, Fc["Data"])
        # remove masked elements
        ustar["Data"] = numpy.ma.compressed(ustar["Data"])
        Fc["Data"] = numpy.ma.compressed(Fc["Data"])
        # get the binned statistics
        count, edges, numbers = stats.binned_statistic(ustar["Data"],Fc["Data"], statistic='count', bins=nbins)
        means, edges, numbers = stats.binned_statistic(ustar["Data"],Fc["Data"], statistic='mean', bins=nbins)
        stdevs, edges, numbers = stats.binned_statistic(ustar["Data"],Fc["Data"], statistic='std', bins=nbins)
        mids = (edges[:-1]+edges[1:])/2
        # drop bins with less than 10 counts
        mids = numpy.array(mids[count >= 10])
        means = numpy.array(means[count >= 10])
        stdevs = numpy.array(stdevs[count >= 10])
        # do the plot
        fig = plt.figure()
        fig.canvas.set_window_title("Fc versus u*: "+str(year))
        plt.plot(ustar["Data"], Fc["Data"], 'b.', alpha=0.25)
        plt.errorbar(mids, means, yerr=stdevs, fmt='ro')
        plt.xlabel("u* ("+ustar["Attr"]["units"]+")")
        plt.ylabel("Fc ("+Fc["Attr"]["units"]+")")
        plt.title(site_name+": "+str(year))
        plt.draw()
    # plot 4 seasons for each year
    logger.info(" Doing seasonal Fc versus u* plots")
    seasons = {"summer":[12, 1, 2], "autumn":[3, 4, 5], "winter":[6, 7, 8], "spring":[9, 10, 11]}
    nrows = 2
    ncols = 2
    for year in range(start_year, end_year+1):
        fig, axs = plt.subplots(nrows=nrows, ncols=ncols)
        fig.canvas.set_window_title("Fc versus u*: "+str(year))
        for n, season in enumerate(["Summer", "Autumn", "Winter", "Spring"]):
            col = numpy.mod(n, ncols)
            row = n//ncols  # integer division: axs indices must be ints
            if season == "Summer":
                start = datetime.datetime(year-1, 12, 1, 0, 0, 0) + datetime.timedelta(minutes=ts)
                end = datetime.datetime(year, 3, 1, 0, 0, 0)
            elif season == "Autumn":
                start = datetime.datetime(year, 3, 1, 0, 0, 0) + datetime.timedelta(minutes=ts)
                end = datetime.datetime(year, 6, 1, 0, 0, 0)
            elif season == "Winter":
                start = datetime.datetime(year, 6, 1, 0, 0, 0) + datetime.timedelta(minutes=ts)
                end = datetime.datetime(year, 9, 1, 0, 0, 0)
            elif season == "Spring":
                start = datetime.datetime(year, 9, 1, 0, 0, 0) + datetime.timedelta(minutes=ts)
                end = datetime.datetime(year, 12, 1, 0, 0, 0)
            if (end < ldt["Data"][0]) or (start > ldt["Data"][-1]):
                fig.delaxes(axs[row, col])
                continue
            # get the variables from the data structure
            Fc = pfp_utils.GetVariable(ds, "Fc", start=start, end=end)
            Fsd = pfp_utils.GetVariable(ds, "Fsd", start=start, end=end)
            ustar = pfp_utils.GetVariable(ds, "ustar", start=start, end=end)
            # get the observations and night time filters
            obs = (Fc["Flag"] == 0) & (ustar["Flag"] == 0)
            night = (Fsd["Data"] <= 10)
            obs_night_filter = obs & night
            # mask anything that is not an observation and at night
            ustar["Data"] = numpy.ma.masked_where(obs_night_filter == False, ustar["Data"])
            Fc["Data"] = numpy.ma.masked_where(obs_night_filter == False, Fc["Data"])
            # get mask when either ustar or Fc masked
            mask = numpy.ma.mask_or(numpy.ma.getmaskarray(ustar["Data"]), numpy.ma.getmaskarray(Fc["Data"]))
            # apply mask
            ustar["Data"] = numpy.ma.masked_where(mask == True, ustar["Data"])
            Fc["Data"] = numpy.ma.masked_where(mask == True, Fc["Data"])
            # remove masked elements
            ustar["Data"] = numpy.ma.compressed(ustar["Data"])
            Fc["Data"] = numpy.ma.compressed(Fc["Data"])
            # if no data, skip this plot and delete the axes
            if (len(ustar["Data"]) == 0) or (len(Fc["Data"]) == 0):
                fig.delaxes(axs[row, col])
                continue
            # get the binned statistics
            count, edges, numbers = stats.binned_statistic(ustar["Data"],Fc["Data"], statistic='count', bins=nbins)
            means, edges, numbers = stats.binned_statistic(ustar["Data"],Fc["Data"], statistic='mean', bins=nbins)
            stdevs, edges, numbers = stats.binned_statistic(ustar["Data"],Fc["Data"], statistic='std', bins=nbins)
            mids = (edges[:-1]+edges[1:])/2
            # drop bins with less than 10 counts
            mids = numpy.array(mids[count >= 10])
            means = numpy.array(means[count >= 10])
            stdevs = numpy.array(stdevs[count >= 10])
            axs[row, col].plot(ustar["Data"], Fc["Data"], 'b.', alpha=0.25)
            axs[row, col].errorbar(mids, means, yerr=stdevs, fmt='ro')
            axs[row, col].set_title(site_name+": "+str(year)+" "+season)
            axs[row, col].set_xlabel("u* ("+ustar["Attr"]["units"]+")")
            axs[row, col].set_ylabel("Fc ("+Fc["Attr"]["units"]+")")
        fig.tight_layout()
        plt.draw()
    plt.ioff()
    return

def pltfingerprint_createdict(cf,ds):

2 Source : pfp_plot.py
with BSD 3-Clause "New" or "Revised" License
from OzFlux

def plottimeseries(cf, nFig, dsa, dsb):
    SiteName = dsa.globalattributes['site_name']
    Level = dsb.globalattributes['nc_level']
    ts = int(dsa.globalattributes['time_step'])
    ldt = dsa.series["DateTime"]["Data"]
    Month = ldt[0].month
    Hdh = [dt.hour+(dt.minute+dt.second/float(60))/float(60) for dt in ldt]
    p = plot_setup(cf,nFig)
    logger.info(' Plotting series: '+str(p['SeriesList']))
    L1XArray = dsa.series['DateTime']['Data']
    L2XArray = dsb.series['DateTime']['Data']
    p['XAxMin'] = min(L2XArray)
    p['XAxMax'] = max(L2XArray)
    p['loc'],p['fmt'] = get_ticks(p['XAxMin'],p['XAxMax'])
    plt.ion()
    # check to see if a figure with the same title already exists
    fig_titles = []
    # get a list of figure titles
    for i in plt.get_fignums():
        # get the figure
        figa = plt.figure(i)
        # get the figure title
        fig_title = figa.texts[0].get_text()
        # strip out the site name
        idx = fig_title.index(":")
        # and append to the figure title list
        fig_titles.append(fig_title[idx+2:])
    # check to see if a figure with the same title already exists
    if p['PlotDescription'] in fig_titles:
        # if it does, get the figure number (figure numbers start from 1)
        fig_num = fig_titles.index(p['PlotDescription']) + 1
        # get the figure
        fig = plt.figure(fig_num)
        # clear the figure (we should only update axes, not the whole figure)
        fig.clf()
    else:
        # create the figure if it doesn't already exist
        fig = plt.figure(figsize=(p['PlotWidth'],p['PlotHeight']))
    fig.canvas.set_window_title(p['PlotDescription'])
    plt.figtext(0.5,0.95,SiteName+': '+p['PlotDescription'],ha='center',size=16)
    for ThisOne, n in zip(p['SeriesList'],range(p['nGraphs'])):
        if ThisOne in dsa.series.keys():
            aflag = dsa.series[ThisOne]['Flag']
            p['Units'] = dsa.series[ThisOne]['Attr']['units']
            p['YAxOrg'] = p['ts_YAxOrg'] + n*p['yaxOrgOffset']
            L1YArray,p['nRecs'],p['nNotM'],p['nMskd'] = get_yarray(dsa, ThisOne)
            # check the control file to see if the Y axis minima have been specified
            nSer = p['SeriesList'].index(ThisOne)
            p['LYAxMax'],p['LYAxMin'] = get_yaxislimitsfromcf(cf,nFig,'YLMax','YLMin',nSer,L1YArray)
            plot_onetimeseries_left(fig,n,ThisOne,L1XArray,L1YArray,p)
        if ThisOne in dsb.series.keys():
            bflag = dsb.series[ThisOne]['Flag']
            p['Units'] = dsb.series[ThisOne]['Attr']['units']
            p['YAxOrg'] = p['ts_YAxOrg'] + n*p['yaxOrgOffset']
            #Plot the Level 2 data series on the same X axis but with the scale on the right Y axis.
            L2YArray,p['nRecs'],p['nNotM'],p['nMskd'] = get_yarray(dsb, ThisOne)
            # check the control file to see if the Y axis minima have been specified
            nSer = p['SeriesList'].index(ThisOne)
            p['RYAxMax'],p['RYAxMin'] = get_yaxislimitsfromcf(cf,nFig,'YRMax','YRMin',nSer,L2YArray)
            plot_onetimeseries_right(fig,n,ThisOne,L2XArray,L2YArray,p)

            #Plot the diurnal averages.
            Hr2, Av2, Sd2, Mx2, Mn2 = get_diurnalstats(Hdh, dsb.series[ThisOne]['Data'], ts)
            Av2 = numpy.ma.masked_where(Av2==c.missing_value,Av2)
            Sd2 = numpy.ma.masked_where(Sd2==c.missing_value,Sd2)
            Mx2 = numpy.ma.masked_where(Mx2==c.missing_value,Mx2)
            Mn2 = numpy.ma.masked_where(Mn2==c.missing_value,Mn2)
            hr2_ax = fig.add_axes([p['hr1_XAxOrg'],p['YAxOrg'],p['hr2_XAxLen'],p['ts_YAxLen']])
            #hr2_ax.hold(True)
            hr2_ax.plot(Hr2,Av2,'y-',Hr2,Mx2,'r-',Hr2,Mn2,'b-')
            section = pfp_utils.get_cfsection(cf, ThisOne, mode='quiet')
            if section != None:
                if 'DiurnalCheck' in cf[section][ThisOne].keys():
                    NSdarr = numpy.array(pfp_ck.parse_rangecheck_limit(cf[section][ThisOne]['DiurnalCheck']['numsd']))
                    nSd = NSdarr[Month-1]
                    hr2_ax.plot(Hr2,Av2+nSd*Sd2,'r.',Hr2,Av2-nSd*Sd2,'b.')
            plt.xlim(0,24)
            plt.xticks([0,6,12,18,24])
            if n==0:
                hr2_ax.set_xlabel('Hour',visible=True)
            else:
                hr2_ax.set_xlabel('',visible=False)
                plt.setp(hr2_ax.get_xticklabels(), visible=False)
            #if n > 0: plt.setp(hr2_ax.get_xticklabels(), visible=False)

            # vertical lines to show frequency distribution of flags
            bins = numpy.arange(0.5,23.5)
            ind = bins[:len(bins)-1]+0.5
            index = numpy.where(numpy.mod(bflag,10)==0)    # find the elements with flag = 0, 10, 20 etc
            bflag[index] = 0                               # set them all to 0
            hist, bin_edges = numpy.histogram(bflag, bins=bins)
            ymin = hist*0
            delta = 0.01*(numpy.max(hist)-numpy.min(hist))
            bar_ax = fig.add_axes([p['hr2_XAxOrg'],p['YAxOrg'],p['bar_XAxLen'],p['ts_YAxLen']])
            bar_ax.set_ylim(0,numpy.max([1,numpy.max(hist)]))
            bar_ax.vlines(ind,ymin,hist)
            for i,j in zip(ind,hist):
                if j>0.05*numpy.max(hist): bar_ax.text(i,j+delta,str(int(i)),ha='center',size='small')
            if n==0:
                bar_ax.set_xlabel('Flag',visible=True)
            else:
                bar_ax.set_xlabel('',visible=False)
                plt.setp(bar_ax.get_xticklabels(), visible=False)
            #if n > 0: plt.setp(bar_ax.get_xticklabels(), visible=False)
        else:
            logger.error('  plttimeseries: series '+ThisOne+' not in data structure')
    #fig.show()
    plt.draw()
    mypause(0.5)
    if "plot_path" in cf["Files"]:
        plot_path = os.path.join(cf["Files"]["plot_path"],Level)
    else:
        plot_path = "plots/"
    if not os.path.exists(plot_path):
        os.makedirs(plot_path)
    fname = os.path.join(plot_path, SiteName.replace(' ','')+'_'+Level+'_'+p['PlotDescription'].replace(' ','')+'.png')
    fig.savefig(fname,format='png')

def plot_quickcheck_seb(nFig, plot_title, figure_name, data, daily):

2 Source : pfp_plot.py
with BSD 3-Clause "New" or "Revised" License
from OzFlux

def plot_quickcheck_get_seb(daily):
    # get the SEB ratio
    # get the daytime data, defined by Fsd>10 W/m2
    nm = daily["night_mask"]["Data"]
    Fa_daily = daily["Fa"]["Data"]
    Fe_daily = daily["Fe"]["Data"]
    Fh_daily = daily["Fh"]["Data"]
    Fa_day = numpy.ma.masked_where(nm == True, Fa_daily)
    Fe_day = numpy.ma.masked_where(nm == True, Fe_daily)
    Fh_day = numpy.ma.masked_where(nm == True, Fh_daily)
    # mask based on dependencies, set all to missing if any missing
    mask = numpy.ma.mask_or(Fa_day.mask, Fe_day.mask)
    mask = numpy.ma.mask_or(mask, Fh_day.mask)
    # apply the mask
    Fa_day = numpy.ma.array(Fa_day, mask=mask)
    Fe_day = numpy.ma.array(Fe_day, mask=mask)
    Fh_day = numpy.ma.array(Fh_day, mask=mask)
    # get the daily averages
    Fa_day_avg = numpy.ma.average(Fa_day, axis=1)
    Fe_day_avg = numpy.ma.average(Fe_day, axis=1)
    Fh_day_avg = numpy.ma.average(Fh_day, axis=1)
    SEB = {"label": "(Fh+Fe)/Fa"}
    # get the number of values in the daily average
    SEB["Count"] = numpy.ma.count(Fh_day, axis=1)
    # get the SEB ratio
    SEB["Avg"] = (Fe_day_avg + Fh_day_avg)/Fa_day_avg
    SEB["Avg"] = numpy.ma.masked_where(SEB["Count"]   <  = 5, SEB["Avg"])
    idx = numpy.where(numpy.ma.getmaskarray(SEB["Avg"]) == True)[0]
    SEB["Count"][idx] = 0
    return SEB

def plot_quickcheck_get_ef(daily):

2 Source : pfp_plot.py
with BSD 3-Clause "New" or "Revised" License
from OzFlux

def plot_quickcheck_get_ef(daily):
    # get the EF
    # get the daytime data, defined by Fsd>10 W/m2
    nm = daily["night_mask"]["Data"]
    Fa_daily = daily["Fa"]["Data"]
    Fe_daily = daily["Fe"]["Data"]
    Fa_day = numpy.ma.masked_where(nm == True, Fa_daily)
    Fe_day = numpy.ma.masked_where(nm == True, Fe_daily)
    # mask based on dependencies, set all to missing if any missing
    mask = numpy.ma.mask_or(Fa_day.mask, Fe_day.mask)
    # apply the mask
    Fa_day = numpy.ma.array(Fa_day, mask=mask)
    Fe_day = numpy.ma.array(Fe_day, mask=mask)
    # get the daily averages
    Fa_day_avg = numpy.ma.average(Fa_day, axis=1)
    Fe_day_avg = numpy.ma.average(Fe_day, axis=1)
    # get the number of values in the daily average
    EF = {"label": "EF=Fe/Fa"}
    EF["Count"] = numpy.ma.count(Fe_day, axis=1)
    # get the EF ratio
    EF["Avg"] = Fe_day_avg/Fa_day_avg
    EF["Avg"] = numpy.ma.masked_where(EF["Count"]   <  = 5, EF["Avg"])
    idx = numpy.where(numpy.ma.getmaskarray(EF["Avg"]) == True)[0]
    EF["Count"][idx] = 0
    return EF

def plot_quickcheck_get_br(daily):

2 Source : pfp_plot.py
with BSD 3-Clause "New" or "Revised" License
from OzFlux

def plot_quickcheck_get_br(daily):
    # get the BR
    # get the daytime data, defined by Fsd>10 W/m2
    nm = daily["night_mask"]["Data"]
    Fh_daily = daily["Fh"]["Data"]
    Fe_daily = daily["Fe"]["Data"]
    Fe_day = numpy.ma.masked_where(nm == True, Fe_daily)
    Fh_day = numpy.ma.masked_where(nm == True, Fh_daily)
    # mask based on dependencies, set all to missing if any missing
    mask = numpy.ma.mask_or(Fe_day.mask, Fh_day.mask)
    # apply the mask
    Fe_day = numpy.ma.array(Fe_day, mask=mask)
    Fh_day = numpy.ma.array(Fh_day, mask=mask)
    # get the daily averages
    Fe_day_avg = numpy.ma.average(Fe_day, axis=1)
    Fh_day_avg = numpy.ma.average(Fh_day, axis=1)
    # get the number of values in the daily average
    BR = {"label": "BR=Fh/Fe"}
    BR["Count"] = numpy.ma.count(Fh_day, axis=1)
    # get the BR ratio
    BR["Avg"] = Fh_day_avg/Fe_day_avg
    BR["Avg"] = numpy.ma.masked_where(BR["Count"]   <  = 5, BR["Avg"])
    idx = numpy.where(numpy.ma.getmaskarray(BR["Avg"]) == True)[0]
    BR["Count"][idx] = 0
    return BR

def plot_quickcheck_get_wue(daily):

2 Source : pfp_plot.py
with BSD 3-Clause "New" or "Revised" License
from OzFlux

def plot_quickcheck_get_wue(daily):
    # get the Wue
    # get the daytime data, defined by Fsd>10 W/m2
    nm = daily["night_mask"]["Data"]
    Fc_daily = daily["Fc"]["Data"]
    Fe_daily = daily["Fe"]["Data"]
    Fe_day = numpy.ma.masked_where(nm == True, Fe_daily)
    Fc_day = numpy.ma.masked_where(nm == True, Fc_daily)
    # mask based on dependencies, set all to missing if any missing
    mask = numpy.ma.mask_or(Fe_day.mask, Fc_day.mask)
    # apply the mask
    Fe_day = numpy.ma.array(Fe_day,mask=mask)
    Fc_day = numpy.ma.array(Fc_day,mask=mask)
    # get the daily averages
    Fe_day_avg = numpy.ma.average(Fe_day, axis=1)
    Fc_day_avg = numpy.ma.average(Fc_day, axis=1)
    # get the number of values in the daily average
    WUE = {"label": "WUE=Fc/Fe"}
    WUE["Count"] = numpy.ma.count(Fc_day, axis=1)
    WUE["Avg"] = Fc_day_avg/Fe_day_avg
    WUE["Avg"] = numpy.ma.masked_where(WUE["Count"]   <  = 5, WUE["Avg"])
    idx = numpy.where(numpy.ma.getmaskarray(WUE["Avg"]) == True)[0]
    WUE["Avg"][idx] = 0
    return WUE

def plot_quickcheck_get_avg(daily, label, filter_type=None):

0 Source : qmap.py
with GNU General Public License v3.0
from acolite

def qmap(data,lon,lat, mask=None, outputfile=None, mscale=None,
         colorbar_edge=True, colorbar = "horizontal", cbsize='3%', cbpad=0.5, cmap=None,
         limit=None,
         scalebar=False, scalepos='UR', scalelen=None, scalecolor='Black', map_fillcolor='White',
         points=None, axes_linewidth=2,
         range=None, label='', rescale=1., log=False,
         max_edge_tick=(0.33,0.25), max_scale_frac=0.33, verbose=False, geo_minticks=2, geo_maxticks=5,
         title=None, projection='tmerc', dpi=72, **kwargs):

    import time
    from acolite.shared import distance_in_ll, read_points, scale_dist

    def getstep(drange):
        dstep = 0.005
        if drange > 0.02: dstep=0.01
        if drange > 0.04: dstep=0.02
        if drange > 0.15: dstep=0.05
        if drange > 0.25: dstep=0.1
        if drange > 1.: dstep=0.5
        if drange > 5.: dstep=1.
        return(dstep)

    def getstep2(drange, mint=2, maxt=5):
        from acolite.shared import closest_idx
        steps = [0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2., 5., 10., 20., 50.]
        numb = [int(drange/s) for s in steps]
        idmax, ntmax = closest_idx(numb, maxt)
        idmin, ntmin = closest_idx(numb, mint)
        return(steps[idmin],steps[idmax])

    from mpl_toolkits.axes_grid1 import make_axes_locatable
    from mpl_toolkits.basemap import Basemap
    import matplotlib.text
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm

    import pyproj
    from numpy.ma import masked_where
    from numpy import loadtxt, insert, arange

    if len(data.shape) == 2:
        y_size, x_size = data.shape
    if len(data.shape) == 3:
        y_size, x_size, z_size = data.shape

    x_size_r = x_size * rescale
    y_size_r = y_size * rescale

    #x0, y0, x1, y1
    plot_window = [0.,0.,1.,1.]

    ## these should be the approximate borders to the plot area
    border_y = [70, 120]
    border_x = [150, 100]

    ## add some space for the colorbar
    ## add to border_x and _y
    if (colorbar_edge):
        if (colorbar == 'vertical'):
            border_x[0] += 100
            border_x[1] += 400
            border_y[1] += 25


        if (colorbar == 'horizontal'):
            border_y[0] += 100
            border_y[1] += 150
            border_x[0] += 0
            border_x[1] += 25


    ## testing output shape
    if True:
        figsize = [x_size_r / dpi, y_size_r / dpi]

        border_y0 = border_y[0]/dpi
        border_y1 = border_y[1]/dpi
        figsize[1] += border_y0 + border_y1
        plot_window[1] = border_y0/figsize[1]
        plot_window[3] = 1-(border_y1/figsize[1])

        border_x0 = border_x[0]/dpi
        border_x1 = border_x[1]/dpi
        figsize[0] += border_x0 + border_x1
        plot_window[0] = border_x0/figsize[0]
        plot_window[2] = 1-(border_x1/figsize[0])

    if limit is not None:
        if len(limit) == 4:
            lonw, lone = limit[1],limit[3]
            lats, latn = limit[0],limit[2]
    else:
        lone = lon[0,x_size-1]
        lonw = lon[y_size-1,0]
        latn = lat[0,x_size-1]
        lats = lat[y_size-1,0]

    lonm = (lonw+lone)/2
    latm = (lats+latn)/2

    fig = plt.figure(figsize=figsize)
    ax = fig.add_axes(plot_window,zorder=20)

    ## set axis thickness
    for axis in ['top','bottom','left','right']:
      ax.spines[axis].set_linewidth(axes_linewidth)

    fig.patch.set_facecolor('white')
    fig.patch.set_alpha(1.0)

    lonrange = abs(lonw-lone)
    latrange = abs(latn-lats)
    t0 = time.time()
    if ('m' not in kwargs):
        if projection == 'wgs':
            m = Basemap(llcrnrlon=lon[x_size-1,0],llcrnrlat=lat[x_size-1,0],
                        urcrnrlon=lon[0,y_size-1],urcrnrlat=lat[0,y_size-1],
                    resolution = 'c', epsg=4326, suppress_ticks=True)
        if projection == 'tmerc':
            m = Basemap(llcrnrlon=lonw,llcrnrlat=lats,urcrnrlon=lone,urcrnrlat=latn,
                resolution = 'c', projection='tmerc', suppress_ticks=True, lat_0=latm,lon_0=lonm)
        if projection == 'cyl':
            m = Basemap(llcrnrlon=lonw,llcrnrlat=lats,urcrnrlon=lone,urcrnrlat=latn,
                resolution = 'c', projection=projection, suppress_ticks=True, lat_0=latm,lon_0=lonm)
        if projection == 'stere':

            proj = 'npstere' if latm > 0 else 'spstere'

            prj = Basemap(projection=proj, lon_0=lonm, lat_0=latm,
                          boundinglat=0, resolution='c')

            lonl = [lonw, lone, lonw, lone, lonm, lonm]
            latl = [lats, lats, latn, latn, lats, latn]
            xm, ym = prj(lonl, latl)
            ll_lon, ll_lat = prj(min(xm), min(ym), inverse=True)
            ur_lon, ur_lat = prj(max(xm), max(ym), inverse=True)
            m = Basemap(projection=projection, lat_ts=latm, lat_0=latm, lon_0=lonm, boundinglat=0,
                        llcrnrlon=lonw,llcrnrlat=lats,urcrnrlon=lone,urcrnrlat=latn, suppress_ticks=True, resolution = 'c')
    else:
        m = kwargs['m']
    t1 = time.time()
    if verbose: print('set up basemap {:.2f} sec'.format(t1-t0))

    # transform coordinates from lat/lon to map coordinates
    if ('xx' not in kwargs) | ('yy' not in kwargs):
        xx, yy = m(lon, lat)
    else:
        xx, yy = kwargs['xx'], kwargs['yy']
    t2 = time.time()
    if verbose: print('set up xx yy {:.2f} sec'.format(t2-t1))

    ## get LL, UR corners
    xll, yll = m(lonw,lats)
    xur, yur = m(lone,latn)

    xd = xur-xll
    yd = yur-yll

    ## different treatment of 2D and 3D arrays (maps and rgbs)
    if len(data.shape) == 2:
        # load colormap
        from matplotlib.colors import ListedColormap
        if cmap == None:
            cmap = cm.get_cmap('viridis') #loadtxt("Planck_Parchment_RGB.txt")/255.
        else:
            if type(cmap) == str:
                 cmap = cm.get_cmap(cmap) #loadtxt("Planck_Parchment_RGB.txt")/255.

        cmap.set_bad(map_fillcolor)
        cmap.set_under(map_fillcolor)

        if len(range) != 2:
            from numpy import nanpercentile
            range = nanpercentile(data, (10,90))

        if log:
            from matplotlib.colors import LogNorm
            norm = LogNorm(vmin=range[0], vmax=range[1])
        else:
            norm = None
            from matplotlib.colors import Normalize
            norm = Normalize(vmin=range[0], vmax=range[1])

        if mask is None:
            #img=m.pcolormesh(xx, yy, data, cmap=cmap, norm=norm, vmin=range[0], vmax=range[1]) #zorder=15,
            img=m.pcolormesh(xx, yy, data, cmap=cmap, norm=norm, shading='auto')
        else:
            #img=m.pcolormesh(xx, yy, masked_where(mask,data), cmap=cmap, norm=norm, vmin=range[0], vmax=range[1])#zorder=15,
            img=m.pcolormesh(xx, yy, masked_where(mask,data), cmap=cmap, norm=norm, shading='auto')

    ## rgb colorTuple takes a bit longer
    if len(data.shape) == 3:
        mesh_rgb = data[:, :-1, :]
        colorTuple = mesh_rgb.reshape((mesh_rgb.shape[0] * mesh_rgb.shape[1]), 3)
        colorTuple = insert(colorTuple,3,1.0,axis=1)
        img = m.pcolormesh(xx, yy, data[:,:,0], latlon=False,color=colorTuple, shading='flat')

    t3 = time.time()
    if verbose: print('set up colormesh {:.2f} sec'.format(t3-t2))

    lonsteps = getstep2(lonrange, mint=geo_minticks, maxt=geo_maxticks)
    latsteps = getstep2(latrange, mint=geo_minticks, maxt=geo_maxticks)

    for ri in (0,1):
        lonstep = lonsteps[ri]
        lonedge=lonstep*max_edge_tick[0]
        meridians = arange(-180, 180, lonstep)
        meridians = meridians[(meridians >= lonw) & (meridians <= lone)]
        meridians = [mr for mr in meridians if (abs(mr-lone) > lonedge) & (abs(mr-lonw) > lonedge)]
        if len(meridians) >= geo_minticks: break

    for ri in (0,1):
        latstep = latsteps[ri]
        latedge=latstep*max_edge_tick[1]
        parallels = arange(-90, 90, latstep)
        parallels = parallels[(parallels >= lats) & (parallels <= latn)]
        parallels = [pl for pl in parallels if (abs(pl-lats) > latedge) & (abs(pl-latn) > latedge)]
        if len(parallels) >= geo_minticks: break


    flon = (len(str(lonstep))-2)
    xtl,xtv=[],[]
    for mr in meridians:
        xs, ys = m(mr,lats)
        xtl.append(('{'+':.{}'.format(flon)+'f}'+'°{}').format(abs(mr), 'E' if mr >=0 else 'W'))
        xtv.append(xs)
    ax.set_xticks(xtv)
    ax.set_xticklabels(xtl)

    flat = (len(str(latstep))-2)
    ytl,ytv=[],[]
    for pl in parallels:
        xs, ys = m(lonw,pl)
        ytl.append(('{'+':.{}'.format(flat)+'f}'+'°{}').format(abs(pl), 'N' if pl >=0 else 'S'))
        ytv.append(ys)
    ax.set_yticks(ytv)
    ax.set_yticklabels(ytl)

    t4 = time.time()
    if verbose: print('draw grid {:.2f} sec'.format(t4-t3))

    ## add scale bar
    if scalebar:
            if scalepos not in ['UR','UL','LL','LR']:
                print('Scalepos {} not recognised. Using default.'.format(scalepos))
                scalepos = 'UR'

            xlab_pix, ylab_pix = 100, 125
            ylab_off = (20/(figsize[1]*dpi))

            pos = {}

            pos['Upper'] = 1. - (ylab_pix/(figsize[1]*dpi))
            pos['Lower'] = (ylab_pix/(figsize[1]*dpi))

            pos['Right'] = 1. - (xlab_pix/(figsize[0]*dpi))
            pos['Left'] = (xlab_pix/(figsize[0]*dpi))


            if scalepos[0]=='U':
                latsc = lats+abs(latn-lats)*pos['Upper'] #0.87
            if scalepos[0]=='L':
                latsc = lats+abs(latn-lats)*pos['Lower'] #0.08
            if scalepos[1]=='R':
                lonsc = lonw+abs(lone-lonw)*pos['Right'] #0.92
                scale_sign=-1.
            if scalepos[1]=='L':
                lonsc = lonw+abs(lone-lonw)*pos['Left'] #0.08
                scale_sign=1.

            ## length of the scalebar in degrees
            dist = distance_in_ll(latm)[0]
            if scalelen is None:
                dd = dist*abs(lone-lonw)
                scalelen = dd * max_scale_frac #* rescale
                scalelen, unit = scale_dist(scalelen)
                sf = 1
                if unit == 'm':
                    scaleline = (scalelen / 1000) / dist
                else:
                    scaleline = scalelen / dist
            else:
                scalelen = float(scalelen)
                if scalelen < 1:
                    unit = 'm'
                    sf = 1000
                else:
                    unit = 'km'
                    sf = 1
                scaleline = scalelen / dist

            ## find start and end points in the scene
            xs, ys = m(lonsc,latsc)
            xse, ys = m(lonsc+scale_sign*scaleline,latsc)

            ## plot the line
            plt.plot((xs, xse), (ys,ys), '-', color=scalecolor, zorder=17, linewidth=2)

            ## add the label
            fz=14
            sclabel = '{} {}'.format(scalelen*sf, unit)
            if abs(scalelen-3.14) < 0.01:
                sclabel = '{} {}'.format(r'$\pi$', unit)
            # label offset from scale bar
            sc_l_off = yd * ylab_off
            plt.text(xs+(xse-xs)/2, ys+sc_l_off, sclabel, color=scalecolor, zorder=17,
                                                 horizontalalignment='center', fontsize=fz)
    ##### end scale bar


    ## plot provided points
    if points is not None:
        if type(points) is str:
            points = read_points(points)

        ypts_off = (50/(figsize[1]*dpi))
        xpts_off = (30/(figsize[0]*dpi))

        for pt in points:
            p=points[pt]
            sm = 'o' if 'sym' not in p else p['sym']
            cl = 'Black' if 'color' not in p else p['color']
            lb = None if 'label' not in p else p['label']
            fz = 14 if 'label_size' not in p else p['label_size']

            xp, yp = m(p['lon'], p['lat'])
            plt.plot(xp, yp, sm, color=cl, zorder=17)

            label_side = 'right' if 'label_side' not in p else p['label_side']
            va = 'center' if 'va' not in p else p['va']

            ## offset for labels
            h_l_off = xd * 0.025
            v_l_off = yd * 0.035

            h_l_off = xd * xpts_off
            v_l_off = yd * ypts_off

            if label_side == 'right':
                xpt, ypt = xp+h_l_off, yp*1
                ha = 'left' if 'ha' not in p else p['ha']
            if label_side == 'left':
                xpt, ypt = xp-h_l_off, yp*1
                ha = 'right' if 'ha' not in p else p['ha']
            if label_side == 'top':
                xpt, ypt = xp*1, yp+v_l_off
                ha = 'center' if 'ha' not in p else p['ha']
            if label_side == 'bottom':
                xpt, ypt = xp*1, yp-v_l_off
                ha = 'center' if 'ha' not in p else p['ha']

            plt.text(xpt, ypt, lb, color=cl, zorder=17,
                     horizontalalignment=ha,
                     verticalalignment=va, fontsize=fz)

    if title is not None:
        plt.title(title)

    ## make room for colorbar and plot if 2D dataset
    divider = make_axes_locatable(ax)
    if colorbar_edge:
        if colorbar == "horizontal":
            cax = divider.append_axes("bottom", size=cbsize, pad=cbpad)
            orientation='horizontal'
        else:
            cax = divider.append_axes("right", size=cbsize, pad=cbpad)
            orientation='vertical'


        if len(data.shape) == 2:
            cbar = plt.colorbar(img, cax=cax, orientation=orientation)
            cbar.set_label(label=label)
        else:
            cax.axis('off')
    else:
        pad = [0.1,0.1,0.1,0.3]
        pad = [0.15,0.15,0.15,0.15]
        for si, side in enumerate(['bottom','top','left', 'right']):
            cax = divider.append_axes(side, size='1%', pad=pad[si])
            cax.axis('off')

    if outputfile is None:
        plt.show()
    else:
        t5 = time.time()
        plt.savefig(outputfile, dpi=dpi) # , bbox_inches='tight'
        t6 = time.time()
        if verbose: print('write file {:.2f} sec'.format(t6-t5))
        if verbose: print('total {:.2f} sec'.format(t6-t0))

    plt.close()
    return xx, yy, m

0 Source : test_old_ma.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0

        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(eq(masked_where(greater_equal(x, 2), x),
                   masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(eq(masked_inside(array(list(range(5)),
                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 1, 1, 0]))
        assert_(eq(masked_outside(array(list(range(5)),
                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 0, 0, 1]))
        assert_(eq(masked_equal(array(list(range(5)),
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 0]))
        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                          mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 1]))
        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                   [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))

    def test_testMinMax2(self):

0 Source : _linprog.py
with GNU General Public License v3.0
from adityaprakash-bobby

def _pivot_col(T, tol=1.0E-12, bland=False):
    """
    Given a linear programming simplex tableau, determine the column
    of the variable to enter the basis.

    Parameters
    ----------
    T : 2D ndarray
        The simplex tableau.
    tol : float
        Elements in the objective row larger than -tol will not be considered
        for pivoting.  Nominally this value is zero, but numerical issues
        cause a tolerance about zero to be necessary.
    bland : bool
        If True, use Bland's rule for selection of the column (select the
        first column with a negative coefficient in the objective row,
        regardless of magnitude).

    Returns
    -------
    status: bool
        True if a suitable pivot column was found, otherwise False.
        A return of False indicates that the linear programming simplex
        algorithm is complete.
    col: int
        The index of the column of the pivot element.
        If status is False, col will be returned as nan.
    """
    ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)
    if ma.count() == 0:
        return False, np.nan
    if bland:
        return True, np.where(ma.mask == False)[0][0]
    return True, np.ma.where(ma == ma.min())[0][0]


def _pivot_row(T, basis, pivcol, phase, tol=1.0E-12, bland=False):

0 Source : _linprog.py
with GNU General Public License v3.0
from adityaprakash-bobby

def _pivot_row(T, basis, pivcol, phase, tol=1.0E-12, bland=False):
    """
    Given a linear programming simplex tableau, determine the row for the
    pivot operation.

    Parameters
    ----------
    T : 2D ndarray
        The simplex tableau.
    basis : array
        A list of the current basic variables.
    pivcol : int
        The index of the pivot column.
    phase : int
        The phase of the simplex algorithm (1 or 2).
    tol : float
        Elements in the pivot column smaller than tol will not be considered
        for pivoting.  Nominally this value is zero, but numerical issues
        cause a tolerance about zero to be necessary.
    bland : bool
        If True, use Bland's rule for selection of the row (if more than one
        row can be used, choose the one with the lowest variable index).

    Returns
    -------
    status: bool
        True if a suitable pivot row was found, otherwise False.  A return
        of False indicates that the linear programming problem is unbounded.
    row: int
        The index of the row of the pivot element.  If status is False, row
        will be returned as nan.
    """
    if phase == 1:
        k = 2
    else:
        k = 1
    ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)
    if ma.count() == 0:
        return False, np.nan
    mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)
    q = mb / ma
    min_rows = np.ma.where(q == q.min())[0]
    if bland:
        return True, min_rows[np.argmin(np.take(basis, min_rows))]
    return True, min_rows[0]


def _solve_simplex(T, n, basis, maxiter=1000, phase=2, callback=None,

0 Source : utils.py
with GNU General Public License v3.0
from akpetty

def get_day_concSN_NRT(datapath, year, month, day, alg=0, pole='A', vStr='v1.1', mask=1, maxConc=0, lowerConc=0, monthMean=0):
	if (alg==0):
		team = 'NASA_TEAM'
		team_s = 'nt'
		header = 300
		datatype='uint8'
		scale_factor=250.
	if (alg==1):
		team = 'BOOTSTRAP'
		team_s = 'bt'
		header = 0
		datatype='<i2'
		scale_factor=1000.
	
	if (pole=='A'):
		poleStr='ARCTIC'
		rows=448
		cols=304
	if (pole=='AA'):
		poleStr='ANTARCTIC'
		rows=332
		cols=316

	month_str = '%02d' % (month+1)
	day_str = '%02d' % (day+1)
	year_str=str(year)
	
	files = glob(datapath+'/ICE_CONC/'+team+'/'+poleStr+'/NRT/*'+str(year)+month_str+day_str+'*')
	if (np.size(files)>0):
		print('Same day conc file exists:')

	if (np.size(files)==0):
		# first try day before
		day_str = '%02d' % (day)
		files = glob(datapath+'/ICE_CONC/'+team+'/'+poleStr+'/NRT/*'+str(year)+month_str+day_str+'*')
		if (np.size(files)>0):
			print('Using day before file:')

	# If still nothing try day after
	if (np.size(files)==0):
		# try day after
		day_str = '%02d' % (day+2)
		files = glob(datapath+'/ICE_CONC/'+team+'/'+poleStr+'/NRT/*'+str(year)+month_str+day_str+'*')
		if (np.size(files)>0):
			print('Using day after file:')

	
	fd = open(files[0], 'r')
	data = np.fromfile(file=fd, dtype=datatype)
	data = data[header:]
	#FIRST 300 FILES ARE HEADER INFO
	ice_conc = np.reshape(data, [rows, cols])
	#divide by 250 to express in concentration
	ice_conc = ice_conc/scale_factor
	#GREATER THAN 250 is mask/land etc

	if (mask==1):
		ice_conc = ma.masked_where(ice_conc>1., ice_conc)
	
	if (maxConc==1):
		ice_conc = ma.where(ice_conc>1.,0, ice_conc)

	if (lowerConc==1):
		ice_conc = ma.where(ice_conc < 0.15,0, ice_conc)

	if (monthMean==1):
		ice_conc=ma.mean(ice_conc, axis=0)

	return ice_conc

def get_month_concSN_daily(datapath, year, month, alg=0, pole='A', vStr='v1.1', mask=1, maxConc=0, lowerConc=0, monthMean=0):

0 Source : utils.py
with GNU General Public License v3.0
from akpetty

def getCDRconcproj(proj, fileT, mask=1, maxConc=0, lowerConc=0):
	"""
	Grab the CDR ice concentration

	"""

	f = Dataset(fileT, 'r')
	print(fileT)

	# read lat/lon (start points) and lat1/lon1 (end points)
	lon = (f.variables['longitude'][:])
	lat = (f.variables['latitude'][:])
	# Convert percent to conc!
	conc = (f.variables['seaice_conc_cdr'][0])
	f.close()

	if (mask==1):
		conc = ma.masked_where(conc>1., conc)
	
	if (maxConc==1):
		conc = ma.where(conc>1.,0, conc)

	if (lowerConc==1):
		conc = ma.where(conc < 0.15,0, conc)

	# transform to map project coordinates (basemap's axes, assume they have unit of m)
	x0, y0=proj(lon, lat)
	
	return conc, lat, lon, x0, y0


def read_icebridge_snowdepths(proj, dataPath, year, mask=1):
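
The two calls above do different things, worth a small sketch with invented values: ma.masked_where hides cells behind the mask, while ma.where substitutes a replacement value and leaves the cells unmasked.

import numpy as np
import numpy.ma as ma

conc = np.array([0.05, 0.4, 1.3])
print(ma.masked_where(conc > 1., conc))   # [0.05 0.4 --]
print(ma.where(conc < 0.15, 0, conc))     # [0.0 0.4 1.3]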

0 Source : utils.py
with GNU General Public License v3.0
from akpetty

def get_budgets2layers_day(outStrings, outPath, folderStr, dayT, totalOutStr, region='', converttocm=0):

	data=xr.open_dataset(outPath+folderStr+'/budgets/'+totalOutStr+'.nc') 

	iceConcDay=np.array(data['iceConc'][dayT])
	#iceConcDay=ma.masked_where(iceConcDay < 0.15, iceConcDay)


	snowBudget=[]
	for outString in outStrings:
		if (outString=='density'):
			snowDataT=data['density'][dayT]
			#snowDataT= ma.masked_where(iceConcDays < 0.15, snowDataT)
			snowDataT= ma.masked_where(iceConcDay < 0.15, snowDataT)
			# Need at least 2 cm for a density to be used (stop weird behaviour at very low snow depth)
			snowDepthT=data['snowDepth'][dayT, 0] + data['snowDepth'][dayT, 1]
			snowDataT= ma.masked_where(snowDepthT < 0.02, snowDataT)

		elif (outString=='snowDepthNew'):
			snowDataT=data['snowDepth'][dayT, 0]
			#snowDataT= ma.masked_where(iceConcDays < 0.15, snowDataT)
			snowDataT= ma.masked_where(iceConcDay < 0.15, snowDataT)
		elif (outString=='snowDepthOld'):
			snowDataT=data['snowDepth'][dayT, 1]
			snowDataT= ma.masked_where(iceConcDay < 0.15, snowDataT)
		elif (outString=='snowDepthTotal'):
			snowDataT=data['snowDepth'][dayT, 0] + data['snowDepth'][dayT, 1]
			snowDataT= ma.masked_where(iceConcDay < 0.15, snowDataT)
		elif (outString=='snowDepthTotalConc'):
			snowDataT=(data['snowDepth'][dayT, 0] + data['snowDepth'][dayT, 1])/iceConcDay
			snowDataT= ma.masked_where(iceConcDay < 0.15, snowDataT)
		elif (outString=='Precip'):
			# Meters of water equivalent (summed over the time period)
			if (dayT>0):
				precipT=data[outString][0:dayT] #.fillna(0)
			else:
				precipT=data[outString]
			snowDataT = np.sum(precipT/200., axis=0)

		else:

			snowDataT = data[outString][dayT]

		#print 'region len', len(region)
		if (len(region)>0):
			print ('masking region')
			regionM=load(outPath+region)
			snowDataT=ma.masked_where(regionM < 0.5, snowDataT)

		snowDataT= ma.masked_where(np.isnan(snowDataT), snowDataT)

		if (converttocm==1):
			snowDataT=snowDataT*100.

		snowBudget.append(snowDataT)

	if (np.size(outStrings)>1):
		return snowBudget
	else:
		print ('1 var')
		return snowDataT

def densityClim(dayT, dataPath):
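
A toy sketch of the mask chaining above (arrays invented): each successive masked_where call ORs a new condition into the existing mask, so low-concentration cells and NaNs both end up hidden.

import numpy as np
import numpy.ma as ma

iceConc = np.array([0.1, 0.9, 0.8])
depth = np.array([0.3, np.nan, 0.5])
d = ma.masked_where(iceConc < 0.15, depth)  # mask low-concentration cells
d = ma.masked_where(np.isnan(d), d)         # additionally mask NaNs
print(d)   # [-- -- 0.5]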

0 Source : test_image.py
with MIT License
from alvarobartt

def test_mask_image_over_under():
    delta = 0.025
    x = y = np.arange(-3.0, 3.0, delta)
    X, Y = np.meshgrid(x, y)
    Z1 = np.exp(-(X**2 + Y**2) / 2) / (2 * np.pi)
    Z2 = (np.exp(-(((X - 1) / 1.5)**2 + ((Y - 1) / 0.5)**2) / 2) /
          (2 * np.pi * 0.5 * 1.5))
    Z = 10*(Z2 - Z1)  # difference of Gaussians

    palette = copy(plt.cm.gray)
    palette.set_over('r', 1.0)
    palette.set_under('g', 1.0)
    palette.set_bad('b', 1.0)
    Zm = ma.masked_where(Z > 1.2, Z)
    fig, (ax1, ax2) = plt.subplots(1, 2)
    im = ax1.imshow(Zm, interpolation='bilinear',
                    cmap=palette,
                    norm=colors.Normalize(vmin=-1.0, vmax=1.0, clip=False),
                    origin='lower', extent=[-3, 3, -3, 3])
    ax1.set_title('Green=low, Red=high, Blue=bad')
    fig.colorbar(im, extend='both', orientation='horizontal',
                 ax=ax1, aspect=10)

    im = ax2.imshow(Zm, interpolation='nearest',
                    cmap=palette,
                    norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
                                             ncolors=256, clip=False),
                    origin='lower', extent=[-3, 3, -3, 3])
    ax2.set_title('With BoundaryNorm')
    fig.colorbar(im, extend='both', spacing='proportional',
                 orientation='horizontal', ax=ax2, aspect=10)


@image_comparison(baseline_images=['mask_image'],
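
A compact sketch of the display idiom in the test above, with synthetic data: cells hidden by masked_where render in the colormap's "bad" color (the output filename is illustrative only).

from copy import copy

import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt

Z = np.linspace(-2., 2., 25).reshape(5, 5)
Zm = ma.masked_where(Z > 1.2, Z)   # hide the high tail
palette = copy(plt.cm.gray)
palette.set_bad('b', 1.0)          # masked cells render blue
plt.imshow(Zm, cmap=palette, origin='lower')
plt.savefig('mask_demo.png')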

0 Source : extractProduct.py
with GNU General Public License v3.0
from aria-tools

    def __run__(self):
        from scipy.linalg import lstsq

        # Get R^2/standard error across range
        rsquaredarr_rng, std_errarr_rng = self.__getCovar__('range')
        # Get R^2/standard error across azimuth
        rsquaredarr_az, std_errarr_az = self.__getCovar__('azimuth')

        #filter out normal values from arrays
        rsquaredarr = [0.97] ; std_errarr=[0.0015]
        if min(rsquaredarr_rng) < 0.97 and max(std_errarr_rng) > 0.0015:
            rsquaredarr.append(min(rsquaredarr_rng))
            std_errarr.append(max(std_errarr_rng))
        if min(rsquaredarr_az) < 0.97 and max(std_errarr_az) > 0.0015:
            rsquaredarr.append(min(rsquaredarr_az))
            std_errarr.append(max(std_errarr_az))

        #if R^2 and standard error anomalous, fix array
        if min(rsquaredarr) < 0.97 and max(std_errarr) > 0.0015:
            #Cycle through each band
            for i in range(1,5):
                self.data_array_band=self.data_array.GetRasterBand(i).ReadAsArray()
                #mask by nodata value
                self.data_array_band=np.ma.masked_where(self.data_array_band == \
                    self.data_array.GetRasterBand(i).GetNoDataValue(),
                                        self.data_array_band)
                negs_percent=((self.data_array_band < 0).sum() \
                                /self.data_array_band.size)*100

                # Unique bug-fix for bPerp layers with sign-flips
                if (self.prod_key=='bPerpendicular' and min(rsquaredarr) < 0.8 \
                                    and max(std_errarr) > 0.1) \
                                and (negs_percent != 100 and negs_percent != 0):
                    #Circumvent Bperp sign-flip bug by comparing percentage of positive and negative values
                    self.data_array_band=abs(self.data_array_band)
                    if negs_percent>50:
                        self.data_array_band*=-1
                else:
                    # regular grid covering the domain of the data
                    X,Y = np.meshgrid(np.arange(0, self.data_array_band.shape[1], 1),
                                np.arange(0, self.data_array_band.shape[0], 1))
                    Xmask,Ymask = np.meshgrid(np.arange(0, self.data_array_band.shape[1], 1),
                                    np.arange(0, self.data_array_band.shape[0], 1))
                    # best-fit linear plane: for very large artifacts, must mask array for outliers to get best fit
                    if min(rsquaredarr) < 0.85 and max(std_errarr) > 0.0015:
                        maj_percent=((self.data_array_band < \
                                self.data_array_band.mean()).sum() \
                                / self.data_array_band.size)*100
                        #mask all values above mean
                        if maj_percent>50:
                            self.data_array_band = np.ma.masked_where(
                                self.data_array_band > self.data_array_band.mean(),
                                self.data_array_band)
                        #mask all values below mean
                        else:
                            self.data_array_band = np.ma.masked_where(
                                self.data_array_band < self.data_array_band.mean(),
                                self.data_array_band)
                    # Mask columns/rows which are entirely made up of 0s
                    if self.data_array_band.mask.size!=1 and \
                                    True in self.data_array_band.mask:
                        self.data_array_band, Xmask, Ymask = self.__truncateArray__(
                                    self.data_array_band, Xmask, Ymask)

                    # truncated grid covering the domain of the data
                    Xmask=Xmask[~self.data_array_band.mask]
                    Ymask=Ymask[~self.data_array_band.mask]
                    self.data_array_band = self.data_array_band[~self.data_array_band.mask]
                    XX = Xmask.flatten()
                    YY = Ymask.flatten()
                    A = np.c_[XX, YY, np.ones(len(XX))]
                    C,_,_,_ = lstsq(A, self.data_array_band.data.flatten())
                    # evaluate it on grid
                    self.data_array_band = C[0]*X + C[1]*Y + C[2]
                    #mask by nodata value
                    self.data_array_band=np.ma.masked_where(
                        self.data_array_band == self.data_array.GetRasterBand(i
                                    ).GetNoDataValue(), self.data_array_band)

                    np.ma.set_fill_value(self.data_array_band,
                            self.data_array.GetRasterBand(i).GetNoDataValue())
                #update band
                self.data_array.GetRasterBand(i).WriteArray(self.data_array_band.filled())
                # Pass warning and get R^2/standard error across range/azimuth (only do for first band)
                if i==1:
                    # make sure appropriate unit is passed to print statement
                    lyrunit = "\N{DEGREE SIGN}"
                    if self.prod_key=='bPerpendicular' or self.prod_key=='bParallel':
                        lyrunit = 'm'
                    log.warning("%s layer for IFG %s has R\u00b2 of %.4f and standard error of %.4f%s, automated correction applied",
                                self.prod_key, os.path.basename(self.outname), min(rsquaredarr), max(std_errarr), lyrunit)
                    rsquaredarr_rng, std_errarr_rng = self.__getCovar__('range', profprefix='corrected')
                    rsquaredarr_az, std_errarr_az = self.__getCovar__('azimuth', profprefix='corrected')
        del self.data_array_band

        return self.data_array


def prep_dem(demfilename, bbox_file, prods_TOTbbox, prods_TOTbbox_metadatalyr,
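
A toy version of the plane fit above (grid and artifact invented): outliers are masked, the unmasked coordinates and values feed lstsq, and the fitted plane C[0]*X + C[1]*Y + C[2] stands in for the band.

import numpy as np
from scipy.linalg import lstsq

band = np.arange(16, dtype=float).reshape(4, 4)
band[0, 0] = 1e6                                   # simulated artifact
band = np.ma.masked_where(band > band.mean(), band)
X, Y = np.meshgrid(np.arange(4), np.arange(4))
XX = X[~band.mask]                                 # unmasked coordinates
YY = Y[~band.mask]
A = np.c_[XX, YY, np.ones(len(XX))]
C, _, _, _ = lstsq(A, band.compressed())           # best-fit plane coefficients
fitted = C[0]*X + C[1]*Y + C[2]                    # evaluate on the full grid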

0 Source : unwrapStitching.py
with GNU General Public License v3.0
from aria-tools

    def __createImages__(self):
        '''
            This function writes the final merged unw and connected component files. As an intermediate step, tiff files are generated with integer values representing the shift to be applied to the connected components and the modulo-2pi shift to be applied to the unwrapped phase.
        '''

        ## Will first make intermediate files in a temp folder.
        # For each product there will be a connComp file and 3 unw-related files.
        # The connected component file will show components renumbered to be unique across all merged files.
        # For the unw-related files, there will be an integer offset tif file, a vrt file which scales this integer map by 2pi, and a vrt which combines the original unw phase file with the scaled map. The latter will be used for merging of the unwrapped phase.
        tempdir = tempfile.mkdtemp(prefix='IntermediateFiles_',dir='.')

        # will try multi-core version and default to for loop in case of failure
        try:
            # need to combine all inputs together as single argument tuple
            all_inputs = ()
            for counter in  range(len(self.fileMappingDict)):
                fileMappingDict = self.fileMappingDict[counter]
                fileMappingDict['saveDir'] = tempdir
                fileMappingDict['saveNameID'] = "Product_" + str(counter)
                fileMappingDict['description'] = self.description
                # parse inputs as a tuple
                inputs = (fileMappingDict)
                # append all tuples in a single tuple
                all_inputs = all_inputs + (inputs,)
            # compute the phase value using multi-thread functionality
            intermediateFiles = Parallel(n_jobs=-1,max_nbytes=1e6)(delayed(createConnComp_Int)(ii) for ii in all_inputs)

        except:
            log.info('Multi-core version failed, will try single for loop')
            intermediateFiles = []
            for counter in  range(len(self.fileMappingDict)):
                fileMappingDict = self.fileMappingDict[counter]
                fileMappingDict['saveDir'] = tempdir
                fileMappingDict['saveNameID'] = "Product_n" + str(counter)
                fileMappingDict['description'] = self.description
                # parse inputs as a tuple
                inputs = (fileMappingDict)
                # compute the phase value
                intermediateFiles_temp = createConnComp_Int(inputs)
                intermediateFiles.append(intermediateFiles_temp)

        # combining all conComp and unw files that need to be blended
        conCompFiles = []
        unwFiles = []
        for intermediateFile in intermediateFiles:
            conCompFiles.append(intermediateFile[0])
            unwFiles.append(intermediateFile[1])

        # check if the folder exist to which files are being generated.
        outPathUnw = os.path.dirname(os.path.abspath(self.outFileUnw))
        outPathConnComp = os.path.dirname(os.path.abspath(self.outFileConnComp))
        if not os.path.isdir(outPathUnw):
            os.makedirs(outPathUnw)
        if not os.path.isdir(outPathConnComp):
            os.makedirs(outPathConnComp)

        ## Will now merge the unwrapped and connected component files
        # remove existing output file(s)
        for file in glob.glob(self.outFileUnw + "*"):
            os.remove(file)
        gdal.BuildVRT(self.outFileUnw+'.vrt', unwFiles, options=gdal.BuildVRTOptions(srcNodata=0))
        gdal.Warp(self.outFileUnw, self.outFileUnw+'.vrt', options=gdal.WarpOptions(format=self.outputFormat, cutlineDSName=self.setTotProdBBoxFile, outputBounds=self.bbox_file))
        # Update VRT
        gdal.Translate(self.outFileUnw+'.vrt', self.outFileUnw, options=gdal.TranslateOptions(format="VRT"))
        # Apply mask (if specified).
        if self.mask is not None:
            update_file=gdal.Open(self.outFileUnw,gdal.GA_Update)
            update_file=update_file.GetRasterBand(1).WriteArray(self.mask.ReadAsArray()*gdal.Open(self.outFileUnw+'.vrt').ReadAsArray())
            update_file=None

        # remove existing output file(s)
        for file in glob.glob(self.outFileConnComp + "*"):
            os.remove(file)
        gdal.BuildVRT(self.outFileConnComp+'.vrt', conCompFiles, options=gdal.BuildVRTOptions(srcNodata=-1))
        gdal.Warp(self.outFileConnComp, self.outFileConnComp+'.vrt', options=gdal.WarpOptions(format=self.outputFormat, cutlineDSName=self.setTotProdBBoxFile, outputBounds=self.bbox_file))
        # Update VRT
        gdal.Translate(self.outFileConnComp+'.vrt', self.outFileConnComp, options=gdal.TranslateOptions(format="VRT"))
        # Apply mask (if specified).
        if self.mask is not None:
            update_file=gdal.Open(self.outFileConnComp,gdal.GA_Update)
            #mask value for conncomp must be set to nodata value -1
            ma_update_file=np.ma.masked_where(self.mask.ReadAsArray() == 0., gdal.Open(self.outFileConnComp+'.vrt').ReadAsArray())
            np.ma.set_fill_value(ma_update_file, update_file.GetRasterBand(1).GetNoDataValue())
            update_file=update_file.GetRasterBand(1).WriteArray(ma_update_file.filled())
            update_file=None

        cmd = "gdal_translate -of png -scale -ot Byte -q " + self.outFileUnw + ".vrt " + self.outFileUnw + ".png"
        os.system(cmd)

        # Remove the directory with intermediate files as they are no longer needed
        shutil.rmtree(tempdir)


class UnwrapOverlap(Stitching):
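
The nodata round-trip above, reduced to a GDAL-free sketch (arrays invented): mask where the external mask raster is zero, set the fill value to the nodata code, and write back the filled array.

import numpy as np

nodata = -1
conncomp = np.array([[0, 1],
                     [2, 3]])
maskras = np.array([[0, 1],
                    [1, 1]])
out = np.ma.masked_where(maskras == 0., conncomp)  # hide masked-out cells
np.ma.set_fill_value(out, nodata)                  # fill with nodata code
print(out.filled())   # [[-1  1] [ 2  3]]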

0 Source : vrtmanager.py
with GNU General Public License v3.0
from aria-tools

def resampleRaster(fname, multilooking, bounds, prods_TOTbbox, rankedResampling=False, outputFormat='ENVI', num_threads='2'):
    """Resample rasters and update corresponding VRTs."""
    # Import functions
    from scipy import stats
    from decimal import Decimal, ROUND_HALF_UP

    # Check if physical raster exists and needs to be updated
    # Also get datasource name (inputname)
    if outputFormat=='VRT' and os.path.exists(fname.split('.vrt')[0]):
        outputFormat='ENVI'
    if os.path.exists(fname.split('.vrt')[0]):
        inputname=fname
    else:
        fname+='.vrt'
        inputname=gdal.Open(fname).GetFileList()[-1]

    # Access original shape
    ds=gdal.Warp('', fname, options=gdal.WarpOptions(format="MEM", cutlineDSName=prods_TOTbbox, outputBounds=bounds, multithread=True, options=['NUM_THREADS=%s'%(num_threads)]))
    arrshape=[abs(ds.GetGeoTransform()[1])*multilooking, abs(ds.GetGeoTransform()[-1])*multilooking] # Get output res
    #get geotrans/proj
    ds=gdal.Warp('', fname, options=gdal.WarpOptions(format="MEM", cutlineDSName=prods_TOTbbox, outputBounds=bounds, xRes=arrshape[0], yRes=arrshape[1], resampleAlg='near',multithread=True, options=['NUM_THREADS=%s'%(num_threads)]))
    geotrans=ds.GetGeoTransform() ; proj=ds.GetProjection()

    # Must resample mask with nearest-neighbor
    if fname.split('/')[-2]=='mask':
        # Resample raster
        gdal.Warp(fname, inputname, options=gdal.WarpOptions(format=outputFormat, cutlineDSName=prods_TOTbbox, outputBounds=bounds, xRes=arrshape[0], yRes=arrshape[1], resampleAlg='near',multithread=True, options=['NUM_THREADS=%s'%(num_threads)+' -overwrite']))

    # Use pixel function to downsample connected components/unw files based off of frequency of connected components in each window
    elif fname.split('/')[-2]=='connectedComponents' or fname.split('/')[-2]=='unwrappedPhase':
        # Resample unw phase based off of mode of connected components
        fnameunw=os.path.join('/'.join(fname.split('/')[:-2]),'unwrappedPhase',''.join(fname.split('/')[-1]).split('.vrt')[0])
        fnameconcomp=os.path.join('/'.join(fname.split('/')[:-2]),'connectedComponents',''.join(fname.split('/')[-1]).split('.vrt')[0])
        if rankedResampling:
            #open connected components/unw files
            ds_concomp=gdal.Open(fnameconcomp)
            ds_concomp_nodata=ds_concomp.GetRasterBand(1).GetNoDataValue()
            ds_concomp=ds_concomp.ReadAsArray()
            ds_concomp=np.ma.masked_where(ds_concomp == ds_concomp_nodata, ds_concomp)
            np.ma.set_fill_value(ds_concomp, ds_concomp_nodata)

            ds_unw=gdal.Open(fnameunw)
            ds_unw_nodata=ds_unw.GetRasterBand(1).GetNoDataValue()
            ds_unw=ds_unw.ReadAsArray()
            ds_unw=np.ma.masked_where(ds_unw == ds_unw_nodata, ds_unw)
            np.ma.set_fill_value(ds_unw, ds_unw_nodata)

            unwmap=[]
            for row in range(multilooking,(ds_unw.shape[0])+multilooking,multilooking):
                unwmap_row=[]
                for column in range(multilooking,(ds_unw.shape[1])+multilooking,multilooking):
                    #get subset values
                    subset_concomp = ds_concomp[row-multilooking:row,column-multilooking:column]
                    subset_unw = ds_unw[row-multilooking:row,column-multilooking:column]
                    concomp_mode = stats.mode(subset_concomp.flatten()).mode[0]
                    #average only phase values coinciding with concomp mode
                    subset_concomp = np.where(subset_concomp != concomp_mode, 0, 1)
                    subset_unw=subset_unw*subset_concomp
                    #assign downsampled pixel values
                    unwmap_row.append(subset_unw.mean())
                unwmap.append(unwmap_row)

            #finalize unw array
            unwmap=np.array(unwmap)
            #finalize unw array shape
            unwmap=unwmap[0:int(Decimal(ds_unw.shape[0]/multilooking).quantize(0, ROUND_HALF_UP)), \
                0:int(Decimal(ds_unw.shape[1]/multilooking).quantize(0, ROUND_HALF_UP))]
            unwmap=np.ma.masked_invalid(unwmap) ; np.ma.set_fill_value(unwmap, ds_unw_nodata)
            #unwphase
            renderVRT(fnameunw, unwmap.filled(), geotrans=geotrans, drivername=outputFormat, gdal_fmt='float32', proj=proj, nodata=ds_unw_nodata)
            #temp workaround for gdal bug
            try:
                gdal.Open(fnameunw)
            except RuntimeError:
                for f in glob.glob(fnameunw+"*"):
                    os.remove(f)
                unwmap[0,0]=unwmap[0,0]-1e-6
                renderVRT(fnameunw, unwmap.filled(), geotrans=geotrans, drivername=outputFormat, gdal_fmt='float32', proj=proj, nodata=ds_unw_nodata)
            del unwmap
            # Resample connected components
            gdal.Warp(fnameconcomp, fnameconcomp, options=gdal.WarpOptions(format=outputFormat, cutlineDSName=prods_TOTbbox, outputBounds=bounds, xRes=arrshape[0], yRes=arrshape[1], resampleAlg='mode',multithread=True, options=['NUM_THREADS=%s'%(num_threads)+' -overwrite']))
            # update VRT
            gdal.BuildVRT(fnameconcomp+'.vrt', fnameconcomp, options=gdal.BuildVRTOptions(options=['-overwrite']))
        # Default: resample unw phase with gdal average algorithm
        else:
            # Resample unwphase
            ds_unw_nodata=gdal.Open(fnameunw)
            ds_unw_nodata=ds_unw_nodata.GetRasterBand(1).GetNoDataValue()
            gdal.Warp(fnameunw, fnameunw, options=gdal.WarpOptions(format=outputFormat, cutlineDSName=prods_TOTbbox, outputBounds=bounds, xRes=arrshape[0], yRes=arrshape[1], resampleAlg='average',multithread=True, options=['NUM_THREADS=%s'%(num_threads)+' -overwrite']))
            # update VRT
            gdal.BuildVRT(fnameunw+'.vrt', fnameunw, options=gdal.BuildVRTOptions(options=['-overwrite']))
            #temp workaround for gdal bug
            try:
                gdal.Open(fnameunw)
            except RuntimeError:
                unwmap=np.fromfile(fnameunw,dtype=np.float32).reshape(ds.GetRasterBand(1).ReadAsArray().shape)
                for f in glob.glob(fnameunw+"*"):
                    os.remove(f)
                unwmap[0,0]=unwmap[0,0]-1e-6
                renderVRT(fnameunw, unwmap, geotrans=geotrans, drivername=outputFormat, gdal_fmt='float32', proj=proj, nodata=ds_unw_nodata)
                del unwmap
            # Resample connected components
            gdal.Warp(fnameconcomp, fnameconcomp, options=gdal.WarpOptions(format=outputFormat, cutlineDSName=prods_TOTbbox, outputBounds=bounds, xRes=arrshape[0], yRes=arrshape[1], resampleAlg='near',multithread=True, options=['NUM_THREADS=%s'%(num_threads)+' -overwrite']))
            # update VRT
            gdal.BuildVRT(fnameconcomp+'.vrt', fnameconcomp, options=gdal.BuildVRTOptions(options=['-overwrite']))

    # Resample all other files with lanczos
    else:
        # Resample raster
        gdal.Warp(fname, inputname, options=gdal.WarpOptions(format=outputFormat, cutlineDSName=prods_TOTbbox, outputBounds=bounds, xRes=arrshape[0], yRes=arrshape[1], resampleAlg='lanczos',multithread=True, options=['NUM_THREADS=%s'%(num_threads)+' -overwrite']))

    if outputFormat!='VRT':
        # update VRT
        gdal.BuildVRT(fname+'.vrt', fname, options=gdal.BuildVRTOptions(options=['-overwrite']))

    return


###Average rasters
def rasterAverage(outname, product_dict, bounds, prods_TOTbbox, outputFormat='ENVI', thresh=None):
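
A small sketch of the ranked-resampling loop above, with invented arrays (np.bincount stands in for stats.mode to keep it self-contained): within each window, only phase pixels whose connected component matches the window's modal component contribute to the averaged output pixel.

import numpy as np

unw = np.arange(16, dtype=float).reshape(4, 4)
concomp = np.array([[1, 1, 2, 2],
                    [1, 1, 2, 2],
                    [1, 3, 3, 3],
                    [3, 3, 3, 3]])
look = 2
out = np.empty((2, 2))
for i in range(2):
    for j in range(2):
        cc = concomp[look*i:look*(i+1), look*j:look*(j+1)]
        ph = unw[look*i:look*(i+1), look*j:look*(j+1)]
        mode = np.bincount(cc.ravel()).argmax()         # modal component
        # zeros from other components dilute the mean, as in the source above
        out[i, j] = np.where(cc == mode, ph, 0).mean()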

0 Source : vrtmanager.py
with GNU General Public License v3.0
from aria-tools

def rasterAverage(outname, product_dict, bounds, prods_TOTbbox, outputFormat='ENVI', thresh=None):
    """Generate average of rasters.

    Currently implemented for:
    1. amplitude under 'mask_util.py'
    2. coherence under 'plot_avgcoherence' function of productPlot"""
    ###Make average raster
    # Delete existing average raster file
    for i in glob.glob(outname+'*'): os.remove(i)

    # Iterate through all layers
    for i in enumerate(product_dict):
        arr_file = gdal.Warp('', i[1], options=gdal.WarpOptions(format="MEM", cutlineDSName=prods_TOTbbox, outputBounds=bounds))
        arr_file_arr = np.ma.masked_where(arr_file.ReadAsArray() == arr_file.GetRasterBand(1).GetNoDataValue(), arr_file.ReadAsArray())
        ### Iteratively update average raster file
        if os.path.exists(outname):
            arr_file = gdal.Open(outname,gdal.GA_Update)
            arr_file = arr_file.GetRasterBand(1).WriteArray(arr_file_arr+arr_file.ReadAsArray())
        else:
            # If looping through first raster file, nothing to sum so just save to file
            renderVRT(outname, arr_file_arr, geotrans=arr_file.GetGeoTransform(), drivername=outputFormat, \
               gdal_fmt=arr_file_arr.dtype.name, proj=arr_file.GetProjection(), \
               nodata=arr_file.GetRasterBand(1).GetNoDataValue())
        arr_file = arr_file_arr = None

    # Take average of raster sum
    arr_file = gdal.Open(outname,gdal.GA_Update)
    arr_mean = arr_file.ReadAsArray()/len(product_dict)

    # Mask using specified raster threshold
    if thresh:
        arr_mean = np.where(arr_mean < float(thresh), 0, 1)

    # Save updated array to file
    arr_file = arr_file.GetRasterBand(1).WriteArray(arr_mean)
    arr_file = None ; arr_mean = None

    # Load raster to pass
    arr_file = gdal.Open(outname).ReadAsArray()

    return arr_file


###Generate GACOS rsc
def rscGacos(outvrt, merged_rsc, tropo_date_dict):
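
The accumulate-then-threshold pattern above, without GDAL (layers invented): each layer is masked at its nodata value before summing, and the mean is finally binarized at thresh.

import numpy as np

nodata = -9999.
layers = [np.array([[0.2, nodata], [0.8, 0.9]]),
          np.array([[0.4, 0.6], [0.6, 0.7]])]
total = sum(np.ma.masked_where(a == nodata, a) for a in layers)
arr_mean = total / len(layers)
# masked (all-nodata) cells count as 0 before thresholding here
binary = np.where(arr_mean.filled(0.) < 0.5, 0, 1)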

0 Source : geo.py
with GNU General Public License v3.0
from Artikash

        def transform_non_affine(self, ll):
            longitude = ll[:, 0:1]
            latitude  = ll[:, 1:2]

            # Pre-compute some values
            half_long = longitude / 2.0
            cos_latitude = np.cos(latitude)

            alpha = np.arccos(cos_latitude * np.cos(half_long))
            # Mask this array or we'll get divide-by-zero errors
            alpha = ma.masked_where(alpha == 0.0, alpha)
            # The numerators also need to be masked so that masked
            # division will be invoked.
            # We want unnormalized sinc.  numpy.sinc gives us normalized
            sinc_alpha = ma.sin(alpha) / alpha

            x = (cos_latitude * ma.sin(half_long)) / sinc_alpha
            y = (ma.sin(latitude) / sinc_alpha)
            return np.concatenate((x.filled(0), y.filled(0)), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
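
A standalone sketch of the divide-by-zero guard above (sample points invented): masking alpha == 0 makes the division return masked values instead of nan, and filled(0) then supplies the correct limiting value at the origin.

import numpy as np
import numpy.ma as ma

longitude = np.array([0.0, 1.0])
latitude = np.array([0.0, 0.5])
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
alpha = ma.masked_where(alpha == 0.0, alpha)   # origin would divide by zero
sinc_alpha = ma.sin(alpha) / alpha             # unnormalized sinc, masked at 0
x = (cos_latitude * ma.sin(half_long)) / sinc_alpha
print(x.filled(0))   # origin filled with its limiting value, 0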
