Here are examples of the Python API numpy.flipud taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
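As a quick orientation before the project code, the short snippet below is a minimal illustration written for this page (it is not taken from any of the projects listed): numpy.flipud reverses an array along its first axis, which for a 2D array means flipping it upside down.

import numpy as np

a = np.arange(6).reshape(3, 2)
# a:
# [[0 1]
#  [2 3]
#  [4 5]]
b = np.flipud(a)
# b: rows reversed (first axis flipped)
# [[4 5]
#  [2 3]
#  [0 1]]
# flipud(m) is equivalent to m[::-1, ...]; a 1-D input is simply reversed.
assert (b == a[::-1, ...]).all()

The examples that follow apply the same call to images, signals, grids and matrices, typically to reverse row order or to reconcile differing image-origin conventions between libraries.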
51 Examples
Example 1
Project: oq-engine Source File: shapefileparser.py
def build_polygon_from_fault_attrs(w, fault_attrs):
fault_trace = geo.line.Line([geo.point.Point(row[0], row[1])
for row in fault_attrs["trace"]])
lon, lat = SimpleFaultSurface.get_surface_vertexes(
fault_trace,
fault_attrs["upperSeismoDepth"],
fault_attrs["lowerSeismoDepth"],
fault_attrs["dip"])
# Reorder the vertexes
lons = numpy.concatenate([lon[::2], numpy.flipud(lon[1::2])])
lats = numpy.concatenate([lat[::2], numpy.flipud(lat[1::2])])
depths = numpy.concatenate([
numpy.ones_like(lon[::2]) * fault_attrs["upperSeismoDepth"],
numpy.ones_like(lon[::2]) * fault_attrs["lowerSeismoDepth"]])
# Create the 3D polygon
w.poly(parts=[[[tlon, tlat, tdep] for tlon, tlat, tdep
in zip(list(lons), list(lats), list(depths))]])
Example 2
Project: holoviews Source File: raster.py
def _compute_raster(self):
if self.interface.gridded:
return self, np.flipud(self.dimension_values(2, flat=False))
d1keys = self.dimension_values(0, False)
d2keys = self.dimension_values(1, False)
coords = [(d1, d2, np.NaN) for d1 in d1keys for d2 in d2keys]
dtype = 'dataframe' if pd else 'dictionary'
dense_data = Dataset(coords, kdims=self.kdims, vdims=self.vdims, datatype=[dtype])
concat_data = self.interface.concatenate([dense_data, Dataset(self)], datatype=dtype)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'Mean of empty slice')
data = concat_data.aggregate(self.kdims, np.nanmean)
array = data.dimension_values(2).reshape(len(d1keys), len(d2keys))
return data, np.flipud(array.T)
Example 3
def get_data(self, element, ranges, style):
data = np.flipud(element.dimension_values(2, flat=False))
data = np.ma.array(data, mask=np.logical_not(np.isfinite(data)))
vdim = element.vdims[0]
self._norm_kwargs(element, ranges, style, vdim)
l, b, r, t = element.bounds.lbrt()
style['extent'] = [l, r, b, t]
return (data,), style, {}
Example 4
def get_data(self, element, ranges=None, empty=False):
img = element.data
if isinstance(element, Image):
l, b, r, t = element.bounds.lbrt()
else:
l, b, r, t = element.extents
dh = t-b
if type(element) is Raster:
b = t
mapping = dict(image='image', x='x', y='y', dw='dw', dh='dh')
if empty:
data = dict(image=[], x=[], y=[], dw=[], dh=[])
else:
data = dict(image=[np.flipud(img)], x=[l],
y=[b], dw=[r-l], dh=[dh])
return (data, mapping)
Example 5
def generate_plot(self, filename, title='', xlabel='', ylabel=''):
data_keys = self.data.keys()
key_num = len(data_keys)
self.plot = plt.figure()
if key_num == 1:
splt = self.plot.add_subplot(1, 1, 1)
im_data = splt.imshow(numpy.flipud(self.data[data_keys[0]][0]), origin='lower')
splt.set_xlabel(xlabel)
splt.set_ylabel(ylabel)
splt.set_title(title)
else: ## plotting multiple images in one figure is still problematic; the visualization is not good
logger.error('not supported yet')
self.plot.colorbar(im_data)
self.plot.savefig(filename) #, bbox_inches='tight'
Example 6
def reverse(self):
"""
Reverse the audio data.
:raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the audio file is not initialized yet
.. versionadded:: 1.2.0
"""
if self.__samples is None:
if self.file_path is None:
self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
else:
self.read_samples_from_file()
self.log(u"Reversing...")
self.__samples[0:self.__samples_length] = numpy.flipud(self.__samples[0:self.__samples_length])
self.log(u"Reversing... done")
Example 7
Project: Foxhound Source File: transforms.py
def FlipVertical(X):
Xt = []
for x in X:
if py_rng.random() > 0.5:
x = np.flipud(x)
Xt.append(x)
return Xt
Example 8
Project: PyAbel Source File: direct.py
def reflect_array(x, axis=1, kind='even'):
"""
Make a symmetrically reflected array with respect to the given axis
"""
if axis == 0:
x_sym = np.flipud(x)
elif axis == 1:
x_sym = np.fliplr(x)
else:
raise NotImplementedError
if kind == 'even':
fact = 1.0
elif kind == 'odd':
fact = -1.0
else:
raise NotImplementedError
return np.concatenate((fact*x_sym, x), axis=axis)
Example 9
def test_basic(self):
a = get_mat(4)
b = a[::-1, :]
assert_equal(flipud(a), b)
a = [[0, 1, 2],
[3, 4, 5]]
b = [[3, 4, 5],
[0, 1, 2]]
assert_equal(flipud(a), b)
Example 10
def Amplitude(syn, obs, nt, dt):
# cross correlation amplitude
cc = _np.convolve(obs, _np.flipud(syn))
cmax = 0
ioff = 0
for it in range(2*nt-1):
if cc[it] > cmax:
cmax = cc[it]
ioff = it
if ioff <= 0:
wrsd = syn[ioff:] - obs[:-ioff]
else:
wrsd = syn[:-ioff] - obs[ioff:]
return _np.sqrt(_np.sum(wrsd*wrsd*dt))
Example 11
def is_symmetric(self, nodal=False, tol=1.e-14):
if not nodal:
L = self[self.g.ilo:self.g.ilo+self.g.nx/2,
self.g.jlo:self.g.jhi+1]
R = self[self.g.ilo+self.g.nx/2:self.g.ihi+1,
self.g.jlo:self.g.jhi+1]
else:
print(self.g.ilo,self.g.ilo+self.g.nx/2+1)
L = self[self.g.ilo:self.g.ilo+self.g.nx/2+1,
self.g.jlo:self.g.jhi+1]
print(self.g.ilo+self.g.nx/2,self.g.ihi+2)
R = self[self.g.ilo+self.g.nx/2:self.g.ihi+2,
self.g.jlo:self.g.jhi+1]
e = abs(L - np.flipud(R)).max()
print(e, tol, e < tol)
return e < tol
Example 12
Project: pyspace Source File: subsampling.py
def _execute(self, data):
""" Subsample the given data and return a new time series """
if self.new_len == 0 :
self.new_len = int(round(self.target_frequency*len(data)/(1.0*data.sampling_frequency)))
if not self.mirror:
downsampled_time_series = \
TimeSeries.replace_data(data,
scipy.signal.resample(data, self.new_len,
t=None, axis=0,
window=self.window))
else:
downsampled_time_series = \
TimeSeries.replace_data(data,
scipy.signal.resample(numpy.vstack((data,numpy.flipud(data))), self.new_len*2,
t=None, axis=0,
window=self.window)[:self.new_len])
downsampled_time_series.sampling_frequency = self.target_frequency
return downsampled_time_series
Example 13
Project: pyLAR Source File: Low_Rank_Atlas_Iter_BRATS.py
def showReferenceImage(reference_im_name):
im_ref = sitk.ReadImage(reference_im_name) # image in SITK format
im_ref_array = sitk.GetArrayFromImage(im_ref) # get numpy array
z_dim, x_dim, y_dim = im_ref_array.shape # get 3D volume shape
vector_length = z_dim * x_dim * y_dim
# display reference image
fig = plt.figure(figsize=(15, 5))
plt.subplot(131)
implot = plt.imshow(np.flipud(im_ref_array[z_dim / 2, :, :]), plt.cm.gray)
plt.subplot(132)
implot = plt.imshow(np.flipud(im_ref_array[:, x_dim / 2, :]), plt.cm.gray)
plt.subplot(133)
implot = plt.imshow(np.flipud(im_ref_array[:, :, y_dim / 2]), plt.cm.gray)
plt.axis('off')
plt.title('healthy atlas')
fig.clf()
del im_ref, im_ref_array
return
Example 14
def Traveltime(syn, obs, nt, dt):
# cross correlation time
cc = abs(_np.convolve(obs, _np.flipud(syn)))
cmax = 0
misfit = 0.
ioff = None
for it in range(2*nt-1):
if cc[it] > cmax:
cmax = cc[it]
ioff = it
misfit = (ioff-nt+1)*dt
if ioff is not None:
misfit = (ioff-nt+1)*dt
return misfit
Example 15
Project: gammatone Source File: gtgram.py
def gtgram_xe(wave, fs, channels, f_min):
""" Calculate the intermediate ERB filterbank processed matrix """
cfs = centre_freqs(fs, channels, f_min)
fcoefs = np.flipud(make_erb_filters(fs, cfs))
xf = erb_filterbank(wave, fcoefs)
xe = np.power(xf, 2)
return xe
Example 16
Project: Foxhound Source File: transforms.py
def Reflect(X):
Xt = []
for x in X:
if py_rng.random() > 0.5:
x = np.flipud(x)
if py_rng.random() > 0.5:
x = np.fliplr(x)
Xt.append(x)
return Xt
Example 17
def is_asymmetric(self, nodal=False, tol=1.e-14):
if not nodal:
L = self[self.g.ilo:self.g.ilo+self.g.nx/2,
self.g.jlo:self.g.jhi+1]
R = self[self.g.ilo+self.g.nx/2:self.g.ihi+1,
self.g.jlo:self.g.jhi+1]
else:
print(self.g.ilo,self.g.ilo+self.g.nx/2+1)
L = self[self.g.ilo:self.g.ilo+self.g.nx/2+1,
self.g.jlo:self.g.jhi+1]
print(self.g.ilo+self.g.nx/2,self.g.ihi+2)
R = self[self.g.ilo+self.g.nx/2:self.g.ihi+2,
self.g.jlo:self.g.jhi+1]
e = abs(L + np.flipud(R)).max()
print(e, tol, e < tol)
return e < tol
Example 18
def errorfill(xvals,yvals,errvals,**kwargs):
"""
Plot an errorbar as a filled polygon that can be transparent.
See the pylab.fill method for kwargs.
"""
# set the xrange
x_range = np.concatenate((xvals,np.flipud(xvals)))
y_range = np.concatenate((yvals+errvals,np.flipud(yvals-errvals)))
# do the fill
return pl.fill(x_range,y_range,**kwargs)
Example 19
def test_basic(self):
a = get_mat(4)
b = a[::-1,:]
assert_equal(flipud(a),b)
a = [[0,1,2],
[3,4,5]]
b = [[3,4,5],
[0,1,2]]
assert_equal(flipud(a),b)
Example 20
Project: online-hdp Source File: hdp.py
def do_m_step(self, ss):
self.optimal_ordering(ss)
## update top level sticks
self.m_var_sticks[0] = ss.m_var_sticks_ss[:self.m_T-1] + 1.0
var_phi_sum = np.flipud(ss.m_var_sticks_ss[1:])
self.m_var_sticks[1] = np.flipud(np.cumsum(var_phi_sum)) + self.m_gamma
## update topic parameters
self.m_beta = self.m_eta + ss.m_var_beta_ss
if self.m_hdp_hyperparam.m_hyper_opt:
self.m_var_gamma_a = self.m_hdp_hyperparam.m_gamma_a + self.m_T - 1
dig_sum = sp.psi(np.sum(self.m_var_sticks, 0))
Elog1_W = sp.psi(self.m_var_sticks[1]) - dig_sum
self.m_var_gamma_b = self.m_hdp_hyperparam.m_gamma_b - np.sum(Elog1_W)
self.m_gamma = self.m_var_gamma_a/self.m_var_gamma_b
Example 21
def flip_image(self, image, flip_coords):
assert(len(flip_coords) == 2)
assert(max(flip_coords) <= 1)
assert(min(flip_coords) >= 0)
if flip_coords[0] == 1:
image = numpy.flipud(image)
if flip_coords[1] == 1:
image = numpy.fliplr(image)
return image
Example 22
Project: inasafe Source File: numerics.py
def axes_to_points(x, y):
"""Generate all combinations of grid point coordinates from x and y axes
:param x: x coordinates (array)
:type x: numpy.ndarray
:param y: y coordinates (array)
:type y: numpy.ndarray
:returns:
* P: Nx2 array consisting of coordinates for all
grid points defined by x and y axes. The x coordinate
will vary the fastest to match the way 2D numpy
arrays are laid out by default ('C' order). That way,
the x and y coordinates will match a corresponding
2D array A when flattened (A.flat[:] or A.reshape(-1))
Note:
Example
x = [1, 2, 3]
y = [10, 20]
P = [[1, 10],
[2, 10],
[3, 10],
[1, 20],
[2, 20],
[3, 20]]
"""
# Reverse y coordinates to have them start at bottom of array
y = numpy.flipud(y)
# Repeat x coordinates for each y (fastest varying)
# noinspection PyTypeChecker
X = numpy.kron(numpy.ones(len(y)), x)
# Repeat y coordinates for each x (slowest varying)
Y = numpy.kron(y, numpy.ones(len(x)))
# Check
N = len(X)
verify(len(Y) == N)
# Create Nx2 array of x and y coordinates
X = numpy.reshape(X, (N, 1))
Y = numpy.reshape(Y, (N, 1))
P = numpy.concatenate((X, Y), axis=1)
# Return
return P
Example 23
def edge(Nxy, dx, center=[0, 0, 0], orientation=[1, 0, 0]):
"""Array along the xy-axis with edge at the origin.
Parameters
----------
Nxy : int
Number of secondary sources along x- and y-axis.
dx : float
Distance between secondary sources.
center : (3,) array_like, optional
Position of edge.
orientation : (3,) array_like, optional
Normal vector of array. Default orientation is along xy-axis.
Returns
-------
`ArrayData`
Positions, orientations and weights of secondary sources.
Examples
--------
.. plot::
:context: close-figs
x0, n0, a0 = sfs.array.edge(8, 0.2)
sfs.plot.loudspeaker_2d(x0, n0, a0)
plt.axis('equal')
"""
# array along y-axis
x00, n00, a00 = linear(Nxy, dx, center=[0, Nxy//2*dx+dx/2, 0])
x00 = np.flipud(x00)
positions = x00
directions = n00
weights = a00
# array along x-axis
x00, n00, a00 = linear(Nxy, dx, center=[Nxy//2*dx-dx/2, 0, 0],
orientation=[0, 1, 0])
x00 = np.flipud(x00)
positions = np.concatenate((positions, x00))
directions = np.concatenate((directions, n00))
weights = np.concatenate((weights, a00))
# rotate array
positions, directions = _rotate_array(positions, directions,
[1, 0, 0], orientation)
# shift array to desired position
positions += center
return ArrayData(positions, directions, weights)
Example 24
Project: quant_at Source File: johansen.py
def coint_johansen(x, p, k):
# % error checking on inputs
# if (nargin ~= 3)
# error('Wrong # of inputs to johansen')
# end
nobs, m = x.shape
#why this? f is detrend transformed series, p is detrend data
if (p > -1):
f = 0
else:
f = p
x = detrend(x,p)
dx = tdiff(x,1, axis=0)
#dx = trimr(dx,1,0)
z = mlag(dx,k)#[k-1:]
z = trimr(z,k,0)
z = detrend(z,f)
dx = trimr(dx,k,0)
dx = detrend(dx,f)
#r0t = dx - z*(z\dx)
r0t = resid(dx, z) #diff on lagged diffs
#lx = trimr(lag(x,k),k,0)
lx = lag(x,k)
lx = trimr(lx, 1, 0)
dx = detrend(lx,f)
#rkt = dx - z*(z\dx)
rkt = resid(dx, z) #level on lagged diffs
skk = np.dot(rkt.T, rkt) / rows(rkt)
sk0 = np.dot(rkt.T, r0t) / rows(rkt)
s00 = np.dot(r0t.T, r0t) / rows(r0t)
sig = np.dot(sk0, np.dot(inv(s00), (sk0.T)))
tmp = inv(skk)
#du, au = eig(np.dot(tmp, sig))
au, du = eig(np.dot(tmp, sig)) #au is eval, du is evec
#orig = np.dot(tmp, sig)
#% Normalize the eigen vectors such that (du'skk*du) = I
temp = inv(chol(np.dot(du.T, np.dot(skk, du))))
dt = np.dot(du, temp)
#JP: the next part can be done much easier
#% NOTE: At this point, the eigenvectors are aligned by column. To
#% physically move the column elements using the MATLAB sort,
#% take the transpose to put the eigenvectors across the row
#dt = transpose(dt)
#% sort eigenvalues and vectors
#au, auind = np.sort(diag(au))
auind = np.argsort(au)
#a = flipud(au)
aind = flipud(auind)
a = au[aind]
#d = dt[aind,:]
d = dt[:,aind]
#%NOTE: The eigenvectors have been sorted by row based on auind and moved to array "d".
#% Put the eigenvectors back in column format after the sort by taking the
#% transpose of "d". Since the eigenvectors have been physically moved, there is
#% no need for aind at all. To preserve existing programming, aind is reset back to
#% 1, 2, 3, ....
#d = transpose(d)
#test = np.dot(transpose(d), np.dot(skk, d))
#%EXPLANATION: The MATLAB sort function sorts from low to high. The flip realigns
#%auind to go from the largest to the smallest eigenvalue (now aind). The original procedure
#%physically moved the rows of dt (to d) based on the alignment in aind and then used
#%aind as a column index to address the eigenvectors from high to low. This is a double
#%sort. If you wanted to extract the eigenvector corresponding to the largest eigenvalue by,
#%using aind as a reference, you would get the correct eigenvector, but with sorted
#%coefficients and, therefore, any follow-on calculation would seem to be in error.
#%If alternative programming methods are used to evaluate the eigenvalues, e.g. Frame method
#%followed by a root extraction on the characteristic equation, then the roots can be
#%quickly sorted. One by one, the corresponding eigenvectors can be generated. The resultant
#%array can be operated on using the Cholesky transformation, which enables a unit
#%diagonalization of skk. But nowhere along the way are the coefficients within the
#%eigenvector array ever changed. The final value of the "beta" array using either method
#%should be the same.
#% Compute the trace and max eigenvalue statistics */
lr1 = zeros(m)
lr2 = zeros(m)
cvm = zeros((m,3))
cvt = zeros((m,3))
iota = ones(m)
t, junk = rkt.shape
for i in range(0, m):
tmp = trimr(log(iota-a), i ,0)
lr1[i] = -t * np.sum(tmp, 0) #columnsum ?
#tmp = np.log(1-a)
#lr1[i] = -t * np.sum(tmp[i:])
lr2[i] = -t * log(1-a[i])
cvm[i,:] = c_sja(m-i,p)
cvt[i,:] = c_sjt(m-i,p)
aind[i] = i
#end
result = Holder()
#% set up results structure
#estimation results, residuals
result.rkt = rkt
result.r0t = r0t
result.eig = a
result.evec = d #transposed compared to matlab ?
result.lr1 = lr1
result.lr2 = lr2
result.cvt = cvt
result.cvm = cvm
result.ind = aind
result.meth = 'johansen'
return result
Example 25
def _backward(self, grad_act, input_act):
# this is probably the trickiest method in this entire module...
# input activation gradient -- notice that we have to flip the filters horizontally and
# vertically
rev_filters = np.fliplr(np.flipud(self.filters_))
# note: opencv doesn't like arbitrary slices of numpy arrays, so we need to shuffle the
# dimensions around a little bit
# rev_filters will now be NUM_FILTERS x NUM_CHANNELS x ...
rev_filters = np.rollaxis(np.rollaxis(rev_filters, 2, 0), 3, 0).copy()
padded_grad_act = padarray(grad_act, self.filter_pad)
# padded_grad_act will now be NUM_FILTERS x NUM_EXAMPLES x ...
padded_grad_act = np.rollaxis(padded_grad_act, 3, 0).copy()
grad_input_act = np.zeros(input_act.shape, dtype='float32')
for z in xrange(input_act.shape[0]):
for c in xrange(input_act.shape[-1]):
for f in xrange(self.filters_.shape[-1]):
grad_input_act[z,:,:,c] += filter2D(padded_grad_act[f,z], rev_filters[f,c])
# grad_input_act = grad_input_act.sum(-1)
# params gradient
grad_params = np.zeros((input_act.shape[1:4] + (grad_act.shape[-1],)), dtype='float32')
# grad_act_ will now be NUM_FILTERS x NUM_EXAMPLES x ...
grad_act_ = np.rollaxis(grad_act, 3, 0).copy()
# input_act will now be NUM_CHANNELS x NUM_EXAMPLES x ...
input_act = np.rollaxis(input_act, 3, 0).copy()
for n in xrange(input_act.shape[1]):
for c in xrange(input_act.shape[0]):
for f in xrange(grad_act.shape[-1]):
grad_params[:,:,c,f] += filter2D(input_act[c,n], grad_act_[f,n])
grad_params /= input_act.shape[1]
r_border, c_border = grad_act.shape[1]/2, grad_act.shape[2]/2
if grad_act.shape[1] %2 == 0:
grad_params = grad_params[r_border:-r_border+1, c_border:-c_border+1,...]
else:
grad_params = grad_params[r_border:-r_border, c_border:-c_border,...]
assert grad_params.shape == self.filters_.shape, 'wrong param grad shape'
return grad_input_act, grad_params.ravel()
Example 26
Project: ptsa Source File: filt.py
def filtfilt(b,a,x):
#For now only accepting 1d arrays
ntaps=max(len(a),len(b))
edge=ntaps*3
if x.ndim != 1:
raise ValueError, "filtfilt only accepts 1-dimensional arrays."
#x must be bigger than edge
if x.size < edge:
raise ValueError, "Input vector needs to be bigger than 3 * max(len(a),len(b))."
if len(a)!=len(b):
b=r_[b,zeros(len(a)-len(b))]
zi=lfilter_zi(b,a)
#Grow the signal to have edges for stabilizing
#the filter with inverted replicas of the signal
s=r_[2*x[0]-x[edge:1:-1],x,2*x[-1]-x[-1:-edge:-1]]
#in the case of one go we only need one of the extrems
# both are needed for filtfilt
(y,zf)=lfilter(b,a,s,-1,zi*s[0])
(y,zf)=lfilter(b,a,flipud(y),-1,zi*y[-1])
return flipud(y[edge-1:-edge+1])
Example 27
def _process(self, element, key=None):
try:
from matplotlib import pyplot as plt
except ImportError:
raise ImportError("contours operation requires matplotlib.")
figure_handle = plt.figure()
extent = element.range(0) + element.range(1)[::-1]
if self.p.filled:
contour_fn = plt.contourf
contour_type = Polygons
else:
contour_fn = plt.contour
contour_type = Contours
if type(element) is Raster:
data = [np.flipud(element.data)]
elif isinstance(element, Raster):
data = [element.data]
elif isinstance(element, QuadMesh):
data = (element.dimension_values(0, False),
element.dimension_values(1, False),
element.data[2])
contour_set = contour_fn(*data, extent=extent,
levels=self.p.levels)
contours = NdOverlay(None, kdims=['Levels'])
for level, cset in zip(self.p.levels, contour_set.collections):
paths = []
for path in cset.get_paths():
paths.extend(np.split(path.vertices, np.where(path.codes==1)[0][1:]))
contours[level] = contour_type(paths, level=level, group=self.p.group,
label=element.label, kdims=element.kdims,
vdims=element.vdims)
plt.close(figure_handle)
if self.p.overlaid:
contours = element * contours
return contours
Example 28
Project: inasafe Source File: interpolation2d.py
def interpolate_raster(x, y, z, points, mode='linear', bounds_error=False):
"""2D interpolation of raster data
It is assumed that data is organised in matrix z as latitudes from
bottom up along the first dimension and longitudes from west to east
along the second dimension.
Further it is assumed that x is the vector of longitudes and y the
vector of latitudes.
See interpolate2d for details of the interpolation routine
:param x: 1D array of x-coordinates of the mesh on which to interpolate
:type x: numpy.ndarray
:param y: 1D array of y-coordinates of the mesh on which to interpolate
:type y: numpy.ndarray
:param z: 2D array of values for each x, y pair
:type z: numpy.ndarray
:param points: Nx2 array of coordinates where interpolated values are
sought
:type points: numpy.ndarray
:param mode: Determines the interpolation order.
Options are:
* 'constant' - piecewise constant nearest neighbour interpolation
* 'linear' - bilinear interpolation using the four
nearest neighbours (default)
:type mode: str
:param bounds_error: If True, a BoundsError exception
will be raised when interpolated values are requested
outside the domain of the input data. If False (the default), nan
is returned for those values
:type bounds_error: bool
:returns: 1D array with same length as points with interpolated values
:raises: Exception, BoundsError (see note about bounds_error)
"""
# Flip matrix z up-down to interpret latitudes ordered from south to north
z = numpy.flipud(z)
# Transpose z to have y coordinates along the first axis and x coordinates
# along the second axis
# noinspection PyUnresolvedReferences
z = z.transpose()
# Call underlying interpolation routine and return
res = interpolate2d(x, y, z, points, mode=mode, bounds_error=bounds_error)
return res
Example 29
def to_pil(self, origin='lower'):
arr = np.flipud(self.data) if origin == 'lower' else self.data
return fromarray(arr, 'RGBA')
Example 30
Project: imagen Source File: audio.py
def _get_row_amplitudes(self):
signal_interval = self.signal()
sample_rate = self.signal.sample_rate
# A signal window *must* span one sample rate
signal_window = tile(signal_interval, ceil(1.0/self.signal.interval_length))
if self.windowing_function:
smoothed_window = signal_window[0:sample_rate] * self.windowing_function(sample_rate)
else:
smoothed_window = signal_window[0:sample_rate]
amplitudes = (abs(fft.rfft(smoothed_window))[0:sample_rate/2] + self.offset) * self.scale
for index in range(0, self._sheet_dimensions[0]-2):
start_frequency = self.frequency_spacing[index]
end_frequency = self.frequency_spacing[index+1]
normalisation_factor = nonzero(amplitudes[start_frequency:end_frequency])[0].size
if normalisation_factor == 0:
amplitudes[index] = 0
else:
amplitudes[index] = sum(amplitudes[start_frequency:end_frequency]) / normalisation_factor
return flipud(amplitudes[0:self._sheet_dimensions[0]].reshape(-1,1))
Example 31
Project: cvxpy Source File: tree_mat.py
def conv_mul(lin_op, rh_val, transpose=False, is_abs=False):
"""Multiply by a convolution operator.
Parameters
----------
lin_op : LinOp
The root linear operator.
rh_val : NDArray
The vector being convolved.
transpose : bool
Is the transpose of convolution being applied?
is_abs : bool
Is the absolute value of convolution being applied?
Returns
-------
NumPy NDArray
The convolution.
"""
constant = mul(lin_op.data, {}, is_abs)
# Convert to 2D
constant, rh_val = map(intf.from_1D_to_2D, [constant, rh_val])
if transpose:
constant = np.flipud(constant)
# rh_val always larger than constant.
return fftconvolve(rh_val, constant, mode='valid')
else:
# First argument must be larger.
if constant.size >= rh_val.size:
return fftconvolve(constant, rh_val, mode='full')
else:
return fftconvolve(rh_val, constant, mode='full')
Example 32
def flipud(self):
""" Flip the image array upside down in-place. Wrapper for np.flipud()"""
self.array = np.flipud(self.array)
Example 33
def run(config, software, im_fns, check=True):
"""Unbiased atlas building - Atlas-to-image registration"""
log = logging.getLogger(__name__)
if check:
check_requirements(config, software)
reference_im_fn = config.reference_im_fn
selection = config.selection
result_dir = config.result_dir
ants_params = config.ants_params
num_of_iterations_per_level = config.num_of_iterations_per_level
num_of_levels = config.num_of_levels # multiscale bluring (coarse-to-fine)
s = time.time()
pyLAR.affineRegistrationStep(software.EXE_BRAINSFit, im_fns, result_dir, selection, reference_im_fn)
if config.histogram_matching:
pyLAR.histogramMatchingStep(selection, result_dir)
num_of_data = len(selection)
iterCount = 0
for level in range(0, num_of_levels):
for iterCount in range(1, num_of_iterations_per_level+1):
log.info('Level: ' + str(level))
log.info('Iteration ' + str(iterCount))
_runIteration(level, iterCount, ants_params, result_dir, selection, software)
gc.collect() # garbage collection
# We need to check if num_of_iterations_per_level is set to 0, which leads
# to computing an average on the affine registration.
if level != num_of_levels - 1:
log.warning('No need for multiple levels! TO BE REMOVED!')
for i in range(num_of_data):
current_file_name = 'L' + str(level) + '_Iter' + str(iterCount) + '_' + str(i) + '.nrrd'
current_file_path = os.path.join(result_dir, current_file_name)
nextLevelInitIm = os.path.join(result_dir, 'L'+str(level+1)+'_Iter0_' + str(i) + '.nrrd')
shutil.copyfile(current_file_path, nextLevelInitIm)
# if num_of_levels > 1:
# print 'WARNING: No need for multiple levels! TO BE REMOVED!'
# for i in range(num_of_data):
# next_prefix = 'L' + str(level+1) + '_Iter0_'
# next_path = os.path.join(result_dir, next_prefix)
# newLevelInitIm = next_path + str(i) + '.nrrd'
current_prefix = 'L' + str(num_of_levels-1) + '_Iter' + str(num_of_iterations_per_level)
current_path = os.path.join(result_dir, current_prefix)
atlasIm = current_path + '_atlas.nrrd'
listOfImages = []
num_of_data = len(selection)
for i in range(num_of_data):
lrIm = current_path + '_' + str(i) + '.nrrd'
listOfImages.append(lrIm)
pyLAR.AverageImages(software.EXE_AverageImages, listOfImages, atlasIm)
logging.debug("Saves list outputs:%s"%(os.path.join(result_dir,'list_outputs.txt')))
pyLAR.writeTxtFromList(os.path.join(result_dir,'list_outputs.txt'),[atlasIm])
try:
import matplotlib.pyplot as plt
import SimpleITK as sitk
import numpy as np
im = sitk.ReadImage(atlasIm)
im_array = sitk.GetArrayFromImage(im)
z_dim, x_dim, y_dim = im_array.shape
plt.figure()
plt.imshow(np.flipud(im_array[z_dim/2, :]), plt.cm.gray)
plt.title(current_prefix + ' atlas')
plt.savefig(current_path + '.png')
except ImportError:
pass
e = time.time()
l = e - s
log.info('Total running time: %f mins' % (l/60.0))
Example 34
Project: PyAbel Source File: polar.py
def reproject_image_into_polar(data, origin=None, Jacobian=False,
dr=1, dt=None):
"""
Reprojects a 2D numpy array (``data``) into a polar coordinate system.
"origin" is a tuple of (x0, y0) relative to the bottom-left image corner,
and defaults to the center of the image.
Parameters
----------
data : 2D np.array
origin : tuple
The coordinate of the image center, relative to bottom-left
Jacobian : boolean
Include ``r`` intensity scaling in the coordinate transform.
This should be included to account for the changing pixel size that
occurs during the transform.
dr : float
Radial coordinate spacing for the grid interpolation
tests show that there is not much point in going below 0.5
dt : float
Angular coordinate spacing (in degrees)
if ``dt=None``, dt will be set such that the number of theta values
is equal to the maximum value between the height or the width of
the image.
Returns
-------
output : 2D np.array
The polar image (r, theta)
r_grid : 2D np.array
meshgrid of radial coordinates
theta_grid : 2D np.array
meshgrid of theta coordinates
Notes
-----
Adapted from:
http://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
# bottom-left coordinate system requires numpy image to be np.flipud
data = np.flipud(data)
ny, nx = data.shape[:2]
if origin is None:
origin = (nx//2 + nx % 2, ny//2 + ny % 2) # % handles odd size image
# Determine what the min and max r and theta coords will be...
x, y = index_coords(data, origin=origin) # (x,y) coordinates of each pixel
r, theta = cart2polar(x, y) # convert (x,y) -> (r,θ), note θ=0 is vertical
nr = np.round((r.max()-r.min())/dr)
if dt is None:
nt = max(nx, ny)
else:
# dt in degrees
nt = np.round((theta.max()-theta.min())/(np.pi*dt/180))
# Make a regular (in polar space) grid based on the min and max r & theta
r_i = np.linspace(r.min(), r.max(), nr, endpoint=False)
theta_i = np.linspace(theta.min(), theta.max(), nt, endpoint=False)
theta_grid, r_grid = np.meshgrid(theta_i, r_i)
# Project the r and theta grid back into pixel coordinates
X, Y = polar2cart(r_grid, theta_grid)
X += origin[0] # We need to shift the origin
Y += origin[1] # back to the bottom-left corner...
xi, yi = X.flatten(), Y.flatten()
coords = np.vstack((yi, xi)) # (map_coordinates requires a 2xn array)
zi = map_coordinates(data, coords)
output = zi.reshape((nr, nt))
if Jacobian:
output = output*r_i[:, np.newaxis]
return output, r_grid, theta_grid
Example 35
def is_symmetric(self, nodal=False, tol=1.e-14):
"""return True if the data is left-right symmetric (to the tolerance
tol). For node-centered data, set nodal=True
"""
if not nodal:
L = self[self.g.ilo:self.g.ilo+self.g.nx/2,
self.g.jlo:self.g.jhi+1]
R = self[self.g.ilo+self.g.nx/2:self.g.ihi+1,
self.g.jlo:self.g.jhi+1]
else:
print(self.g.ilo,self.g.ilo+self.g.nx/2+1)
L = self[self.g.ilo:self.g.ilo+self.g.nx/2+1,
self.g.jlo:self.g.jhi+1]
print(self.g.ilo+self.g.nx/2,self.g.ihi+2)
R = self[self.g.ilo+self.g.nx/2:self.g.ihi+2,
self.g.jlo:self.g.jhi+1]
e = abs(L - np.flipud(R)).max()
print(e, tol, e < tol)
return e < tol
Example 36
Project: tractor Source File: symmetric_integral_image.py
def test_rectangles(self):
from numpy import fliplr, flipud
from numpy.random import random_sample, seed
from integral_image import *
seed(42)
I = random_sample((11,11))
# symmetrize by copying the first quadrant
I[:,6:] = fliplr(I[:,0:5])
I[6:,:] = flipud(I[0:5,:])
intimg = integral_image(I)
symmimg = integral_image(I[:6,:6])
for (x0,x1,y0,y1) in [(-1,0,-1,0), # first element
(1,4,1,4), # first quadrant (simple)
(-1,5,-1,5), # first quadrant (edges)
(6,9,1,4), # quadrant 2 (simple)
(5,10,1,4), # quadrant 2 (edges)
(4,5,-1,5), # quadrant 1/2 boundary
(4,6,-1,5), # quadrant 1/2 boundary + 1 over
(-1,10,-1,5), # quadrants 1,2 (up to edges)
(7,9,7,9), # quadrant 3
(1,4,7,9), # quadrant 4
(3,7,4,8), #
(-1,10,-1,10), #
(5,6,-1,0)]:
R0 = intimg_rect(intimg, x0, x1, y0, y1)
R1 = symm_intimg_rect(symmimg, x0, x1, y0, y1, 5)
print(round(R0,5), round(R1,5), round(R0,5) == round(R1,5))
self.assertAlmostEqual(R1, R0, 8)
Example 37
Project: landlab Source File: esri_ascii.py
def write_esri_ascii(path, fields, names=None, clobber=False):
"""Write landlab fields to ESRI ASCII.
Write the data and grid information for *fields* to *path* in the ESRI
ASCII format.
Parameters
----------
path : str
Path to output file.
fields : field-like
Landlab field object that holds a grid and associated values.
names : iterable of str, optional
Names of the fields to include in the output file. If not provided,
write all fields.
clobber : boolean
If *path* exists, clobber the existing file, otherwise raise an
exception.
Examples
--------
>>> import numpy as np
>>> from landlab.testing.tools import cdtemp
>>> from landlab import RasterModelGrid
>>> from landlab.io.esri_ascii import write_esri_ascii
>>> grid = RasterModelGrid((4, 5), spacing=(2., 2.))
>>> _ = grid.add_field('node', 'air__temperature', np.arange(20.))
>>> with cdtemp() as _:
... files = write_esri_ascii('test.asc', grid)
>>> files
['test.asc']
>>> _ = grid.add_field('node', 'land_surface__elevation', np.arange(20.))
>>> with cdtemp() as _:
... files = write_esri_ascii('test.asc', grid)
>>> files.sort()
>>> files
['test_air__temperature.asc', 'test_land_surface__elevation.asc']
"""
if os.path.exists(path) and not clobber:
raise ValueError('file exists')
if isinstance(names, six.string_types):
names = [names]
names = names or fields.at_node.keys()
if len(names) == 1:
paths = [path]
elif len(names) > 1:
(base, ext) = os.path.splitext(path)
paths = [base + '_' + name + ext for name in names]
else:
raise ValueError('no node fields to write')
bad_names = set(names) - set(fields.at_node.keys())
if len(bad_names) > 0:
raise ValueError('unknown field name(s): %s' % ','.join(bad_names))
header = {
'ncols': fields.number_of_node_columns,
'nrows': fields.number_of_node_rows,
'xllcorner': fields.node_x[0],
'yllcorner': fields.node_y[0],
'cellsize': fields.dx,
}
for path, name in zip(paths, names):
header_lines = ['%s %s' % (key, str(val))
for key, val in list(header.items())]
data = fields.at_node[name].reshape(header['nrows'], header['ncols'])
np.savetxt(path, np.flipud(data), header=os.linesep.join(header_lines),
comments='')
return paths
Example 38
def get_data(self, element, ranges):
return (), dict(x=unique_array(element.dimension_values(0, False)),
y=unique_array(element.dimension_values(1, False)),
z=np.flipud(element.raster))
Example 39
Project: sncosmo Source File: bandpasses.py
def to_unit(self, unit):
"""Return wavelength and transmission in new wavelength units.
If the requested unit is the same as the current wavelength unit, the
existing wave and trans arrays are returned.
Parameters
----------
unit : `~astropy.units.Unit` or str
Target wavelength unit.
Returns
-------
wave : `~numpy.ndarray`
trans : `~numpy.ndarray`
"""
if unit is u.AA:
return self.wave, self.trans
d = u.AA.to(unit, self.wave, u.spectral())
t = self.trans
if d[0] > d[-1]:
d = np.flipud(d)
t = np.flipud(t)
return d, t
Example 40
Project: pyLAR Source File: images.py
def showImageMidSlice(reference_im_fn, size_x=15, size_y=5):
try:
import matplotlib.pyplot as plt
except ImportError:
print "showImageMidSlice not supported - matplotlib not available"
return
im_ref = sitk.ReadImage(reference_im_fn) # image in SITK format
im_ref_array = sitk.GetArrayFromImage(im_ref) # get numpy array
z_dim, x_dim, y_dim = im_ref_array.shape # get 3D volume shape
# display reference image
fig = plt.figure(figsize=(size_x,size_y))
plt.subplot(131)
implot = plt.imshow(np.flipud(im_ref_array[z_dim/2,:,:]),plt.cm.gray)
plt.subplot(132)
implot = plt.imshow(np.flipud(im_ref_array[:,x_dim/2,:]),plt.cm.gray)
plt.subplot(133)
implot = plt.imshow(np.flipud(im_ref_array[:,:,y_dim/2]),plt.cm.gray)
plt.axis('off')
plt.title('healthy atlas')
fig.clf()
del im_ref, im_ref_array
return
Example 41
Project: landlab Source File: esri_ascii.py
def read_esri_ascii(asc_file, grid=None, reshape=False, name=None, halo=0):
"""Read :py:class:`~landlab.RasterModelGrid` from an ESRI ASCII file.
Read data from *asc_file*, an ESRI_ ASCII file, into a
:py:class:`~landlab.RasterModelGrid`. *asc_file* is either the name of
the data file or is a file-like object.
The grid and data read from the file are returned as a tuple
(*grid*, *data*) where *grid* is an instance of
:py:class:`~landlab.RasterModelGrid` and *data* is a numpy
array of doubles with that has been reshaped to have the number of rows
and columns given in the header.
.. _ESRI: http://resources.esri.com/help/9.3/arcgisengine/java/GP_ToolRef/spatial_analyst_tools/esri_ascii_raster_format.htm
Parameters
----------
asc_file : str of file-like
Data file to read.
reshape : boolean, optional
Reshape the returned array, otherwise return a flattened array.
name : str, optional
Add data to the grid as a named field.
grid : *grid* , optional
Adds data to an existing *grid* instead of creating a new one.
halo : integer, optional
Adds outer border of depth halo to the *grid*.
Returns
-------
(grid, data) : tuple
A newly-created RasterModel grid and the associated node data.
Raises
------
DataSizeError
Data are not the same size as indicated by the header file.
MismatchGridDataSizeError
If a grid is passed, the size of the grid does not agree with the
size of the data.
Examples
--------
Assume that fop is the name of a file that contains text below
(make sure you have your path correct):
ncols 3
nrows 4
xllcorner 1.
yllcorner 2.
cellsize 10.
NODATA_value -9999
0. 1. 2.
3. 4. 5.
6. 7. 8.
9. 10. 11.
--------
>>> from landlab.io import read_esri_ascii
>>> (grid, data) = read_esri_ascii('fop') # doctest: +SKIP
>>> #grid is an object of type RasterModelGrid with 4 rows and 3 cols
>>> #data contains an array of length 4*3 that is equal to
>>> # [9., 10., 11., 6., 7., 8., 3., 4., 5., 0., 1., 2.]
>>> (grid, data) = read_esri_ascii('fop', halo=1) # doctest: +SKIP
>>> #now the data has a nodata_value ring of -9999 around it. So array is
>>> # [-9999, -9999, -9999, -9999, -9999, -9999,
>>> # -9999, 9., 10., 11., -9999,
>>> # -9999, 6., 7., 8., -9999,
>>> # -9999, 3., 4., 5., -9999,
>>> # -9999, 0., 1., 2. -9999,
>>> # -9999, -9999, -9999, -9999, -9999, -9999]
"""
from ..grid import RasterModelGrid
if isinstance(asc_file, six.string_types):
file_name = asc_file
with open(file_name, 'r') as asc_file:
header = read_asc_header(asc_file)
data = _read_asc_data(asc_file)
else:
header = read_asc_header(asc_file)
data = _read_asc_data(asc_file)
#There is no reason for halo to be negative.
#Assume that if a negative value is given it should be 0.
if halo <= 0:
shape = (header['nrows'], header['ncols'])
if data.size != shape[0] * shape[1]:
raise DataSizeError(shape[0] * shape[1], data.size)
else:
shape = (header['nrows'] + 2 * halo, header['ncols'] + 2 * halo)
#check to see if a nodata_value was given. If not, assign -9999.
if 'nodata_value' in header.keys():
nodata_value = header['nodata_value']
else:
header['nodata_value'] = -9999.
nodata_value = header['nodata_value']
if data.size != (shape[0] - 2 * halo) * (shape[1] - 2 * halo):
raise DataSizeError(shape[0] * shape[1], data.size)
spacing = (header['cellsize'], header['cellsize'])
#origin = (header['xllcorner'], header['yllcorner'])
data = np.flipud(data)
#REMEMBER, shape contains the size with halo in place
#header contains the shape of the original data
#Add halo below
if halo > 0:
helper_row = np.ones(shape[1]) * nodata_value
#for the first halo row(s), add num cols worth of nodata vals to data
for i in range(0, halo):
data = np.insert(data,0,helper_row)
#then for header['nrows'] add halo number nodata vals, header['ncols']
#of data, then halo number of nodata vals
helper_row_ends = np.ones(halo) * nodata_value
for i in range(halo, header['nrows']+halo):
#this adds at the beginning of the row
data = np.insert(data,i * shape[1],helper_row_ends)
#this adds at the end of the row
data = np.insert(data,(i + 1) * shape[1] - halo,helper_row_ends)
#for the last halo row(s), add num cols worth of nodata vals to data
for i in range(header['nrows']+halo,shape[0]):
data = np.insert(data,data.size,helper_row)
if not reshape:
data = data.flatten()
if grid is not None:
if (grid.number_of_node_rows != shape[0]) or \
(grid.number_of_node_columns != shape[1]):
raise MismatchGridDataSizeError(shape[0] * shape[1], \
grid.number_of_node_rows * grid.number_of_node_columns )
if grid is None:
grid = RasterModelGrid(shape, spacing=spacing)
if name:
grid.add_field('node', name, data)
return (grid, data)
Example 42
def vertical_flip(x):
for i in range(x.shape[0]):
x[i] = np.flipud(x[i])
return x
Example 43
Project: mayavi Source File: figure.py
def screenshot(figure=None, mode='rgb', antialiased=False):
""" Return the current figure pixmap as an array.
**Parameters**
:figure: a figure instance or None, optional
If specified, the figure instance to capture the view of.
:mode: {'rgb', 'rgba'}
The color mode of the array captured.
:antialiased: {True, False}
Use anti-aliasing for rendering the screenshot.
Uses the number of aa frames set by
figure.scene.anti_aliasing_frames
**Notes**
On most systems, this works similarly to taking a screenshot of
the rendering window. Thus if it is hidden by another window, you
will capture the other window. This limitation is due to the
heavy use of the hardware graphics system.
**Examples**
This function can be useful for integrating 3D plotting with
Mayavi in a 2D plot created by matplotlib.
>>> from mayavi import mlab
>>> mlab.test_plot3d()
>>> arr = mlab.screenshot()
>>> import pylab as pl
>>> pl.imshow(arr)
>>> pl.axis('off')
>>> pl.show()
"""
if figure is None:
figure = gcf()
x, y = tuple(figure.scene.get_size())
# Try to lift the window
figure.scene._lift()
if mode == 'rgb':
out = tvtk.UnsignedCharArray()
shape = (y, x, 3)
pixel_getter = figure.scene.render_window.get_pixel_data
pg_args = (0, 0, x - 1, y - 1, 1, out)
elif mode == 'rgba':
out = tvtk.FloatArray()
shape = (y, x, 4)
pixel_getter = figure.scene.render_window.get_rgba_pixel_data
pg_args = (0, 0, x - 1, y - 1, 1, out)
else:
raise ValueError('mode type not understood')
if antialiased:
# save the current aa value to restore it later
old_aa = figure.scene.render_window.aa_frames
figure.scene.render_window.aa_frames = figure.scene.anti_aliasing_frames
figure.scene.render()
pixel_getter(*pg_args)
figure.scene.render_window.aa_frames = old_aa
figure.scene.render()
else:
pixel_getter(*pg_args)
# Return the array in a way that pylab.imshow plots it right:
out = out.to_array()
out.shape = shape
out = np.flipud(out)
return out
Example 44
def is_asymmetric(self, nodal=False, tol=1.e-14):
"""return True if the data is left-right asymmetric (to the tolerance
tol)---e.g., the sign flips. For node-centered data, set nodal=True
"""
if not nodal:
L = self[self.g.ilo:self.g.ilo+self.g.nx/2,
self.g.jlo:self.g.jhi+1]
R = self[self.g.ilo+self.g.nx/2:self.g.ihi+1,
self.g.jlo:self.g.jhi+1]
else:
print(self.g.ilo,self.g.ilo+self.g.nx/2+1)
L = self[self.g.ilo:self.g.ilo+self.g.nx/2+1,
self.g.jlo:self.g.jhi+1]
print(self.g.ilo+self.g.nx/2,self.g.ihi+2)
R = self[self.g.ilo+self.g.nx/2:self.g.ihi+2,
self.g.jlo:self.g.jhi+1]
e = abs(L + np.flipud(R)).max()
print(e, tol, e < tol)
return e < tol
Example 45
Project: nansat Source File: mapper_nora10_local_vpv.py
def __init__(self, fileName, gdalDataset, gdalMetadata, logLevel=30,
**kwargs):
if fileName[0:len(keywordBase)] != keywordBase:
raise WrongMapperError(__file__,
"Not Nora10 data converted from felt to netCDF")
requestedTime = datetime.strptime(fileName[len(keywordBase)+1:],
'%Y%m%d%H%M')
# For correct rounding
fileTime = requestedTime + timedelta(minutes=30)
fileTime = fileTime - timedelta(minutes=fileTime.minute)
nc_file = (baseFolder + 'windspeed_10m' +
fileTime.strftime('/%Y/%m/') + 'windspeed_' +
fileTime.strftime('%Y%m%d%H.nc'))
nc_file_winddir = (baseFolder + 'winddir_10m' +
fileTime.strftime('/%Y/%m/') +
'winddir_' + fileTime.strftime('%Y%m%d%H.nc'))
# Would prefer to use geotransform, but ob_tran
# (General Oblique Transformation) is not supported by GDAL
# Keeping lines below for potential future use:
#proj4 = gdalDataset.GetMetadataItem('projection_rotated_ll#proj4')
#proj4 = '+proj=ob_tran +o_proj=longlat +lon_0=-40 +o_lat_p=22 +a=6367470 +e=0'
#rlatmin = -13.25; rlatmax = 26.65; deltarlat = 0.1
#rlonmin = 5.75; rlonmax = 30.45; deltarlon = 0.1
# Needed due to presence of time dimension in netCDF file
gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', 'No')
# Read relevant arrays into memory
g = gdal.Open('NETCDF:"' + nc_file + '":' + 'windspeed_10m')
ws_10m = np.flipud(g.GetRasterBand(1).ReadAsArray())
g = gdal.Open('NETCDF:"' + nc_file_winddir + '":' +
'wind_direction_10m')
wd_10m = np.flipud(g.GetRasterBand(1).ReadAsArray())
g = gdal.Open('NETCDF:"' + nc_file + '":' + 'latitude')
lat = np.flipud(g.GetRasterBand(1).ReadAsArray())
g = gdal.Open('NETCDF:"' + nc_file + '":' + 'longitude')
lon = np.flipud(g.GetRasterBand(1).ReadAsArray())
u10 = -ws_10m*np.sin(np.deg2rad(wd_10m))
v10 = -ws_10m*np.cos(np.deg2rad(wd_10m))
VRT_u10 = VRT(array=u10, lat=lat, lon=lon)
VRT_v10 = VRT(array=v10, lat=lat, lon=lon)
# Store bandVRTs so that they are available after reprojection etc
self.bandVRTs = {'u_VRT': VRT_u10,
'v_VRT': VRT_v10}
metaDict = []
metaDict.append({'src': {'SourceFilename': VRT_u10.fileName,
'SourceBand': 1},
'dst': {'wkv': 'eastward_wind',
'name': 'eastward_wind'}})
metaDict.append({'src': {'SourceFilename': VRT_v10.fileName,
'SourceBand': 1},
'dst': {'wkv': 'northward_wind',
'name': 'northward_wind'}})
# Add pixel function with wind speed
metaDict.append({
'src': [{'SourceFilename': self.bandVRTs['u_VRT'].fileName,
'SourceBand': 1,
'DataType': 6},
{'SourceFilename': self.bandVRTs['v_VRT'].fileName,
'SourceBand': 1,
'DataType': 6}],
'dst': {'wkv': 'wind_speed',
'name': 'windspeed',
'height': '10 m',
'PixelFunctionType': 'UVToMagnitude'}})
# Add pixel function with wind direction
metaDict.append({
'src': [{'SourceFilename': self.bandVRTs['u_VRT'].fileName,
'SourceBand': 1,
'DataType': 6},
{'SourceFilename': self.bandVRTs['v_VRT'].fileName,
'SourceBand': 1,
'DataType': 6}],
'dst': {'wkv': 'wind_from_direction',
'name': 'winddir',
'height': '10 m',
'PixelFunctionType': 'UVToDirectionFrom'}})
# create empty VRT dataset with geolocation only
VRT.__init__(self, lat=lat, lon=lon)
# add bands with metadata and corresponding values
# to the empty VRT
self._create_bands(metaDict)
# Add time
self.dataset.SetMetadataItem('time_coverage_start', fileTime.isoformat())
Example 46
def CaptureScreen(self):
self.screen = np.flipud( _Screenshot() )
Example 47
Project: visvis Source File: getframe.py
def getframe(ob):
""" getframe(object)
Get a snapshot of the current figure or axes or axesContainer.
It is returned as a numpy array (color image).
Also see vv.screenshot().
"""
# Get figure
fig = ob.GetFigure()
if not fig:
raise ValueError('Object is not present in any alive figures.')
# Select the figure
fig._SetCurrent() # works on all backends
# we read the pixels as shown on screen.
gl.glReadBuffer(gl.GL_FRONT)
# establish rectangle to sample
if isinstance(ob, vv.BaseFigure):
x,y,w,h = 0, 0, ob.position.w, ob.position.h
elif isinstance(ob, vv.AxesContainer):
x,y = ob.position.absTopLeft
w,h = ob.position.size
y = fig.position.h - (y+h)
elif isinstance(ob, vv.Axes):
x,y = ob.position.absTopLeft
w,h = ob.position.size
y = fig.position.h - (y+h)
x+=1; y+=1; w-=1; h-=1; # first pixel is the bounding box
else:
raise ValueError("The given object is not a figure nor an axes.")
# read
# use floats to prevent strides etc. uint8 caused crash on qt backend.
im = gl.glReadPixels(x, y, w, h, gl.GL_RGB, gl.GL_FLOAT)
# reshape, flip, and store
im.shape = h,w,3
im = np.flipud(im)
# done
return im
Example 48
def correlate(u, v):
w = np.convolve(u, np.flipud(v))
return w
Example 49
def Reverse(self):
""" In-place reverse. """
tmp = self.data[:self._len].copy()
self.data[:self._len] = np.flipud(tmp)
Example 50
Project: PySAR Source File: plot.py
def main(argv):
color_map='jet'
disp_opposite='no'
try:
opts, args = getopt.getopt(argv,"h:f:d:o:x:y:m:M:i:c:")
except getopt.GetoptError:
Usage() ; sys.exit(1)
if opts==[]:
Usage() ; sys.exit(1)
for opt,arg in opts:
if opt in ("-h","--help"):
Usage()
sys.exit()
elif opt == '-f':
File = arg
elif opt == '-d':
demFile=arg
elif opt=='-m':
Vmin=float(arg)
elif opt=='-M':
Vmax=float(arg)
elif opt == '-x':
winx=arg.split(':')
elif opt == '-y':
winy = arg.split(':')
elif opt == '-o':
outName = arg
elif opt == '-i':
disp_opposite = arg
elif opt == '-c':
color_map=arg
h5file=h5py.File(File,'r')
k=h5file.keys()
print k[0]
# ccmap=plt.get_cmap(color_map)
################################################
cdict1 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.6, 1.0, 1.0),
(0.8, 1.0, 1.0),
(1.0, 0.5, 0.5)),
'green': ((0.0, 0.0, 0.0),
(0.2, 0.0, 0.0),
(0.4, 1.0, 1.0),
(0.6, 1.0, 1.0),
(0.8, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.5, .5),
(0.2, 1.0, 1.0),
(0.4, 1.0, 1.0),
(0.5, 0.0, 0.0),
(1.0, 0.0, 0.0),)
}
if color_map =='pysar_hsv':
ccmap = LinearSegmentedColormap('BlueRed1', cdict1)
else:
ccmap=plt.get_cmap(color_map)
print 'colormap is : '+ color_map
################################################
dset = h5file[k[0]].get(k[0])
data=dset[0:dset.shape[0],0:dset.shape[1]]
if disp_opposite in('yes','Yes','Y','y','YES'):
data=-1*data
try:
xref=h5file[k[0]].attrs['ref_x']
yref=h5file[k[0]].attrs['ref_y']
except:
print 'No reference point'
try:
ullon=float(h5file[k[0]].attrs['X_FIRST'])
ullat=float(h5file[k[0]].attrs['Y_FIRST'])
lon_step=float(h5file[k[0]].attrs['X_STEP'])
lat_step=float(h5file[k[0]].attrs['Y_STEP'])
lon_unit=h5file[k[0]].attrs['Y_UNIT']
lat_unit=h5file[k[0]].attrs['X_UNIT']
llcrnrlon=ullon
llcrnrlat=ullat+lat_step*data.shape[0]
urcrnrlon=ullon+lon_step*data.shape[1]
urcrnrlat=ullat
geocoord='yes'
print 'Input file is Geocoded'
except:
geocoord='no'
fig = plt.figure()
ax = fig.add_axes([0.1,0.1,0.8,0.8])
m = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
resolution='l', area_thresh=1., projection='cyl',suppress_ticks=False,ax=ax)
print demFile
demFile
if os.path.basename(demFile).split('.')[1]=='hgt':
amp,dem,demRsc = readfile.read_float32(demFile)
elif os.path.basename(demFile).split('.')[1]=='dem':
dem,demRsc = readfile.read_dem(demFile)
#################################################################
try:
winx
wx=[int(i) for i in winx]
dem=dem[:,wx[0]:wx[1]]
data=data[:,wx[0]:wx[1]]
ullon=float(h5file[k[0]].attrs['X_FIRST'])+wx[0]
llcrnrlon=ullon
urcrnrlon=ullon+lon_step*data.shape[1]
except:
print ''
try:
winy
wy=[int(i) for i in winy]
dem=dem[wy[0]:wy[1],:]
data=data[wy[0]:wy[1],:]
except:
print ''
################################################################
fig = plt.figure()
ax = fig.add_axes([0.1,0.1,0.8,0.8])
m = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
resolution='l', area_thresh=1., projection='cyl',suppress_ticks=False,ax=ax)
cmap_dem=plt.get_cmap('gray')
m.imshow(ut.hillshade(np.flipud(dem),50.0),cmap=cmap_dem)
try:
im=m.imshow(np.flipud(data),vmin=Vmin,vmax=Vmax,cmap=ccmap)
# cb = m.colorbar(im,"right", size="5%", pad='2%')
except:
im=m.imshow(np.flipud(data))
# cb = m.colorbar(im,"right", size="5%", pad='2%')
# m.bluemarble()
# cb = m.colorbar(im,"right", size="5%", pad='2%')
# parallels = np.arange(31.,34,0.5)
# m.drawparallels(parallels,labels=[1,0,0,1],linewidth=0.0)
# meridians = np.arange(-115.,-112.,0.5)
# m.drawmeridians(meridians,labels=[1,0,0,1],linewidth=0.0)
# m.drawmapscale()
# m = Basemap(llcrnrlon=-110.,llcrnrlat=0.,urcrnrlon=-20.,urcrnrlat=57.,
# projection='lcc',lat_1=20.,lat_2=40.,lon_0=-60.,
# resolution ='l',area_thresh=1000.)
# m.drawcoastlines()
# m.drawcountries()
# m.drawmapboundary(fill_color='#99ffff')
# m.fillcontinents(color='#cc9966',lake_color='#99ffff')
# m.drawparallels(np.arange(10,70,20),labels=[1,1,0,0])
# m.drawmeridians(np.arange(-100,0,20),labels=[0,0,0,1])
# plt.title('Atlantic Hurricane Tracks (Storms Reaching Category 4, 1851-2004)')
try:
figName = outName
except:
outName=os.path.basename(File).replace('.h5','')
figName = outName + '.png'
plt.savefig(figName,pad_inches=0.0)
# plt.show()
h5file.close()