Here are examples of the Python API `numpy.rank` taken from open source projects. By voting up you can indicate which examples are most useful and appropriate. (Note: `numpy.rank` was deprecated in NumPy 1.9 and removed in NumPy 1.18; it returned the number of array dimensions, so `numpy.ndim` is the drop-in replacement — it is *not* the linear-algebra matrix rank, which is `numpy.linalg.matrix_rank`.)
4 Examples
3
Example 1
Project: pyorbital Source File: geoloc.py
def mnorm(m, axis=None):
    """Norm of a matrix of vectors stacked along the *axis* dimension.

    Parameters
    ----------
    m : numpy.ndarray
        Array whose vectors are reduced along *axis*.
    axis : int, optional
        Axis along which the Euclidean norm is taken. Defaults to the
        last axis of *m*.

    Returns
    -------
    numpy.ndarray or float
        ``sqrt(sum(m**2, axis))``.
    """
    if axis is None:
        # np.rank was removed in NumPy 1.18; np.ndim is the replacement
        # (both return the number of dimensions).
        axis = np.ndim(m) - 1
    return np.sqrt((m ** 2).sum(axis))
0
Example 2
Project: pyunicorn Source File: data.py
@classmethod
def _get_netcdf_data(cls, file_name, file_type, observable_name,
                     dimension_names, vertical_level=None,
                     silence_level=0):
    """
    Import data from a NetCDF file with a regular and rectangular grid.

    Supported file types ``file_type`` are:
      - "NetCDF" for regular (rectangular) grids
      - "iNetCDF" for irregular (e.g. geodesic) grids or station data

    :arg str file_name: The name of the data file.
    :arg str file_type: The format of the data file.
    :arg str observable_name: The short name of the observable within data
        file (particularly relevant for NetCDF).
    :arg dict dimension_names: The names of the dimensions as used in the
        NetCDF file. E.g., dimension_names = {"lat": "lat", "lon": "lon",
        "time": "time"}.
    :arg int vertical_level: The vertical level to be extracted from the
        data file. Is ignored for horizontal data sets. If None, the first
        level in the data file is chosen.
    :arg int silence_level: The inverse level of verbosity of the object.
    :rtype: dict
    :return: keys "grid", "observable", "observable_long_name",
        "observable_name" (only the keys the visible code assigns).
    """
    if silence_level <= 1:
        # Fixed: Python 2 print statements are syntax errors on Python 3,
        # which any NumPy without np.rank (>= 1.18) requires.
        print("Reading NetCDF File and converting data to NumPy array...")

    # Initialize dictionary of results
    res = {}

    # Open netCDF3 or netCDF4 file
    f = netCDF4.Dataset(file_name, "r")

    # Create reference to observable
    observable = f.variables[observable_name][:].astype("float32")

    # Get time axis from NetCDF file
    time = f.variables[dimension_names["time"]][:].astype("float32")

    # Get number of dimensions of data.
    # np.rank was removed in NumPy 1.18; np.ndim is the replacement.
    n_dim = np.ndim(observable)

    # Handle selected vertical level (used by both grid types)
    level = 0 if vertical_level is None else vertical_level

    # Distinguish between regular and irregular grids
    if file_type == "NetCDF":
        # Create Grid instance
        lat_grid = f.variables[dimension_names["lat"]][:].astype("float32")
        lon_grid = f.variables[dimension_names["lon"]][:].astype("float32")
        res["grid"] = Grid.RegularGrid(time, lat_grid, lon_grid,
                                       silence_level)

        if n_dim == 3:
            # 3D data set (time, lat, lon): select whole data set
            res["observable"] = observable.copy()
        elif n_dim == 4:
            # 4D data set (time, level, lat, lon): select vertical level
            res["observable"] = observable[:, level, :, :].copy()
        else:
            print("Regular NetCDF data sets with dimensions other than "
                  "3 (time, lat, lon) or 4 (time, level, lat, lon) are "
                  "not supported by Data class!")
    elif file_type == "iNetCDF":
        # Create Grid instance
        lat_seq = f.variables["grid_center_lat"][:].astype("float32")
        lon_seq = f.variables["grid_center_lon"][:].astype("float32")
        res["grid"] = Grid(time, lat_seq, lon_seq, silence_level)

        if n_dim == 2:
            # 2D data set (time, index): select whole data set
            res["observable"] = observable.copy()
        elif n_dim == 3:
            # 3D data set (time, level, index): select vertical level
            res["observable"] = observable[:, level, :].copy()
        else:
            print("Irregular NetCDF data sets with dimensions other than "
                  "2 (time, index) or 3 (time, level, index) are not "
                  "supported by Data class!")

    # Get length of raw data time axis
    n_time = res["observable"].shape[0]

    # Reshape observable to comply with the standard shape (time, index)
    res["observable"].shape = (n_time, -1)

    # Get long name of observable
    res["observable_long_name"] = f.variables[observable_name].long_name
    # Store name of observable
    res["observable_name"] = observable_name

    f.close()
    return res
0
Example 3
Project: python-control Source File: yottalab.py
def bb_dcgain(sys):
    """Return the steady-state (DC) value of the step response of *sys*.

    Usage
    =====
    dcgain = bb_dcgain(sys)

    Inputs
    ------
    sys : system
        State-space system with attributes A, B, C, D and sampling
        time ``dt`` (``dt == 0.0`` means continuous time).

    Outputs
    -------
    dcgain : numpy.ndarray
        Steady-state value ``-C A^{-1} B + D`` (with ``A - I`` in place
        of ``A`` for discrete-time systems). An empty array is returned
        when the relevant matrix is singular.
    """
    a = np.asarray(sys.A, dtype=float)
    b = np.asarray(sys.B, dtype=float)
    c = np.asarray(sys.C, dtype=float)
    d = np.asarray(sys.D, dtype=float)
    nx = a.shape[0]
    if sys.dt != 0.0:
        # Discrete time: the DC gain uses (A - I) instead of A.
        a = a - np.eye(nx)
    # BUG FIX: the original called numpy's old rank(), which returned the
    # number of array *dimensions* (always 2 for a matrix), not the matrix
    # rank — so singularity was never detected for nx == 2 and nx > 2 was
    # always reported singular. np.linalg.matrix_rank computes the actual
    # rank. (np.rank itself was removed in NumPy 1.18.)
    if np.linalg.matrix_rank(a) < nx:
        gm = []
    else:
        gm = -c.dot(np.linalg.inv(a)).dot(b) + d
    return np.array(gm)
0
Example 4
Project: msmbuilder-legacy Source File: clustering.py
@classmethod
def load_from_disk(cls, filename):
    """Load up a clusterer from disk.

    This is useful because computing the Z-matrix (done in ``__init__``)
    is the most expensive part, and assigning is cheap.

    Parameters
    ----------
    filename : str
        Location to load from.

    Raises
    ------
    Presumably raises if *filename* does not exist — depends on
    ``io.loadh`` behavior; TODO confirm.
    """
    data = io.loadh(filename, deferred=False)
    Z, traj_lengths = data['z_matrix'], data['traj_lengths']
    # Hack to fix a Serializer bug (KAB): a 0-d (scalar) traj_lengths
    # must be wrapped in a list. np.rank was removed in NumPy 1.18;
    # np.ndim is the replacement (both return the number of dimensions).
    if np.ndim(traj_lengths) == 0:
        traj_lengths = [traj_lengths]
    return cls(None, None, precomputed_values=(Z, traj_lengths))