Here are examples of the Python API numpy.empty taken from open source projects.
170 Examples
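Before the project examples, a minimal standalone sketch of numpy.empty itself (not taken from any project below): the call allocates an array of the requested shape and dtype without initializing the memory, so every element must be written before it is read.
import numpy as np

# numpy.empty allocates without initializing: the contents are whatever
# happened to be in memory, so write every element before reading it.
buf = np.empty((3, 4), dtype=float)
buf[:] = 0.0                        # explicit fill makes the contents defined
ids = np.empty(5, dtype=np.int64)   # 1-D allocation with an explicit dtype
print(buf.shape, ids.dtype)         # (3, 4) int64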
Example 101
def numerical_grad(theta, f, dx=1e-3, order=1):
""" return numerical estimate of the local gradient
The gradient is computed using the Taylor expansion approximation over
each dimension:
f(t + dt) = f(t) + dt df/dt(t) + dt^2/2 d^2f/dt^2(t) + ...
The first order then gives:
df/dt = (f(t + dt) - f(t)) / dt + O(dt)
Note that we could also compute the backwards version by subtracting dt instead:
df/dt = (f(t) - f(t - dt)) / dt + O(dt)
A better approach is a 3-point formula: evaluate the derivative on both
sides of a chosen point t using the forward and backward two-point
formulae above and take the average. This requires the Taylor expansion to higher order:
f(t +/- dt) = f(t) +/- dt df/dt + dt^2/2 d^2f/dt^2 +/- dt^3/6 d^3f/dt^3 + O(dt^4)
df/dt = (f(t + dt) - f(t - dt)) / (2 * dt) + O(dt^2)
Note that the error is now of the order of dt^2 instead of dt.
In the same manner we can obtain the next order by also using f(t +/- 2 * dt):
df/dt = (f(t - 2 * dt) - 8 f(t - dt) + 8 f(t + dt) - f(t + 2 * dt)) / (12 * dt) + O(dt^4)
At this order, two additional function evaluations are required per dimension, implying a
more time-consuming algorithm. However, the approximation error is of the order of dt^4.
INPUTS
------
theta: ndarray[float, ndim=1]
vector value around which to estimate the gradient
f: callable
function whose gradient is being estimated
KEYWORDS
--------
dx: float
perturbation to apply in each direction during the gradient estimation
order: int in [1, 2]
order of the estimates:
1 uses the central average over 2 points
2 uses the central average over 4 points
OUTPUTS
-------
df: ndarray[float, ndim=1]
gradient vector estimated at theta
COST: the gradient estimation needs to evaluate ndim * (2 * order) points (see above)
CAUTION: if dt is very small, the limited numerical precision can result in big errors.
"""
ndim = len(theta)
df = np.empty(ndim, dtype=float)
if order == 1:
cst = 0.5 / dx
for k in range(ndim):
dt = np.zeros(ndim, dtype=float)
dt[k] = dx
df[k] = (f(theta + dt) - f(theta - dt)) * cst
elif order == 2:
cst = 1. / (12. * dx)
for k in range(ndim):
dt = np.zeros(ndim, dtype=float)
dt[k] = dx
df[k] = cst * (f(theta - 2 * dt) - 8. * f(theta - dt) + 8. * f(theta + dt) - f(theta + 2. * dt) )
return df
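A minimal usage sketch for the function above; the test function and the exact gradient used for the check are illustrative, not part of the original project, and assume numerical_grad and numpy are in scope.
import numpy as np

def f(theta):
    # smooth test function with known gradient cos(theta)
    return np.sum(np.sin(theta))

theta0 = np.array([0.3, -1.2, 2.0])
approx = numerical_grad(theta0, f, dx=1e-3, order=1)
print(np.max(np.abs(approx - np.cos(theta0))))   # ~1e-7, the dx**2 truncation error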
Example 102
Project: mne-python Source File: brainvision.py
def _get_vhdr_info(vhdr_fname, eog, misc, scale, montage):
"""Extract all the information from the header file.
Parameters
----------
vhdr_fname : str
Raw EEG header to be read.
eog : list of str
Names of channels that should be designated EOG channels. Names should
correspond to the vhdr file.
misc : list or tuple of str | 'auto'
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes
in the vhdr file. If 'auto', units in vhdr file are used for inferring
misc channels. Default is ``'auto'``.
scale : float
The scaling factor for EEG data. Unless specified otherwise by
header file, units are in microvolts. Default scale factor is 1.
montage : str | True | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0, 0, 0). See the documentation of
:func:`mne.channels.read_montage` for more information.
Returns
-------
info : Info
The measurement info.
fmt : str
The data format in the file.
edf_info : dict
A dict containing Brain Vision specific parameters.
events : array, shape (n_events, 3)
Events from the corresponding vmrk file.
"""
scale = float(scale)
ext = os.path.splitext(vhdr_fname)[-1]
if ext != '.vhdr':
raise IOError("The header file must be given to read the data, "
"not the '%s' file." % ext)
with open(vhdr_fname, 'rb') as f:
# extract the first section to resemble a cfg
header = f.readline()
codepage = 'utf-8'
# we don't actually need to know the coding for the header line.
# the characters in it all belong to ASCII and are thus the
# same in Latin-1 and UTF-8
header = header.decode('ascii', 'ignore').strip()
_check_hdr_version(header)
settings = f.read()
try:
# if there is an explicit codepage set, use it
# we pretend like it's ascii when searching for the codepage
cp_setting = re.search('Codepage=(.+)',
settings.decode('ascii', 'ignore'),
re.IGNORECASE & re.MULTILINE)
if cp_setting:
codepage = cp_setting.group(1).strip()
settings = settings.decode(codepage)
except UnicodeDecodeError:
# if UTF-8 (new standard) or explicit codepage setting fails,
# fallback to Latin-1, which is Windows default and implicit
# standard in older recordings
settings = settings.decode('latin-1')
if settings.find('[Comment]') != -1:
params, settings = settings.split('[Comment]')
else:
params, settings = settings, ''
cfg = configparser.ConfigParser()
if hasattr(cfg, 'read_file'): # newer API
cfg.read_file(StringIO(params))
else:
cfg.readfp(StringIO(params))
# get sampling info
# Sampling interval is given in microsec
sfreq = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')
info = _empty_info(sfreq)
# check binary format
data_format = cfg.get('Common Infos', 'DataFormat')
if data_format != 'BINARY':
raise ValueError('Only data in binary format is supported. '
'Your data are in %s.' % data_format)
order = cfg.get('Common Infos', 'DataOrientation')
if order not in _orientation_dict:
raise NotImplementedError('Data Orientation %s is not supported'
% order)
order = _orientation_dict[order]
fmt = cfg.get('Binary Infos', 'BinaryFormat')
if fmt not in _fmt_dict:
raise NotImplementedError('Datatype %s is not supported' % fmt)
fmt = _fmt_dict[fmt]
# load channel labels
nchan = cfg.getint('Common Infos', 'NumberOfChannels') + 1
ch_names = [''] * nchan
cals = np.empty(nchan)
ranges = np.empty(nchan)
cals.fill(np.nan)
ch_dict = dict()
misc_chs = dict()
for chan, props in cfg.items('Channel Infos'):
n = int(re.findall(r'ch(\d+)', chan)[0]) - 1
props = props.split(',')
# default to microvolts because that's what the older brainvision
# standard explicitly assumed; the unit is only allowed to be
# something else if explicitly stated (cf. EEGLAB export below)
if len(props) < 4:
props += (u'µV',)
name, _, resolution, unit = props[:4]
ch_dict[chan] = name
ch_names[n] = name
if resolution == "":
if not(unit): # For truncated vhdrs (e.g. EEGLAB export)
resolution = 0.000001
else:
resolution = 1. # for files with units specified, but not res
unit = unit.replace(u'\xc2', u'') # Remove unwanted control characters
cals[n] = float(resolution)
ranges[n] = _unit_dict.get(unit, 1) * scale
if unit not in ('V', u'µV', 'uV'):
misc_chs[name] = (FIFF.FIFF_UNIT_CEL if unit == 'C'
else FIFF.FIFF_UNIT_NONE)
misc = list(misc_chs.keys()) if misc == 'auto' else misc
# create montage
if montage is True:
from ...transforms import _sphere_to_cartesian
from ...channels.montage import Montage
montage_pos = list()
montage_names = list()
for ch in cfg.items('Coordinates'):
montage_names.append(ch_dict[ch[0]])
radius, theta, phi = map(float, ch[1].split(','))
# 1: radius, 2: theta, 3: phi
pos = _sphere_to_cartesian(r=radius, theta=theta, phi=phi)
montage_pos.append(pos)
montage_sel = np.arange(len(montage_pos))
montage = Montage(montage_pos, montage_names, 'Brainvision',
montage_sel)
ch_names[-1] = 'STI 014'
cals[-1] = 1.
ranges[-1] = 1.
if np.isnan(cals).any():
raise RuntimeError('Missing channel units')
# Attempts to extract filtering info from header. If not found, both are
# set to zero.
settings = settings.splitlines()
idx = None
if 'Channels' in settings:
idx = settings.index('Channels')
settings = settings[idx + 1:]
hp_col, lp_col = 4, 5
for idx, setting in enumerate(settings):
if re.match('#\s+Name', setting):
break
else:
idx = None
# If software filters are active, then they override the hardware setup
# But we still want to be able to double check the channel names
# for alignment purposes, we keep track of the hardware setting idx
idx_amp = idx
if 'S o f t w a r e F i l t e r s' in settings:
idx = settings.index('S o f t w a r e F i l t e r s')
for idx, setting in enumerate(settings[idx + 1:], idx + 1):
if re.match('#\s+Low Cutoff', setting):
hp_col, lp_col = 1, 2
warn('Online software filter detected. Using software '
'filter settings and ignoring hardware values')
break
else:
idx = idx_amp
if idx:
lowpass = []
highpass = []
# extract filter units and convert s to Hz if necessary
# this cannot be done as post-processing as the inverse t-f
# relationship means that the min/max comparisons don't make sense
# unless we know the units
header = re.split('\s\s+', settings[idx])
hp_s = '[s]' in header[hp_col]
lp_s = '[s]' in header[lp_col]
for i, ch in enumerate(ch_names[:-1], 1):
line = re.split('\s\s+', settings[idx + i])
# double check alignment with channel by using the hw settings
# the actual divider is multiple spaces -- for newer BV
# files, the unit is specified for every channel separated
# by a single space, while for older files, the unit is
# specified in the column headers
if idx == idx_amp:
line_amp = line
else:
line_amp = re.split('\s\s+', settings[idx_amp + i])
assert ch in line_amp
highpass.append(line[hp_col])
lowpass.append(line[lp_col])
if len(highpass) == 0:
pass
elif len(set(highpass)) == 1:
if highpass[0] in ('NaN', 'Off'):
pass # Placeholder for future use. Highpass set in _empty_info
elif highpass[0] == 'DC':
info['highpass'] = 0.
else:
info['highpass'] = float(highpass[0])
if hp_s:
info['highpass'] = 1. / info['highpass']
else:
heterogeneous_hp_filter = True
if hp_s:
# We convert channels with disabled filters to having
# highpass relaxed / no filters
highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC')
else np.Inf for filt in highpass]
info['highpass'] = np.max(np.array(highpass, dtype=np.float))
# Conveniently enough 1 / np.Inf = 0.0, so this works for
# DC / no highpass filter
info['highpass'] = 1. / info['highpass']
# not exactly the cleanest use of FP, but this makes us
# more conservative in *not* warning.
if info['highpass'] == 0.0 and len(set(highpass)) == 1:
# not actually heterogeneous in effect
# ... just heterogeneously disabled
heterogeneous_hp_filter = False
else:
highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC')
else 0.0 for filt in highpass]
info['highpass'] = np.min(np.array(highpass, dtype=np.float))
if info['highpass'] == 0.0 and len(set(highpass)) == 1:
# not actually heterogeneous in effect
# ... just heterogeneously disabled
heterogeneous_hp_filter = False
if heterogeneous_hp_filter:
warn('Channels contain different highpass filters. '
'Lowest (weakest) filter setting (%0.2f Hz) '
'will be stored.' % info['highpass'])
if len(lowpass) == 0:
pass
elif len(set(lowpass)) == 1:
if lowpass[0] in ('NaN', 'Off'):
pass # Placeholder for future use. Lowpass set in _empty_info
else:
info['lowpass'] = float(lowpass[0])
if lp_s:
info['lowpass'] = 1. / info['lowpass']
else:
heterogeneous_lp_filter = True
if lp_s:
# We convert channels with disabled filters to having
# infinitely relaxed / no filters
lowpass = [float(filt) if filt not in ('NaN', 'Off')
else 0.0 for filt in lowpass]
info['lowpass'] = np.min(np.array(lowpass, dtype=np.float))
try:
info['lowpass'] = 1. / info['lowpass']
except ZeroDivisionError:
if len(set(lowpass)) == 1:
# No lowpass actually set for the weakest setting
# so we set lowpass to the Nyquist frequency
info['lowpass'] = info['sfreq'] / 2.
# not actually heterogeneous in effect
# ... just heterogeneously disabled
heterogeneous_lp_filter = False
else:
# no lowpass filter is the weakest filter,
# but it wasn't the only filter
pass
else:
# We convert channels with disabled filters to having
# infinitely relaxed / no filters
lowpass = [float(filt) if filt not in ('NaN', 'Off')
else np.Inf for filt in lowpass]
info['lowpass'] = np.max(np.array(lowpass, dtype=np.float))
if np.isinf(info['lowpass']):
# No lowpass actually set for the weakest setting
# so we set lowpass to the Nyquist frequency
info['lowpass'] = info['sfreq'] / 2.
if len(set(lowpass)) == 1:
# not actually heterogeneous in effect
# ... just heterogeneously disabled
heterogeneous_lp_filter = False
if heterogeneous_lp_filter:
# this isn't clean FP, but then again, we only want to provide
# the Nyquist hint when the lowpass filter was actually
# calculated from dividing the sampling frequency by 2, so the
# exact/direct comparison (instead of tolerance) makes sense
if info['lowpass'] == info['sfreq'] / 2.0:
nyquist = ', Nyquist limit'
else:
nyquist = ""
warn('Channels contain different lowpass filters. '
'Highest (weakest) filter setting (%0.2f Hz%s) '
'will be stored.' % (info['lowpass'], nyquist))
# locate EEG and marker files
path = os.path.dirname(vhdr_fname)
info['filename'] = os.path.join(path, cfg.get('Common Infos', 'DataFile'))
info['meas_date'] = int(time.time())
info['buffer_size_sec'] = 1. # reasonable default
# Creates a list of dicts of eeg channels for raw.info
logger.info('Setting channel info structure...')
info['chs'] = []
for idx, ch_name in enumerate(ch_names):
if ch_name in eog or idx in eog or idx - nchan in eog:
kind = FIFF.FIFFV_EOG_CH
coil_type = FIFF.FIFFV_COIL_NONE
unit = FIFF.FIFF_UNIT_V
elif ch_name in misc or idx in misc or idx - nchan in misc:
kind = FIFF.FIFFV_MISC_CH
coil_type = FIFF.FIFFV_COIL_NONE
if ch_name in misc_chs:
unit = misc_chs[ch_name]
else:
unit = FIFF.FIFF_UNIT_NONE
elif ch_name == 'STI 014':
kind = FIFF.FIFFV_STIM_CH
coil_type = FIFF.FIFFV_COIL_NONE
unit = FIFF.FIFF_UNIT_NONE
else:
kind = FIFF.FIFFV_EEG_CH
coil_type = FIFF.FIFFV_COIL_EEG
unit = FIFF.FIFF_UNIT_V
info['chs'].append(dict(
ch_name=ch_name, coil_type=coil_type, kind=kind, logno=idx + 1,
scanno=idx + 1, cal=cals[idx], range=ranges[idx], loc=np.zeros(12),
unit=unit, unit_mul=0., # always zero- mne manual pg. 273
coord_frame=FIFF.FIFFV_COORD_HEAD))
# for stim channel
mrk_fname = os.path.join(path, cfg.get('Common Infos', 'MarkerFile'))
info._update_redundant()
info._check_consistency()
return info, fmt, order, mrk_fname, montage
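The cals/ranges handling above relies on a common numpy.empty idiom: allocate, fill with NaN, and treat any NaN that survives as "never assigned". A standalone sketch of that idiom with made-up channel counts:
import numpy as np

n_channels = 4
cals = np.empty(n_channels)
cals.fill(np.nan)            # NaN marks channels whose resolution was never parsed

cals[[0, 2, 3]] = 1e-6       # pretend only three channels appeared in the header
if np.isnan(cals).any():
    print('Missing channel units for channels', np.flatnonzero(np.isnan(cals)))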
Example 103
Project: harold Source File: _system_funcs.py
def staircase(A,B,C,compute_T=False,form='c',invert=False,block_indices=False):
"""
The staircase form is used very often to assess system properties.
Given a state system matrix triplet A,B,C, this function computes
the so-called controller/observer-Hessenberg form such that the resulting
system matrices have the block-form (x denoting the nonzero blocks)
.. math::
\\begin{array}{c|c}
\\begin{bmatrix}
\\times & \\times & \\times & \\times & \\times \\\\
\\times & \\times & \\times & \\times & \\times \\\\
0 & \\times & \\times & \\times & \\times \\\\
0 & 0 & \\times & \\times & \\times \\\\
0 & 0 & 0 & \\times & \\times
\\end{bmatrix} &
\\begin{bmatrix}
\\times \\\\
0 \\\\
0 \\\\
0 \\\\
0
\\end{bmatrix} \\\\ \\hline
\\begin{bmatrix}
\\times & \\times & \\times & \\times & \\times \\\\
\\times & \\times & \\times & \\times & \\times
\\end{bmatrix}
\\end{array}
For controllability and observability, the existence of zero-rank
subdiagonal blocks can be checked, as opposed to forming the Kalman
matrix and checking the rank. Staircase method can numerically be
more stable since for certain matrices, A^n computations can
introduce large errors (for some A that have entries with varying
order of magnitudes). But it is also prone to numerical rank guessing
mismatches.
Notice that, if we use the pertransposed data, we obtain the observer
form; the user is usually asked to supply the data as
:math:`A,B,C \Rightarrow A^T,C^T,B^T` and then transpose the result back.
Rather than requiring that, the additional ``form`` option selects whether
the observer or the controller form is requested.
Parameters
----------
A,B,C : {(n,n),(n,m),(p,n)} array_like
System Matrices to be converted
compute_T : bool, optional
Whether the transformation matrix T should be computed or not
form : { 'c' , 'o' }, optional
Determines whether the controller- or observer-Hessenberg form
will be computed.
invert : bool, optional
Selects on which side the B or C matrix will be compressed. For
example, the default case returns the B matrix with (if any) zero
rows at the bottom. The invert option flips this choice in either the
B or the C matrix, depending on the "form" switch.
block_indices : bool, optional
Whether to also return the array of controllable/observable block
sizes identified during the block diagonalization (see ``k`` below).
Returns
-------
Ah,Bh,Ch : {(n,n),(n,m),(p,n)} 2D numpy arrays
Converted system matrices
T : (n,n) 2D numpy array
If the boolean ``compute_T`` is true, returns the transformation
matrix such that
.. math::
\\left[\\begin{array}{c|c}
T^{-1}AT &T^{-1}B \\\\ \\hline
CT & D
\\end{array}\\right]
is in the desired staircase form.
k: Numpy array
If the boolean ``block_indices`` is true, returns the array
of controllable/observable block sizes identified during block
diagonalization
"""
if not form in {'c','o'}:
raise ValueError('The "form" key can only take values'
'\"c\" or \"o\" denoting\ncontroller- or '
'observer-Hessenberg form.')
if form == 'o':
A , B , C = A.T , C.T , B.T
n = A.shape[0]
ub , sb , vb , m0 = haroldsvd(B,also_rank=True)
cble_block_indices = np.empty((1,0))
# Trivially Uncontrollable Case
# Skip the first branch of the loop by making m0 greater than n
# such that the matrices are returned as is without any computation
if m0 == 0:
m0 = n + 1
cble_block_indices = np.array([0])
# After these, start the regular case
if n > m0:# If it is not a square system with full rank B
A0 = ub.T.dot(A.dot(ub))
# print(A0)
# Row compress B and consistent zero blocks with the reported rank
B0 = sb.dot(vb)
B0[m0:,:] = 0.
C0 = C.dot(ub)
cble_block_indices = np.append(cble_block_indices,m0)
if compute_T:
P = sp.linalg.block_diag(np.eye(n-ub.T.shape[0]),ub.T)
# Since we deal with submatrices, we need to increase the
# default tolerance to reasonably high values that are
# related to the original data to get exact zeros
tol_from_A = n*sp.linalg.norm(A,1)*np.finfo(float).eps
# Region of interest
m = m0
ROI_start = 0
ROI_size = 0
for dummy_row_counter in range(A.shape[0]):
ROI_start += ROI_size
ROI_size = m
# print(ROI_start,ROI_size)
h1,h2,h3,h4 = matrix_slice(
A0[ROI_start:,ROI_start:],
(ROI_size,ROI_size)
)
uh3,sh3,vh3,m = haroldsvd(h3,also_rank=True,rank_tol = tol_from_A)
# Make sure reported rank and sh3 are consistent about zeros
sh3[ sh3 < tol_from_A ] = 0.
# If the resulting subblock is not full row or zero rank
if 0 < m < h3.shape[0]:
cble_block_indices = np.append(cble_block_indices,m)
if compute_T:
P = sp.linalg.block_diag(np.eye(n-uh3.shape[1]),uh3.T).dot(P)
A0[ROI_start:,ROI_start:] = np.r_[
np.c_[h1,h2],
np.c_[sh3.dot(vh3),uh3.T.dot(h4)]
]
A0 = A0.dot(sp.linalg.block_diag(np.eye(n-uh3.shape[1]),uh3))
C0 = C0.dot(sp.linalg.block_diag(np.eye(n-uh3.shape[1]),uh3))
# Clean up
A0[abs(A0) < tol_from_A ] = 0.
C0[abs(C0) < tol_from_A ] = 0.
elif m == h3.shape[0]:
cble_block_indices = np.append(cble_block_indices,m)
break
else:
break
if invert:
A0 = np.fliplr(np.flipud(A0))
B0 = np.flipud(B0)
C0 = np.fliplr(C0)
if compute_T:
P = np.flipud(P)
if form == 'o':
A0 , B0 , C0 = A0.T , C0.T , B0.T
if compute_T:
if block_indices:
return A0 , B0 , C0 , P.T , cble_block_indices
else:
return A0 , B0 , C0 , P.T
else:
if block_indices:
return A0 , B0 , C0 , cble_block_indices
else:
return A0 , B0 , C0
else: # Square system B full rank ==> trivially controllable
cble_block_indices = np.array([n])
if form == 'o':
A , B , C = A.T , C.T , B.T
if compute_T:
if block_indices:
return A,B,C,np.eye(n),cble_block_indices
else:
return A , B , C , np.eye(n)
else:
if block_indices:
return A , B , C , cble_block_indices
else:
return A , B , C
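A hedged usage sketch for the function above, assuming harold (and the helpers it uses, such as haroldsvd and matrix_slice) is importable; the small system matrices are made up for illustration.
import numpy as np

A = np.array([[0., 1.], [-2., -3.]])
B = np.array([[0.], [1.]])
C = np.array([[1., 0.]])

# controller-Hessenberg form, the transformation matrix, and the block sizes
Ah, Bh, Ch, T, blocks = staircase(A, B, C, compute_T=True, block_indices=True)
print(blocks)   # sizes of the controllable blocks found during the reduction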
Example 104
Project: sima Source File: extract.py
def _roi_extract(inputs):
"""ROI extract code, intended to be used by extract_rois.
Needs to be a top-level function to allow it to be used with Pools.
Parameters - a single three-element tuple, 'inputs'
----------
frame : array
An individual aligned frame from the imaging session.
constants : dict
Variables that do not change each loop and are pre-calculated to speed
up extraction. Includes demixer, mask_stack, A, masked_pixels, and
is_overlap.
Returns - a single three-element tuple
-------
values : array
n_rois length array of average pixel intensity for all pixels
in each ROI
demixed_values : array
If demixer is None, the second returned value will also be None.
Same format as values, but calculated from the demixed raw signal.
frame : array
Return back the frame as it was passed in, used for calculating a mean
image.
"""
frame, frame_idx, constants = inputs
def put_back_nans(values, imaged_rois, n_rois):
"""Puts NaNs back in output arrays for ROIs that were not imaged this
frame.
"""
value_idx = 0
roi_idx = 0
final_values = np.empty(n_rois)
while roi_idx < n_rois:
if roi_idx in imaged_rois:
final_values[roi_idx] = values[value_idx]
value_idx += 1
else:
final_values[roi_idx] = np.nan
roi_idx += 1
return final_values
n_rois = constants['A'].shape[1]
masked_frame = frame[constants['masked_pixels']]
# Determine which pixels and ROIs were imaged this frame
# If none were, just return with all NaNs
imaged_pixels = np.isfinite(masked_frame)
if not np.any(imaged_pixels):
nan_result = np.empty((n_rois, 1))
nan_result.fill(np.nan)
return (frame_idx, nan_result, nan_result)
# If there are overlapping pixels between the ROIs, calculate the full
# pseudoinverse of A, if not use a shortcut
if constants['is_overlap']:
A = constants['A'][imaged_pixels, :]
# Identify all of the rois that were imaged this frame
imaged_rois = np.unique(
constants['mask_stack'][:, imaged_pixels].nonzero()[0])
if len(imaged_rois) < n_rois:
A = A.tocsc()[:, imaged_rois].tocsr()
# First assume ROIs are independent, else fall back to full pseudo-inv
try:
weights = inv(A.T * A) * A.T
except RuntimeError:
weights = csc_matrix(np.linalg.pinv(A.todense()))
else:
orig_masks = constants['mask_stack'].copy()
imaged_masks = orig_masks[:, imaged_pixels]
imaged_rois = np.unique(imaged_masks.nonzero()[0])
if len(imaged_rois) < n_rois:
orig_masks = orig_masks.tocsr()[imaged_rois, :].tocsc()
imaged_masks = imaged_masks.tocsr()[imaged_rois, :].tocsc()
orig_masks.data **= 2
imaged_masks.data **= 2
scale_factor = old_div(
orig_masks.sum(axis=1), imaged_masks.sum(axis=1))
scale_factor = np.array(scale_factor).flatten()
weights = diags(scale_factor, 0) \
* constants['mask_stack'][imaged_rois][:, imaged_pixels]
# Extract signals
values = weights * masked_frame[imaged_pixels, np.newaxis]
weights_sums = weights.sum(axis=1)
result = values + weights_sums
if len(imaged_rois) < n_rois:
result = put_back_nans(result, imaged_rois, n_rois)
if constants['demixer'] is None:
return (frame_idx, result, None)
# Same as 'values' but with the demixed frame data
demixed_frame = masked_frame + constants['demixer']
demixed_values = weights * demixed_frame[imaged_pixels, np.newaxis]
demixed_result = demixed_values + weights_sums
if len(imaged_rois) < n_rois:
demixed_result = put_back_nans(demixed_result, imaged_rois, n_rois)
return (frame_idx, result, demixed_result)
Example 105
Project: nmrglue Source File: proc_lp.py
def lp(data, pred=1, slice=slice(None), order=8, mode="f", append="after",
bad_roots="auto", fix_mode="on", mirror=None, method="svd"):
"""
Linear prediction extrapolation of 1D or 2D data.
Parameters
----------
data : ndarray
1D or 2D NMR data with the last (-1) axis in the time domain.
pred : int
Number of points to predict along the last axis.
slice : slice object, optional
Slice object which selects the region along the last axis to use in LP
equation. The default (slice(None)) will use all points.
order : int
Prediction order, number of LP coefficients calculated.
mode : {'f', 'b', 'fb' or 'bf'}
Mode to generate LP filter. 'f' for forward,'b' for backward, fb for
'forward-backward and 'bf' for backward-forward.
append : {'before', 'after'}
Location to extend the data, either 'before' the current data, or
'after' the existing data. This is independent of the `mode` parameter.
bad_roots : {'incr', 'decr', None, 'auto'}
Type of roots to consider bad and stabilize. Options are those with
increasing signals ('incr') or decreasing signals ('decr'). None will
perform no root stabilizing. The default ('auto') will set the
parameter based on the `mode` parameter: an 'f' or 'fb' `mode`
results in an 'incr' `bad_roots` parameter, 'b' or 'bf' in 'decr'.
fix_mode : {'on', 'reflect'}
Method used to stabilize bad roots, 'on' to move the roots onto the
unit circle, 'reflect' to reflect bad roots across the unit circle.
This parameter is ignored when `bad_roots` is None.
mirror : {None, '0', '180'}
Mode to form mirror image of data before processing. None will
process the data trace as provided (no mirror image). '0' or '180'
forms a mirror image of the sliced trace to calculate the LP filter.
'0' should be used with data with no delay, '180' with data
with an initial half-point delay.
method : {'svd', 'qr', 'choleskey', 'tls'}
Method to use to calculate the LP filter. Choices are a SVD ('svd'), QR
('qr'), or Choleskey ('choleskey') decomposition, or Total Least
Squares ('tls').
Returns
-------
ndata : ndarray
NMR data with `pred` number of points linear predicted and appended to
the original data.
Notes
-----
When given 2D data a series of 1D linear predictions are made to
each row in the array, extending each by pred points. To perform a 2D
linear prediction using a 2D prediction matrix use :py:func:`lp2d`.
In forward-backward or backward-forward mode root stabilizing is done
on both sets of signal roots as calculated in the first mode direction.
After averaging the coefficients, the roots are again stabilized.
When the append parameter does not match the LP mode, for example
if a backward linear prediction (mode='b') is used to predict points
after the trace (append='after'), any root fixing is done before reversing
the filter.
"""
if data.ndim == 1:
return lp_1d(data, pred, slice, order, mode, append, bad_roots,
fix_mode, mirror, method)
elif data.ndim == 2:
# create empty array to hold output
s = list(data.shape)
s[-1] = s[-1] + pred
new = np.empty(s, dtype=data.dtype)
# vector-wise 1D LP
for i, trace in enumerate(data):
new[i] = lp_1d(trace, pred, slice, order, mode, append, bad_roots,
fix_mode, mirror, method)
return new
else:
raise ValueError("data must be a one or two dimensional array")
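The 2D branch above shows a recurring numpy.empty idiom: grow the last axis, pre-allocate the output once, then fill it trace by trace. A library-free sketch of that idiom, where simple padding stands in for lp_1d:
import numpy as np

def extend_rows(data, pred):
    # pre-allocate the output with `pred` extra points on the last axis,
    # then fill it one trace at a time (the padding stands in for lp_1d)
    s = list(data.shape)
    s[-1] = s[-1] + pred
    new = np.empty(s, dtype=data.dtype)
    for i, trace in enumerate(data):
        new[i, :trace.shape[-1]] = trace
        new[i, trace.shape[-1]:] = trace[-1]
    return new

print(extend_rows(np.arange(6.).reshape(2, 3), pred=2))   # two traces, each padded to length 5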
Example 106
def stack(signal_list, axis=None, new_axis_name='stack_element',
mmap=False, mmap_dir=None,):
"""Concatenate the signals in the list over a given axis or a new axis.
The title is set to that of the first signal in the list.
Parameters
----------
signal_list : list of BaseSignal instances
axis : {None, int, str}
If None, the signals are stacked over a new axis. The data must
have the same dimensions. Otherwise the
signals are stacked over the axis given by its integer index or
its name. The data must have the same shape, except in the dimension
corresponding to `axis`.
new_axis_name : string
The name of the new axis when `axis` is None.
If an axis with this name already exists, '-i' is automatically
appended, where `i` is an integer, until it finds a name that is
not yet in use.
mmap: bool
If True and stack is True, then the data is stored
in a memory-mapped temporary file. The memory-mapped data is
stored on disk, and not directly loaded into memory.
Memory mapping is especially useful for accessing small
fragments of large files without reading the entire file into
memory.
mmap_dir : string
If mmap_dir is not None, and stack and mmap are True, the memory
mapped file will be created in the given directory,
otherwise the default directory is used.
Returns
-------
signal : BaseSignal instance (or subclass, determined by the objects in
signal list)
Examples
--------
>>> data = np.arange(20)
>>> s = hs.stack([hs.signals.Signal1D(data[:10]),
... hs.signals.Signal1D(data[10:])])
>>> s
<Signal1D, title: Stack of , dimensions: (2, 10)>
>>> s.data
array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
"""
axis_input = copy.deepcopy(axis)
for i, obj in enumerate(signal_list):
if i == 0:
if axis is None:
original_shape = obj.data.shape
stack_shape = tuple([len(signal_list), ]) + original_shape
tempf = None
if mmap is False:
data = np.empty(stack_shape,
dtype=obj.data.dtype)
else:
tempf = tempfile.NamedTemporaryFile(
dir=mmap_dir)
data = np.memmap(tempf,
dtype=obj.data.dtype,
mode='w+',
shape=stack_shape,)
signal = type(obj)(data=data)
signal.axes_manager._axes[
1:] = copy.deepcopy(
obj.axes_manager._axes)
axis_name = new_axis_name
axis_names = [axis_.name for axis_ in
signal.axes_manager._axes[1:]]
j = 1
while axis_name in axis_names:
axis_name = new_axis_name + "_%i" % j
j += 1
eaxis = signal.axes_manager._axes[0]
eaxis.name = axis_name
eaxis.navigate = True # This triggers _update_parameters
signal.metadata = copy.deepcopy(obj.metadata)
# Get the title from 1st object
signal.metadata.General.title = (
"Stack of " + obj.metadata.General.title)
signal.original_metadata = DictionaryTreeBrowser({})
else:
axis = obj.axes_manager[axis]
signal = obj.deepcopy()
signal.original_metadata.add_node('stack_elements')
# Store parameters
signal.original_metadata.stack_elements.add_node(
'element%i' % i)
node = signal.original_metadata.stack_elements[
'element%i' % i]
node.original_metadata = \
obj.original_metadata.as_dictionary()
node.metadata = \
obj.metadata.as_dictionary()
if axis is None:
if obj.data.shape != original_shape:
raise IOError(
"Only files with data of the same shape can be stacked")
signal.data[i, ...] = obj.data
del obj
if axis is not None:
signal.data = np.concatenate([signal_.data for signal_ in signal_list],
axis=axis.index_in_array)
signal.get_dimensions_from_data()
if axis_input is None:
axis_input = signal.axes_manager[-1 + 1j].index_in_axes_manager
step_sizes = 1
else:
step_sizes = [obj.axes_manager[axis_input].size
for obj in signal_list]
signal.metadata._HyperSpy.set_item(
'Stacking_history.axis',
axis_input)
signal.metadata._HyperSpy.set_item(
'Stacking_history.step_sizes',
step_sizes)
from hyperspy.signal import BaseSignal
if np.all([s.metadata.has_item('Signal.Noise_properties.variance')
for s in signal_list]):
if np.all([isinstance(s.metadata.Signal.Noise_properties.variance, BaseSignal)
for s in signal_list]):
variance = stack(
[s.metadata.Signal.Noise_properties.variance for s in signal_list], axis)
signal.metadata.set_item(
'Signal.Noise_properties.variance',
variance)
return signal
Example 107
def get_pandas_dataframe(self, data_size, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : Integral
The total number of bins in the tally corresponding to this filter
Keyword arguments
-----------------
distribcell_paths : bool
If True (default), expand distribcell indices into multi-index
columns describing the path to that distribcell through the CSG
tree. NOTE: This option assumes that all distribcell paths are of
the same length and do not have the same universes and cells but
different lattice cell indices.
Returns
-------
pandas.DataFrame
A Pandas DataFrame with columns describing distributed cells. The
format will be either:
1. a single column with the cell instance IDs (without summary info)
2. separate columns for the cell IDs, universe IDs, and lattice IDs
and x,y,z cell indices corresponding to each (distribcell paths).
The number of rows in the DataFrame is the same as the total number
of bins in the corresponding tally, with the filter bin
appropriately tiled to map to the corresponding tally bins.
Raises
------
ImportError
When Pandas is not installed
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
import pandas as pd
df = pd.DataFrame()
level_df = None
distribcell_paths = kwargs.setdefault('distribcell_paths', True)
# Create Pandas Multi-index columns for each level in CSG tree
if distribcell_paths:
# Distribcell paths require linked metadata from the Summary
if self.distribcell_paths is None:
msg = 'Unable to construct distribcell paths since ' \
'the Summary is not linked to the StatePoint'
raise ValueError(msg)
# Make copy of array of distribcell paths to use in
# Pandas Multi-index column construction
distribcell_paths = copy.deepcopy(self.distribcell_paths)
num_offsets = len(distribcell_paths)
# Loop over CSG levels in the distribcell paths
level_counter = 0
levels_remain = True
while levels_remain:
# Use level key as first index in Pandas Multi-index column
level_counter += 1
level_key = 'level {}'.format(level_counter)
# Use the first distribcell path to determine if level
# is a universe/cell or lattice level
first_path = distribcell_paths[0]
next_index = first_path.index('-')
level = first_path[:next_index]
# Trim universe/lattice info from path
first_path = first_path[next_index+2:]
# Create a dictionary for this level for Pandas Multi-index
level_dict = OrderedDict()
# This level is a lattice (e.g., ID(x,y,z))
if '(' in level:
level_type = 'lattice'
# Initialize prefix Multi-index keys
lat_id_key = (level_key, 'lat', 'id')
lat_x_key = (level_key, 'lat', 'x')
lat_y_key = (level_key, 'lat', 'y')
lat_z_key = (level_key, 'lat', 'z')
# Allocate NumPy arrays for each CSG level and
# each Multi-index column in the DataFrame
level_dict[lat_id_key] = np.empty(num_offsets)
level_dict[lat_x_key] = np.empty(num_offsets)
level_dict[lat_y_key] = np.empty(num_offsets)
level_dict[lat_z_key] = np.empty(num_offsets)
# This level is a universe / cell (e.g., ID->ID)
else:
level_type = 'universe'
# Initialize prefix Multi-index keys
univ_key = (level_key, 'univ', 'id')
cell_key = (level_key, 'cell', 'id')
# Allocate NumPy arrays for each CSG level and
# each Multi-index column in the DataFrame
level_dict[univ_key] = np.empty(num_offsets)
level_dict[cell_key] = np.empty(num_offsets)
# Determine any levels remain in path
if '-' not in first_path:
levels_remain = False
# Populate Multi-index arrays with all distribcell paths
for i, path in enumerate(distribcell_paths):
if level_type == 'lattice':
# Extract lattice ID, indices from path
next_index = path.index('-')
lat_id_indices = path[:next_index]
# Trim lattice info from distribcell path
distribcell_paths[i] = path[next_index+2:]
# Extract the lattice cell indices from the path
i1 = lat_id_indices.index('(')
i2 = lat_id_indices.index(')')
i3 = lat_id_indices[i1+1:i2]
# Assign entry to Lattice Multi-index column
level_dict[lat_id_key][i] = path[:i1]
level_dict[lat_x_key][i] = int(i3.split(',')[0]) - 1
level_dict[lat_y_key][i] = int(i3.split(',')[1]) - 1
level_dict[lat_z_key][i] = int(i3.split(',')[2]) - 1
else:
# Extract universe ID from path
next_index = path.index('-')
universe_id = int(path[:next_index])
# Trim universe info from distribcell path
path = path[next_index+2:]
# Extract cell ID from path
if '-' in path:
next_index = path.index('-')
cell_id = int(path[:next_index])
distribcell_paths[i] = path[next_index+2:]
else:
cell_id = int(path)
distribcell_paths[i] = ''
# Assign entry to Universe, Cell Multi-index columns
level_dict[univ_key][i] = universe_id
level_dict[cell_key][i] = cell_id
# Tile the Multi-index columns
for level_key, level_bins in level_dict.items():
level_bins = np.repeat(level_bins, self.stride)
tile_factor = data_size / len(level_bins)
level_bins = np.tile(level_bins, tile_factor)
level_dict[level_key] = level_bins
# Initialize a Pandas DataFrame from the level dictionary
if level_df is None:
level_df = pd.DataFrame(level_dict)
else:
level_df = pd.concat([level_df, pd.DataFrame(level_dict)],
axis=1)
# Create DataFrame column for distribcell instance IDs
# NOTE: This is performed regardless of whether the user
# requests Summary geometric information
filter_bins = np.arange(self.num_bins)
filter_bins = np.repeat(filter_bins, self.stride)
tile_factor = data_size / len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
df = pd.DataFrame({self.short_name.lower() : filter_bins})
# If OpenCG level info DataFrame was created, concatenate
# with DataFrame of distribcell instance IDs
if level_df is not None:
level_df = level_df.dropna(axis=1, how='all')
level_df = level_df.astype(np.int)
df = pd.concat([level_df, df], axis=1)
return df
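The column construction above repeatedly stretches one filter's bins across all tally bins with np.repeat followed by np.tile. A small standalone sketch of that mapping, with made-up stride and data_size values:
import numpy as np

filter_bins = np.arange(3)       # three distribcell instances
stride = 2                       # each bin covers 2 consecutive tally bins
data_size = 12                   # total number of tally bins

bins = np.repeat(filter_bins, stride)            # [0 0 1 1 2 2]
bins = np.tile(bins, data_size // len(bins))     # [0 0 1 1 2 2 0 0 1 1 2 2]
print(bins)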
Example 108
Project: mayavi Source File: datasets.py
def generate_annulus(r=None, theta=None, z=None):
""" Generate points for structured grid for a cylindrical annular
volume. This method is useful for generating an unstructured
cylindrical mesh for VTK (and perhaps other tools).
Parameters
----------
r : array : The radial values of the grid points.
It defaults to linspace(1.0, 2.0, 11).
theta : array : The angular values of the x axis for the grid
points. It defaults to linspace(0,2*pi,11).
z: array : The values along the z axis of the grid points.
It defaults to linspace(0.0, 1.0, 11).
Return
------
points : array
Nx3 array of points that make up the volume of the annulus.
They are organized in planes starting with the first value
of z and with the inside "ring" of the plane as the first
set of points. The default point array will be 1331x3.
"""
# Default values for the annular grid.
if r is None: r = linspace(1.0,2.0, 11)
if theta is None: theta = linspace(0,2*pi,11)
if z is None: z = linspace(0.0,1.0, 11)
# Find the x values and y values for each plane.
x_plane = (cos(theta)*r[:,None]).ravel()
y_plane = (sin(theta)*r[:,None]).ravel()
# Allocate an array for all the points. We'll have len(x_plane)
# points on each plane, and we have a plane for each z value, so
# we need len(x_plane)*len(z) points.
points = empty([len(x_plane)*len(z),3])
# Loop through the points for each plane and fill them with the
# correct x,y,z values.
start = 0
for z_plane in z:
end = start+len(x_plane)
# slice out a plane of the output points and fill it
# with the x,y, and z values for this plane. The x,y
# values are the same for every plane. The z value
# is set to the current z
plane_points = points[start:end]
plane_points[:,0] = x_plane
plane_points[:,1] = y_plane
plane_points[:,2] = z_plane
start = end
return points
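A usage sketch for the generator above, matching the defaults described in the docstring; it assumes generate_annulus and the numpy names it relies on (linspace, cos, sin, empty, pi) are in scope as in the original module.
from numpy import linspace, pi

points = generate_annulus()              # default 11 x 11 x 11 grid
print(points.shape)                      # (1331, 3)

finer = generate_annulus(r=linspace(0.5, 1.5, 21),
                         theta=linspace(0, 2 * pi, 37),
                         z=linspace(0.0, 2.0, 11))
print(finer.shape)                       # (21 * 37 * 11, 3) = (8547, 3)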
Example 109
Project: flopy Source File: map.py
def plot_endpoint(self, ep, direction='ending',
selection=None, selection_direction=None, **kwargs):
"""
Plot the MODPATH endpoints.
Parameters
----------
ep : rec array
A numpy recarray with the endpoint particle data from the
MODPATH 6 endpoint file
direction : str
String defining if starting or ending particle locations should be
considered. (default is 'ending')
selection : tuple
tuple that defines the zero-based layer, row, column location
(l, r, c) to use to make a selection of particle endpoints.
The selection could be a well location to determine capture zone
for the well. If selection is None, all particle endpoints for
the user-specified direction will be plotted. (default is None)
selection_direction : str
String defining if a selection should be made on starting or
ending particle locations. If selection is not None and
selection_direction is None, the selection direction will be set
to the opposite of direction. (default is None)
kwargs : ax, c, s or size, colorbar, colorbar_label, shrink. The
remaining kwargs are passed into the matplotlib scatter
method. If colorbar is True a colorbar will be added to the plot.
If colorbar_label is passed in and colorbar is True then
colorbar_label will be passed to the colorbar set_label()
method. If shrink is passed in and colorbar is True then
the colorbar size will be set using shrink.
Returns
-------
sp : matplotlib.pyplot.scatter
"""
if direction.lower() == 'ending':
direction = 'ending'
elif direction.lower() == 'starting':
direction = 'starting'
else:
errmsg = 'flopy.map.plot_endpoint direction must be "ending" ' + \
'or "starting".'
raise Exception(errmsg)
if direction == 'starting':
xp, yp = 'x0', 'y0'
elif direction == 'ending':
xp, yp = 'x', 'y'
if selection_direction is not None:
if selection_direction.lower() != 'starting' and \
selection_direction.lower() != 'ending':
errmsg = 'flopy.map.plot_endpoint selection_direction ' + \
'must be "ending" or "starting".'
raise Exception(errmsg)
else:
if direction.lower() == 'starting':
selection_direction = 'ending'
elif direction.lower() == 'ending':
selection_direction = 'starting'
if selection is not None:
try:
k, i, j = selection[0], selection[1], selection[2]
if selection_direction.lower() == 'starting':
ksel, isel, jsel = 'k0', 'i0', 'j0'
elif selection_direction.lower() == 'ending':
ksel, isel, jsel = 'k', 'i', 'j'
except:
errmsg = 'flopy.map.plot_endpoint selection must be a ' + \
'zero-based layer, row, column tuple (l, r, c) ' + \
'of the location to evaluate (i.e., well location).'
raise Exception(errmsg)
if selection is not None:
idx = (ep[ksel] == k) & (ep[isel] == i) & (ep[jsel] == j)
tep = ep[idx]
else:
tep = ep.copy()
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = self.ax
# scatter kwargs that users may redefine
if 'c' not in kwargs:
c = tep['finaltime'] - tep['initialtime']
else:
c = np.empty((tep.shape[0]), dtype="S30")
c.fill(kwargs.pop('c'))
s = 50
if 's' in kwargs:
s = float(kwargs.pop('s')) ** 2.
elif 'size' in kwargs:
s = float(kwargs.pop('size')) ** 2.
# colorbar kwargs
createcb = False
if 'colorbar' in kwargs:
createcb = kwargs.pop('colorbar')
colorbar_label = 'Endpoint Time'
if 'colorbar_label' in kwargs:
colorbar_label = kwargs.pop('colorbar_label')
shrink = 1.
if 'shrink' in kwargs:
shrink = float(kwargs.pop('shrink'))
# rotate data
x0r, y0r = self.sr.rotate(tep[xp], tep[yp], self.sr.rotation, 0.,
self.sr.yedge[0])
x0r += self.sr.xul
y0r += self.sr.yul - self.sr.yedge[0]
# build array to plot
arr = np.vstack((x0r, y0r)).T
# plot the end point data
sp = plt.scatter(arr[:, 0], arr[:, 1], c=c, s=s, **kwargs)
# add a colorbar for travel times
if createcb:
cb = plt.colorbar(sp, shrink=shrink)
cb.set_label(colorbar_label)
return sp
Example 110
def read(fnm, in_memory, ibands=ALL, bandclass=CompressedBand):
""" Read a GeoTiff file and return a numpy array and a dictionary of header
information.
Parameters
----------
fnm : str
input datasource
in_memory : boolean
indicates whether array should be read fully into memory
ibands : int or list of ints
band number (1...)
bandclass : karta.raster.band class
if *in_memory* is `False`, use this class for band storage
Returns a list of band objects and a dictionary of metadata
"""
hdr = dict()
dataset = osgeo.gdal.Open(fnm, gc.GA_ReadOnly)
if ibands == ALL:
ibands = list(range(1, dataset.RasterCount+1))
elif not hasattr(ibands, "__iter__"):
ibands = [ibands]
try:
hdr["nx"] = dataset.RasterXSize
hdr["ny"] = dataset.RasterYSize
transform = dataset.GetGeoTransform()
if transform is not None:
hdr["dx"] = transform[1]
hdr["dy"] = transform[5]
hdr["xulcorner"] = transform[0]
hdr["yulcorner"] = transform[3]
hdr["sx"] = transform[2]
hdr["sy"] = transform[4]
else:
raise AttributeError("No GeoTransform in geotiff file")
sr = SRS_from_WKT(dataset.GetProjectionRef())
hdr["srs"] = {"proj4": sr.ExportToProj4(),
"semimajor": sr.GetSemiMajor(),
"flattening": sr.GetInvFlattening(),
"name": sr.GetAttrValue('PROJCS')}
max_dtype = 0
rasterbands = [dataset.GetRasterBand(i) for i in ibands]
hdr["nodata"] = rasterbands[0].GetNoDataValue()
nx = rasterbands[0].XSize
ny = rasterbands[0].YSize
if rasterbands[0].DataType > max_dtype:
max_dtype = rasterbands[0].DataType
if in_memory:
dtype = numpy_dtype(rasterbands[0].DataType)
bands = [bandclass((ny, nx), dtype) for _ in ibands]
for i, rb in enumerate(rasterbands):
_arr = rb.ReadAsArray(buf_obj=np.empty([ny, nx], dtype=dtype))
bands[i][:,:] = _arr.squeeze()[::-1]
else:
bands = [GdalFileBand(rb, dataset) for rb in rasterbands]
finally:
if in_memory:
dataset = None
return bands, hdr
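The in-memory branch above allocates the destination with numpy.empty and hands it to GDAL via buf_obj so the band is decoded straight into that buffer. A library-free sketch of the same pattern, with a made-up reader standing in for ReadAsArray:
import numpy as np

def read_band_into(buf):
    # stand-in for rb.ReadAsArray(buf_obj=...): fills a caller-supplied buffer in place
    buf[...] = np.arange(buf.size, dtype=buf.dtype).reshape(buf.shape)
    return buf

ny, nx = 4, 5
bands = []
for _ in range(3):
    _arr = read_band_into(np.empty((ny, nx), dtype=np.float64))
    bands.append(_arr[::-1])             # flip rows, as the reader above does
print(len(bands), bands[0].shape)        # 3 (4, 5)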
Example 111
Project: SuPyLearner Source File: core.py
def cv_superlearner(sl, X, y, K=5):
"""
Cross validate the SuperLearner sl as well as all candidates in
sl.library and print results.
Parameters
----------
sl: An object of type SuperLearner
X : numpy array of shape [n_samples,n_features]
or other object acceptable to the fit() methods
of all candidates in the library
Training data
y : numpy array of shape [n_samples]
Target values
K : Number of folds for cross-validating sl and the candidate estimators. More folds yield better
results because the training sets are closer in size to the full data set, but take longer.
Returns
-------
risks_cv: numpy array of shape [len(sl.library)]
"""
library = sl.library[:]
n=len(y)
folds=cv.KFold(n, K)
y_pred_cv = np.empty(shape=(n, len(library)+1))
for train_index, test_index in folds:
X_train, X_test=X[train_index], X[test_index]
y_train, y_test=y[train_index], y[test_index]
for aa in range(len(library)):
est=library[aa]
est.fit(X_train,y_train)
y_pred_cv[test_index, aa]=sl._get_pred(est, X_test)
sl.fit(X_train, y_train)
y_pred_cv[test_index, len(library)]=sl.predict(X_test)
risk_cv=np.empty(shape=(len(library)+1, 1))
for aa in range(len(library)+1):
#List for risk for each fold for estimator aa
risks=[]
for train_index, test_index in folds:
risks.append(sl._get_risk(y[test_index], y_pred_cv[test_index, aa]))
# Take mean across folds
risk_cv[aa]= np.mean(risks)
if sl.libnames is None:
libnames=[est.__class__.__name__ for est in sl.library]
else:
libnames=sl.libnames[:]
libnames.append("SuperLearner")
print "Cross-validated risk estimates for each estimator in the library and SuperLearner:"
print np.column_stack((libnames, risk_cv))
return risk_cv
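The cross-validation above pre-allocates an (n, len(library) + 1) prediction matrix with numpy.empty and fills it fold by fold. A minimal, library-free sketch of that bookkeeping, using a mean predictor as a stand-in estimator:
import numpy as np

n, K = 12, 3
rng = np.random.RandomState(0)
y = rng.randn(n)
folds = np.array_split(np.arange(n), K)      # stand-in for cv.KFold(n, K)

y_pred_cv = np.empty(n)                      # one column of the matrix used above
for test_index in folds:
    train_index = np.setdiff1d(np.arange(n), test_index)
    y_pred_cv[test_index] = y[train_index].mean()   # "fit" on train, "predict" on test

print(np.mean((y - y_pred_cv) ** 2))         # cross-validated squared-error risk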
Example 112
Project: edm2016 Source File: test_linear_operators.py
def test_rmatvec_nd(self):
""" Test that given an n x k linear operator and n x n matrix rmatvec_nd yields a k x k
matrix"""
def rev_index(index_map, x, output_dim):
intermediate = np.empty((output_dim, x.shape[1]))
final = np.empty((output_dim, output_dim))
for i in range(x.shape[1]):
intermediate[:, i] = np.bincount(index_map, weights=x[:, i], minlength=output_dim)
for i in range(output_dim):
final[i, :] = np.bincount(index_map, weights=intermediate[i, :],
minlength=output_dim)
return final
n = 10
x = np.random.randn(n, n)
k = np.random.randint(1, 5)
index_map = np.random.randint(k, size=n)
lin_op = undertest.IndexOperator(index_map=index_map, dim_x=k)
actual = undertest.rmatvec_nd(lin_op, x)
expected_backproj = rev_index(index_map, x, k)
np.testing.assert_array_equal(actual, expected_backproj)
# Sparse, non-diagonal
x_sp = sp.csr_matrix(x)
actual = undertest.rmatvec_nd(lin_op, x_sp)
np.testing.assert_array_equal(actual, expected_backproj)
# Sparse diagonal
x_sp_diag = sp.diags(np.diag(x), 0)
actual = undertest.rmatvec_nd(lin_op, x_sp_diag)
self.assertEqual(actual.shape, (k, k))
expected_backproj = np.diag(np.bincount(index_map, weights=np.diag(x), minlength=k))
np.testing.assert_array_equal(actual, expected_backproj)
# Non-sparse diagonal
x_diag = np.diag(np.random.randn(n))
actual = undertest.rmatvec_nd(lin_op, x_diag)
self.assertEqual(actual.shape, (k, k))
# The result should also be sparse and diagonal
expected_backproj = np.diag(np.bincount(index_map, weights=np.diag(x_diag), minlength=k))
np.testing.assert_array_equal(actual, expected_backproj)
# scalar
x = 1.3
k = 5
index_map = np.random.randint(k, size=1)
lin_op = undertest.IndexOperator(index_map=index_map, dim_x=k)
actual = undertest.rmatvec_nd(lin_op, x)
expected_backproj = np.zeros(k)
expected_backproj[index_map] = x
np.testing.assert_array_equal(actual, expected_backproj)
Example 113
Project: kabuki Source File: hierarchical.py
def sample_emcee(self, nwalkers=500, samples=10, dispersion=.1, burn=5, thin=1, stretch_width=2., anneal_stretch=True, pool=None):
import emcee
import pymc.progressbar as pbar
# This is the likelihood function for emcee
lnprob = LnProb(self)
# init
self.mcmc()
# get current values
stochs = self.get_stochastics()
start = [node_descr['node'].value for name, node_descr in stochs.iterrows()]
ndim = len(start)
def init_from_priors():
p0 = np.empty((nwalkers, ndim))
i = 0
while i != nwalkers:
self.mc.draw_from_prior()
try:
self.mc.logp
p0[i, :] = [node_descr['node'].value for name, node_descr in stochs.iterrows()]
i += 1
except pm.ZeroProbability:
continue
return p0
if hasattr(self, 'emcee_dispersions'):
scale = np.empty_like(start)
for i, (name, node_descr) in enumerate(stochs.iterrows()):
knode_name = node_descr['knode_name'].replace('_subj', '')
scale[i] = self.emcee_dispersions.get(knode_name, 0.1)
else:
scale = 0.1
p0 = np.random.randn(ndim * nwalkers).reshape((nwalkers, ndim)) * scale * dispersion + start
#p0 = init_from_priors()
# instantiate sampler passing in the pymc likelihood function
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, a=stretch_width, pool=pool)
bar = pbar.progress_bar(burn + samples)
i = 0
annealing = np.linspace(stretch_width, 2, burn)
sys.stdout.flush()
for pos, prob, state in sampler.sample(p0, iterations=burn):
if anneal_stretch:
sampler.a = annealing[i]
i += 1
bar.update(i)
#print("\nMean acceptance fraction during burn-in: {}".format(np.mean(sampler.acceptance_fraction)))
sampler.reset()
# sample
try:
for p, lnprob, lnlike in sampler.sample(pos,
iterations=samples,
thin=thin):
i += 1
bar.update(i)
except KeyboardInterrupt:
pass
finally:
print(("\nMean acceptance fraction during sampling: {}".format(np.mean(sampler.acceptance_fraction))))
# restore state
for val, (name, node_descr) in zip(start, stochs.iterrows()):
node_descr['node'].set_value(val)
# Save samples back to pymc model
self.mc.sample(1, progress_bar=False) # This call is to set up the chains
for pos, (name, node) in enumerate(stochs.iterrows()):
node['node'].trace._trace[0] = sampler.flatchain[:, pos]
return sampler
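The init_from_priors helper above fills an np.empty((nwalkers, ndim)) array one accepted draw at a time. A standalone sketch of that allocate-then-fill-accepted-rows pattern, with a made-up acceptance test standing in for the pymc ZeroProbability check:
import numpy as np

nwalkers, ndim = 8, 3
p0 = np.empty((nwalkers, ndim))
i = 0
while i != nwalkers:
    candidate = np.random.randn(ndim)         # stand-in for mc.draw_from_prior()
    if np.all(np.abs(candidate) < 2.5):       # stand-in for "self.mc.logp is finite"
        p0[i, :] = candidate
        i += 1
print(p0.shape)                               # (8, 3)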
Example 114
Project: mcedit2 Source File: schematic.py
def __init__(self, shape=None, filename=None, blocktypes='Alpha', readonly=False, resume=False):
"""
Creates an object which stores a section of a Minecraft world as an
NBT structure. The order of the coordinates for the block arrays in
the file is y,z,x. This is the same order used in Minecraft 1.4's
chunk sections.
Parameters
----------
shape: tuple of int
The shape of the schematic as (x, y, z)
filename: basestring
Path to a file to load a saved schematic from.
blocktypes: basestring or BlockTypeSet
The name of a builtin blocktypes set (one of
"Classic", "Alpha", "Pocket") to indicate allowable blocks. The default
is Alpha. An instance of BlockTypeSet may be passed instead.
Returns
----------
SchematicFileAdapter
"""
self.EntityRef = PCEntityRef
self.TileEntityRef = PCTileEntityRef
if filename is None and shape is None:
raise ValueError("shape or filename required to create %s" % self.__class__.__name__)
if filename:
self.filename = filename
if os.path.exists(filename):
rootTag = nbt.load(filename)
else:
rootTag = None
else:
self.filename = None
rootTag = None
if blocktypes in blocktypeClassesByName:
self.blocktypes = blocktypeClassesByName[blocktypes]()
else:
if not isinstance(blocktypes, BlockTypeSet):
raise ValueError("%s is not a recognized BlockTypeSet", blocktypes)
self.blocktypes = blocktypes
if rootTag:
self.rootTag = rootTag
if "Materials" in rootTag:
self.blocktypes = blocktypeClassesByName[self.Materials]()
else:
rootTag["Materials"] = nbt.TAG_String(self.blocktypes.name)
w = self.rootTag["Width"].value
l = self.rootTag["Length"].value
h = self.rootTag["Height"].value
assert self.rootTag["Blocks"].value.size == w * l * h
self._Blocks = self.rootTag["Blocks"].value.astype('uint16').reshape(h, l, w) # _Blocks is y, z, x
del self.rootTag["Blocks"]
if "AddBlocks" in self.rootTag:
# Use WorldEdit's "AddBlocks" array to load and store the 4 high bits of a block ID.
# Unlike Minecraft's NibbleArrays, this array stores the first block's bits in the
# 4 high bits of the first byte.
size = (h * l * w)
# If odd, add one to the size to make sure the adjacent slices line up.
add = numpy.empty(size + (size & 1), 'uint16')
# Fill the even bytes with data
add[::2] = self.rootTag["AddBlocks"].value
# Copy the low 4 bits to the odd bytes
add[1::2] = add[::2] & 0xf
# Shift the even bytes down
add[::2] >>= 4
# Shift every byte up before merging it with Blocks
add <<= 8
self._Blocks |= add[:size].reshape(h, l, w)
del self.rootTag["AddBlocks"]
self.rootTag["Data"].value = self.rootTag["Data"].value.reshape(h, l, w)
if "Biomes" in self.rootTag:
self.rootTag["Biomes"].value.shape = (l, w)
# If BlockIDs is present, it contains an ID->internalName mapping
# from the source level's FML tag.
if "BlockIDs" in self.rootTag:
self.blocktypes.addBlockIDsFromSchematicTag(self.rootTag["BlockIDs"])
# If itemStackVersion is present, it was exported from MCEdit 2.0.
# Its value is either 17 or 18, the values of the version constants.
# ItemIDs will also be present.
# If itemStackVersion is not present, this schematic was exported from
# WorldEdit or MCEdit 1.0. The itemStackVersion cannot be determined
# without searching the entities for an itemStack and checking
# the type of its `id` tag. If no itemStacks are found, the
# version defaults to 1.8 which does not need an ItemIDs tag.
if "itemStackVersion" in self.rootTag:
itemStackVersion = self.rootTag["itemStackVersion"].value
if itemStackVersion not in (VERSION_1_7, VERSION_1_8):
raise LevelFormatError("Unknown item stack version %d" % itemStackVersion)
if itemStackVersion == VERSION_1_7:
itemIDs = self.rootTag.get("ItemIDs")
if itemIDs is not None:
self.blocktypes.addItemIDsFromSchematicTag(itemIDs)
self.blocktypes.itemStackVersion = itemStackVersion
else:
self.blocktypes.itemStackVersion = self.getItemStackVersionFromEntities()
else:
rootTag = nbt.TAG_Compound(name="Schematic")
rootTag["Height"] = nbt.TAG_Short(shape[1])
rootTag["Length"] = nbt.TAG_Short(shape[2])
rootTag["Width"] = nbt.TAG_Short(shape[0])
rootTag["Entities"] = nbt.TAG_List()
rootTag["TileEntities"] = nbt.TAG_List()
rootTag["Materials"] = nbt.TAG_String(self.blocktypes.name)
rootTag["itemStackVersion"] = nbt.TAG_Byte(self.blocktypes.itemStackVersion)
self._Blocks = zeros((shape[1], shape[2], shape[0]), 'uint16')
rootTag["Data"] = nbt.TAG_Byte_Array(zeros((shape[1], shape[2], shape[0]), uint8))
rootTag["Biomes"] = nbt.TAG_Byte_Array(zeros((shape[2], shape[0]), uint8))
self.rootTag = rootTag
self.rootTag["BlockIDs"] = blockIDMapping(self.blocktypes)
itemMapping = itemIDMapping(self.blocktypes)
if itemMapping is not None:
self.rootTag["ItemIDs"] = itemMapping # Only present for Forge 1.7
# Expand blocks and data to chunk edges
h16 = (self.Height + 15) & ~0xf
l16 = (self.Length + 15) & ~0xf
w16 = (self.Width + 15) & ~0xf
blocks = self._Blocks
self._Blocks = numpy.zeros((h16, l16, w16), blocks.dtype)
self._Blocks[:blocks.shape[0], :blocks.shape[1], :blocks.shape[2]] = blocks
data = self.rootTag["Data"].value
self.rootTag["Data"].value = numpy.zeros((h16, l16, w16), data.dtype)
self.rootTag["Data"].value[:data.shape[0], :data.shape[1], :data.shape[2]] = data
self.rootTag["Data"].value &= 0xF # discard high bits
self.entitiesByChunk = defaultdict(list)
for tag in self.rootTag["Entities"]:
ref = self.EntityRef(tag)
pos = ref.Position
cx, cy, cz = pos.chunkPos()
self.entitiesByChunk[cx, cz].append(tag)
self.tileEntitiesByChunk = defaultdict(list)
for tag in self.rootTag["TileEntities"]:
ref = self.TileEntityRef(tag)
pos = ref.Position
cx, cy, cz = pos.chunkPos()
self.tileEntitiesByChunk[cx, cz].append(tag)
Example 115
def train_step(self, batch, discriminator=None):
"""
Runs a training step using the global loss on each of the scale networks.
@param batch: An array of shape
[c.BATCH_SIZE x self.height x self.width x (3 * (c.HIST_LEN + 1))].
The input and output frames, concatenated along the channel axis (index 3).
@param discriminator: The discriminator model. Default = None, if not adversarial.
@return: The global step.
"""
##
# Split into inputs and outputs
##
input_frames = batch[:, :, :, :-3]
gt_frames = batch[:, :, :, -3:]
##
# Train
##
feed_dict = {self.input_frames_train: input_frames, self.gt_frames_train: gt_frames}
if c.ADVERSARIAL:
# Run the generator first to get generated frames
scale_preds = self.sess.run(self.scale_preds_train, feed_dict=feed_dict)
# Run the discriminator nets on those frames to get predictions
d_feed_dict = {}
for scale_num, gen_frames in enumerate(scale_preds):
d_feed_dict[discriminator.scale_nets[scale_num].input_frames] = gen_frames
d_scale_preds = self.sess.run(discriminator.scale_preds, feed_dict=d_feed_dict)
# Add discriminator predictions to the feed_dict
for i, preds in enumerate(d_scale_preds):
feed_dict[self.d_scale_preds[i]] = preds
_, global_loss, global_psnr_error, global_sharpdiff_error, global_step, summaries = \
self.sess.run([self.train_op,
self.global_loss,
self.psnr_error_train,
self.sharpdiff_error_train,
self.global_step,
self.summaries_train],
feed_dict=feed_dict)
##
# User output
##
if global_step % c.STATS_FREQ == 0:
print 'GeneratorModel : Step ', global_step
print ' Global Loss : ', global_loss
print ' PSNR Error : ', global_psnr_error
print ' Sharpdiff Error: ', global_sharpdiff_error
if global_step % c.SUMMARY_FREQ == 0:
self.summary_writer.add_summary(summaries, global_step)
print 'GeneratorModel: saved summaries'
if global_step % c.IMG_SAVE_FREQ == 0:
print '-' * 30
print 'Saving images...'
# if not adversarial, we didn't get the preds for each scale net before for the
# discriminator prediction, so do it now
if not c.ADVERSARIAL:
scale_preds = self.sess.run(self.scale_preds_train, feed_dict=feed_dict)
# re-generate scale gt_frames to avoid having to run through TensorFlow.
scale_gts = []
for scale_num in xrange(self.num_scale_nets):
scale_factor = 1. / 2 ** ((self.num_scale_nets - 1) - scale_num)
scale_height = int(self.height_train * scale_factor)
scale_width = int(self.width_train * scale_factor)
# resize gt_output_frames for scale and append to scale_gts_train
scaled_gt_frames = np.empty([c.BATCH_SIZE, scale_height, scale_width, 3])
for i, img in enumerate(gt_frames):
# for skimage.transform.resize, images need to be in range [0, 1], so normalize
# to [0, 1] before resize and back to [-1, 1] after
sknorm_img = (img / 2) + 0.5
resized_frame = resize(sknorm_img, [scale_height, scale_width, 3])
scaled_gt_frames[i] = (resized_frame - 0.5) * 2
scale_gts.append(scaled_gt_frames)
# for every clip in the batch, save the inputs, scale preds and scale gts
for pred_num in xrange(len(input_frames)):
pred_dir = c.get_dir(os.path.join(c.IMG_SAVE_DIR, 'Step_' + str(global_step),
str(pred_num)))
# save input images
for frame_num in xrange(c.HIST_LEN):
img = input_frames[pred_num, :, :, (frame_num * 3):((frame_num + 1) * 3)]
imsave(os.path.join(pred_dir, 'input_' + str(frame_num) + '.png'), img)
# save preds and gts at each scale
# noinspection PyUnboundLocalVariable
for scale_num, scale_pred in enumerate(scale_preds):
gen_img = scale_pred[pred_num]
path = os.path.join(pred_dir, 'scale' + str(scale_num))
gt_img = scale_gts[scale_num][pred_num]
imsave(path + '_gen.png', gen_img)
imsave(path + '_gt.png', gt_img)
print 'Saved images!'
print '-' * 30
return global_step
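A minimal standalone sketch of the multi-scale ground-truth preparation used above, assuming only NumPy and skimage; the function name and the toy batch are illustrative, not part of the original project:
import numpy as np
from skimage.transform import resize

def build_scale_gts(gt_frames, num_scales):
    # Resize a batch of [-1, 1] frames to every training scale.
    batch_size, height, width, channels = gt_frames.shape
    scale_gts = []
    for scale_num in range(num_scales):
        factor = 1.0 / 2 ** ((num_scales - 1) - scale_num)
        sh, sw = int(height * factor), int(width * factor)
        scaled = np.empty([batch_size, sh, sw, channels])
        for i, img in enumerate(gt_frames):
            # skimage expects values in [0, 1]; normalize, resize, map back to [-1, 1]
            resized = resize((img / 2) + 0.5, (sh, sw, channels))
            scaled[i] = (resized - 0.5) * 2
        scale_gts.append(scaled)
    return scale_gts

frames = np.random.uniform(-1, 1, (2, 32, 32, 3))
print([g.shape for g in build_scale_gts(frames, 3)])   # 8x8, 16x16, 32x32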
0
Example 116
Project: PySnpTools Source File: test.py
def test_writes(self):
#===================================
# Defining sub functions
#===================================
def _oned_int(c):
return range(c)
def _oned_str(c):
return [str(i) for i in xrange(c)]
def _twooned_int(c):
return [[i] for i in xrange(c)]
def _twooned_str(c):
return [[str(i)] for i in xrange(c)]
def _twotwod_int(c):
return [[i,i] for i in xrange(c)]
def _twotwod_str(c):
return [[str(i),"hello"] for i in xrange(c)]
def _none(c):
return None
def _zero(c):
return np.empty([c,0])
#===================================
# Starting main function
#===================================
logging.info("starting 'test_writes'")
np.random.seed(0)
output_template = "tempdir/pstreader/writes.{0}.{1}"
create_directory_if_necessary(output_template.format(0,"npz"))
i = 0
for row_count in [5,2,1,0]:
for col_count in [4,2,1,0]:
val = np.random.normal(.5,2,size=(row_count,col_count))
for row_or_col_gen in [_oned_int,_oned_str,_twooned_int,_twooned_str,_twotwod_int,_twotwod_str]:
row = row_or_col_gen(row_count)
col = row_or_col_gen(col_count)
for prop_gen in [_oned_int,_oned_str,_twooned_int,_twooned_str,_twotwod_int,_twotwod_str,_none,_zero]:
row_prop = prop_gen(row_count)
col_prop = prop_gen(col_count)
pstdata = PstData(row,col,val,row_prop,col_prop,str(i))
for the_class,suffix in [(PstNpz,"npz"),(PstHdf5,"hdf5")]:
filename = output_template.format(i,suffix)
logging.info(filename)
i += 1
the_class.write(filename,pstdata)
for subsetter in [None, sp.s_[::2,::3]]:
reader = the_class(filename)
_fortesting_JustCheckExists().input(reader)
subreader = reader if subsetter is None else reader[subsetter[0],subsetter[1]]
readdata = subreader.read(order='C')
expected = pstdata if subsetter is None else pstdata[subsetter[0],subsetter[1]].read()
assert np.array_equal(readdata.val,expected.val)
assert np.array_equal(readdata.row,expected.row)
assert np.array_equal(readdata.col,expected.col)
assert np.array_equal(readdata.row_property,expected.row_property)
assert np.array_equal(readdata.col_property,expected.col_property)
try:
os.remove(filename)
except:
pass
logging.info("done with 'test_writes'")
0
Example 117
Project: ignition Source File: rp_shallow.py
def rp_shallow_hll_1d(q_l,q_r,aux_l,aux_r,aux_global):
r"""
HLL shallow water solver ::
W_1 = Q_hat - Q_l s_1 = min(u_l-c_l,u_l+c_l,lambda_roe_1,lambda_roe_2)
W_2 = Q_r - Q_hat s_2 = max(u_r-c_r,u_r+c_r,lambda_roe_1,lambda_roe_2)
Q_hat = ( f(q_r) - f(q_l) - s_2 * q_r + s_1 * q_l ) / (s_1 - s_2)
*aux_global* should contain:
- *g* - (float) Gravitational constant
:Version: 1.0 (2009-02-05)
"""
# Array shapes
nrp = q_l.shape[0]
meqn = 2
mwaves = 2
# Output arrays
wave = np.empty( (nrp, meqn, mwaves) )
s = np.empty( (nrp, mwaves) )
amdq = np.zeros( (nrp, meqn) )
apdq = np.zeros( (nrp, meqn) )
# Compute Roe and right and left speeds
ubar = ( (q_l[:,1]/np.sqrt(q_l[:,0]) + q_r[:,1]/np.sqrt(q_r[:,0])) /
(np.sqrt(q_l[:,0]) + np.sqrt(q_r[:,0])) )
cbar = np.sqrt(0.5 * aux_global['g'] * (q_l[:,0] + q_r[:,0]))
u_r = q_r[:,1] / q_r[:,0]
c_r = np.sqrt(aux_global['g'] * q_r[:,0])
u_l = q_l[:,1] / q_l[:,0]
c_l = np.sqrt(aux_global['g'] * q_l[:,0])
# Compute Einfeldt speeds
s_index = np.empty((nrp,4))
s_index[:,0] = ubar+cbar
s_index[:,1] = ubar-cbar
s_index[:,2] = u_l + c_l
s_index[:,3] = u_l - c_l
s[:,0] = np.min(s_index,axis=1)
s_index[:,2] = u_r + c_r
s_index[:,3] = u_r - c_r
s[:,1] = np.max(s_index,axis=1)
# Compute middle state
q_hat = np.empty((nrp,2))
q_hat[:,0] = ((q_r[:,1] - q_l[:,1] - s[:,1] * q_r[:,0]
+ s[:,0] * q_l[:,0]) / (s[:,0] - s[:,1]))
q_hat[:,1] = ((q_r[:,1]**2/q_r[:,0] + 0.5 * aux_global['g'] * q_r[:,0]**2
- (q_l[:,1]**2/q_l[:,0] + 0.5 * aux_global['g'] * q_l[:,0]**2)
- s[:,1] * q_r[:,1] + s[:,0] * q_l[:,1]) / (s[:,0] - s[:,1]))
# Compute each family of waves
wave[:,:,0] = q_hat - q_l
wave[:,:,1] = q_r - q_hat
# Compute variations
s_index = np.zeros((nrp,2))
for m in xrange(meqn):
for mw in xrange(mwaves):
s_index[:,0] = s[:,mw]
amdq[:,m] += np.min(s_index,axis=1) * wave[:,m,mw]
apdq[:,m] += np.max(s_index,axis=1) * wave[:,m,mw]
return wave, s, amdq, apdq
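A hypothetical driver for the solver above, evaluating a single dam-break Riemann problem (deeper water on the left); it assumes `rp_shallow_hll_1d` is importable and that the module runs under Python 2, since the solver uses `xrange`:
import numpy as np

q_l = np.array([[2.0, 0.0]])     # (depth, momentum) left of the interface
q_r = np.array([[1.0, 0.0]])     # (depth, momentum) right of the interface
aux_global = {'g': 1.0}          # gravitational constant

wave, s, amdq, apdq = rp_shallow_hll_1d(q_l, q_r, None, None, aux_global)
print(s)             # left-going and right-going Einfeldt speeds
print(amdq, apdq)    # fluctuations entering the left and right cells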
0
Example 118
Project: Arelle Source File: TkTableWrapper.py
def sample_test():
from tkinter import Tk, Scrollbar, N, S, W, E, ttk
import numpy
def test_cmd(event):
if event.i == 0:
return arr[event.r, event.c]
else:
arr[event.r, event.c] = event.S
return 'set'
def browsecmd(event):
print("event:", event.__dict__)
print("curselection:", test.curselection())
print("active cell index:", test.index('active'))
activeRow = int(test.index('active', 'row'))
activeCol = int(test.index('active', 'col'))
print("active:", activeRow)
print("anchor:", test.index('anchor', 'row'))
# the following line is operational, it shows that it is possible
# to modify the content of a cell programmatically.
# var[test.index('active')] = 'toto'
def comboValueChanged(event):
combobox = event.widget
print('Selected value in combobox: '+combobox.get())
root = Tk()
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
numrows, numcols = 6250,40
#Using ArrayVar consumes double as much memory as NumPy+command
#var = ArrayVar(root)
#for y in range(0, numrows):
# for x in range(0, numcols):
# index = "%i,%i" % (y, x)
# var[index] = index
arr = numpy.empty((numrows+2, numcols), dtype=object)
for y in range(numrows):
for x in range(numcols):
arr[y, x] = "%i,%i" % (y, x)
test = Table(root,
rows=numrows,
cols=numcols,
state='normal',
width=6,
height=6,
titlerows=2,
titlecols=2,
roworigin=0,
colorigin=0,
selectmode='extended',
selecttype='cell',
rowstretch='last',
colstretch='last',
rowheight=-26,
colwidth=15,
browsecmd=browsecmd,
flashmode='off',
anchor='e',
#variable=var,
usecommand=1,
background='#fffffffff',
relief='sunken',
command=test_cmd,
takefocus=False,
rowseparator='\n'
# drawmode='slow'
)
# http://effbot.org/zone/tkinter-scrollbar-patterns.htm
verticalScrollbar = Scrollbar(root, orient='vertical', command=test.yview_scroll)
horizontalScrollbar = Scrollbar(root, orient='horizontal', command=test.xview_scroll)
test.config(xscrollcommand=horizontalScrollbar.set, yscrollcommand=verticalScrollbar.set)
verticalScrollbar.grid(column="1", row='0', sticky=(N, S))
horizontalScrollbar.grid(column="0", row='1', sticky=(W, E))
kwargs = {'4,5': '5,3', '0,0': '0,1'}
test.spans(index=None, **kwargs)
for y in range(2, numrows):
for x in range(2, numcols):
if (x%2==0 and y%2==1) or (x%2==1 and y%2==0):
index = "%i,%i" % (y, x)
test.tag_cell('disabled', index)
for x in range(2, numcols):
index = "%i,%i" % (0, x)
test.tag_cell('border', index)
index = "%i,%i" % (1, x)
test.tag_cell('border', index)
for y in range(1, numrows):
index = "%i,%i" % (y, 0)
test.tag_cell('border-left-right', index)
index = "%i,%i" % (y, 1)
test.tag_cell('border', index)
for y in range(2, numrows):
cities = ('Brussels', 'Luxembourg', 'Strasbourg', 'Trier', 'Rome')
combobox = ttk.Combobox(test, values=cities, state='readonly')
test.window_configure('%i,9'%y, window=combobox, sticky=(N, E, S, W))
combobox.bind(sequence='<<ComboboxSelected>>',
func=comboValueChanged,
add='+')
def cellRight(event, *args):
widget = event.widget
titleRows = int(widget.cget('titlerows'))
titleCols = int(widget.cget('titlecols'))
top = int(widget.cget('roworigin'))+titleRows
left = int(widget.cget('colorigin'))+titleCols
try:
col = int(widget.index('active', 'col'))
row = int(widget.index('active', 'row'))
except (tkinter.TclError):
row, col = top-1, left
maxRows = int(widget.cget('rows'))
maxCols = int(widget.cget('cols'))
widget.selection_clear('all')
if row<top:
if (col<left):
index = '%i,%i'% (top, left)
else:
index = '%i,%i'% (top, col)
widget.activate(index)
widget.see(index)
widget.selection_set(index)
elif col<left:
index = '%i,%i'% (row, left)
widget.activate(index)
widget.see(index)
widget.selection_set(index)
elif col<maxCols-titleCols-1:
widget.moveCell(0, 1)
elif row<maxRows-titleCols-1:
widget.moveCell(1, left-col)
else:
widget.moveCell(top-row, left-col)
def cellDown(event, *args):
widget = event.widget
titleRows = int(widget.cget('titlerows'))
titleCols = int(widget.cget('titlecols'))
top = int(widget.cget('roworigin'))+titleRows
left = int(widget.cget('colorigin'))+titleCols
try:
col = int(widget.index('active', 'col'))
row = int(widget.index('active', 'row'))
except (tkinter.TclError):
row, col = top-1, left
maxRows = int(widget.cget('rows'))
maxCols = int(widget.cget('cols'))
widget.selection_clear('all')
if row<top:
if (col<left):
index = '%i,%i'% (top, left)
else:
index = '%i,%i'% (top, col)
widget.activate(index)
widget.see(index)
widget.selection_set(index)
elif col<left:
index = '%i,%i'% (row, left)
widget.activate(index)
widget.see(index)
widget.selection_set(index)
elif row<maxRows-titleRows-1:
widget.moveCell(1, 0)
elif col<maxCols-titleCols-1:
widget.moveCell(top-row, 1)
else:
widget.moveCell(top-row, left-col)
return 'break' # do not insert return in cell content
menu = test.contextMenu()
menu.add_command(label="Quit", underline=0, command=root.destroy)
test.bind("<Tab>", func=cellRight, add="+")
test.bind("<Return>", func=cellDown)
test.tag_cell('border-left-top', "-2,-2")
test.tag_raise('border-left-top', abovethis='title')
test.tag_raise('border-left-right', abovethis='title')
test.tag_raise('border', abovethis='title')
test.grid(column="0", row='0', sticky=(N, W, S, E))
test.tag_configure('sel', background = '#000400400')
test.tag_configure('active', background = '#000a00a00')
test.tag_configure('title', anchor='w', bg='#d00d00d00', fg='#000000000', relief='flat')
test.tag_configure('disabled', bg='#d00d00d00', fg='#000000000', relief='flat', state='disabled')
test.tag_configure('border-left-top', relief='ridge', borderwidth=(1,0,1,0))
test.tag_configure('border-left-right', relief='ridge', borderwidth=(1,1,0,0))
test.tag_configure('border', relief='ridge', borderwidth=(1,1,1,1))
data = ('py','t','h','o','n','','+','','Tk','')
def add_new_data(*args):
#test.config(state='normal')
test.insert_rows('end', 1)
r = test.index('end').split(',')[0] #get row number <str>
args = (r,) + args
idx = r + ',1'
test.set('row', idx, *args)
test.see(idx)
#test.config(state='disabled')
root.after(3000, add_new_data, *data)
root.after(4000, add_new_data, *data)
root.mainloop()
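The backing store above is the main `numpy.empty` usage in this example; a reduced sketch of the object-dtype allocation pattern on its own:
import numpy as np

numrows, numcols = 4, 3
arr = np.empty((numrows, numcols), dtype=object)   # grid of Python references (all None)
for y in range(numrows):
    for x in range(numcols):
        arr[y, x] = "%i,%i" % (y, x)
arr[0, 0] = 3.14          # cells are not restricted to a single type
print(arr[2, 1], arr[0, 0])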
0
Example 119
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
# im: input image
# minsize: minimum of faces' size
# pnet, rnet, onet: caffemodel
# threshold: threshold=[th1 th2 th3], th1-3 are three steps's threshold
# fastresize: resize img from last scale (useful for high-resolution images) if fastresize==true
factor_count=0
total_boxes=np.empty((0,9))
points=[]
h=img.shape[0]
w=img.shape[1]
minl=np.amin([h, w])
m=12.0/minsize
minl=minl*m
# create scale pyramid
scales=[]
while minl>=12:
scales += [m*np.power(factor, factor_count)]
minl = minl*factor
factor_count += 1
# first stage
for j in range(len(scales)):
scale=scales[j]
hs=int(np.ceil(h*scale))
ws=int(np.ceil(w*scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data-127.5)*0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0,2,1,3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0,2,1,3))
out1 = np.transpose(out[1], (0,2,1,3))
boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size>0 and pick.size>0:
boxes = boxes[pick,:]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox>0:
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick,:]
regw = total_boxes[:,2]-total_boxes[:,0]
regh = total_boxes[:,3]-total_boxes[:,1]
qq1 = total_boxes[:,0]+total_boxes[:,5]*regw
qq2 = total_boxes[:,1]+total_boxes[:,6]*regh
qq3 = total_boxes[:,2]+total_boxes[:,7]*regw
qq4 = total_boxes[:,3]+total_boxes[:,8]*regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:,4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:,0:4] = np.fix(total_boxes[:,0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox>0:
# second stage
tempimg = np.zeros((24,24,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (24, 24))
else:
return np.empty()
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1,:]
ipass = np.where(score>threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
if total_boxes.shape[0]>0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick,:]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:,pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox>0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48,48,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (48, 48))
else:
return np.empty()
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1,:]
points = out1
ipass = np.where(score>threshold[2])
points = points[:,ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
w = total_boxes[:,2]-total_boxes[:,0]+1
h = total_boxes[:,3]-total_boxes[:,1]+1
points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1
points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1
if total_boxes.shape[0]>0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick,:]
points = points[:,pick]
return total_boxes, points
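The scale-pyramid loop at the start of `detect_face` can be read in isolation; a small self-contained sketch with illustrative image dimensions:
import numpy as np

def build_scale_pyramid(h, w, minsize=20, factor=0.709):
    # Resize factors used by the first (P-Net) stage, shrinking until the
    # shorter image side would fall below the 12-pixel network input.
    m = 12.0 / minsize
    minl = np.amin([h, w]) * m
    scales = []
    factor_count = 0
    while minl >= 12:
        scales.append(m * np.power(factor, factor_count))
        minl *= factor
        factor_count += 1
    return scales

print(build_scale_pyramid(480, 640))   # largest scale first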
0
Example 120
Project: flopy Source File: utils.py
def util3d_helper(f, u3d, **kwargs):
""" export helper for Transient2d instances
Parameters
-----------
f : string (filename) or existing export instance type (NetCdf only for now)
u3d : Util3d instance
min_valid : minimum valid value
max_valid : maximum valid value
"""
assert isinstance(u3d, Util3d), "util3d_helper only helps Util3d instances"
assert len(u3d.shape) == 3, "util3d_helper only supports 3D arrays"
min_valid = kwargs.get("min_valid", -1.0e+9)
max_valid = kwargs.get("max_valid", 1.0e+9)
if isinstance(f, str) and f.lower().endswith(".nc"):
f = NetCdf(f, u3d.model)
if isinstance(f, str) and f.lower().endswith(".shp"):
array_dict = {}
for ilay in range(u3d.model.nlay):
u2d = u3d[ilay]
name = '{}_{:03d}'.format(shapefile_utils.shape_attr_name(u2d.name), ilay + 1)
array_dict[name] = u2d.array
shapefile_utils.write_grid_shapefile(f, u3d.model.sr,
array_dict)
elif isinstance(f, NetCdf) or isinstance(f,dict):
var_name = u3d.name[0].replace(' ', '_').lower()
#f.log("getting 3D array for {0}".format(var_name))
array = u3d.array
# this is for the crappy vcont in bcf6
# if isinstance(f,NetCdf) and array.shape != f.shape:
# f.log("broadcasting 3D array for {0}".format(var_name))
# full_array = np.empty(f.shape)
# full_array[:] = np.NaN
# full_array[:array.shape[0]] = array
# array = full_array
# f.log("broadcasting 3D array for {0}".format(var_name))
#f.log("getting 3D array for {0}".format(var_name))
#
mask = None
if u3d.model.bas6 is not None and "ibound" not in var_name:
mask = u3d.model.bas6.ibound.array == 0
elif u3d.model.btn is not None and 'icbund' not in var_name:
mask = u3d.model.btn.icbund.array == 0
if mask is not None and array.shape != mask.shape:
#f.log("broadcasting 3D array for {0}".format(var_name))
full_array = np.empty(mask.shape)
full_array[:] = np.NaN
full_array[:array.shape[0]] = array
array = full_array
#f.log("broadcasting 3D array for {0}".format(var_name))
# runtime warning issued in some cases - need to track down cause
# happens when NaN is already in array
with np.errstate(invalid="ignore"):
if array.dtype not in [int,np.int,np.int32,np.int64]:
#if u3d.model.bas6 is not None and "ibound" not in var_name:
# array[u3d.model.bas6.ibound.array == 0] = np.NaN
#elif u3d.model.btn is not None and 'icbund' not in var_name:
# array[u3d.model.btn.icbund.array == 0] = np.NaN
if mask is not None:
array[mask] = np.NaN
array[array <= min_valid] = np.NaN
array[array >= max_valid] = np.NaN
mx, mn = np.nanmax(array), np.nanmin(array)
else:
mx, mn = np.nanmax(array), np.nanmin(array)
if mask is not None:
array[mask] = netcdf.FILLVALUE
array[array <= min_valid] = netcdf.FILLVALUE
array[array >= max_valid] = netcdf.FILLVALUE
if u3d.model.bas6 is not None and "ibound" not in var_name:
array[u3d.model.bas6.ibound.array == 0] = netcdf.FILLVALUE
elif u3d.model.btn is not None and 'icbund' not in var_name:
array[u3d.model.btn.icbund.array == 0] = netcdf.FILLVALUE
if isinstance(f,dict):
f[var_name] = array
return f
array[np.isnan(array)] = f.fillvalue
units = "unitless"
if var_name in NC_UNITS_FORMAT:
units = NC_UNITS_FORMAT[var_name].format(f.grid_units, f.time_units)
precision_str = NC_PRECISION_TYPE[u3d.dtype]
if var_name in NC_LONG_NAMES:
attribs = {"long_name": NC_LONG_NAMES[var_name]}
else:
attribs = {"long_name": var_name}
attribs["coordinates"] = "layer latitude longitude"
attribs["units"] = units
attribs["min"] = mn
attribs["max"] = mx
try:
var = f.create_variable(var_name, attribs, precision_str=precision_str,
dimensions=("layer", "y", "x"))
except Exception as e:
estr = "error creating variable {0}:\n{1}".format(var_name, str(e))
f.logger.warn(estr)
raise Exception(estr)
try:
var[:] = array
except Exception as e:
estr = "error setting array to variable {0}:\n{1}".format(var_name, str(e))
f.logger.warn(estr)
raise Exception(estr)
return f
else:
raise NotImplementedError("unrecognized export argument:{0}".format(f))
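The masking branch above pads a short 3D array up to the full model shape by dropping it into a NaN-filled buffer; the same pattern in isolation, with made-up shapes:
import numpy as np

array = np.arange(2 * 3 * 4, dtype=float).reshape(2, 3, 4)   # only 2 layers present
full_shape = (5, 3, 4)                                       # model expects 5 layers

full_array = np.empty(full_shape)
full_array[:] = np.nan
full_array[:array.shape[0]] = array
print(np.isnan(full_array).sum())   # 3 missing layers * 3 * 4 = 36 NaN cells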
0
Example 121
Project: veusz Source File: expression.py
def evalDataset(self):
"""Return the evaluated dataset."""
# FIXME: handle irregular grids
# return cached data if document unchanged
if self.document.changeset == self.lastchangeset:
return self.cacheddata
self.lastchangeset = self.document.changeset
self.cacheddata = None
evaluated = {}
environment = self.document.evaluate.context.copy()
environment['_DS_'] = self.evaluateDataset
# evaluate the x, y and z expressions
for name in ('exprx', 'expry', 'exprz'):
origexpr = getattr(self, name)
expr = substituteDatasets(self.document.data, origexpr, 'data')[0]
comp = self.document.evaluate.compileCheckedExpression(
expr, origexpr=origexpr)
if comp is None:
return None
try:
evaluated[name] = eval(comp, environment)
except Exception as e:
self.document.log(_("Error evaluating expression: %s\n"
"Error: %s") % (expr, cstr(e)) )
return None
minx, maxx, stepx, stepsx = getSpacing(evaluated['exprx'])
miny, maxy, stepy, stepsy = getSpacing(evaluated['expry'])
# update cached x and y ranges
self._xrange = (minx-stepx*0.5, maxx+stepx*0.5)
self._yrange = (miny-stepy*0.5, maxy+stepy*0.5)
self.cacheddata = N.empty( (stepsy, stepsx) )
self.cacheddata[:,:] = N.nan
xpts = ((1./stepx)*(evaluated['exprx']-minx)).astype('int32')
ypts = ((1./stepy)*(evaluated['expry']-miny)).astype('int32')
# this is ugly - is this really the way to do it?
try:
self.cacheddata.flat [ xpts + ypts*stepsx ] = evaluated['exprz']
except Exception as e:
self.document.log(_("Shape mismatch when constructing dataset\n"
"Error: %s") % cstr(e) )
return None
return self.cacheddata
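The scatter-to-grid step above reduces to flat indexing into a NaN-initialised grid; a self-contained sketch with uniform spacing (the quantities `getSpacing` would normally provide are hard-coded here):
import numpy as np

x = np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])
y = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])
z = np.arange(6, dtype=float)

minx, stepx, stepsx = 0.0, 1.0, 3
miny, stepy, stepsy = 0.0, 1.0, 2

grid = np.empty((stepsy, stepsx))
grid[:, :] = np.nan
xpts = ((x - minx) / stepx).astype('int32')
ypts = ((y - miny) / stepy).astype('int32')
grid.flat[xpts + ypts * stepsx] = z
print(grid)   # [[0. 1. 2.] [3. 4. 5.]]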
0
Example 122
Project: pyParticleEst Source File: smoother.py
def perform_mhbp(self, pt, M, R, reduced=False):
"""
Create smoothed trajectories using the Metropolis-Hastings Backward Proposer
Args:
- pt (ParticleTrajectory): forward trajectories
- M (int): number of trajectories to create
- R (int): number of proposals for each time step
"""
T = len(pt)
ut = self.u
yt = self.y
tt = self.t
straj = numpy.empty((T,), dtype=object)
# Initialise from end time estimates
tmp = numpy.copy(pt[-1].pa.w)
tmp -= numpy.max(tmp)
tmp = numpy.exp(tmp)
tmp = tmp / numpy.sum(tmp)
cind = pf.sample(tmp, M)
find = numpy.arange(M, dtype=int)
# anc = pt[-1].ancestors[cind]
# last_part = self.model.sample_smooth(part=pt[-1].pa.part[cind],
# ptraj=pt[:-1],
# anc=anc,
# future_trajs=None,
# find=find,
# ut=ut, yt=yt, tt=tt,
# cur_ind=T - 1)
for t in reversed(xrange(T)):
# Initialise from filtered estimate
if (t < T - 1):
ft = straj[(t + 1):]
else:
ft = None
# Initialize with filtered estimates
pnew = pt[t].pa.part[cind]
if (t > 0):
anc = pt[t].ancestors[cind]
tmp = numpy.copy(pt[t - 1].pa.w)
tmp -= numpy.max(tmp)
tmp = numpy.exp(tmp)
tmp = tmp / numpy.sum(tmp)
ptraj = pt[:t]
else:
ptraj = None
for _ in xrange(R):
if (t > 0):
# Propose new ancestors
panc = pf.sample(tmp, M)
(pnew, acc) = mc_step(model=self.model,
part=pnew,
ptraj=ptraj,
pind_prop=panc,
pind_curr=anc,
future_trajs=ft,
find=find,
ut=ut,
yt=yt,
tt=tt,
cur_ind=t,
reduced=reduced)
anc[acc] = panc[acc]
fpart = self.model.sample_smooth(part=pnew,
ptraj=ptraj,
anc=anc,
future_trajs=ft,
find=find,
ut=ut, yt=yt, tt=tt,
cur_ind=t)
straj[t] = TrajectoryStep(ParticleApproximation(fpart))
cind = anc
self.traj = straj
if hasattr(self.model, 'post_smoothing'):
# Do e.g. constrained smoothing for RBPS models
self.traj = self.model.post_smoothing(self)
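The log-weight normalisation that starts each backward pass above is a small reusable step; in isolation, with illustrative log-weights:
import numpy as np

log_w = np.array([-1200.0, -1201.0, -1199.5])
tmp = log_w - np.max(log_w)    # shift so the largest log-weight becomes 0
tmp = np.exp(tmp)
probs = tmp / np.sum(tmp)
print(probs, probs.sum())      # stable probabilities despite tiny raw weights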
0
Example 123
Project: tract_querier Source File: tensor_operations.py
def compute_all_measures(tractography, desired_keys_list, scalars=None, resolution=None):
unordered_results = dict()
if ('number of tracts' in desired_keys_list):
unordered_results['number of tracts'] = tract_operations.tract_count(
tractography.tracts())
if ('length mean (mm)' in desired_keys_list) or ('length std (mm^2)' in desired_keys_list):
lengths = numpy.empty(len(tractography.tracts()))
for i, one_tract in enumerate(tractography.tracts()):
lengths[i] = tract_operations.tract_length(one_tract)
unordered_results['length mean (mm)'] = lengths.mean()
unordered_results['length std (mm^2)'] = lengths.std()
if ('tract volume' in desired_keys_list) and (resolution is not None):
resolution = float(resolution)
voxels = tract_operations.voxelized_tract(tractography, resolution)
neighbors = numpy.array([
[0, 1, 0],
[0, -1, 0],
[1, 0, 0],
[-1, 0, 0],
[0, 0, 1],
[0, 0, -1]
])
dilated_voxels = set()
dilated_voxels.update(voxels)
eroded_voxels = set()
for voxel in voxels:
neighbors_list = zip(*(neighbors + voxel).T)
dilated_voxels.update(neighbors_list)
if len(voxels.intersection(neighbors_list)) == len(neighbors):
eroded_voxels.add(voxel)
# print len(dilated_voxels), len(voxels), len(eroded_voxels)
approx_voxels = (len(dilated_voxels) - len(eroded_voxels)) / 2.
approx_volume = approx_voxels * (resolution ** 3)
unordered_results['tract volume'] = approx_volume
if ('per tract distance weighted mean %s' in desired_keys_list ) or \
('per tract distance weighted std %s' in desired_keys_list):
mean_keys_list = list()
std_keys_list = list()
for scalar in scalars:
mean_key = 'per tract distance weighted mean %s' % scalar
std_key = 'per tract distance weighted std %s' % scalar
mean_keys_list.append(mean_key)
std_keys_list.append(std_key)
scalars = tractography.tracts_data()[scalar]
weighted_scalars = numpy.empty((len(tractography.tracts()), 2))
for line_index, t_data in enumerate(tractography.tracts()):
tdiff = numpy.sqrt((numpy.diff(t_data, axis=0) ** 2).sum(-1))
length = tdiff.sum()
values = scalars[line_index][1:].squeeze()
average = numpy.average(values, weights=tdiff)
weighted_scalars[line_index, 0] = average
weighted_scalars[line_index, 1] = length
mean = numpy.average(
weighted_scalars[:, 0], weights=weighted_scalars[:, 1])
std = numpy.average(
(weighted_scalars[:, 0] - mean) ** 2, weights=weighted_scalars[:, 1])
unordered_results[mean_key] = mean
unordered_results[std_key] = std
mii = desired_keys_list.index('per tract distance weighted mean %s')
desired_keys_list[mii:mii + 1] = mean_keys_list
sii = desired_keys_list.index('per tract distance weighted std %s')
desired_keys_list[sii:sii + 1] = std_keys_list
# Make Ordered Dictionary
ordered_dict = OrderedDict()
for key in desired_keys_list:
ordered_dict[key] = unordered_results[key]
return ordered_dict
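A reduced sketch of the distance-weighted statistics computed above: each segment's scalar value is weighted by the segment length. Points and values are made up; the square root at the end turns the weighted second moment into a weighted standard deviation.
import numpy as np

points = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0],
                   [1.0, 2.0, 0.0]])
values = np.array([10.0, 12.0, 20.0])

seg_len = np.sqrt((np.diff(points, axis=0) ** 2).sum(-1))   # segment lengths [1.0, 2.0]
mean = np.average(values[1:], weights=seg_len)
var = np.average((values[1:] - mean) ** 2, weights=seg_len)
print(mean, np.sqrt(var))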
0
Example 124
Project: sphere Source File: visualize-rs0.py
def rateStatePlot(sid):
matplotlib.rcParams.update({'font.size': 7, 'font.family': 'sans-serif'})
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]
rasterized = True # rasterize colored areas (pcolormesh and colorbar)
# izfactor = 4 # factor for vertical discretization in xvel
izfactor = 1 # factor for vertical discretization in poros
#############
# DATA READ #
#############
sim = sphere.sim(sid, fluid=False)
sim.readfirst()
# nsteps = 2
# nsteps = 10
# nsteps = 400
nsteps = sim.status()
t = numpy.empty(nsteps)
# Stress, pressure and friction
sigma_def = numpy.empty_like(t)
sigma_eff = numpy.empty_like(t)
tau_def = numpy.empty_like(t)
tau_eff = numpy.empty_like(t)
p_f_bar = numpy.empty_like(t)
p_f_top = numpy.empty_like(t)
mu = numpy.empty_like(t) # shear friction
# shear velocity plot
v = numpy.empty_like(t) # velocity
I = numpy.empty_like(t) # inertia number
# displacement and mean porosity plot
xdisp = numpy.empty_like(t)
phi_bar = numpy.empty_like(t)
# mean horizontal porosity plot
poros = numpy.empty((sim.num[2], nsteps))
xvel = numpy.zeros((sim.num[2]*izfactor, nsteps))
zpos_c = numpy.empty(sim.num[2]*izfactor)
dz = sim.L[2]/(sim.num[2]*izfactor)
for i in numpy.arange(sim.num[2]*izfactor):
zpos_c[i] = i*dz + 0.5*dz
# Contact statistics plot
n = numpy.empty(nsteps)
coordinationnumber = numpy.empty(nsteps)
nkept = numpy.empty(nsteps)
for i in numpy.arange(nsteps):
sim.readstep(i+1, verbose=verbose) # use step 1 to n
t[i] = sim.currentTime()
sigma_def[i] = sim.currentNormalStress('defined')
sigma_eff[i] = sim.currentNormalStress('effective')
tau_def[i] = sim.shearStress('defined')
tau_eff[i] = sim.shearStress('effective')
mu[i] = tau_eff[i]/sigma_eff[i]
#mu[i] = tau_eff[i]/sigma_def[i]
I[i] = sim.inertiaParameterPlanarShear()
v[i] = sim.shearVelocity()
xdisp[i] = sim.shearDisplacement()
#poros[:, i] = numpy.average(numpy.average(sim.phi, axis=0), axis=0)
# calculate mean values of xvel
dz = sim.L[2]/(sim.num[2]*izfactor)
for iz in numpy.arange(sim.num[2]*izfactor):
z_bot = iz*dz
z_top = (iz+1)*dz
idx = numpy.nonzero((sim.x[:, 2] >= z_bot) & (sim.x[:, 2] < z_top))
#import ipdb; ipdb.set_trace()
if idx[0].size > 0:
# xvel[iz,i] = numpy.mean(numpy.abs(sim.vel[I,0]))
# xvel[iz, i] = numpy.mean(sim.vel[idx, 0])
xvel[iz, i] = numpy.mean(sim.vel[idx, 0])/sim.shearVelocity()
loaded_contacts = 0
if calculateforcechains:
if i > 0 and calculateforcechainhistory:
loaded_contacts_prev = numpy.copy(loaded_contacts)
pairs_prev = numpy.copy(sim.pairs)
loaded_contacts = sim.findLoadedContacts(
threshold=sim.currentNormalStress()*2.)
# sim.currentNormalStress()/1000.)
n[i] = numpy.size(loaded_contacts)
sim.findCoordinationNumber()
coordinationnumber[i] = sim.findMeanCoordinationNumber()
if calculateforcechainhistory:
nfound = 0
if i > 0:
for a in loaded_contacts[0]:
for b in loaded_contacts_prev[0]:
if (sim.pairs[:, a] == pairs_prev[:, b]).all():
nfound += 1
nkept[i] = nfound
print nfound
print coordinationnumber[i]
if calculateforcechains:
numpy.savetxt(sid + '-fc.txt', (n, nkept, coordinationnumber))
else:
if plotforcechains:
n, nkept, coordinationnumber = numpy.loadtxt(sid + '-fc.txt')
# Transform time from model time to real time [s]
#t = t/t_DEM_to_t_real
# integrate velocities to displacement along x (xdispint)
# Taylor two term expansion
xdispint = numpy.zeros_like(t)
v_limit = 2.78e-3 # 1 m/hour (WIP)
dt = (t[1] - t[0])
dt2 = dt*2.
for i in numpy.arange(t.size):
if i > 0 and i < t.size-1:
acc = (numpy.min([v[i+1], v_limit]) - numpy.min([v[i-1], v_limit]))/dt2
xdispint[i] = xdispint[i-1] +\
numpy.min([v[i], v_limit])*dt + 0.5*acc*dt**2
elif i == t.size-1:
xdispint[i] = xdispint[i-1] + numpy.min([v[i], v_limit])*dt
############
# PLOTTING #
############
bbox_x = 0.03
bbox_y = 0.96
verticalalignment = 'top'
horizontalalignment = 'left'
fontweight = 'bold'
bbox = {'facecolor': 'white', 'alpha': 1.0, 'pad': 3}
# Time in days
#t = t/(60.*60.*24.)
nplots = 4
fig = plt.figure(figsize=[3.5, 8.0/4.0*nplots])
# ax1: v, ax2: I
ax1 = plt.subplot(nplots, 1, 1)
lns0 = ax1.plot(t, v, '-k', label="$v$",
linewidth=linewidth)
# lns1 = ax1.plot(t, sigma_eff/1000., '-k', label="$\\sigma'$")
# lns2 = ax1.plot(t, tau_def/1000., '-r', label="$\\tau$")
# ns2 = ax1.plot(t, tau_def/1000., '-r')
#lns3 = ax1.plot(t, tau_eff/1000., '-r', label="$\\tau'$", linewidth=linewidth)
ax1.set_ylabel('Shear velocity $v$ [ms$^{-1}$]')
ax2 = ax1.twinx()
ax2color = 'blue'
# lns4 = ax2.plot(t, p_f_top/1000.0 + 80.0, '-',
# color=ax2color,
# label='$p_\\text{f}^\\text{forcing}$')
# lns5 = ax2.semilogy(t, I, ':',
lns5 = ax2.semilogy(t, I, '--',
color=ax2color,
label='$I$', linewidth=linewidth)
ax2.set_ylabel('Inertia number $I$ [-]')
ax2.yaxis.label.set_color(ax2color)
for tl in ax2.get_yticklabels():
tl.set_color(ax2color)
#ax2.legend(loc='upper right')
#lns = lns0+lns1+lns2+lns3+lns4+lns5
#lns = lns0+lns1+lns2+lns3+lns5
#lns = lns1+lns3+lns5
#lns = lns0+lns3+lns5
lns = lns0+lns5
labs = [l.get_label() for l in lns]
# ax2.legend(lns, labs, loc='upper right', ncol=3,
# fancybox=True, framealpha=legend_alpha)
#ax1.set_ylim([-30, 200])
#ax2.set_ylim([-115, 125])
# ax1.text(bbox_x, bbox_y, 'A',
ax1.text(bbox_x, bbox_y, 'a',
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
fontweight=fontweight, bbox=bbox,
transform=ax1.transAxes)
# ax3: mu, ax4: unused
ax3 = plt.subplot(nplots, 1, 2, sharex=ax1)
#ax3.semilogy(t, mu, 'k', linewidth=linewidth)
alpha=1.0
if smooth_friction:
alpha=0.5
ax3.plot(t, mu, 'k', alpha=alpha, linewidth=linewidth)
if smooth_friction:
# smoothed
ax3.plot(t, smooth(mu, smooth_window), linewidth=2)
# label='', linewidth=1,
# alpha=alpha, color=color[c])
ax3.set_ylabel('Bulk friction $\\mu = \\tau\'/N\'$ [-]')
#ax3.set_ylabel('Bulk friction $\\mu = \\tau\'/N$ [-]')
# ax3.text(bbox_x, bbox_y, 'B',
ax3.text(bbox_x, bbox_y, 'b',
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
fontweight=fontweight, bbox=bbox,
transform=ax3.transAxes)
# ax7: n, ax8: unused
ax7 = plt.subplot(nplots, 1, 3, sharex=ax1)
if plotforcechains:
ax7.plot(t[:n.size], coordinationnumber, 'k', linewidth=linewidth)
ax7.set_ylabel('Coordination number $\\bar{n}$ [-]')
#ax7.semilogy(t, n - nkept, 'b', label='$\Delta n_\\text{heavy}$')
#ax7.set_ylim([1.0e1, 2.0e4])
#ax7.set_ylim([-0.2, 9.8])
ax7.set_ylim([-0.2, 5.2])
# ax7.text(bbox_x, bbox_y, 'D',
ax7.text(bbox_x, bbox_y, 'c',
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
fontweight=fontweight, bbox=bbox,
transform=ax7.transAxes)
# ax9: porosity or xvel, ax10: unused
#ax9 = plt.subplot(nplots, 1, 5, sharex=ax1)
ax9 = plt.subplot(nplots, 1, 4, sharex=ax1)
#poros_min = 0.375
#poros_max = 0.45
poros[:, 0] = poros[:, 2] # remove erroneous porosity increase
#cmap = matplotlib.cm.get_cmap('Blues_r')
cmap = matplotlib.cm.get_cmap('afmhot')
# im9 = ax9.pcolormesh(t, zpos_c, poros,
#zpos_c = zpos_c[:-1]
xvel = xvel[:-1]
# xvel[xvel < 0.0] = 0.0 # ignore negative velocities
# im9 = ax9.pcolormesh(t, zpos_c, poros,
im9 = ax9.pcolormesh(t, zpos_c, xvel,
cmap=cmap,
#vmin=poros_min, vmax=poros_max,
#norm=matplotlib.colors.LogNorm(vmin=1.0e-8, vmax=xvel.max()),
shading='gouraud',
rasterized=rasterized)
ax9.set_ylim([zpos_c[0], sim.w_x[0]])
ax9.set_ylabel('Vertical position $z$ [m]')
cbaxes = fig.add_axes([0.32, 0.1, 0.4, 0.01]) # x,y,w,h
# ax9.add_patch(matplotlib.patches.Rectangle(
# (3.0, 0.04), # x,y
# 15., # dx
# .15, # dy
# fill=True,
# linewidth=1,
# facecolor='white'))
# ax9.add_patch(matplotlib.patches.Rectangle(
# (1.5, 0.04), # x,y
# 7., # dx
# .15, # dy
# fill=True,
# linewidth=1,
# facecolor='white',
# alpha=legend_alpha))
cb9 = plt.colorbar(im9, cax=cbaxes,
#ticks=[poros_min, poros_min + 0.5*(poros_max-poros_min), poros_max],
#ticks=[xvel.min(), xvel.min() + 0.5*(xvel.max()-xvel.min()), xvel.max()],
orientation='horizontal',
# extend='min',
cmap=cmap)
# cmap.set_under([8./255., 48./255., 107./255.]) # for poros
# cmap.set_under([1.0e-3, 1.0e-3, 1.0e-3]) # for xvel
# cb9.outline.set_color('w')
cb9.outline.set_edgecolor('w')
from matplotlib import ticker
tick_locator = ticker.MaxNLocator(nbins=4)
cb9.locator = tick_locator
cb9.update_ticks()
#cb9.set_label('Mean horizontal porosity [-]')
cb9.set_label('Norm. avg. horiz. vel. [-]', color='w')
'''
ax9.text(0.5, 0.4, 'Mean horizontal porosity [-]\\\\',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor':'white', 'alpha':1.0, 'pad':3})
'''
cb9.solids.set_rasterized(rasterized)
# change text color of colorbar to white
#axes_obj = plt.getp(im9, 'axes')
#plt.setp(plt.getp(axes_obj, 'yticklabels'), color='w')
#plt.setp(plt.getp(axes_obj, 'xticklabels'), color='w')
#plt.setp(plt.getp(cb9.ax.axes, 'yticklabels'), color='w')
cb9.ax.yaxis.set_tick_params(color='w')
# cb9.yaxis.label.set_color(ax2color)
for tl in cb9.ax.get_xticklabels():
tl.set_color('w')
cb9.ax.yaxis.set_tick_params(color='w')
# ax9.text(bbox_x, bbox_y, 'E',
ax9.text(bbox_x, bbox_y, 'd',
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
fontweight=fontweight, bbox=bbox,
transform=ax9.transAxes)
plt.setp(ax1.get_xticklabels(), visible=False)
#plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax3.get_xticklabels(), visible=False)
#plt.setp(ax4.get_xticklabels(), visible=False)
#plt.setp(ax5.get_xticklabels(), visible=False)
#plt.setp(ax6.get_xticklabels(), visible=False)
plt.setp(ax7.get_xticklabels(), visible=False)
#plt.setp(ax8.get_xticklabels(), visible=False)
ax1.set_xlim([numpy.min(t), numpy.max(t)])
#ax2.set_ylim([1e-5, 1e-3])
ax3.set_ylim([-0.2, 1.2])
ax9.set_xlabel('Time [s]')
fig.tight_layout()
plt.subplots_adjust(hspace=0.05)
filename = sid + '-combined.' + outformat
plt.savefig(filename)
plt.close()
#shutil.copyfile(filename, '/home/adc/articles/own/3/graphics/' + filename)
print(filename)
0
Example 125
def append(self, P, closed=False, itemsize=None, **kwargs):
"""
Append a new set of vertices to the collection.
For kwargs argument, n is the number of vertices (local) or the number
of item (shared)
Parameters
----------
P : np.array
Vertices positions of the path(s) to be added
closed: bool
Whether path(s) is/are closed
itemsize: int or None
Size of an individual path
caps : list, array or 2-tuple
Path start/end cap
color : list, array or 4-tuple
Path color
linewidth : list, array or float
Path linewidth
antialias : list, array or float
Path antialias area
"""
itemsize = itemsize or len(P)
itemcount = len(P)//itemsize
P = P.reshape(itemcount,itemsize,3)
if closed:
V = np.empty((itemcount,itemsize+3), dtype=self.vtype)
# Apply default values on vertices
for name in self.vtype.names:
if name not in ['collection_index', 'prev', 'curr', 'next']:
V[name][1:-2] = kwargs.get(name, self._defaults[name])
V['prev'][:,2:-1] = P
V['prev'][:,1] = V['prev'][:,-2]
V['curr'][:,1:-2] = P
V['curr'][:,-2] = V['curr'][:,1]
V['next'][:,0:-3] = P
V['next'][:,-3] = V['next'][:,0]
V['next'][:,-2] = V['next'][:,1]
else:
V = np.empty((itemcount,itemsize+2), dtype=self.vtype)
# Apply default values on vertices
for name in self.vtype.names:
if name not in ['collection_index', 'prev', 'curr', 'next']:
V[name][1:-1] = kwargs.get(name, self._defaults[name])
V['prev'][:,2:] = P
V['prev'][:,1] = V['prev'][:,2]
V['curr'][:,1:-1] = P
V['next'][:,:-2] = P
V['next'][:,-2] = V['next'][:,-3]
V[:, 0] = V[:, 1]
V[:,-1] = V[:,-2]
V = V.ravel()
V = np.repeat(V,2,axis=0)
V['id'] = np.tile([1,-1],len(V)/2)
if closed:
V = V.reshape(itemcount,2*(itemsize+3))
else:
V = V.reshape(itemcount,2*(itemsize+2))
V["id"][:,:2 ] = 2,-2
V["id"][:,-2:] = 2,-2
V = V.ravel()
# Uniforms
if self.utype:
U = np.zeros(itemcount, dtype=self.utype)
for name in self.utype.names:
if name not in ["__unused__"]:
U[name] = kwargs.get(name, self._defaults[name])
else:
U = None
Collection.append(self, vertices=V, uniforms=U,
itemsize=2*(itemsize+2+closed))
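The vertex buffer above is built with `np.empty` over a structured dtype; a reduced sketch of that allocation and per-field assignment pattern (field names and sizes are illustrative):
import numpy as np

vtype = np.dtype([('prev', np.float32, 3),
                  ('curr', np.float32, 3),
                  ('next', np.float32, 3),
                  ('id', np.float32)])
itemcount, itemsize = 2, 4
V = np.empty((itemcount, itemsize + 2), dtype=vtype)
P = np.random.rand(itemcount, itemsize, 3).astype(np.float32)
V['curr'][:, 1:-1] = P              # interior vertices hold the path points
V['curr'][:, 0] = V['curr'][:, 1]   # duplicated endpoints for the line joins
V['curr'][:, -1] = V['curr'][:, -2]
print(V.shape, V.dtype.names)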
0
Example 126
Project: landlab Source File: raster_aspect.py
@deprecated(use='grid.calc_slope_at_node', version=1.0)
def calc_slope_aspect_of_nodes_horn(grid, ids=None,
vals='topographic__elevation'):
r"""Calculate slope and aspect.
.. note::
THIS CODE HAS ISSUES (SN 25-Sept-14): This code didn't perform well
on a NS facing elevation profile. Please check
slope_aspect_routines_comparison.py under landlab\examples before
using this. Suggested alternative:
calculate_slope_aspect_at_nodes_burrough
Calculates the local topographic slope (i.e., the down-dip slope, and
presented as positive), and the aspect (dip direction in radians
clockwise from north), at the given nodes, *ids*. All *ids* must be of
core nodes.
This method uses the Horn 1981 algorithm, the one employed by many GIS
packages. It should be significantly faster than alternative slope
methods.
If *ids* is not provided, the slope will be returned for all core
nodes.
*vals* is either the name of an existing grid field from which to draw
topographic data, or an array of values to use. If an array of values
is passed, it must be nnodes long. If *vals* is not provided, this
method will default to trying to use the field
"topographic__elevation".
Parameters
----------
grid : RasterModelGrid
Grid on which to calculate slopes and aspects.
ids : array_like of int, optional
Nodes on which to calculate slope and aspect.
vals : str or ndarray, optional
Node values used to measure slope and aspect.
Returns
-------
(slope, aspect) : tuple of float
*slope*: a len(ids) array of slopes at each node provided.
*aspect*: a len(ids) array of aspects at each node provided.
Examples
--------
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> grid = RasterModelGrid((4, 5))
Create a south-facing slope.
>>> elevation = np.array([
... 0., 0., 0., 0., 0,
... 1., 1., 1., 1., 1,
... 2., 2., 2., 2., 2,
... 3., 3., 3., 3., 3])
>>> (slope, aspect) = calc_slope_aspect_of_nodes_horn(grid,
... vals=elevation)
>>> len(slope) == grid.number_of_core_nodes
True
>>> len(aspect) == grid.number_of_core_nodes
True
>>> slope
array([ 1., 1., 1., 1., 1., 1.])
>>> aspect * 180. / np.pi
array([ 180., 180., 180., 180., 180., 180.])
Make the slope north-facing by multiplying elevations by -1. The slopes
will still be positive but the aspects will change.
>>> elevation *= -1
>>> (slope, aspect) = calc_slope_aspect_of_nodes_horn(grid,
... vals=elevation)
>>> slope
array([ 1., 1., 1., 1., 1., 1.])
>>> aspect * 180. / np.pi
array([ 0., 0., 0., 0., 0., 0.])
Double the slope and make it west-facing.
>>> elevation = np.array([
... 0., 1., 2., 3., 4.,
... 0., 1., 2., 3., 4.,
... 0., 1., 2., 3., 4.,
... 0., 1., 2., 3., 4.])
>>> elevation *= 2.
>>> (slope, aspect) = calc_slope_aspect_of_nodes_horn(grid,
... vals=elevation)
>>> slope
array([ 2., 2., 2., 2., 2., 2.])
>>> aspect * 180. / np.pi
array([ 270., 270., 270., 270., 270., 270.])
Make the slope east-facing by multiplying elevations by -1. The slopes
will still be positive but the aspects will change.
>>> elevation *= -1.
>>> (slope, aspect) = calc_slope_aspect_of_nodes_horn(grid,
... vals=elevation)
>>> slope
array([ 2., 2., 2., 2., 2., 2.])
>>> aspect * 180. / np.pi
array([ 90., 90., 90., 90., 90., 90.])
LLCATS: DEPR NINF SURF
"""
if ids is None:
ids = grid.core_nodes
if isinstance(vals, str):
vals = grid.at_node[vals]
else:
if len(vals) != grid.number_of_nodes:
raise IndexError('*vals* was not of a compatible length!')
# [right, top, left, bottom]
neighbors = grid.active_neighbors_at_node[ids]
# [topright, topleft, bottomleft, bottomright]
diagonals = grid._get_diagonal_list(ids)
input_array = np.empty((len(ids), 9), dtype=int)
input_array[:, 0] = ids
input_array[:, 1:5] = neighbors
input_array[:, 5:] = diagonals
slopes_array = np.apply_along_axis(_one_line_slopes, 1, input_array, grid,
vals)
slope_we = slopes_array[:, 0]
slope_sn = slopes_array[:, 1]
slope = np.sqrt(slope_we * slope_we + slope_sn * slope_sn)
# aspect = np.empty_like(slope)
aspect = np.ones(slope.size, dtype=float)
simple_cases = slope_we != 0.
complex_cases = np.logical_not(simple_cases)
complex_aspects = aspect[complex_cases]
complex_aspects[slope_sn[complex_cases] < 0.] = np.pi
complex_aspects[slope_sn[complex_cases] >= 0.] = 0.
aspect[complex_cases] = complex_aspects
# +ve is CCW rotation from x axis
angle_to_xaxis = np.arctan(slope_sn[simple_cases] / slope_we[simple_cases])
aspect[simple_cases] = (
(1. - np.sign(slope_we[simple_cases])) * 0.5) * np.pi + (
0.5 * np.pi - angle_to_xaxis)
return slope.ravel(), aspect.ravel()
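A toy check of the slope/aspect composition at the end of the function: a pure south-facing dip of 1 has `slope_we = 0` and `slope_sn = -1`, which should give slope 1 and aspect 180 degrees, matching the doctest above:
import numpy as np

slope_we = np.array([0.0])
slope_sn = np.array([-1.0])

slope = np.sqrt(slope_we ** 2 + slope_sn ** 2)
aspect = np.ones(slope.size, dtype=float)
simple = slope_we != 0.0
complex_cases = np.logical_not(simple)
aspect[complex_cases] = np.where(slope_sn[complex_cases] < 0.0, np.pi, 0.0)
angle = np.arctan(slope_sn[simple] / slope_we[simple])
aspect[simple] = ((1.0 - np.sign(slope_we[simple])) * 0.5) * np.pi + (0.5 * np.pi - angle)
print(slope, aspect * 180.0 / np.pi)   # [1.] [180.]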
0
Example 127
Project: instaseis Source File: base_netcdf_instaseis_db.py
def _get_element_info(self, coordinates):
"""
Find and collect/calculate information about the element containing
the given coordinates.
"""
k_map = {"displ_only": 6,
"strain_only": 1,
"fullfields": 1}
nextpoints = self.parsed_mesh.kdtree.query(
[coordinates.s, coordinates.z], k=k_map[self.info.dump_type])
# Find the element containing the point of interest.
mesh = self.parsed_mesh.f["Mesh"]
if self.info.dump_type == 'displ_only':
for idx in nextpoints[1]:
corner_points = np.empty((4, 2), dtype="float64")
if not self.read_on_demand:
corner_point_ids = self.parsed_mesh.fem_mesh[idx][:4]
eltype = self.parsed_mesh.eltypes[idx]
corner_points[:, 0] = \
self.parsed_mesh.mesh_S[corner_point_ids]
corner_points[:, 1] = \
self.parsed_mesh.mesh_Z[corner_point_ids]
else:
corner_point_ids = mesh["fem_mesh"][idx][:4]
# When reading from a netcdf file, the indices must be
# sorted for newer netcdf versions. The double argsort()
# gives the indices in the sorted array to restore the
# original order.
eltype = mesh["eltype"][idx]
m_s = mesh["mesh_S"]
m_z = mesh["mesh_Z"]
corner_points[:, 0] = [m_s[_i] for _i in corner_point_ids]
corner_points[:, 1] = [m_z[_i] for _i in corner_point_ids]
isin, xi, eta = finite_elem_mapping.inside_element(
coordinates.s, coordinates.z, corner_points, eltype,
tolerance=1E-3)
if isin:
id_elem = idx
break
else: # pragma: no cover
raise ValueError("Element not found")
if not self.read_on_demand:
gll_point_ids = self.parsed_mesh.sem_mesh[id_elem]
axis = bool(self.parsed_mesh.axis[id_elem])
else:
gll_point_ids = mesh["sem_mesh"][id_elem]
axis = bool(mesh["axis"][id_elem])
if axis:
col_points_xi = self.parsed_mesh.glj_points
col_points_eta = self.parsed_mesh.gll_points
else:
col_points_xi = self.parsed_mesh.gll_points
col_points_eta = self.parsed_mesh.gll_points
else:
id_elem = nextpoints[1]
col_points_xi = None
col_points_eta = None
gll_point_ids = None
axis = None
corner_points = None
eltype = None
xi = None
eta = None
return ElementInfo(
id_elem=id_elem, gll_point_ids=gll_point_ids, xi=xi, eta=eta,
corner_points=corner_points, col_points_xi=col_points_xi,
col_points_eta=col_points_eta, axis=axis, eltype=eltype)
0
Example 128
Project: GPflow Source File: hmc.py
def sample_HMC(f, num_samples, Lmin, Lmax, epsilon, x0, verbose=False,
thin=1, burn=0, RNG=np.random.RandomState(0),
return_logprobs=False):
"""
A straight-forward HMC implementation. The mass matrix is assumed to be the
identity.
f is a python function that returns the energy and its gradient
f(x) = E(x), dE(x)/dx
we then generate samples from the distribution
pi(x) = exp(-E(x))/Z
- num_samples is the number of samples to generate.
- Lmin, Lmax, epsilon are parameters of the HMC procedure to be tuned.
- x0 is the starting position for the procedure.
- verbose is a flag which turns on the display of the running accept ratio.
- thin is an integer which specifies the thinning interval
- burn is an integer which specifies how many initial samples to discard.
- RNG is a random number generator
- return_logprobs is a boolean indicating whether to return the log densities alongside the samples.
The total number of iterations is given by
burn + thin * num_samples
The return shape is always num_samples x D.
The leapfrog (Verlet) integrator works by picking a random number of steps
uniformly between Lmin and Lmax, and taking steps of length epsilon.
"""
# an array to store the logprobs in (even if the user doesn't want them)
logprob_track = np.empty(num_samples)
# burn some samples if needed.
if burn > 0:
if verbose:
print("burn-in sampling started")
samples = sample_HMC(f, num_samples=burn, Lmin=Lmin, Lmax=Lmax,
epsilon=epsilon, x0=x0,
verbose=verbose, thin=1, burn=0)
if verbose:
print("burn-in sampling ended")
x0 = samples[-1]
D = x0.size
samples = np.zeros((num_samples, D))
samples[0] = x0.copy()
x = x0.copy()
logprob, grad = f(x0)
logprob, grad = -logprob, -grad
accept_count_batch = 0
for t in range(1, num_samples * thin):
# Output acceptance rate every 100 iterations
if(((t+1) % 100) == 0):
if verbose:
print("Iteration: ", t+1,
"\t Acc Rate: ", 1. * accept_count_batch, "%")
accept_count_batch = 0
# make a copy of the old state.
x_old, logprob_old, grad_old = x.copy(), logprob, grad.copy()
p_old = RNG.randn(D)
# Standard HMC - begin leapfrogging
premature_reject = False
p = p_old + 0.5 * epsilon * grad
for l in range(RNG.randint(Lmin, Lmax)):
x += epsilon * p
logprob, grad = f(x)
logprob, grad = -logprob, -grad
if np.any(np.isnan(grad)): # pragma: no cover
premature_reject = True
break
p += epsilon * grad
p -= 0.5*epsilon * grad
# leapfrogging done
# reject the proposal if there are numerical errors.
if premature_reject: # pragma: no cover
print("warning: numerical instability.\
Rejecting this proposal prematurely")
x, logprob, grad = x_old, logprob_old, grad_old
if t % thin == 0:
samples[t // thin] = x_old
logprob_track[t // thin] = logprob_old
continue
# work out whether to accept the proposal
log_accept_ratio = logprob - 0.5 * p.dot(p) -\
logprob_old + 0.5 * p_old.dot(p_old)
logu = np.log(RNG.rand())
if logu < log_accept_ratio: # accept
if t % thin == 0:
samples[t // thin] = x
logprob_track[t // thin] = logprob
accept_count_batch += 1
else: # reject
if t % thin == 0:
samples[t // thin] = x_old
logprob_track[t // thin] = logprob_old
x, logprob, grad = x_old, logprob_old, grad_old
if return_logprobs:
return samples, logprob_track
else:
return samples
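A hypothetical usage of the sampler above, assuming it is importable: the target is a 2D standard Gaussian, so the energy is quadratic and its gradient is the position itself:
import numpy as np

def energy(x):
    # E(x) = 0.5 * x.x  =>  dE/dx = x, so exp(-E) is an unnormalised Gaussian
    return 0.5 * np.dot(x, x), x

samples = sample_HMC(energy, num_samples=500, Lmin=5, Lmax=10, epsilon=0.2,
                     x0=np.zeros(2), burn=100, thin=1)
print(samples.shape)          # (500, 2)
print(samples.mean(axis=0))   # should be near zero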
0
Example 129
@cache(level=10)
def dct(n_filters, n_input):
"""Discrete cosine transform (DCT type-III) basis.
.. [1] http://en.wikipedia.org/wiki/Discrete_cosine_transform
Parameters
----------
n_filters : int > 0 [scalar]
number of output components (DCT filters)
n_input : int > 0 [scalar]
number of input components (frequency bins)
Returns
-------
dct_basis: np.ndarray [shape=(n_filters, n_input)]
DCT (type-III) basis vectors [1]_
Notes
-----
This function caches at level 10.
Examples
--------
>>> n_fft = 2048
>>> dct_filters = librosa.filters.dct(13, 1 + n_fft // 2)
>>> dct_filters
array([[ 0.031, 0.031, ..., 0.031, 0.031],
[ 0.044, 0.044, ..., -0.044, -0.044],
...,
[ 0.044, 0.044, ..., -0.044, -0.044],
[ 0.044, 0.044, ..., 0.044, 0.044]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(dct_filters, x_axis='linear')
>>> plt.ylabel('DCT function')
>>> plt.title('DCT filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
basis = np.empty((n_filters, n_input))
basis[0, :] = 1.0 / np.sqrt(n_input)
samples = np.arange(1, 2*n_input, 2) * np.pi / (2.0 * n_input)
for i in range(1, n_filters):
basis[i, :] = np.cos(i*samples) * np.sqrt(2.0/n_input)
return basis
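A quick numerical check of the construction above: rebuilding the same basis for a small size and confirming that the rows are orthonormal (the dot product of the basis with its transpose is close to the identity):
import numpy as np

n_filters, n_input = 13, 64
basis = np.empty((n_filters, n_input))
basis[0, :] = 1.0 / np.sqrt(n_input)
samples = np.arange(1, 2 * n_input, 2) * np.pi / (2.0 * n_input)
for i in range(1, n_filters):
    basis[i, :] = np.cos(i * samples) * np.sqrt(2.0 / n_input)
print(np.allclose(basis.dot(basis.T), np.eye(n_filters)))   # True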
0
Example 130
Project: tractor Source File: psfex.py
def toFits(self, fn, data=None, hdr=None,
merge=False):
'''
If merge: merge params "amp0", "amp1", ... into an "amp" array.
'''
from astrometry.util.fits import fits_table
if hdr is None:
import fitsio
hdr = fitsio.FITSHDR()
hdr.add_record(dict(name='PSFEX_T', value=typestring(type(self)),
comment='PsfEx type'))
hdr.add_record(dict(name='PSF_TYPE',
value=typestring(self.psfclass),
comment='PsfEx PSF type'))
hdr.add_record(dict(name='PSF_W', value=self.W,
comment='Image width'))
hdr.add_record(dict(name='PSF_H', value=self.H,
comment='Image height'))
#hdr.add_record(dict(name='PSF_SCALING',
hdr.add_record(dict(name='PSF_K', value=self.K,
comment='Number of PSF components'))
hdr.add_record(dict(name='PSF_NX', value=self.nx,
comment='Number of X grid points'))
hdr.add_record(dict(name='PSF_NY', value=self.ny,
comment='Number of Y grid points'))
if data is None:
data = self.splinedata
(pp,XX,YY) = data
ny,nx,nparams = pp.shape
assert(ny == self.ny)
assert(nx == self.nx)
X = self.psfclass(*pp[0,0])
names = X.getParamNames()
hdr.add_record(dict(name='PSF_NA', value=len(names),
comment='PSF number of params'))
for i,nm in enumerate(names):
hdr.add_record(dict(name='PSF_A%i' % i, value=nm,
comment='PSF param name'))
T = fits_table()
T.xx = XX.reshape((1, len(XX)))
T.yy = YY.reshape((1, len(YY)))
if merge:
# find like params and group them together.
# assume names like "amp0"
assert(self.K < 10)
pnames = set()
for nm in names:
assert(nm[-1] in '0123456789'[:self.K])
pnames.add(nm[:-1])
assert(len(pnames) * self.K == nparams)
pnames = list(pnames)
pnames.sort()
print('Pnames:', pnames)
namemap = dict([(nm,i) for i,nm in enumerate(names)])
for i,nm in enumerate(pnames):
X = np.empty((1,self.K,ny,nx))
for k in range(self.K):
X[0,k,:,:] = pp[:,:,namemap['%s%i' % (nm,k)]]
T.set(nm, X)
# X = np.dstack([pp[:,:,namemap['%s%i' % (nm, k)]] for k in range(self.K)])
# print 'pname', nm, 'array:', X.shape
# T.set(nm, X.reshape((1,self.K,ny,nx)))
else:
for i,nm in enumerate(names):
T.set(nm, pp[:,:,i].reshape((1,ny,nx)))
T.writeto(fn, header=hdr)
0
Example 131
def transform(self, traj):
"""
Maps an mdtraj Trajectory object to the selected output features
Parameters
----------
traj : mdtraj Trajectory
Trajectory object used as an input
Returns
-------
out : ndarray((T, n), dtype=float32)
Output features: For each of T time steps in the given trajectory,
a vector with all n output features selected.
"""
# if there are no features selected, return given trajectory
if len(self.active_features) == 0:
if not self._showed_warning_empty_feature_list:
warnings.warn("You have no features selected."
" Returning plain coordinates.")
self._showed_warning_empty_feature_list = True
s = traj.xyz.shape
new_shape = (s[0], s[1] * s[2])
return traj.xyz.reshape(new_shape)
# handle empty chunks (which might occur due to time-lagged access)
if traj.xyz.shape[0] == 0:
return np.empty((0, self.dimension()))
# otherwise build feature vector.
feature_vec = []
# TODO: consider parallel evaluation computation here, this effort is
# only worth it, if computation time dominates memory transfers
for f in self.active_features:
# perform sanity checks for custom feature input
if isinstance(f, CustomFeature):
# NOTE: casting=safe raises in numpy>=1.9
vec = f.transform(traj).astype(np.float32, casting='safe')
if vec.shape[0] == 0:
vec = np.empty((0, f.dimension))
if not isinstance(vec, np.ndarray):
raise ValueError('Your custom feature %s did not return'
' a numpy.ndarray!' % str(f.describe()))
if not vec.ndim == 2:
raise ValueError('Your custom feature %s did not return'
' a 2d array. Shape was %s'
% (str(f.describe()),
str(vec.shape)))
if not vec.shape[0] == traj.xyz.shape[0]:
raise ValueError('Your custom feature %s did not return'
' as many frames as it received!'
'Input was %i, output was %i'
% (str(f.describe()),
traj.xyz.shape[0],
vec.shape[0]))
else:
vec = f.transform(traj).astype(np.float32)
feature_vec.append(vec)
if len(feature_vec) > 1:
res = np.hstack(feature_vec)
else:
res = feature_vec[0]
return res
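The empty-chunk guard above returns a zero-row array with the correct width so that downstream stacking code keeps working; the same idiom in isolation:
import numpy as np

dim = 7
empty_chunk = np.empty((0, dim))
full_chunk = np.random.rand(5, dim).astype(np.float32)
print(empty_chunk.shape, np.vstack([empty_chunk, full_chunk]).shape)   # (0, 7) (5, 7)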
0
Example 132
def fromfile(infile, type=None, shape=None, sizing=STRICT,
typecode=None, dtype=None):
if isinstance(infile, (str, unicode)):
infile = open(infile, 'rb')
dtype = type2dtype(typecode, type, dtype, True)
if shape is None:
shape = (-1,)
if not isinstance(shape, tuple):
shape = (shape,)
if (list(shape).count(-1)>1):
raise ValueError("At most one unspecified dimension in shape")
if -1 not in shape:
if sizing != STRICT:
raise ValueError("sizing must be STRICT if size complete")
arr = np.empty(shape, dtype)
bytesleft=arr.nbytes
bytesread=0
while(bytesleft > _BLOCKSIZE):
data = infile.read(_BLOCKSIZE)
if len(data) != _BLOCKSIZE:
raise EarlyEOFError("Unexpected EOF reading data for size complete array")
arr.data[bytesread:bytesread+_BLOCKSIZE]=data
bytesread += _BLOCKSIZE
bytesleft -= _BLOCKSIZE
if bytesleft > 0:
data = infile.read(bytesleft)
if len(data) != bytesleft:
raise EarlyEOFError("Unexpected EOF reading data for size complete array")
arr.data[bytesread:bytesread+bytesleft]=data
return arr
##shape is incompletely specified
##read until EOF
##implementation 1: naively use memory blocks
##problematic because memory allocation can be double what is
##necessary (!)
##the most common case, namely reading in data from an unchanging
##file whose size may be determined before allocation, should be
##quick -- only one allocation will be needed.
recsize = dtype.itemsize * np.product([i for i in shape if i != -1])
blocksize = max(_BLOCKSIZE/recsize, 1)*recsize
##try to estimate file size
try:
curpos=infile.tell()
infile.seek(0,2)
endpos=infile.tell()
infile.seek(curpos)
except (AttributeError, IOError):
initsize=blocksize
else:
initsize=max(1,(endpos-curpos)/recsize)*recsize
buf = np.newbuffer(initsize)
bytesread=0
while 1:
data=infile.read(blocksize)
if len(data) != blocksize: ##eof
break
##do we have space?
if len(buf) < bytesread+blocksize:
buf=_resizebuf(buf,len(buf)+blocksize)
## or rather a=resizebuf(a,2*len(a)) ?
assert len(buf) >= bytesread+blocksize
buf[bytesread:bytesread+blocksize]=data
bytesread += blocksize
if len(data) % recsize != 0:
if sizing == STRICT:
raise SizeMismatchError("Filesize does not match specified shape")
if sizing == WARN:
_warnings.warn("Filesize does not match specified shape",
SizeMismatchWarning)
try:
infile.seek(-(len(data) % recsize),1)
except AttributeError:
_warnings.warn("Could not rewind (no seek support)",
FileSeekWarning)
except IOError:
_warnings.warn("Could not rewind (IOError in seek)",
FileSeekWarning)
datasize = (len(data)/recsize) * recsize
if len(buf) != bytesread+datasize:
buf=_resizebuf(buf,bytesread+datasize)
buf[bytesread:bytesread+datasize]=data[:datasize]
##deduce shape from len(buf)
shape = list(shape)
uidx = shape.index(-1)
shape[uidx]=len(buf) / recsize
a = np.ndarray(shape=shape, dtype=type, buffer=buf)
if a.dtype.char == '?':
np.not_equal(a, 0, a)
return a
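For the fully-specified-shape branch, the essential idea is to allocate the destination once with numpy.empty and then fill it from the file in blocks. A hedged, self-contained sketch of that idea (the block size, file layout and helper name are assumptions, not the project's API):
import numpy as np

BLOCK = 1 << 16

def read_exact(fname, shape, dtype=np.float64):
    arr = np.empty(shape, dtype)           # uninitialised buffer of the final size
    view = arr.view(np.uint8).reshape(-1)  # flat byte view used for filling
    with open(fname, 'rb') as fh:
        n = 0
        while n < view.size:
            chunk = fh.read(min(BLOCK, view.size - n))
            if not chunk:
                raise EOFError('file shorter than the requested shape')
            view[n:n + len(chunk)] = np.frombuffer(chunk, np.uint8)
            n += len(chunk)
    return arr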
0
Example 133
Project: faststats Source File: figrc.py
def plotCorr(l, pars, plotfunc=None, lbls=None, triangle='lower', *args, **kwargs):
""" Plot correlation matrix between variables
inputs
-------
l: dict
dictionary of variables (could be a Table)
pars: sequence of str
parameters to use
plotfunc: callable
function to be called when doing the scatter plots
lbls: sequence of str
sequence of strings to use instead of the dictionary keys
triangle: str in ['upper', 'lower']
Which side of the triangle to use.
*args, **kwargs are forwarded to the plot function
Example
-------
import numpy as np
figrc.ezrc(16, 1, 16, 5)
d = {}
for k in range(4):
d[k] = np.random.normal(0, k+1, 1e4)
plt.figure(figsize=(8 * 1.5, 7 * 1.5))
plotCorr(d, d.keys(), plotfunc=figrc.scatter_plot)
#plotCorr(d, d.keys(), alpha=0.2)
"""
if lbls is None:
lbls = pars
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
if not len(pars) - 1 in fontmap:
fontmap[len(pars) - 1] = 3
k = 1
axes = np.empty((len(pars) + 1, len(pars)), dtype=object)
for j in range(len(pars)):
for i in range(len(pars)):
if j > i:
sharex = axes[j - 1, i]
else:
sharex = None
if i == j:
# Plot the histograms.
ax = plt.subplot(len(pars), len(pars), k)
axes[j, i] = ax
n, b, p = ax.hist(l[pars[i]], bins=50, histtype="step", color=kwargs.get("color", "b"))
if triangle == 'upper':
ax.set_xlabel(lbls[i])
ax.set_ylabel(lbls[i])
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks_position('right')
ax.xaxis.set_ticks_position('bottom')
if triangle == 'upper':
if i > j:
if i > j + 1:
sharey = axes[j, i - 1]
else:
sharey = None
ax = plt.subplot(len(pars), len(pars), k, sharey=sharey, sharex=sharex)
axes[j, i] = ax
if plotfunc is None:
plt.plot(l[pars[i]], l[pars[j]], ',', **kwargs)
else:
plotfunc(l[pars[i]], l[pars[j]], ax=ax, *args, **kwargs)
plt.setp(ax.get_xticklabels() + ax.get_yticklabels(), visible=False)
if triangle == 'lower':
if i < j:
if i < j:
sharey = axes[j, i - 1]
else:
sharey = None
ax = plt.subplot(len(pars), len(pars), k, sharey=sharey, sharex=sharex)
axes[j, i] = ax
if plotfunc is None:
plt.plot(l[pars[i]], l[pars[j]], ',', **kwargs)
else:
plotfunc(l[pars[i]], l[pars[j]], ax=ax, *args, **kwargs)
plt.setp(ax.get_xticklabels() + ax.get_yticklabels(), visible=False)
if i == 0:
ax.set_ylabel(lbls[j])
plt.setp(ax.get_yticklabels(), visible=True)
if j == len(pars) - 1:
ax.set_xlabel(lbls[i])
plt.setp(ax.get_xticklabels(), visible=True)
N = int(0.5 * fontmap[len(pars) - 1])
if N <= 4:
N = 5
setNmajors(N, N, ax=ax, prune='both')
k += 1
setMargins(hspace=0.0, wspace=0.0)
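The object-dtype grid is what lets the function remember previously created axes for sharing. A reduced sketch of just that mechanism (the grid size and sharing rule are illustrative, not the full plotCorr logic):
import numpy as np
import matplotlib.pyplot as plt

npars = 3
axes = np.empty((npars, npars), dtype=object)        # object array; np.empty fills it with None
fig = plt.figure()
k = 1
for j in range(npars):
    for i in range(npars):
        sharex = axes[j - 1, i] if j > 0 else None   # share x with the panel above, if any
        axes[j, i] = fig.add_subplot(npars, npars, k, sharex=sharex)
        k += 1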
0
Example 134
Project: merlin Source File: providers.py
def load_next_partition(self):
"""Load one block data. The number of frames will be the buffer size set during intialisation.
"""
self.logger.debug('loading next partition')
temp_set_x = numpy.empty((self.buffer_size, self.n_ins))
temp_set_y = numpy.empty((self.buffer_size, self.n_outs))
current_index = 0
### first check whether there are remaining data from previous utterance
if self.remain_frame_number > 0:
temp_set_x[current_index:self.remain_frame_number, ] = self.remain_data_x
temp_set_y[current_index:self.remain_frame_number, ] = self.remain_data_y
current_index += self.remain_frame_number
self.remain_frame_number = 0
io_fun = BinaryIOCollection()
while True:
if current_index >= self.buffer_size:
break
if self.file_index >= self.list_size:
self.end_reading = True
self.file_index = 0
break
in_features, lab_frame_number = io_fun.load_binary_file_frame(self.x_files_list[self.file_index], self.n_ins)
out_features, out_frame_number = io_fun.load_binary_file_frame(self.y_files_list[self.file_index], self.n_outs)
frame_number = lab_frame_number
if abs(lab_frame_number - out_frame_number) < 5: ## allow a small difference; occasionally the label and acoustic streams differ by one or two frames
if lab_frame_number > out_frame_number:
frame_number = out_frame_number
else:
base_file_name = self.x_files_list[self.file_index].split('/')[-1].split('.')[0]
self.logger.critical("the number of frames in label and acoustic features are different: %d vs %d (%s)" %(lab_frame_number, out_frame_number, base_file_name))
raise
out_features = out_features[0:frame_number, ]
in_features = in_features[0:frame_number, ]
if current_index + frame_number <= self.buffer_size:
temp_set_x[current_index:current_index+frame_number, ] = in_features
temp_set_y[current_index:current_index+frame_number, ] = out_features
current_index = current_index + frame_number
else: ## if current utterance cannot be stored in the block, then leave the remaining part for the next block
used_frame_number = self.buffer_size - current_index
temp_set_x[current_index:self.buffer_size, ] = in_features[0:used_frame_number, ]
temp_set_y[current_index:self.buffer_size, ] = out_features[0:used_frame_number, ]
current_index = self.buffer_size
self.remain_data_x = in_features[used_frame_number:frame_number, ]
self.remain_data_y = out_features[used_frame_number:frame_number, ]
self.remain_frame_number = frame_number - used_frame_number
self.file_index += 1
temp_set_x = temp_set_x[0:current_index, ]
temp_set_y = temp_set_y[0:current_index, ]
numpy.random.seed(271639)
numpy.random.shuffle(temp_set_x)
numpy.random.seed(271639)
numpy.random.shuffle(temp_set_y)
shared_set_x = self.make_shared(temp_set_x, 'x')
shared_set_y = self.make_shared(temp_set_y, 'y')
shared_set_xy = (shared_set_x, shared_set_y)
# temp_set_x = self.make_shared(temp_set_x, 'x')
# temp_set_y = self.make_shared(temp_set_y, 'y')
return shared_set_xy, temp_set_x, temp_set_y
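The buffer logic above boils down to: preallocate with numpy.empty, copy variable-length utterances in until the buffer is full, and trim to the filled length. A toy sketch with made-up chunk sizes (no file I/O, not the provider's actual API):
import numpy as np

buffer_size, n_ins = 100, 4
buf = np.empty((buffer_size, n_ins))
current = 0
for chunk_len in (30, 50, 40):                      # the last chunk overflows the buffer
    take = min(chunk_len, buffer_size - current)
    buf[current:current + take] = np.random.rand(take, n_ins)
    current += take
    if current >= buffer_size:
        break
buf = buf[:current]                                 # drop any unused tail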
0
Example 135
Project: seizure-prediction Source File: tasks.py
def load_training_data(settings, target, pipeline, check_only, strategy=None, cv_fold_number=None, quiet=False):
cv = cv_fold_number is not None
if check_only:
return load_pipeline_data(settings, target, 'preictal', pipeline, check_only=True, quiet=quiet) or \
load_pipeline_data(settings, target, 'interictal', pipeline, check_only=True, quiet=quiet)
preictal, preictal_meta = load_pipeline_data(settings, target, 'preictal', pipeline, check_only=False, quiet=quiet)
interictal, interictal_meta = load_pipeline_data(settings, target, 'interictal', pipeline, check_only=False, quiet=quiet)
total_segments = preictal_meta.num_segments + interictal_meta.num_segments
# print 'total_segments', total_segments
if not quiet: print 'Preparing data ...',
start = time.get_seconds()
def make_fold(preictal_X_train, preictal_X_cv, interictal_X_train, interictal_X_cv):
num_train_segments = preictal_X_train.shape[0] + interictal_X_train.shape[0]
num_cv_segments = preictal_X_cv.shape[0] + interictal_X_cv.shape[0]
assert (num_train_segments + num_cv_segments) == total_segments
flattened_preictal_X_train = flatten(preictal_X_train)
flattened_interictal_X_train = flatten(interictal_X_train)
flattened_preictal_X_cv = flatten(preictal_X_cv) if cv else np.empty((0,))
flattened_interictal_X_cv = flatten(interictal_X_cv) if cv else np.empty((0,))
X_train = np.concatenate((flattened_preictal_X_train, flattened_interictal_X_train), axis=0)
X_cv = np.concatenate((flattened_preictal_X_cv, flattened_interictal_X_cv), axis=0)
preictal_y_train = np.ones((flattened_preictal_X_train.shape[0],))
preictal_y_cv = np.ones((preictal_X_cv.shape[0],))
interictal_y_train = np.zeros((flattened_interictal_X_train.shape[0],))
interictal_y_cv = np.zeros((interictal_X_cv.shape[0],))
y_train = np.concatenate((preictal_y_train, interictal_y_train), axis=0)
y_cv = np.concatenate((preictal_y_cv, interictal_y_cv), axis=0)
X_train, y_train = sklearn.utils.shuffle(X_train, y_train, random_state=0)
return jsdict({
'X_train': X_train,
'y_train': y_train,
'X_cv': X_cv,
'y_cv': y_cv,
'num_train_segments': num_train_segments,
'num_cv_segments': num_cv_segments
})
if cv:
preictal_X_train, preictal_X_cv = strategy.split_train_cv(preictal, preictal_meta, cv_fold_number)
interictal_X_train, interictal_X_cv = strategy.split_train_cv(interictal, interictal_meta, cv_fold_number, interictal=True)
data = make_fold(preictal_X_train, preictal_X_cv, interictal_X_train, interictal_X_cv)
else:
preictal_X_train = preictal
preictal_X_cv = np.empty((0,))
interictal_X_train = interictal
interictal_X_cv = np.empty((0,))
data = make_fold(preictal_X_train, preictal_X_cv, interictal_X_train, interictal_X_cv)
if not quiet: print '%ds' % (time.get_seconds() - start)
if not quiet: print 'X_train', data.X_train.shape, 'y_train', data.y_train.shape, 'X_cv', data.X_cv.shape, 'y_cv', data.y_cv.shape
return data
0
Example 136
Project: ignition Source File: rp_shallow.py
def rp_shallow_roe_1d(q_l,q_r,aux_l,aux_r,aux_global):
r"""
Roe shallow water solver in 1d::
ubar = (sqrt(h_l) * u_l + sqrt(h_r) * u_r) / (sqrt(h_l) + sqrt(h_r))
cbar = sqrt( 0.5 * g * (h_l + h_r))
W_1 = [ 1, ubar - cbar ]^T    s_1 = ubar - cbar
W_2 = [ 1, ubar + cbar ]^T    s_2 = ubar + cbar
a1 = 0.5 * ( - delta_hu + (ubar + cbar) * delta_h ) / cbar
a2 = 0.5 * ( delta_hu - (ubar - cbar) * delta_h ) / cbar
*aux_global* should contain:
- *g* - (float) Gravitational constant
- *efix* - (bool) Boolean as to whether an entropy fix should be used, if
not present, false is assumed
:Version: 1.0 (2009-02-05)
"""
# Array shapes
nrp = q_l.shape[0]
meqn = 2
mwaves = 2
# Output arrays
wave = np.empty( (nrp, meqn, mwaves) )
s = np.empty( (nrp, mwaves) )
amdq = np.zeros( (nrp, meqn) )
apdq = np.zeros( (nrp, meqn) )
# Compute roe-averaged quantities
ubar = ( (q_l[:,1]/np.sqrt(q_l[:,0]) + q_r[:,1]/np.sqrt(q_r[:,0])) /
(np.sqrt(q_l[:,0]) + np.sqrt(q_r[:,0])) )
cbar = np.sqrt(0.5 * aux_global['g'] * (q_l[:,0] + q_r[:,0]))
# Compute Flux structure
delta = q_r - q_l
a1 = 0.5 * (-delta[:,1] + (ubar + cbar) * delta[:,0]) / cbar
a2 = 0.5 * ( delta[:,1] - (ubar - cbar) * delta[:,0]) / cbar
# Compute each family of waves
wave[:,0,0] = a1
wave[:,1,0] = a1 * (ubar - cbar)
s[:,0] = ubar - cbar
wave[:,0,1] = a2
wave[:,1,1] = a2 * (ubar + cbar)
s[:,1] = ubar + cbar
if aux_global['efix']:
raise NotImplementedError("Entropy fix has not been implemented.")
else:
s_index = np.zeros((nrp,2))
for m in xrange(meqn):
for mw in xrange(mwaves):
s_index[:,0] = s[:,mw]
amdq[:,m] += np.min(s_index,axis=1) * wave[:,m,mw]
apdq[:,m] += np.max(s_index,axis=1) * wave[:,m,mw]
return wave, s, amdq, apdq
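The Roe averages and wave speeds can be checked in isolation. A stand-alone sketch with two toy Riemann problems (the depth and momentum values are arbitrary, and only the speed computation is reproduced, not the fluctuation splitting):
import numpy as np

g = 9.81
q_l = np.array([[2.0, 0.0], [2.0, 0.5]])   # columns: depth h, momentum hu
q_r = np.array([[1.0, 0.0], [1.0, 0.5]])
ubar = ((q_l[:, 1] / np.sqrt(q_l[:, 0]) + q_r[:, 1] / np.sqrt(q_r[:, 0])) /
        (np.sqrt(q_l[:, 0]) + np.sqrt(q_r[:, 0])))
cbar = np.sqrt(0.5 * g * (q_l[:, 0] + q_r[:, 0]))
s = np.empty((q_l.shape[0], 2))            # wave speeds, one pair per problem
s[:, 0] = ubar - cbar
s[:, 1] = ubar + cbar
print(s)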
0
Example 137
Project: neural-network-animation Source File: lines.py
def _mark_every_path(markevery, tpath, affine, ax_transform):
"""
Helper function that sorts out how to deal with the input
`markevery` and returns the points where markers should be drawn.
Takes in the `markevery` value and the line path and returns the
sub-sampled path.
"""
# pull out the two bits of data we want from the path
codes, verts = tpath.codes, tpath.vertices
def _slice_or_none(in_v, slc):
'''
Helper function to cope with `codes` being an
ndarray or `None`
'''
if in_v is None:
return None
return in_v[slc]
# if just a float, assume starting at 0.0 and make a tuple
if isinstance(markevery, float):
markevery = (0.0, markevery)
# if just an int, assume starting at 0 and make a tuple
elif isinstance(markevery, int):
markevery = (0, markevery)
if isinstance(markevery, tuple):
if len(markevery) != 2:
raise ValueError('`markevery` is a tuple but its '
'len is not 2; '
'markevery=%s' % (markevery,))
start, step = markevery
# if step is an int, old behavior
if isinstance(step, int):
#tuple of 2 int is for backwards compatibility,
if not(isinstance(start, int)):
raise ValueError('`markevery` is a tuple with '
'len 2 and second element is an int, but '
'the first element is not an int; '
'markevery=%s' % (markevery,))
# just return, we are done here
return Path(verts[slice(start, None, step)],
_slice_or_none(codes, slice(start, None, step)))
elif isinstance(step, float):
if not (isinstance(start, int) or
isinstance(start, float)):
raise ValueError('`markevery` is a tuple with '
'len 2 and second element is a float, but '
'the first element is not a float or an '
'int; '
'markevery=%s' % (markevery,))
#calc cumulative distance along path (in display
# coords):
disp_coords = affine.transform(tpath.vertices)
delta = np.empty((len(disp_coords), 2),
dtype=float)
delta[0, :] = 0.0
delta[1:, :] = (disp_coords[1:, :] -
disp_coords[:-1, :])
delta = np.sum(delta**2, axis=1)
delta = np.sqrt(delta)
delta = np.cumsum(delta)
#calc distance between markers along path based on
# the axes bounding box diagonal being a distance
# of unity:
scale = ax_transform.transform(
np.array([[0, 0], [1, 1]]))
scale = np.diff(scale, axis=0)
scale = np.sum(scale**2)
scale = np.sqrt(scale)
marker_delta = np.arange(start * scale,
delta[-1],
step * scale)
#find closest actual data point that is closest to
# the theoretical distance along the path:
inds = np.abs(delta[np.newaxis, :] -
marker_delta[:, np.newaxis])
inds = inds.argmin(axis=1)
inds = np.unique(inds)
# return, we are done here
return Path(verts[inds],
_slice_or_none(codes, inds))
else:
raise ValueError('`markevery` is a tuple with '
'len 2, but its second element is not an int '
'or a float; '
'markevery=%s' % (markevery,))
elif isinstance(markevery, slice):
# mazol tov, it's already a slice, just return
return Path(verts[markevery],
_slice_or_none(codes, markevery))
elif iterable(markevery):
#fancy indexing
try:
return Path(verts[markevery],
_slice_or_none(codes, markevery))
except (ValueError, IndexError):
raise ValueError('`markevery` is iterable but '
'not a valid form of numpy fancy indexing; '
'markevery=%s' % (markevery,))
else:
raise ValueError('Value of `markevery` is not '
'recognized; '
'markevery=%s' % (markevery,))
0
Example 138
Project: distarray Source File: plot_distarray_protocol.py
def print_array_documentation(context, array, title, text,
global_plot_filename, local_plot_filename):
"""Print some properties of the array.
The output is rst formatted so that it can be directly used for
documentation. It includes a plot of the array distribution, some
properties that are the same for each process, and properties that vary per
process.
Parameters
----------
context : distarray.globalapi.Context
The context that will be used to query the array properties.
array : DistArray
The array to describe.
title : string
The document section title.
text : string
A description of the array layout to add to the document.
global_plot_filename : string
The filename of the global array figure to add to the document.
local_plot_filename : string
The filename of the local array figure to add to the document.
"""
def rst_lines(obj):
""" Return lines of text that format obj for an .rst docuement. """
text = pformat(obj)
lines = text.split('\n')
# pformat() gives blank lines for 3d arrays, which confuse Sphinx.
trims = [line for line in lines if len(line) > 0]
return trims
def rst_code(lines):
"""Format a list of lines into a python code block.
Returns a list of text lines.
"""
code_lines = []
code_lines.append("")
code_lines.append(".. code-block:: python")
code_lines.append("")
for line in lines:
code_line = " " + line
code_lines.append(code_line)
code_lines.append("")
return code_lines
def rst_print(obj):
""" Return text that formats obj nicely for an .rst docuement. """
lines = rst_lines(obj)
text = '\n'.join(lines)
return text
def rst_plot(filename):
""" Reference a plot in the .rst docuement.
The plot must be created elsewhere, this does not make it.
The path emitted assumes some organization of the
documentation directory.
"""
print(".. image:: %s" % (filename))
print()
def text_block_size(lines):
""" Determine the number of rows and columns to print lines.
Parameters
----------
lines : list of text strings
Returns
-------
line_count, line_width : integers
The number of lines and columns required to contain the text.
"""
line_count = len(lines)
line_width = max([len(line) for line in lines])
return line_count, line_width
def text_block_max_size(lines_list):
""" Determine number of rows/columns for the largest line list.
Parameters
----------
lines_list : list of list of text strings
Each entry in the outer list is termed a 'block'.
Each block, which is a list of text strings,
needs some size of space R x C to fit.
Returns
-------
The text box size, in lines and columns, which
is just large enough to display all of the blocks.
"""
# Get line count and width needed for each block.
block_size = empty((len(lines_list), 2), dtype=int)
for itext, lines in enumerate(lines_list):
line_count, line_width = text_block_size(lines)
block_size[itext, 0] = line_count
block_size[itext, 1] = line_width
# Get maximum which is enough to hold any of them.
max_size = block_size.max(axis=0)
max_rows, max_cols = max_size[0], max_size[1]
return max_rows, max_cols
def rst_print_lines(lines_list):
""" Print the list of lines. """
for lines in lines_list:
for line in lines:
print(line)
def rst_table(rows, cols, lines_list):
""" Print the list of lines as a .rst table. """
num_cells = rows * cols
num_texts = len(lines_list)
if num_cells != num_texts:
raise ValueError('Invalid table size %d x %d for %d entries.' % (
rows, cols, num_texts))
# Determine table size needed for biggest text blocks.
max_lines, max_cols = text_block_max_size(lines_list)
# Sphinx table row separator.
sep = '-' * max_cols
seps = [sep for i in range(cols)]
header = '+' + '+'.join(seps) + '+'
# Group text blocks into array pattern.
print(header)
for row in range(rows):
for line in range(max_lines):
col_lines = []
for col in range(cols):
iblock = row * cols + col
lines = lines_list[iblock]
if line < len(lines):
col_line = lines[line]
else:
col_line = ''
col_line = col_line.ljust(max_cols)
col_lines.append(col_line)
text = '|' + '|'.join(col_lines) + '|'
print(text)
print(header)
print()
# Examine the array on all the engines.
def _array_attrs(local_arr):
distbuffer = local_arr.__distarray__()
return dict(db_keys=list(distbuffer.keys()),
db_version=distbuffer["__version__"],
db_buffer=distbuffer["buffer"],
db_dim_data=distbuffer["dim_data"],
db_coords=local_arr.cart_coords,
)
attrs = context.apply(_array_attrs, (array.key,),
targets=array.targets)
db_keys = [a['db_keys'] for a in attrs]
db_version = [a['db_version'] for a in attrs]
db_buffer = [a['db_buffer'] for a in attrs]
db_dim_data = [a['db_dim_data'] for a in attrs]
db_coords = [a['db_coords'] for a in attrs]
# Get local ndarrays.
db_ndarrays = array.get_ndarrays()
# When preparing examples for the protocol release, we need to
# adjust the version number manually. Otherwise this would be left alone.
manual_version_update = True
if manual_version_update:
manual_version = '0.10.0'
db_version = [manual_version for version in db_version]
print("%s" % (title))
print("%s" % ('`' * len(title)))
print()
print(text)
print()
# Global array plot.
if global_plot_filename is not None:
rst_plot(global_plot_filename)
# Full (undistributed) array:
full_array = array.toarray()
print("The full (undistributed) array:")
lines = [">>> full_array"] + rst_lines(full_array)
code_lines = rst_code(lines)
rst_print_lines([code_lines])
# Properties that are the same on all processes:
print("In all processes, we have:")
lines = []
lines += [">>> distbuffer = local_array.__distarray__()"]
lines += [">>> distbuffer.keys()"] + rst_lines(db_keys[0])
lines += [">>> distbuffer['__version__']"] + rst_lines(db_version[0])
code_lines = rst_code(lines)
rst_print_lines([code_lines])
# Local arrays / properties that vary per engine.
print("The local arrays, on each separate engine:")
print()
# Local array plot.
if local_plot_filename is not None:
rst_plot(local_plot_filename)
# Properties that change per-process:
lines_list = []
for rank, (keys, version, buffer, dim_data, ndarray, coord) in enumerate(
zip(db_keys,
db_version,
db_buffer,
db_dim_data,
db_ndarrays,
db_coords)):
# Skip if local ndarray is empty, as there is no local plot.
if ndarray.size == 0:
continue
header = "In process %r:" % (coord,)
lines = []
lines += [">>> distbuffer['buffer']"] + rst_lines(buffer)
lines += [">>> distbuffer['dim_data']"] + rst_lines(dim_data)
code_lines = rst_code(lines)
lines = [header] + code_lines
lines_list.append(lines)
# Print as table with nice layout.
num_local_properties = len(lines_list)
if (num_local_properties % 2) == 0:
# 2 X N grid
rows, cols = (2, num_local_properties // 2)
else:
# N x 1 grid
rows, cols = (num_local_properties, 1)
rst_table(rows, cols, lines_list)
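The table sizing above relies on a small integer numpy.empty bookkeeping array. A self-contained sketch of that step with made-up text blocks:
import numpy as np

blocks = [["abc", "de"], ["longer line", "x", "y"]]
size = np.empty((len(blocks), 2), dtype=int)
for i, lines in enumerate(blocks):
    size[i, 0] = len(lines)                        # line count
    size[i, 1] = max(len(line) for line in lines)  # widest line
max_rows, max_cols = size.max(axis=0)
print(max_rows, max_cols)                          # 3 11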
0
Example 139
Project: openmc Source File: summary.py
def _read_lattices(self):
self.n_lattices = self._f['geometry/n_lattices'].value
# Initialize lattices for each Lattice
# Keys - Lattice keys
# Values - Lattice objects
self.lattices = {}
for key in self._f['geometry/lattices'].keys():
if key == 'n_lattices':
continue
lattice_id = int(key.lstrip('lattice '))
index = self._f['geometry/lattices'][key]['index'].value
name = self._f['geometry/lattices'][key]['name'].value.decode()
lattice_type = self._f['geometry/lattices'][key]['type'].value.decode()
if 'offsets' in self._f['geometry/lattices'][key]:
offsets = self._f['geometry/lattices'][key]['offsets'][...]
else:
offsets = None
if lattice_type == 'rectangular':
dimension = self._f['geometry/lattices'][key]['dimension'][...]
lower_left = \
self._f['geometry/lattices'][key]['lower_left'][...]
pitch = self._f['geometry/lattices'][key]['pitch'][...]
outer = self._f['geometry/lattices'][key]['outer'].value
universe_ids = \
self._f['geometry/lattices'][key]['universes'][...]
# Create the Lattice
lattice = openmc.RectLattice(lattice_id=lattice_id, name=name)
lattice.lower_left = lower_left
lattice.pitch = pitch
# If the outer Universe specified for the Lattice is not void (-22)
if outer != -22:
lattice.outer = self.universes[outer]
# Build array of Universe pointers for the Lattice
universes = \
np.empty(tuple(universe_ids.shape), dtype=openmc.Universe)
for z in range(universe_ids.shape[0]):
for y in range(universe_ids.shape[1]):
for x in range(universe_ids.shape[2]):
universes[z, y, x] = \
self.get_universe_by_id(universe_ids[z, y, x])
# Use 2D NumPy array to store lattice universes for 2D lattices
if len(dimension) == 2:
universes = np.squeeze(universes)
universes = np.atleast_2d(universes)
# Set the universes for the lattice
lattice.universes = universes
# Set the distribcell offsets for the lattice
if offsets is not None:
lattice.offsets = offsets[:, ::-1, :]
# Add the Lattice to the global dictionary of all Lattices
self.lattices[index] = lattice
if lattice_type == 'hexagonal':
n_rings = self._f['geometry/lattices'][key]['n_rings'].value
n_axial = self._f['geometry/lattices'][key]['n_axial'].value
center = self._f['geometry/lattices'][key]['center'][...]
pitch = self._f['geometry/lattices'][key]['pitch'][...]
outer = self._f['geometry/lattices'][key]['outer'].value
universe_ids = self._f[
'geometry/lattices'][key]['universes'][...]
# Create the Lattice
lattice = openmc.HexLattice(lattice_id=lattice_id, name=name)
lattice.center = center
lattice.pitch = pitch
# If the outer Universe specified for the Lattice is not void (-22)
if outer != -22:
lattice.outer = self.universes[outer]
# Build array of Universe pointers for the Lattice. Note that
# we need to convert between the HDF5's square array of
# (x, alpha, z) to the Python API's format of a ragged nested
# list of (z, ring, theta).
universes = []
for z in range(n_axial):
# Add a list for this axial level.
universes.append([])
x = n_rings - 1
a = 2*n_rings - 2
for r in range(n_rings - 1, 0, -1):
# Add a list for this ring.
universes[-1].append([])
# Climb down the top-right.
for i in range(r):
universes[-1][-1].append(universe_ids[z, a, x])
x += 1
a -= 1
# Climb down the right.
for i in range(r):
universes[-1][-1].append(universe_ids[z, a, x])
a -= 1
# Climb down the bottom-right.
for i in range(r):
universes[-1][-1].append(universe_ids[z, a, x])
x -= 1
# Climb up the bottom-left.
for i in range(r):
universes[-1][-1].append(universe_ids[z, a, x])
x -= 1
a += 1
# Climb up the left.
for i in range(r):
universes[-1][-1].append(universe_ids[z, a, x])
a += 1
# Climb up the top-left.
for i in range(r):
universes[-1][-1].append(universe_ids[z, a, x])
x += 1
# Move down to the next ring.
a -= 1
# Convert the ids into Universe objects.
universes[-1][-1] = [self.get_universe_by_id(u_id)
for u_id in universes[-1][-1]]
# Handle the degenerate center ring separately.
u_id = universe_ids[z, a, x]
universes[-1].append([self.get_universe_by_id(u_id)])
# Add the universes to the lattice.
if len(pitch) == 2:
# Lattice is 3D
lattice.universes = universes
else:
# Lattice is 2D; extract the only axial level
lattice.universes = universes[0]
if offsets is not None:
lattice.offsets = offsets
# Add the Lattice to the global dictionary of all Lattices
self.lattices[index] = lattice
0
Example 140
Project: volumina Source File: segmentationEdgesItem.py
def arrayToQPath(x, y, connect='all'):
"""Convert an array of x,y coordinats to QPainterPath as efficiently as possible.
The *connect* argument may be 'all', indicating that each point should be
connected to the next; 'pairs', indicating that each pair of points
should be connected, or an array of int32 values (0 or 1) indicating
connections.
"""
## Create all vertices in path. The method used below creates a binary format so that all
## vertices can be read in at once. This binary format may change in future versions of Qt,
## so the original (slower) method is left here for emergencies:
#path.moveTo(x[0], y[0])
#if connect == 'all':
#for i in range(1, y.shape[0]):
#path.lineTo(x[i], y[i])
#elif connect == 'pairs':
#for i in range(1, y.shape[0]):
#if i%2 == 0:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#elif isinstance(connect, np.ndarray):
#for i in range(1, y.shape[0]):
#if connect[i] == 1:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#else:
#raise Exception('connect argument must be "all", "pairs", or array')
## Speed this up using >> operator
## Format is:
## numVerts(i4) 0(i4)
## x(f8) y(f8) 0(i4) <-- 0 means this vertex does not connect
## x(f8) y(f8) 1(i4) <-- 1 means this vertex connects to the previous vertex
## ...
## 0(i4)
##
## All values are big endian--pack using struct.pack('>d') or struct.pack('>i')
path = QtGui.QPainterPath()
#profiler = debug.Profiler()
n = x.shape[0]
# create empty array, pad with extra space on either end
arr = np.empty(n+2, dtype=[('x', '>f8'), ('y', '>f8'), ('c', '>i4')])
# write first two integers
#profiler('allocate empty')
byteview = arr.view(dtype=np.ubyte)
byteview[:12] = 0
byteview.data[12:20] = struct.pack('>ii', n, 0)
#profiler('pack header')
# Fill array with vertex values
arr[1:-1]['x'] = x
arr[1:-1]['y'] = y
# decide which points are connected by lines
assert connect == 'pairs', \
"I modified this function and now 'pairs' is the only allowed 'connect' option."
arr[1:-1]['c'][::2] = 1
arr[1:-1]['c'][1::2] = 0
#profiler('fill array')
# write last 0
lastInd = 20*(n+1)
byteview.data[lastInd:lastInd+4] = struct.pack('>i', 0)
#profiler('footer')
# create datastream object and stream into path
## Avoiding this method because QByteArray(str) leaks memory in PySide
#buf = QtCore.QByteArray(arr.data[12:lastInd+4]) # I think one unnecessary copy happens here
path.strn = byteview.data[12:lastInd+4] # make sure data doesn't run away
try:
buf = QtCore.QByteArray.fromRawData(path.strn)
except TypeError:
buf = QtCore.QByteArray(bytes(path.strn))
#profiler('create buffer')
ds = QtCore.QDataStream(buf)
def load_path():
ds >> path
#profiler('load')
load_path()
return path
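The key numpy.empty usage here is the structured, big-endian record array with one padding row at each end. A sketch of just that layout, without the Qt stream handling (the sample coordinates are arbitrary):
import numpy as np

x = np.array([0.0, 1.0, 1.0, 2.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
n = x.shape[0]
arr = np.empty(n + 2, dtype=[('x', '>f8'), ('y', '>f8'), ('c', '>i4')])  # padded record array
arr[1:-1]['x'] = x
arr[1:-1]['y'] = y
arr[1:-1]['c'][::2] = 1    # alternate the connect flags, as the 'pairs' branch above does
arr[1:-1]['c'][1::2] = 0
print(arr[1:-1])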
0
Example 141
Project: mne-python Source File: _lead_dots.py
def _fast_sphere_dot_r0(r, rr1_orig, rr2s, lr1, lr2s, cosmags1, cosmags2s,
w1, w2s, volume_integral, lut, n_fact, ch_type):
"""Lead field dot product computation for M/EEG in the sphere model.
Parameters
----------
r : float
The integration radius. It is used to calculate beta as:
beta = (r * r) / (lr1 * lr2).
rr1 : array, shape (n_points x 3)
Normalized position vectors of integrations points in first sensor.
rr2s : list
Normalized position vector of integration points in second sensor.
lr1 : array, shape (n_points x 1)
Magnitude of position vector of integration points in first sensor.
lr2s : list
Magnitude of position vector of integration points in second sensor.
cosmags1 : array, shape (n_points x 1)
Direction of integration points in first sensor.
cosmags2s : list
Direction of integration points in second sensor.
w1 : array, shape (n_points x 1) | None
Weights of integration points in the first sensor.
w2s : list
Weights of integration points in the second sensor.
volume_integral : bool
If True, compute volume integral.
lut : callable
Look-up table for evaluating Legendre polynomials.
n_fact : array
Coefficients in the integration sum.
ch_type : str
The channel type. It can be 'meg' or 'eeg'.
Returns
-------
result : float
The integration sum.
"""
if w1 is None: # operating on surface, treat independently
out_shape = (len(rr2s), len(rr1_orig))
sum_axis = 1 # operate along second axis only at the end
else:
out_shape = (len(rr2s),)
sum_axis = None # operate on flattened array at the end
out = np.empty(out_shape)
rr2 = np.concatenate(rr2s)
lr2 = np.concatenate(lr2s)
cosmags2 = np.concatenate(cosmags2s)
# outer product, sum over coords
ct = np.einsum('ik,jk->ij', rr1_orig, rr2)
# expand axes
rr1 = rr1_orig[:, np.newaxis, :] # (n_rr1, n_rr2, n_coord) e.g. 4x4x3
rr2 = rr2[np.newaxis, :, :]
lr1lr2 = lr1[:, np.newaxis] * lr2[np.newaxis, :]
beta = (r * r) / lr1lr2
if ch_type == 'meg':
sums = _comp_sums_meg(beta.flatten(), ct.flatten(), lut, n_fact,
volume_integral)
sums.shape = (4,) + beta.shape
# Accumulate the result, a little bit streamlined version
# cosmags1 = cosmags1[:, np.newaxis, :]
# cosmags2 = cosmags2[np.newaxis, :, :]
# n1c1 = np.sum(cosmags1 * rr1, axis=2)
# n1c2 = np.sum(cosmags1 * rr2, axis=2)
# n2c1 = np.sum(cosmags2 * rr1, axis=2)
# n2c2 = np.sum(cosmags2 * rr2, axis=2)
# n1n2 = np.sum(cosmags1 * cosmags2, axis=2)
n1c1 = np.einsum('ik,ijk->ij', cosmags1, rr1)
n1c2 = np.einsum('ik,ijk->ij', cosmags1, rr2)
n2c1 = np.einsum('jk,ijk->ij', cosmags2, rr1)
n2c2 = np.einsum('jk,ijk->ij', cosmags2, rr2)
n1n2 = np.einsum('ik,jk->ij', cosmags1, cosmags2)
part1 = ct * n1c1 * n2c2
part2 = n1c1 * n2c1 + n1c2 * n2c2
result = (n1c1 * n2c2 * sums[0] +
(2.0 * part1 - part2) * sums[1] +
(n1n2 + part1 - part2) * sums[2] +
(n1c2 - ct * n1c1) * (n2c1 - ct * n2c2) * sums[3])
# Give it a finishing touch!
result *= (_meg_const / lr1lr2)
if volume_integral:
result *= r
else: # 'eeg'
result = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact)
result.shape = beta.shape
# Give it a finishing touch!
result *= _eeg_const
result /= lr1lr2
# now we add them all up with weights
offset = 0
result *= np.concatenate(w2s)
if w1 is not None:
result *= w1[:, np.newaxis]
for ii, w2 in enumerate(w2s):
out[ii] = np.sum(result[:, offset:offset + len(w2)], axis=sum_axis)
offset += len(w2)
return out
0
Example 142
Project: TSTools Source File: series.py
def _init_images(self, images, date_index=[9, 16], date_format='%Y%j'):
n = len(images)
if n == 0:
raise Exception('Cannot initialize a Series of 0 images')
else:
self.n = n
logger.debug('Trying to initialize a Series of %i images' % self.n)
# Extract images information
_images = np.empty(self.n, dtype=self.images.dtype)
for i, img in enumerate(images):
_images[i]['filename'] = os.path.basename(img)
_images[i]['path'] = img
_images[i]['id'] = os.path.basename(os.path.dirname(img))
try:
date = _images[i]['id'][date_index[0]:date_index[1]]
date = dt.strptime(date, date_format)
except:
try:
date = _images[i]['filename'][date_index[0]:date_index[1]]
date = dt.strptime(date, date_format)
except:
raise Exception(
'Could not parse date from ID or filename '
'(date index=%s:%s, format=%s)\n%s\n%s' %
(date_index[0], date_index[1], date_format,
_images[i]['id'], _images[i]['filename'])
)
_images[i]['date'] = date
_images[i]['ordinal'] = dt.toordinal(_images[i]['date'])
_images[i]['doy'] = int(_images[i]['date'].strftime('%j'))
sort_idx = np.argsort(_images['ordinal'])
_images = _images[sort_idx]
self.images = _images.copy()
# Extract attributes
self.gt = None
self.crs = None
ds = None
for fname in images:
try:
ds = gdal.Open(fname, gdal.GA_ReadOnly)
except:
pass
else:
break
if ds is None:
raise Exception('Could not initialize attributes for %s series: '
'could not open any images in Series with GDAL' %
self.description)
self.band_names = []
for i_b in range(ds.RasterCount):
name = ds.GetRasterBand(i_b + 1).GetDescription()
if not name:
name = 'Band %s' % str(i_b + 1)
self.band_names.append(name)
self.width = ds.RasterXSize
self.height = ds.RasterYSize
self.count = ds.RasterCount
self.dtype = gdal_array.GDALTypeCodeToNumericTypeCode(
ds.GetRasterBand(1).DataType)
self.gt = ds.GetGeoTransform()
self.crs = ds.GetProjection()
0
Example 143
Project: mne-python Source File: utils.py
def _blk_read_lims(start, stop, buf_len):
"""Deal with indexing in the middle of a data block.
Parameters
----------
start : int
Starting index.
stop : int
Ending index (exclusive).
buf_len : int
Buffer size in samples.
Returns
-------
block_start_idx : int
The first block to start reading from.
r_lims : list
The read limits.
d_lims : list
The write limits.
Notes
-----
Consider this example::
>>> start, stop, buf_len = 2, 27, 10
+---------+---------+---------
File structure: | buf0 | buf1 | buf2 |
+---------+---------+---------
File time: 0 10 20 30
+---------+---------+---------
Requested time: 2 27
| |
blockstart blockstop
| |
start stop
We need 27 - 2 = 25 samples (per channel) to store our data, and
we need to read from 3 buffers (30 samples) to get all of our data.
On all reads but the first, the data we read starts at
the first sample of the buffer. On all reads but the last,
the data we read ends on the last sample of the buffer.
We call ``this_data`` the variable that stores the current buffer's data,
and ``data`` the variable that stores the total output.
On the first read, we need to do this::
>>> data[0:buf_len-2] = this_data[2:buf_len] # doctest: +SKIP
On the second read, we need to do::
>>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len] # doctest: +SKIP
On the final read, we need to do::
>>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3] # doctest: +SKIP
This function encapsulates this logic to allow a loop over blocks, where
data is stored using the following limits::
>>> data[d_lims[ii, 0]:d_lims[ii, 1]] = this_data[r_lims[ii, 0]:r_lims[ii, 1]] # doctest: +SKIP
""" # noqa: E501
# this is used to deal with indexing in the middle of a sampling period
assert all(isinstance(x, int) for x in (start, stop, buf_len))
block_start_idx = (start // buf_len)
block_start = block_start_idx * buf_len
last_used_samp = stop - 1
block_stop = last_used_samp - last_used_samp % buf_len + buf_len
read_size = block_stop - block_start
n_blk = read_size // buf_len + (read_size % buf_len != 0)
start_offset = start - block_start
end_offset = block_stop - stop
d_lims = np.empty((n_blk, 2), int)
r_lims = np.empty((n_blk, 2), int)
for bi in range(n_blk):
# Triage start (sidx) and end (eidx) indices for
# data (d) and read (r)
if bi == 0:
d_sidx = 0
r_sidx = start_offset
else:
d_sidx = bi * buf_len - start_offset
r_sidx = 0
if bi == n_blk - 1:
d_eidx = stop - start
r_eidx = buf_len - end_offset
else:
d_eidx = (bi + 1) * buf_len - start_offset
r_eidx = buf_len
d_lims[bi] = [d_sidx, d_eidx]
r_lims[bi] = [r_sidx, r_eidx]
return block_start_idx, r_lims, d_lims
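Assuming _blk_read_lims is in scope, the docstring example can be reproduced directly; the values noted in the comments follow from the arithmetic in the function:
block_start_idx, r_lims, d_lims = _blk_read_lims(2, 27, 10)
print(block_start_idx)   # 0 -- reading starts in the first buffer
print(d_lims)            # rows [0 8], [8 18], [18 25] -- write limits into `data`
print(r_lims)            # rows [2 10], [0 10], [0 7]  -- read limits within each buffer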
0
Example 144
Project: harold Source File: _system_funcs.py
def minimal_realization(A, B, C, mu_tol=1e-9):
"""
Given state matrices :math:`A,B,C` computes minimal state matrices
such that the system is controllable and observable within the
given tolerance :math:`\\mu`.
Implements a basic two-pass algorithm:
1- First, the distance to mode cancellation is computed and
the Hessenberg form is obtained with the identified o'ble/c'ble
block numbers. If the staircase form reports that there are no
cancellations but the distance is less than the tolerance,
distance wins and the respective mode is removed.
Uses ``cancellation_distance()`` and ``staircase()`` for the tests.
Parameters
----------
A,B,C : {(n,n), (n,m), (pxn)} array_like
System matrices to be checked for minimality
mu_tol: float
The sensitivity threshold for the cancellation, compared
with the first output of the cancellation_distance() function.
The default value is :math:`10^{-9}`.
Returns
-------
A,B,C : {(k,k), (k,m), (pxk)} array_like
System matrices that are identified as minimal with k states
instead of the original n where (k <= n)
"""
keep_looking = True
run_out_of_states = False
while keep_looking:
n = A.shape[0]
# Make sure that we still have states left
if n == 0:
A , B , C = [(np.empty((1,0)))]*3
break
kc = cancellation_distance(A,B)[0]
ko = cancellation_distance(A.T,C.T)[0]
if min(kc,ko) > mu_tol: # no cancellation
keep_looking= False
else:
Ac,Bc,Cc,blocks_c = staircase(A,B,C,block_indices=True)
Ao,Bo,Co,blocks_o = staircase(A,B,C,form='o',invert=True,
block_indices=True)
# ===============Extra Check============================
"""
Here kc, ko report a possible cancellation, so staircase
should also report fewer than n c'ble/o'ble blocks in the
decomposition. If not, the staircase tolerance should be increased;
otherwise we either loop forever or the uno'ble branch removes
the system matrices entirely.
Thus, we remove the last scalar or the two-by-two block
artificially. Because we trust the cancelling distance,
more than our first born. The possible cases of unc'ble
modes are
-- one real distinct eigenvalue
-- two real identical eigenvalues
-- two complex conjugate eigenvalues
We don't regret this. This is sparta.
"""
# If unobservability distance is closer, let it handle first
if ko>=kc:
if (sum(blocks_c) == n and kc <= mu_tol):
Ac_mod , Bc_mod , Cc_mod , kc_mod = Ac,Bc,Cc,kc
while kc_mod <= mu_tol:# Until cancel dist gets big
Ac_mod,Bc_mod,Cc_mod = (
Ac_mod[:-1,:-1],Bc_mod[:-1,:],Cc_mod[:,:-1])
if Ac_mod.size == 0:
A , B , C = [(np.empty((1,0)))]*3
run_out_of_states = True
break
else:
kc_mod = cancellation_distance(Ac_mod,Bc_mod)[0]
kc = kc_mod
# Fake an iterable to fool the sum below
blocks_c = [sum(blocks_c)-Ac_mod.shape[0]]
# Same with the o'ble modes
if (sum(blocks_o) == n and ko <= mu_tol):
Ao_mod , Bo_mod , Co_mod , ko_mod = Ao,Bo,Co,ko
while ko_mod <= mu_tol:# Until cancel dist gets big
Ao_mod,Bo_mod,Co_mod = (
Ao_mod[1:,1:],Bo_mod[1:,:],Co_mod[:,1:])
# If there is nothing left, break out everything
if Ao_mod.size == 0:
A , B , C = [(np.empty((1,0)))]*3
run_out_of_states = True
break
else:
ko_mod = cancellation_distance(Ao_mod,Bo_mod)[0]
ko = ko_mod
blocks_o = [sum(blocks_o)-Ao_mod.shape[0]]
# ===============End of Extra Check=====================
if run_out_of_states: break
if sum(blocks_c) > sum(blocks_o):
remove_from = 'o'
elif sum(blocks_c) < sum(blocks_o):
remove_from = 'c'
else: # both have the same number of states to be removed
if kc >= ko:
remove_from = 'o'
else:
remove_from = 'c'
if remove_from == 'c':
l = sum(blocks_c)
A , B , C = Ac[:l,:l] , Bc[:l,:] , Cc[:,:l]
else:
l = n - sum(blocks_o)
A , B , C = Ao[l:,l:] , Bo[l:,:] , Co[:,l:]
return A , B, C
0
Example 145
Project: C-PAC Source File: utils.py
def gen_scatterplot(base_dir, map_yaml, img_desc):
'''
Function to generate a scatter plot of all of the images
ran for a given centrality type
Parameters
----------
base_dir : string
filepath to the directory where the output should be written
map_yaml : string
filepath to the mapping dictionary yaml file between afni
and cpac centrality outputs
img_desc : string
a string describing the type of images being analyzed; this
string will be used to title and name the plot png file
Returns
-------
out_png : string
filepath to the produced output png file
'''
# Import packages
import os
import yaml
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
# Init variables
cpac_bin = np.empty(0)
afni_bin = np.empty(0)
cpac_wght = np.empty(0)
afni_wght = np.empty(0)
map_dict = yaml.load(open(map_yaml, 'r'))
# Extract and build pairwise arrays
for cpac, afni in map_dict.items():
cpac_arr = nib.load(cpac).get_data().flatten()
afni_arr = nib.load(afni).get_data().flatten()
if 'binarize' in cpac:
cpac_bin = np.concatenate((cpac_bin, cpac_arr))
afni_bin = np.concatenate((afni_bin, afni_arr))
else:
cpac_wght = np.concatenate((cpac_wght, cpac_arr))
afni_wght = np.concatenate((afni_wght, afni_arr))
# Get best fit lines and set up equation strs
bin_fit = np.polyfit(cpac_bin, afni_bin, 1)
wght_fit = np.polyfit(cpac_wght, afni_wght, 1)
bin_eq_str = 'y = %.4fx + %.4f' % (bin_fit[0], bin_fit[1])
wght_eq_str = 'y = %.4fx + %.4f' % (wght_fit[0], wght_fit[1])
# Build plot
bin_pts = plt.scatter(cpac_bin, afni_bin, color='b', alpha=0.4,
label='Binarized')
wght_pts = plt.scatter(cpac_wght, afni_wght, color='r', alpha=0.4,
label='Weighted')
plt.legend(handles=[bin_pts, wght_pts])
plt.text(0.25*cpac_bin.max(), 0.75*afni_bin.max(), bin_eq_str, color='b')
plt.text(0.75*cpac_bin.max(), 0.25*afni_bin.max(), wght_eq_str, color='r')
plt.xlabel('C-PAC values')
plt.ylabel('AFNI values')
plt.title('CPAC-AFNI image intensities scatterplot: %s' % img_desc)
plt.grid()
# Save figure
fig = plt.gcf()
fig.set_size_inches(14, 9)
# Output png
out_png = os.path.join(base_dir, img_desc + '_scatter.png')
plt.savefig(out_png, dpi=150)
# Clear and close
plt.clf()
plt.close()
# Return png path
return out_png
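The accumulation pattern above, starting from zero-length numpy.empty(0) arrays and growing them with np.concatenate, can be sketched with random stand-in data (not real AFNI/C-PAC values):
import numpy as np

cpac_vals = np.empty(0)
afni_vals = np.empty(0)
for _ in range(3):                                   # three stand-in image pairs
    a = np.random.rand(100)
    b = 0.9 * a + 0.05 * np.random.rand(100)
    cpac_vals = np.concatenate((cpac_vals, a))
    afni_vals = np.concatenate((afni_vals, b))
slope, intercept = np.polyfit(cpac_vals, afni_vals, 1)
print('y = %.4fx + %.4f' % (slope, intercept))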
0
Example 146
Project: cardoon Source File: sensitivity.py
def create_sensitivity_tape(device, parList, inVec):
"""
Create sensitivity AD tape
Inputs:
device: device with nodal attributes
parList: list of parameters to calculate sensitivities
inVec: input vector including parameters and perhaps nonlinear
control voltages at the end of the vector
Side effects: operating point attributes lost in internal terminals
"""
# Should store some parameter information for safety
#
# Create AD tape -----------------------------------------------------
#
a_inVec = ad.independent(inVec)
# set adouble attributes: a_inVec may be longer than parList but
# zip() truncates to the shortest list
for item, a_val in zip(parList, a_inVec):
setattr(device, item[0], a_val)
# Re-calculate conductances / parameters
linVec = np.empty(shape=0)
device.process_params()
if device.linearVCCS:
gList = []
for vccs in device.linearVCCS:
# Linear output current
g = vccs[2]
# Kludge: Make sure all elements are of the correct type
if type(g) != ad.cppad_.a_float:
g = ad.cppad_.a_float(g)
gList.append(g)
# Overwrite output vector
linVec = np.array(gList)
#import pdb; pdb.set_trace()
# Later add for time- and frequency- domain
if device.isDCSource:
val = device.get_DCsource()
# Kludge: Make sure all elements are of the correct type
if type(val) != ad.cppad_.a_float:
val = ad.cppad_.a_float(val)
valVec = np.array([val])
sourceVec = np.concatenate((linVec, valVec), axis=0)
else:
sourceVec = linVec
if device.isNonlinear:
# calculate nonlinear currents and concatenate to output
(iVec, qVec) = device.eval_cqs(a_inVec[-device.nD_nxin:])
# # Kludge: Make sure all elements are of the correct type (not needed?)
# for k,i in enumerate(iVec):
# if type(i) != ad.cppad_.a_float:
# iVec[k] = ad.cppad_.a_float(i)
a_outVec = np.concatenate((sourceVec, iVec), axis=0)
else:
# Just copy whatever we have
a_outVec = sourceVec
# tape stored in f
f = ad.adfun(a_inVec, a_outVec)
f.optimize()
# Restore element to original state
device.clean_attributes()
device.set_attributes()
device.process_params()
nd.restore_RCnumbers(device)
# End of create tape -----------------------------------------------
return f
0
Example 147
def __init__(self, x_file_list, y_file_list, dur_file_list=None, n_ins=0, n_outs=0, buffer_size = 500000, sequential=False, network_type=None, shuffle=False):
"""Initialise a data provider
:param x_file_list: list of file names for the input files to DNN
:type x_file_list: python list
:param y_file_list: list of files for the output files to DNN
:param n_ins: the dimensionality for input features
:param n_outs: the dimensionality for output features
:param buffer_size: the size of the buffer, indicating the number of frames in the buffer. The value depends on the memory size of RAM/GPU.
:param shuffle: True/False. To indicate whether the file list will be shuffled. When loading data block by block, the data in the buffer will be shuffled regardless of whether this value is True or False.
"""
self.logger = logging.getLogger("ListDataProvider")
self.n_ins = n_ins
self.n_outs = n_outs
self.buffer_size = buffer_size
self.sequential = sequential
self.network_type = network_type
#remove potential empty lines and end of line signs
try:
assert len(x_file_list) > 0
except AssertionError:
self.logger.critical('first list is empty')
raise
try:
assert len(y_file_list) > 0
except AssertionError:
self.logger.critical('second list is empty')
raise
try:
assert len(x_file_list) == len(y_file_list)
except AssertionError:
self.logger.critical('two lists are of differing lengths: %d versus %d',len(x_file_list),len(y_file_list))
raise
if dur_file_list:
try:
assert len(x_file_list) == len(dur_file_list)
except AssertionError:
self.logger.critical('two lists are of differing lengths: %d versus %d',len(x_file_list),len(dur_file_list))
raise
self.x_files_list = x_file_list
self.y_files_list = y_file_list
self.dur_files_list = dur_file_list
self.logger.debug('first list of items from ...%s to ...%s' % (self.x_files_list[0].rjust(20)[-20:],self.x_files_list[-1].rjust(20)[-20:]) )
self.logger.debug('second list of items from ...%s to ...%s' % (self.y_files_list[0].rjust(20)[-20:],self.y_files_list[-1].rjust(20)[-20:]) )
if shuffle:
random.seed(271638)
random.shuffle(self.x_files_list)
random.seed(271638)
random.shuffle(self.y_files_list)
if self.dur_files_list:
random.seed(271638)
random.shuffle(self.dur_files_list)
self.file_index = 0
self.list_size = len(self.x_files_list)
self.remain_data_x = numpy.empty((0, self.n_ins))
self.remain_data_y = numpy.empty((0, self.n_outs))
self.remain_frame_number = 0
self.end_reading = False
self.logger.debug('initialised')
0
Example 148
def save_as_png(surface, filename, *rect, **kwargs):
alpha = kwargs['alpha']
feedback_cb = kwargs.get('feedback_cb', None)
write_legacy_png = kwargs.get("write_legacy_png", True)
if not rect:
rect = surface.get_bbox()
x, y, w, h = rect
if w == 0 or h == 0:
# workaround to save empty documents
x, y, w, h = 0, 0, 1, 1
# calculate bounding box in full tiles
render_tx = x/N
render_ty = y/N
render_tw = (x+w-1)/N - render_tx + 1
render_th = (y+h-1)/N - render_ty + 1
# buffer for rendering one tile row at a time
arr = numpy.empty((1*N, render_tw*N, 4), 'uint8') # rgba or rgbu
# view into arr without the horizontal padding
arr_xcrop = arr[:,x-render_tx*N:x-render_tx*N+w,:]
first_row = render_ty
last_row = render_ty+render_th-1
def render_tile_scanlines():
feedback_counter = 0
for ty in range(render_ty, render_ty+render_th):
skip_rendering = False
if kwargs.get('single_tile_pattern', False):
# optimization for simple background patterns (e.g. solid color)
if ty != first_row:
skip_rendering = True
for tx_rel in xrange(render_tw):
# render one tile
dst = arr[:,tx_rel*N:(tx_rel+1)*N,:]
if not skip_rendering:
surface.blit_tile_into(dst, alpha, render_tx+tx_rel, ty)
if feedback_cb and feedback_counter % TILES_PER_CALLBACK == 0:
feedback_cb()
feedback_counter += 1
# yield a numpy array of the scanline without padding
res = arr_xcrop
if ty == last_row:
res = res[:y+h-ty*N,:,:]
if ty == first_row:
res = res[y-render_ty*N:,:,:]
yield res
filename_sys = filename.encode(sys.getfilesystemencoding())
# FIXME: should not do that, should use open(unicode_object)
mypaintlib.save_png_fast_progressive(filename_sys, w, h, alpha,
render_tile_scanlines(),
write_legacy_png)
0
Example 149
Project: CMT Source File: CMT.py
def initialise(self, im_gray0, tl, br):
# Initialise detector, descriptor, matcher
self.detector = cv2.FeatureDetector_create(self.DETECTOR)
self.descriptor = cv2.DescriptorExtractor_create(self.DESCRIPTOR)
self.matcher = cv2.DescriptorMatcher_create(self.MATCHER)
# Get initial keypoints in whole image
keypoints_cv = self.detector.detect(im_gray0)
# Remember keypoints that are in the rectangle as selected keypoints
ind = util.in_rect(keypoints_cv, tl, br)
selected_keypoints_cv = list(itertools.compress(keypoints_cv, ind))
selected_keypoints_cv, self.selected_features = self.descriptor.compute(im_gray0, selected_keypoints_cv)
selected_keypoints = util.keypoints_cv_to_np(selected_keypoints_cv)
num_selected_keypoints = len(selected_keypoints_cv)
if num_selected_keypoints == 0:
raise Exception('No keypoints found in selection')
# Remember keypoints that are not in the rectangle as background keypoints
background_keypoints_cv = list(itertools.compress(keypoints_cv, ~ind))
background_keypoints_cv, background_features = self.descriptor.compute(im_gray0, background_keypoints_cv)
_ = util.keypoints_cv_to_np(background_keypoints_cv)
# Assign each keypoint a class starting from 1, background is 0
self.selected_classes = array(range(num_selected_keypoints)) + 1
background_classes = zeros(len(background_keypoints_cv))
# Stack background features and selected features into database
self.features_database = vstack((background_features, self.selected_features))
# Same for classes
self.database_classes = hstack((background_classes, self.selected_classes))
# Get all distances between selected keypoints in squareform
pdist = scipy.spatial.distance.pdist(selected_keypoints)
self.squareform = scipy.spatial.distance.squareform(pdist)
# Get all angles between selected keypoints
angles = np.empty((num_selected_keypoints, num_selected_keypoints))
for k1, i1 in zip(selected_keypoints, range(num_selected_keypoints)):
for k2, i2 in zip(selected_keypoints, range(num_selected_keypoints)):
# Compute vector from k1 to k2
v = k2 - k1
# Compute angle of this vector with respect to x axis
angle = math.atan2(v[1], v[0])
# Store angle
angles[i1, i2] = angle
self.angles = angles
# Find the center of selected keypoints
center = np.mean(selected_keypoints, axis=0)
# Remember the rectangle coordinates relative to the center
self.center_to_tl = np.array(tl) - center
self.center_to_tr = np.array([br[0], tl[1]]) - center
self.center_to_br = np.array(br) - center
self.center_to_bl = np.array([tl[0], br[1]]) - center
# Calculate springs of each keypoint
self.springs = selected_keypoints - center
# Set start image for tracking
self.im_prev = im_gray0
# Make keypoints 'active' keypoints
self.active_keypoints = np.copy(selected_keypoints)
# Attach class information to active keypoints
self.active_keypoints = hstack((selected_keypoints, self.selected_classes[:, None]))
# Remember number of initial keypoints
self.num_initial_keypoints = len(selected_keypoints_cv)
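The pairwise angle table can also be obtained without the double loop by broadcasting; a hedged sketch with three made-up 2-D keypoints (it matches the nested loops above for these inputs):
import numpy as np

keypoints = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
diff = keypoints[np.newaxis, :, :] - keypoints[:, np.newaxis, :]   # v = k2 - k1 for every pair
angles = np.arctan2(diff[:, :, 1], diff[:, :, 0])                  # shape (n, n); angles[i1, i2]
print(angles)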
0
Example 150
Project: LASIF Source File: rotations.py
def get_border_latlng_list(
min_lat, max_lat, min_lng, max_lng, number_of_points_per_side=25,
rotation_axis=(0, 0, 1), rotation_angle_in_degree=0):
"""
Helper function taking a spherical section defined by latitudinal and
longitudinal extension, rotating it around the given axis and rotation angle,
and returning a list of points outlining the region. Useful for plotting.
:param min_lat: The minimum latitude.
:param max_lat: The maximum latitude.
:param min_lng: The minimum longitude.
:param max_lng: The maximum longitude.
:param number_of_points_per_side: The number of points per side desired.
:param rotation_axis: The rotation axis. Optional.
:param rotation_angle_in_degree: The rotation angle in degrees. Optional.
"""
north_border = np.empty((number_of_points_per_side, 2))
south_border = np.empty((number_of_points_per_side, 2))
east_border = np.empty((number_of_points_per_side, 2))
west_border = np.empty((number_of_points_per_side, 2))
north_border[:, 0] = max_lat
north_border[:, 1] = np.linspace(min_lng, max_lng,
number_of_points_per_side)
east_border[:, 0] = np.linspace(max_lat, min_lat,
number_of_points_per_side)
east_border[:, 1] = max_lng
south_border[:, 0] = min_lat
south_border[:, 1] = np.linspace(max_lng, min_lng,
number_of_points_per_side)
west_border[:, 0] = np.linspace(min_lat, max_lat,
number_of_points_per_side)
west_border[:, 1] = min_lng
# Rotate everything.
for border in [north_border, south_border, east_border, west_border]:
for _i in xrange(number_of_points_per_side):
border[_i, 0], border[_i, 1] = rotate_lat_lon(
border[_i, 0], border[_i, 1], rotation_axis,
rotation_angle_in_degree)
# Fix dateline wraparounds.
for border in [north_border, south_border, east_border, west_border]:
lngs = border[:, 1]
lngs[lngs < min_lng] += 360.0
# Take care to only use every corner once.
borders = np.concatenate([north_border, east_border[1:], south_border[1:],
west_border[1:]])
borders = list(borders)
borders = [tuple(_i) for _i in borders]
return borders
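Ignoring the rotation step, the border construction above is just a numpy.empty allocation filled column by column with np.linspace. A stand-alone sketch for one border (the extents are arbitrary):
import numpy as np

npts = 5
min_lat, max_lat, min_lng, max_lng = -10.0, 10.0, 20.0, 40.0
north = np.empty((npts, 2))                           # (lat, lng) pairs along the north edge
north[:, 0] = max_lat                                 # constant latitude
north[:, 1] = np.linspace(min_lng, max_lng, npts)     # sweep in longitude
print(north)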