Here are examples of the Python API numpy.matlib.repmat, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
70 Examples
3
Source : test_matlib.py
with GNU General Public License v3.0
from adityaprakash-bobby
def test_repmat():
    a1 = np.arange(4)
    x = numpy.matlib.repmat(a1, 2, 2)
    y = np.array([[0, 1, 2, 3, 0, 1, 2, 3],
                  [0, 1, 2, 3, 0, 1, 2, 3]])
    assert_array_equal(x, y)
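For reference, a minimal standalone sketch (assuming only NumPy) of the same tiling behavior; np.tile is the plain-ndarray equivalent:

import numpy as np
import numpy.matlib

a1 = np.arange(4)                               # array([0, 1, 2, 3])
x = numpy.matlib.repmat(a1, 2, 2)
print(x.shape)                                  # (2, 8): 2 copies down, 2 across
print(np.array_equal(x, np.tile(a1, (2, 2))))   # True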
3
Source : utilities.py
with MIT License
from AIDynamicAction
def rep_mat(argin, n, m):
    """
    Ensures 1D result.
    """
    return np.squeeze(repmat(argin, n, m))
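A short standalone sketch (assuming from numpy.matlib import repmat) of why the squeeze matters: repmat always returns a 2-D array, so a 1-D input tiled with n=1 comes back with a singleton leading axis:

import numpy as np
from numpy.matlib import repmat

v = np.array([1.0, 2.0, 3.0])
print(repmat(v, 1, 2).shape)              # (1, 6): repmat output is always 2-D
print(np.squeeze(repmat(v, 1, 2)).shape)  # (6,): squeezed back to 1-D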
3
Source : test_matlib.py
with MIT License
from alvarobartt
def test_repmat():
    a1 = np.arange(4)
    x = numpy.matlib.repmat(a1, 2, 2)
    y = np.array([[0, 1, 2, 3, 0, 1, 2, 3],
                  [0, 1, 2, 3, 0, 1, 2, 3]])
    assert_array_equal(x, y)
3
Source : environments.py
with MIT License
from befelix
def _sample_start_state(self, mean=None, std=None, n_samples=1):
    """ """
    init_std = self.init_std
    if std is not None:
        init_std = std
    init_m = mean
    if init_m is None:
        init_m = self.init_m
    samples = (repmat(init_std, n_samples, 1) * np.random.randn(n_samples, self.n_s)
               + repmat(init_m, n_samples, 1))
    return samples.T.squeeze()
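A hedged standalone sketch of the sampling pattern above, with toy values standing in for init_m and init_std; NumPy broadcasting gives the same result without repmat:

import numpy as np
from numpy.matlib import repmat

n_samples, n_s = 4, 3
init_m = np.zeros(n_s)
init_std = np.ones(n_s)
noise = np.random.randn(n_samples, n_s)
a = repmat(init_std, n_samples, 1) * noise + repmat(init_m, n_samples, 1)
b = init_std * noise + init_m   # broadcasting equivalent
print(np.allclose(a, b))        # True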
3
Source : ldes.py
with MIT License
from buyizhiyou
def PSR(response, rate):
    max_response = np.max(response)
    h, w = response.shape
    k = 4 / (h * w)
    yy, xx = np.unravel_index(np.argmax(response, axis=None), response.shape)
    idx = np.arange(w) - xx
    idy = np.arange(h) - yy
    idx = repmat(idx, h, 1)
    idy = repmat(idy, w, 1).T
    t = idx ** 2 + idy ** 2
    delta = 1 - np.exp(-k * t.astype(np.float32))
    r = (max_response - response) / delta
    r[np.isnan(r)] = np.inf
    return np.min(r)
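A minimal sketch of the coordinate-grid idiom used above (tiling row and column offsets with repmat); np.meshgrid is the more common spelling:

import numpy as np
from numpy.matlib import repmat

h, w, yy, xx = 3, 4, 1, 2
idx = repmat(np.arange(w) - xx, h, 1)      # column offsets, shape (3, 4)
idy = repmat(np.arange(h) - yy, w, 1).T    # row offsets, shape (3, 4)
gx, gy = np.meshgrid(np.arange(w) - xx, np.arange(h) - yy)
print(np.array_equal(idx, gx), np.array_equal(idy, gy))  # True True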
3
Source : util.py
with MIT License
from fanglinpu
def setNormSkel(self, norm_skel):
    if len(norm_skel) % 3 != 0:
        raise ValueError('invalid length of the skeleton mat')
    jntNum = len(norm_skel) // 3
    self.norm_skel = norm_skel.copy().astype(np.float32)
    self.crop_skel = norm_skel.copy() * self.skel_norm_ratio
    self.com3D = np.zeros([3])
    self.com3D[2] = 200
    self.skel = (self.crop_skel + repmat(self.com3D, 1, jntNum))[0]
    self.skel = self.skel.astype(np.float32)
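A small sketch of the tiling step above: repmat(com3D, 1, jntNum) lays jntNum copies of the 3-vector end to end in one row, matching a flattened (x, y, z, x, y, z, ...) skeleton layout:

import numpy as np
from numpy.matlib import repmat

com3D = np.array([0.0, 0.0, 200.0])
jntNum = 4
offsets = repmat(com3D, 1, jntNum)   # shape (1, 12)
print(offsets[0])                    # [0. 0. 200. 0. 0. 200. 0. 0. 200. 0. 0. 200.]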
3
Source : util.py
with MIT License
from fanglinpu
def setCropSkel(self, crop_skel):
    if len(crop_skel) % 3 != 0:
        raise ValueError('invalid length of the skeleton mat')
    jntNum = len(crop_skel) // 3
    self.crop_skel = crop_skel.astype(np.float32)
    self.skel = (self.crop_skel + repmat(self.com3D, 1, jntNum))[0]
    self.skel = self.skel.astype(np.float32)
    self.normSkel()
3
Source : util.py
with MIT License
from fanglinpu
def setSkel(self, skel):
    if len(skel) % 3 != 0:
        raise ValueError('invalid length of the skeleton mat')
    jntNum = len(skel) // 3
    self.skel = skel.astype(np.float32)
    # crop_skel is the training label for the neural network, normalized w.r.t. com3D
    self.crop_skel = (self.skel - repmat(self.com3D, 1, jntNum))[0]
    self.crop_skel = self.crop_skel.astype(np.float32)
    self.normSkel()
3
Source : util.py
with MIT License
from fanglinpu
def crop2D(self):
    self.crop_skel = self.norm_skel * np.float32(50.0)
    skel = self.crop_skel.copy()
    jntNum = len(skel) // 3
    skel = skel.reshape(-1, 3)
    skel += repmat(self.com3D, jntNum, 1)
    for i, jnt in enumerate(skel):
        jnt = Camera.to2D(jnt)
        pt = np.array([jnt[0], jnt[1], 1.0], np.float32).reshape(3, 1)
        pt = self.trans * pt
        skel[i, 0], skel[i, 1] = pt[0], pt[1]
    return skel
3
Source : utils.py
with MIT License
from int-brain-lab
def raised_cosine(duration, nbases, binfun):
    nbins = binfun(duration)
    ttb = repmat(np.arange(1, nbins + 1).reshape(-1, 1), 1, nbases)
    dbcenter = nbins / nbases
    cwidth = 4 * dbcenter
    bcenters = 0.5 * dbcenter + dbcenter * np.arange(0, nbases)
    x = ttb - repmat(bcenters.reshape(1, -1), nbins, 1)
    bases = (np.abs(x / cwidth) < 0.5) * (np.cos(x * np.pi * 2 / cwidth) * 0.5 + 0.5)
    return bases
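A hedged usage sketch (assuming the function above and its imports, numpy as np and repmat from numpy.matlib, are in scope); binfun maps a duration to a bin count:

import numpy as np
from numpy.matlib import repmat

binfun = lambda d: int(d * 10)        # e.g. 10 bins per unit of duration
B = raised_cosine(2.0, nbases=5, binfun=binfun)
print(B.shape)                        # (20, 5): one raised-cosine bump per column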
3
Source : utils.py
with MIT License
from int-brain-lab
def full_rcos(duration, nbases, binfun, n_before=1):
    if not isinstance(n_before, int):
        n_before = int(n_before)
    nbins = binfun(duration)
    ttb = repmat(np.arange(1, nbins + 1).reshape(-1, 1), 1, nbases)
    dbcenter = nbins / (nbases - 2)
    cwidth = 4 * dbcenter
    bcenters = 0.5 * dbcenter + dbcenter * np.arange(-n_before, nbases - n_before)
    x = ttb - repmat(bcenters.reshape(1, -1), nbins, 1)
    bases = (np.abs(x / cwidth) < 0.5) * (np.cos(x * np.pi * 2 / cwidth) * 0.5 + 0.5)
    return bases
3
Source : m6plot.py
with Apache License 2.0
from NCAR
def yzWeight(y, z):
    """
    Calculates the weights to use when calculating the statistics of a y-z section.
    y(nj+1) is a 1D vector of column edge positions and z(nk+1,nj) is the interface
    elevations of each column. Returns weight(nk,nj).
    """
    dz = z[:-1, :] - z[1:, :]
    return numpy.matlib.repmat(y[1:] - y[:-1], dz.shape[0], 1) * dz
3
Source : gprpyTools.py
with MIT License
from NSGeophysics
def tpowGain(data, twtt, power):
    '''
    Apply a t-power gain to each trace with the given exponent.
    INPUT:
    data     data matrix whose columns contain the traces
    twtt     two-way travel time values for the rows in data
    power    exponent
    OUTPUT:
    newdata  data matrix after t-power gain
    '''
    factor = np.reshape(twtt ** (float(power)), (len(twtt), 1))
    factmat = matlib.repmat(factor, 1, data.shape[1])
    return np.multiply(data, factmat)
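A hedged usage sketch (assuming the function above is in scope, with numpy as np and numpy.matlib as matlib); each trace column is scaled by its travel time raised to the exponent:

import numpy as np
import numpy.matlib as matlib

twtt = np.linspace(0.0, 1.0, 5)    # two-way travel times, one per row
data = np.ones((5, 3))             # 3 traces in the columns
gained = tpowGain(data, twtt, power=2)
print(gained[:, 0])                # each row scaled by twtt**2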
3
Source : SSFstatic.py
with MIT License
from Rassibassi
def randomizeSteps(stepSizes, spanLength, nSpans, sigma=0.01):
    stepSizes = matlib.repmat(stepSizes, nSpans, 1)
    stepSizes = stepSizes + np.random.normal(0, sigma * stepSizes[0, 0], size=stepSizes.shape)
    stepSizes[:, -1] = stepSizes[:, -1] - (np.sum(stepSizes, axis=1) - spanLength)
    return stepSizes
3
Source : fxpefac.py
with GNU General Public License v3.0
from uarif1
def smooth(x, n):
    # snx = x.shape[1]
    nf = x.shape[0]
    c = np.cumsum(x, 1)
    y = np.hstack((
        c[:, np.arange(0, n + 1, 2)] / repmat(np.arange(1, n + 1, 2), nf, 1),
        (c[:, n:] - c[:, :-n]) / n,
        (repmat(c[:, -1], 1, int(np.floor(n / 2))) - c[:, np.arange(-1 - n + 2, -1, 2)])
        / repmat(np.arange(n - 2, 0, -2), nf, 1)))
    return y
3
Source : fxpefac.py
with GNU General Public License v3.0
from uarif1
def timesm(x, n):
    if not np.mod(n, 2):
        n += 1
    nx = x.shape[1]
    # nf = x.shape[0]
    c = np.cumsum(x, 0)
    n = int(n)
    mid = int(np.round(n / 2))
    y = np.vstack((c[mid:n, :] / repmat(np.arange(mid + 1, n + 1)[np.newaxis].T, 1, nx),
                   (c[n:, :] - c[:-n, :]) / n,
                   (repmat(c[-1, :], mid, 1) - c[-1 - n + 1:-1 - mid, :])
                   / repmat(np.arange(n - 1, mid, -1)[np.newaxis].T, 1, nx)))
    return y
3
Source : rrt_connect3D.py
with MIT License
from zhm-real
def NEAREST_NEIGHBOR(self, q, tree):
    # find the nearest neighbor in the tree
    V = np.array(tree.V)
    if len(V) == 1:
        return V[0]
    xr = repmat(q, len(V), 1)
    dists = np.linalg.norm(xr - V, axis=1)
    return tuple(tree.V[np.argmin(dists)])
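A standalone sketch of the nearest-neighbor pattern above; broadcasting makes the repmat optional:

import numpy as np
from numpy.matlib import repmat

V = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 0.0, 0.0]])
q = np.array([0.9, 1.0, 1.1])
xr = repmat(q, len(V), 1)                       # tile the query over all rows
dists = np.linalg.norm(xr - V, axis=1)
print(np.argmin(dists))                         # 1
print(np.allclose(dists, np.linalg.norm(q - V, axis=1)))  # True: broadcasting equivalent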
3
Source : utils3D.py
with MIT License
from zhm-real
def nearest(initparams, x, isset=False):
    V = np.array(initparams.V)
    if initparams.i == 0:
        return initparams.V[0]
    xr = repmat(x, len(V), 1)
    dists = np.linalg.norm(xr - V, axis=1)
    return tuple(initparams.V[np.argmin(dists)])
3
Source : utils3D.py
with MIT License
from zhm-real
def near(initparams, x):
    # s = np.array(s)
    V = np.array(initparams.V)
    if initparams.i == 0:
        return [initparams.V[0]]
    cardV = len(initparams.V)
    eta = initparams.eta
    gamma = initparams.gamma
    # r = min{gamma_RRT* * (log(card(V)) / card(V))^(1/d), eta}
    r = min(gamma * ((np.log(cardV) / cardV) ** (1 / 3)), eta)
    if initparams.done:
        r = 1
    xr = repmat(x, len(V), 1)
    inside = np.linalg.norm(xr - V, axis=1) < r
    nearpoints = V[inside]
    return np.array(nearpoints)
0
Source : SVMatrixGenerator.py
with BSD 2-Clause "Simplified" License
from AlexandrovLab
def extract_kat_regions(res, imd, subs, kmin_samples, pvalue_thresh, rate_factor_thresh, doMerging, kmin_filter, bp_rate):
    segInterDist = res['yhat']
    kataegis_threshold = imd
    kat_regions_all = pd.DataFrame()
    positions = subs['pos']
    katLoci = segInterDist <= kataegis_threshold  # flag specifying if a point is in a peak
    if sum(katLoci) > 0:
        start_regions = np.asarray(np.where(katLoci[1:] & ~(katLoci[:-1]) | ((katLoci[1:] & (katLoci[:-1])) & (segInterDist[1:] != segInterDist[:len(katLoci) - 1]))))[0] + 1
        if katLoci[0]:
            start_regions = np.hstack((0, start_regions))
        end_regions = np.asarray(np.where(~katLoci[1:] & (katLoci[:-1]) | ((katLoci[1:] & (katLoci[:-1])) & (segInterDist[1:] != segInterDist[:-1]))))[0]
        if katLoci[-1]:
            end_regions = np.hstack((end_regions, len(katLoci) - 1))
        # handle special cases
        if (len(end_regions) + len(start_regions) > 0):  # if there are any discontinuities in the segmentation at all
            if (len(end_regions) == 1) & (len(start_regions) == 0):
                start_regions = 0
            elif (len(end_regions) == 0) & (len(start_regions) == 1):
                end_regions = len(positions) - 1
            elif ((end_regions[0] < start_regions[0]) & (start_regions[-1] > end_regions[-1])):
                start_regions = np.hstack((0, start_regions))
                end_regions = np.hstack((end_regions, len(positions) - 1))
            elif (end_regions[0] < start_regions[0]):
                # starts will be one shorter
                start_regions = np.hstack((0, start_regions))
            elif (start_regions[-1] > end_regions[-1]):
                end_regions = np.hstack((end_regions, len(positions) - 1))
            # prepare a data structure that will be filled up later
            columnslist = ['chr', 'start_bp', 'end_bp', 'length_bp', 'number_bps', 'number_bps_clustered', 'avgDist_bp', 'no_samples', 'no_del', 'no_dup', 'no_inv', 'np_trn', 'firstBp', 'lastBp']
            temp = matlib.repmat(np.nan, len(start_regions), len(columnslist))
            kat_regions_all = pd.DataFrame(temp, columns=columnslist)
            kat_regions_all['chr'] = subs['chr'][subs['chr'].index[0]]
            kat_regions_all['firstBp'] = start_regions
            kat_regions_all['lastBp'] = end_regions
            kat_regions_all = hotspotInfo2(kat_regions_all, subs, segInterDist)
            step_segInterDist_left = [np.nan] * len(segInterDist)
            step_segInterDist_left[1:len(segInterDist)] = segInterDist[1:len(segInterDist)] - segInterDist[0:len(segInterDist) - 1]
            step_segInterDist_right = [np.nan] * len(segInterDist)
            step_segInterDist_right[0:len(segInterDist) - 1] = segInterDist[0:len(segInterDist) - 1] - segInterDist[1:len(segInterDist)]
            kat_regions_all['step_left'] = list(step_segInterDist_left[i] for i in start_regions)
            kat_regions_all['step_right'] = list(step_segInterDist_right[i] for i in end_regions)
            # run the filters on the regions of increased frequency
            # make sure there are at least kmin samples
            if (not kat_regions_all.empty) & (len(kat_regions_all) > 0):
                kat_regions_all = kat_regions_all[kat_regions_all['no_samples'] >= kmin_samples]
                # make sure there are at least kmin.filter breakpoints
                if (not np.isnan(kmin_filter)):
                    kat_regions_all = kat_regions_all[kat_regions_all['number_bps'] >= kmin_filter]
                if (not kat_regions_all.empty) & (len(kat_regions_all) > 0):
                    kat_regions_all = assignPvalues(kat_regions_all, subs, bp_rate)
                    kat_regions_all = kat_regions_all[kat_regions_all['pvalue'] <= pvalue_thresh]
                    kat_regions_all = kat_regions_all[kat_regions_all['rate_factor'] >= rate_factor_thresh]
                # merge segments if both were found to be peaks
                kat_regions_all = kat_regions_all.reset_index(drop=True)
                if (doMerging):
                    if len(kat_regions_all) > 1:
                        for r in range(1, len(kat_regions_all)):
                            if kat_regions_all['lastBp'][r - 1] == kat_regions_all['firstBp'][r] - 1:
                                # merge two segments
                                kat_regions_all['firstBp'][r] = kat_regions_all['firstBp'][r - 1]
                                kat_regions_all['firstBp'][r - 1] = np.nan
                                kat_regions_all['lastBp'][r - 1] = np.nan
                                kat_regions_all['avgDist_bp'][r] = np.nan  # this will need to be updated as segments are being merged
                    # remove some of the merged segments
                    columns_backup = kat_regions_all.columns.to_list()
                    kat_regions_all = kat_regions_all[list(not np.isnan(kat_regions_all['firstBp'].values[i]) and not np.isnan(kat_regions_all['lastBp'].values[i]) for i in range(len(kat_regions_all)))]
                    if kat_regions_all.empty:
                        kat_regions_all = pd.DataFrame(columns=columns_backup)
                    kat_regions_all = hotspotInfo2(kat_regions_all, subs, segInterDist)
                    kat_regions_all = assignPvalues(kat_regions_all, subs, bp_rate)
    return kat_regions_all
0
Source : gen_syn.py
with MIT License
from amber0309
def gen_randD(n_mech, N):
    """
    generate a random data set
    n_mech - number of underlying mechanisms
    N      - number of pts from each mechanism
    """
    model_ind = numpy.matlib.repmat([1, 2, 3, 4, 5], n_mech, 1)
    model_ind = row_permute_independently(model_ind)
    D = []
    label_true = []
    for i in range(0, n_mech):
        func = model_ind[:, i].tolist()
        func = func + [0, N]
        D.append(gen_each(func))
        label_true = label_true + [i] * N
    D = np.concatenate(D, axis=0)
    label_true = np.asarray(label_true)
    return D, label_true
0
Source : DDRTree_py.py
with BSD 3-Clause "New" or "Revised" License
from aristoteleo
def sqdist(a, b):
    """Calculate the square distance between a, b.
    Arguments
    ---------
    a: 'np.ndarray'
        A matrix with :math:`D \times N` dimension
    b: 'np.ndarray'
        A matrix with :math:`D \times N` dimension
    Returns
    -------
    dist: 'np.ndarray'
        A numeric value for the difference between a and b
    """
    aa = np.sum(a ** 2, axis=0)
    bb = np.sum(b ** 2, axis=0)
    ab = a.T.dot(b)
    aa_repmat = matlib.repmat(aa[:, None], 1, b.shape[1])
    bb_repmat = matlib.repmat(bb[None, :], a.shape[1], 1)
    dist = abs(aa_repmat + bb_repmat - 2 * ab)
    return dist
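A hedged standalone check of the identity behind sqdist (||a_i - b_j||^2 = ||a_i||^2 + ||b_j||^2 - 2 a_i.T b_j), assuming the function above and numpy.matlib as matlib are in scope, with points stored as columns:

import numpy as np
import numpy.matlib as matlib

D, Na, Nb = 3, 4, 5
a = np.random.randn(D, Na)
b = np.random.randn(D, Nb)
dist = sqdist(a, b)                  # (Na, Nb) squared distances
brute = np.array([[np.sum((a[:, i] - b[:, j]) ** 2) for j in range(Nb)]
                  for i in range(Na)])
print(np.allclose(dist, brute))      # True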
0
Source : DDRTree_py.py
with BSD 3-Clause "New" or "Revised" License
from aristoteleo
def repmat(X, m, n):
    """This function returns an array containing m (n) copies of A in the row (column) dimensions.
    The size of B is size(A)*n when A is a matrix. For example, repmat(np.matrix(1:4), 2, 3)
    returns a 4-by-6 matrix.
    Arguments
    ---------
    X: 'np.ndarray'
        An array like matrix.
    m: 'int'
        Number of copies on row dimension
    n: 'int'
        Number of copies on column dimension
    Returns
    -------
    xy_rep: 'np.ndarray'
        A matrix of repmat
    """
    xy_rep = matlib.repmat(X, m, n)
    return xy_rep
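A hedged usage sketch mirroring the docstring's MATLAB-style example (np.matrix('1 2; 3 4') stands in for MATLAB's [1 2; 3 4]):

import numpy as np
import numpy.matlib as matlib

A = np.matrix('1 2; 3 4')
B = matlib.repmat(A, 2, 3)
print(B.shape)   # (4, 6): 2 copies down, 3 copies across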
0
Source : scVectorField.py
with BSD 3-Clause "New" or "Revised" License
from aristoteleo
def SparseVFC(
    X: np.ndarray,
    Y: np.ndarray,
    Grid: np.ndarray,
    M: int = 100,
    a: float = 5,
    beta: float = None,
    ecr: float = 1e-5,
    gamma: float = 0.9,
    lambda_: float = 3,
    minP: float = 1e-5,
    MaxIter: int = 500,
    theta: float = 0.75,
    div_cur_free_kernels: bool = False,
    velocity_based_sampling: bool = True,
    sigma: float = 0.8,
    eta: float = 0.5,
    seed=0,
    lstsq_method: str = "drouin",
    verbose: int = 1,
) -> dict:
    """Apply the sparseVFC (vector field consensus) algorithm to learn a functional form of the vector field from
    random samples with outliers on the entire space, robustly and efficiently. (Ma, Jiayi, et al., Pattern
    Recognition, 2013)
    Arguments
    ---------
    X: 'np.ndarray'
        Current state. This corresponds to, for example, the spliced transcriptomic state.
    Y: 'np.ndarray'
        Velocity estimates in delta t. This corresponds to, for example, the inferred spliced transcriptomic
        velocity or total RNA velocity based on metabolic labeling data, as estimated by dynamo.
    Grid: 'np.ndarray'
        Current state on a grid which is often used to visualize the vector field. This corresponds to, for example,
        the spliced transcriptomic state or total RNA state.
    M: 'int' (default: 100)
        The number of basis functions to approximate the vector field.
    a: 'float' (default: 5)
        Parameter of the model of outliers. We assume the outliers obey a uniform distribution, and the volume of
        the outliers' variation space is `a`.
    beta: 'float' (default: None)
        Parameter of the Gaussian kernel, k(x, y) = exp(-beta*||x-y||^2).
        If None, a rule-of-thumb bandwidth will be computed automatically.
    ecr: 'float' (default: 1e-5)
        The minimum limit of the energy change rate in the iteration process.
    gamma: 'float' (default: 0.9)
        Percentage of inliers in the samples. This is an initial value for the EM iteration, and it is not important.
    lambda_: 'float' (default: 3)
        Represents the trade-off between the goodness of data fit and regularization. A larger lambda_ puts more
        weight on regularization.
    minP: 'float' (default: 1e-5)
        The posterior probability matrix P may be singular for matrix inversion. We set the minimum value of P to
        minP.
    MaxIter: 'int' (default: 500)
        Maximum number of iterations.
    theta: 'float' (default: 0.75)
        Defines what counts as an inlier: if the posterior probability that a sample is an inlier is larger than
        theta, it is regarded as an inlier.
    div_cur_free_kernels: `bool` (default: False)
        A logical flag determining whether the divergence-free or curl-free kernels will be used for learning the
        vector field.
    sigma: 'int' (default: `0.8`)
        Bandwidth parameter.
    eta: 'int' (default: `0.5`)
        Combination coefficient for the divergence-free or the curl-free kernels.
    seed : int or 1-d array_like, optional (default: `0`)
        Seed for RandomState. Must be convertible to 32-bit unsigned integers. Used in sampling control points.
        Default is 0 to ensure consistency between different runs.
    lstsq_method: 'str' (default: `drouin`)
        The name of the linear least squares solver, can be either `scipy` or `drouin`.
    verbose: `int` (default: `1`)
        The level of printed running information.
    Returns
    -------
    VecFld: 'dict'
        A dictionary which contains:
            X: Current state.
            valid_ind: The indices of cells that have finite velocity values.
            X_ctrl: Sample control points of current state.
            ctrl_idx: Indices for the sampled control points.
            Y: Velocity estimates in delta t.
            beta: Parameter of the Gaussian kernel for the kernel matrix (Gram matrix).
            V: Prediction of velocity of X.
            C: Finite set of the coefficients for the
            P: Posterior probability matrix of inliers.
            VFCIndex: Indexes of inliers found by sparseVFC.
            sigma2: Energy change rate.
            grid: Grid of current state.
            grid_V: Prediction of velocity of the grid.
            iteration: Number of the last iteration.
            tecr_vec: Vector of relative energy change rates compared to the previous step.
            E_traj: Vector of energy at each iteration,
        where V = f(X), P is the posterior probability and VFCIndex is the indexes of inliers found by sparseVFC.
        Note that V = `con_K(Grid, X_ctrl, beta).dot(C)` gives the prediction of velocity on Grid (but can also be any
        point in the gene expression state space).
    """
    logger = LoggerManager.gen_logger("SparseVFC")
    temp_logger = LoggerManager.get_temp_timer_logger()
    logger.info("[SparseVFC] begins...")
    logger.log_time()
    need_utility_time_measure = verbose > 1
    X_ori, Y_ori = X.copy(), Y.copy()
    valid_ind = np.where(np.isfinite(Y.sum(1)))[0]
    X, Y = X[valid_ind], Y[valid_ind]
    N, D = Y.shape
    grid_U = None
    # Construct kernel matrix K
    tmp_X, uid = np.unique(X, axis=0, return_index=True)  # return unique rows
    M = min(M, tmp_X.shape[0])
    if velocity_based_sampling:
        logger.info("Sampling control points based on data velocity magnitude...")
        idx = sample_by_velocity(Y[uid], M, seed=seed)
    else:
        idx = np.random.RandomState(seed=seed).permutation(tmp_X.shape[0])  # randomly select some initial points
        idx = idx[range(M)]
    ctrl_pts = tmp_X[idx, :]
    if beta is None:
        h = bandwidth_selector(ctrl_pts)
        beta = 1 / h ** 2
    K = (
        con_K(ctrl_pts, ctrl_pts, beta, timeit=need_utility_time_measure)
        if div_cur_free_kernels is False
        else con_K_div_cur_free(ctrl_pts, ctrl_pts, sigma, eta, timeit=need_utility_time_measure)[0]
    )
    U = (
        con_K(X, ctrl_pts, beta, timeit=need_utility_time_measure)
        if div_cur_free_kernels is False
        else con_K_div_cur_free(X, ctrl_pts, sigma, eta, timeit=need_utility_time_measure)[0]
    )
    if Grid is not None:
        grid_U = (
            con_K(Grid, ctrl_pts, beta, timeit=need_utility_time_measure)
            if div_cur_free_kernels is False
            else con_K_div_cur_free(Grid, ctrl_pts, sigma, eta, timeit=need_utility_time_measure)[0]
        )
    M = ctrl_pts.shape[0] * D if div_cur_free_kernels else ctrl_pts.shape[0]
    if div_cur_free_kernels:
        X = X.flatten()[:, None]
        Y = Y.flatten()[:, None]
    # Initialization
    V = X.copy() if div_cur_free_kernels else np.zeros((N, D))
    C = np.zeros((M, 1)) if div_cur_free_kernels else np.zeros((M, D))
    i, tecr, E = 0, 1, 1
    sigma2 = sum(sum((Y - X) ** 2)) / (N * D) if div_cur_free_kernels else sum(sum((Y - V) ** 2)) / (N * D)
    sigma2 = 1e-7 if sigma2 < 1e-8 else sigma2
    tecr_vec = np.ones(MaxIter) * np.nan
    E_vec = np.ones(MaxIter) * np.nan
    P = None
    while i < MaxIter and tecr > ecr and sigma2 > 1e-8:
        # E-step
        E_old = E
        P, E = get_P(Y, V, sigma2, gamma, a, div_cur_free_kernels)
        E = E + lambda_ / 2 * np.trace(C.T.dot(K).dot(C))
        E_vec[i] = E
        tecr = abs((E - E_old) / E)
        tecr_vec[i] = tecr
        if need_utility_time_measure:
            logger.info(
                "iterate: %d, gamma: %.3f, energy change rate: %s, sigma2=%s"
                % (i, gamma, scinot(tecr, 3), scinot(sigma2, 3))
            )
        # M-step. Solve linear system for C.
        temp_logger.log_time()
        P = np.maximum(P, minP)
        if div_cur_free_kernels:
            P = np.kron(P, np.ones((int(U.shape[0] / P.shape[0]), 1)))  # np.kron(P, np.ones((D, 1)))
            lhs = (U.T * np.matlib.tile(P.T, [M, 1])).dot(U) + lambda_ * sigma2 * K
            rhs = (U.T * np.matlib.tile(P.T, [M, 1])).dot(Y)
        else:
            UP = U.T * numpy.matlib.repmat(P.T, M, 1)
            lhs = UP.dot(U) + lambda_ * sigma2 * K
            rhs = UP.dot(Y)
        if need_utility_time_measure:
            temp_logger.finish_progress(progress_name="computing lhs and rhs")
        temp_logger.log_time()
        C = lstsq_solver(lhs, rhs, method=lstsq_method, timeit=need_utility_time_measure)
        # Update V and sigma**2
        V = U.dot(C)
        Sp = sum(P) / 2 if div_cur_free_kernels else sum(P)
        sigma2 = (sum(P.T.dot(np.sum((Y - V) ** 2, 1))) / np.dot(Sp, D))[0]
        # Update gamma
        numcorr = len(np.where(P > theta)[0])
        gamma = numcorr / X.shape[0]
        if gamma > 0.95:
            gamma = 0.95
        elif gamma < 0.05:
            gamma = 0.05
        i += 1
    if i == 0 and not (tecr > ecr and sigma2 > 1e-8):
        raise Exception(
            "please check your input parameters, "
            f"tecr: {tecr}, ecr {ecr} and sigma2 {sigma2}, "
            f"tecr must be larger than ecr and sigma2 must be larger than 1e-8"
        )
    grid_V = None
    if Grid is not None:
        grid_V = np.dot(grid_U, C)
    VecFld = {
        "X": X_ori,
        "valid_ind": valid_ind,
        "X_ctrl": ctrl_pts,
        "ctrl_idx": idx,
        "Y": Y_ori,
        "beta": beta,
        "V": V.reshape((N, D)) if div_cur_free_kernels else V,
        "C": C,
        "P": P,
        "VFCIndex": np.where(P > theta)[0],
        "sigma2": sigma2,
        "grid": Grid,
        "grid_V": grid_V,
        "iteration": i - 1,
        "tecr_traj": tecr_vec[:i],
        "E_traj": E_vec[:i],
    }
    if div_cur_free_kernels:
        VecFld["div_cur_free_kernels"], VecFld["sigma"], VecFld["eta"] = (
            True,
            sigma,
            eta,
        )
        temp_logger.log_time()
        (
            _,
            VecFld["df_kernel"],
            VecFld["cf_kernel"],
        ) = con_K_div_cur_free(X, ctrl_pts, sigma, eta, timeit=need_utility_time_measure)
        temp_logger.finish_progress(progress_name="con_K_div_cur_free")
    logger.finish_progress(progress_name="SparseVFC")
    return VecFld
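A hedged mini-sketch of the M-step weighting idiom above: repmat tiles the posterior row vector so each column of U.T is scaled by its sample's weight, and plain broadcasting does the same; toy values stand in for the kernel matrix and posterior:

import numpy as np
import numpy.matlib

M, N = 3, 5
U = np.random.randn(N, M)                    # kernel matrix, samples x basis
P = np.random.rand(N, 1)                     # posterior weight per sample
UP = U.T * numpy.matlib.repmat(P.T, M, 1)    # shape (M, N): column j scaled by P[j]
print(np.allclose(UP, U.T * P.T))            # True: broadcasting equivalent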
0
Source : sampling_models.py
with MIT License
from befelix
def sample_n_step(self, x0, K, k, n=1, n_samples=1000):
    """ Sample from the distribution of the GP system of n steps
    Args:
        x0 (numpy.ndarray[float]): vector of shape n_s x 1 representing the
            initial (deterministic) state of the system
        K (numpy.ndarray[float]): array of shape n x n_u x n_s; the state feedback
            linear controller per time step.
        k (numpy.ndarray[float]): array of shape n x n_u; the feed-forward
            control modifications per time step.
        n (int, optional): number of time steps to propagate forward
        n_samples (int, optional): number of samples to propagate through
            the system.
    Returns:
        S (numpy.ndarray[float]): n_samples x n_s samples from the pdf of
            the n-step ahead propagation of the system
        S_all (numpy.ndarray[float]): n x n_samples x n_s samples for all
            intermediate states of the system in the n-step ahead propagation
            of the system.
    """
    assert n > 0, "The time horizon n for the multi-step sampling must be positive!"
    assert np.shape(K) == (n, self.n_u, self.n_s), \
        "Required shape of K is ({},{},{})".format(n, self.n_u, self.n_s)
    assert np.shape(k) == (n, self.n_u), \
        "Required shape of k is ({},{})".format(n, self.n_u)
    S_all = np.empty((n, n_samples, self.n_s))  # the samples for each time step
    # Get samples from the predictive distribution for the first time step
    u0 = np.dot(K[0], x0) + k[0, :, None]
    inp0 = np.vstack((x0, u0)).T
    S = self.GP.sample_from_gp(inp0, size=n_samples).reshape((n_samples, self.n_s))
    S_all[0] = S
    # Sample from other time steps (if n > 1).
    for i in range(1, n):
        U = np.dot(S, K[i].T) + repmat(k[i, :, None], n_samples, 1)
        inp = np.hstack((S, U))
        S = self.GP.sample_from_gp(inp, size=1).reshape((n_samples, self.n_s))
        S_all[i] = S
    return S.squeeze(), S_all
0
Source : utils.py
with MIT License
from befelix
def sample_inside_polytope(x, a, b):
    """
    For a set of samples x = [x_1, .., x_k]^T,
    check sample-wise whether
        A x_i \leq b,  i = 1, .., k
    x: k x n np.ndarray[float]
        The samples (k samples of dimensionality n)
    a: m x n np.ndarray[float]
        the matrix of the linear inequality
    b: m x 1 np.ndarray[float]
        the vector of the linear inequality
    """
    k, _ = x.shape
    c = np.dot(a, x.T) - repmat(b, 1, k)
    return np.all(c < 0, axis=0).squeeze()
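A hedged usage sketch (assuming the function above is in scope, with numpy as np and repmat from numpy.matlib): checking which samples lie inside the unit box written as four linear inequalities:

import numpy as np
from numpy.matlib import repmat

a = np.vstack((np.eye(2), -np.eye(2)))   # x <= 1, y <= 1, -x <= 1, -y <= 1
b = np.ones((4, 1))
x = np.array([[0.5, 0.5], [2.0, 0.0]])   # first sample inside, second outside
print(sample_inside_polytope(x, a, b))   # [ True False]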
0
Source : panorama.py
with MIT License
from bertjiazheng
def lineFromTwoPoint(pt1, pt2):
    """
    Generate line segment based on two points on panorama
    pt1, pt2: two points on panorama
    line:
        1~3-th dim: normal of the line
        4-th dim: the projection dimension ID
        5~6-th dim: the u of line segment endpoints in projection plane
    """
    numLine = pt1.shape[0]
    lines = np.zeros((numLine, 6))
    n = np.cross(pt1, pt2)
    n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=True)), 1, 3) + 1e-9)
    lines[:, 0:3] = n
    areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))
    areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))
    areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))
    planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1
    lines[:, 3] = planeIDs
    for i in range(numLine):
        uv = xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])
        umax = uv[:, 0].max() + np.pi
        umin = uv[:, 0].min() + np.pi
        if umax - umin > np.pi:
            lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi
        else:
            lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi
    return lines
0
Source : optspace.py
with BSD 3-Clause "New" or "Revised" License
from biocore
def grassmann_manifold_two(U, step_size, n_components):
    """
    The second Grassmann Manifold
    Parameters
    ----------
    U, step_size, n_components
    """
    # get the step from the manifold
    step = np.sum(U ** 2, axis=1) / (2 * step_size * n_components)
    step = 2 * np.multiply(np.exp((step - 1) ** 2), (step - 1))
    step[step < 0] = 0
    step = step.reshape(len(step), 1)
    step = np.multiply(U, repmat(step, 1, n_components)) / \
        (step_size * n_components)
    return step
0
Source : dataset.py
with Apache License 2.0
from chrhenning
def _to_one_hot(self, labels, reverse=False):
    """ Transform a list of labels into a 1-hot encoding.
    Args:
        labels: A list of class labels.
        reverse: If true, then one-hot encoded samples are transformed back
            to categorical labels.
    Returns:
        The 1-hot encoded labels.
    """
    if not self.classification:
        raise RuntimeError('This method can only be called for ' +
                           'classification datasets.')
    # Initialize encoder.
    if self._one_hot_encoder is None:
        self._one_hot_encoder = OneHotEncoder(
            categories=[range(self.num_classes)])
        num_time_steps = 1
        if self.sequence:
            num_time_steps = labels.shape[1] // np.prod(self.out_shape)
        self._one_hot_encoder.fit(npm.repmat(
            np.arange(self.num_classes), num_time_steps, 1).T)
    if reverse:
        # Unfortunately, there is no inverse function in the OneHotEncoder
        # class. Therefore, we take the one-hot-encoded "labels" samples
        # and take the indices of all 1 entries. Note that these indices
        # are returned as tuples, where the second column contains the
        # original column indices. These column indices from "labels"
        # modulo the number of classes yield the original labels.
        return np.reshape(np.argwhere(labels)[:, 1] % self.num_classes,
                          (labels.shape[0], -1))
    else:
        return self._one_hot_encoder.transform(labels).toarray()
0
Source : dataset.py
with Apache License 2.0
from chrhenning
def _to_one_hot(self, labels, reverse=False):
    """ Transform a list of labels into a 1-hot encoding.
    Args:
        labels: A list of class labels.
        reverse: If ``True``, then one-hot encoded samples are transformed
            back to categorical labels.
    Returns:
        The 1-hot encoded labels.
    """
    if not self.classification:
        raise RuntimeError('This method can only be called for ' +
                           'classification datasets.')
    # Initialize encoder.
    if self._one_hot_encoder is None:
        self._one_hot_encoder = OneHotEncoder(
            categories=[range(self.num_classes)])
        self._one_hot_encoder.fit(npm.repmat(
            np.arange(self.num_classes), 1, 1).T)
    if reverse:
        # Unfortunately, there is no inverse function in the OneHotEncoder
        # class. Therefore, we take the one-hot-encoded "labels" samples
        # and take the indices of all 1 entries. Note that these indices
        # are returned as tuples, where the second column contains the
        # original column indices. These column indices from "labels"
        # modulo the number of classes yield the original labels.
        return np.reshape(np.argwhere(labels)[:, 1] % self.num_classes,
                          (labels.shape[0], -1))
    else:
        if self.sequence:
            assert len(self.out_shape) == 1
            num_time_steps = labels.shape[1]  # // 1
            n_samples, _ = labels.shape
            labels = labels.reshape(n_samples * num_time_steps, 1)
            labels = self._one_hot_encoder.transform(labels).toarray()
            labels = labels.reshape(n_samples,
                                    num_time_steps * self.num_classes)
            return labels
        else:
            return self._one_hot_encoder.transform(labels).toarray()
0
Source : seq_smnist.py
with Apache License 2.0
from chrhenning
def _generate_data(self, x_data, y_data, max_seq_len, digits, seq_len,
                   n_samples, use_one_hot, class_partition,
                   upsample_control):
    """Generates data for a single sequence stroke MNIST task with
    specified length and digits to use.
    Args:
        x_data (list): Original stroke mnist input data, with every list
            entry being a numpy.ndarray of shape ``[4, stroke_seq_len]``
        y_data (list): Original stroke mnist labels, with every list
            entry being a numpy.ndarray of shape ``[10]``
        max_seq_len (int): The maximum length of a sequence (i.e. number of
            timesteps of a sample)
        digits (tuple): The two digits used to build the sequence
        seq_len (int): The length of the sequence of digits to build
        n_samples (int): The number of samples that should be generated
        use_one_hot (bool): Whether or not to use one hot encodings
        class_partition (list): If sequences should be grouped into 2
            different classes this list specifies the class partition.
        upsample_control (bool): See constructor docstring.
    Returns:
        (tuple): Tuple containing:
        - **in_data** (numpy.ndarray): Numpy array of shape
          ``[n_samples, max_num_time_steps * 4]``.
        - **out_data** (numpy.ndarray): Numpy array of shape
          ``[n_samples, max_num_time_steps]``.
        - **sample_lengths** (numpy.ndarray): The original unpadded sequence
          lengths.
    """
    # modify seq_len in case we do upsampling control
    if upsample_control:
        upsample_factor = seq_len
        seq_len = 1
        if not self.two_class:
            raise NotImplementedError()
    # construct all possible classes
    classes = ["".join(seq) for seq in
               itertools.product("01", repeat=seq_len)]
    # get the right number of samples per class to get a balanced data set
    # with the desired n_samples.
    num = n_samples
    div = len(classes)
    n_samples_per_class = [num // div + (1 if x < num % div else 0)
                           for x in range(div)]
    # find indices of samples with the wanted digit class
    y_data = [np.argmax(y) for y in y_data]
    digit_idx = []
    digit_idx.append(np.where(np.asarray(y_data) == digits[0])[0])
    digit_idx.append(np.where(np.asarray(y_data) == digits[1])[0])
    # generate samples for every class
    samples = []
    labels = []
    for i, c in enumerate(classes):
        this_label = i
        digits_to_sample = [int(c[i]) for i in range(len(c))]
        for s in range(n_samples_per_class[i]):
            this_sample = None
            for d in digits_to_sample:
                rand_idx = self._rstate.randint(len(digit_idx[d]))
                sample_idx = digit_idx[d][rand_idx]
                digit_sample = x_data[sample_idx]
                if this_sample is None:
                    this_sample = digit_sample
                else:
                    this_sample = np.vstack((this_sample, digit_sample))
            samples.append(this_sample)
            labels.append(this_label)
    # if configured, sort labels into 2 classes
    labels = np.asarray(labels)
    if self.two_class and not upsample_control:
        lbl_mask = np.isin(labels, class_partition)
        labels[~lbl_mask] = 0
        labels[lbl_mask] = 1
    if upsample_control:
        for i, s in enumerate(samples):
            # The initial timestep is the absolute start position of the
            # digit. To translate to a higher-resolution image, we can just
            # multiply the absolute position by the scaling factor.
            upsample = s[0, :] * upsample_factor
            for t in np.arange(1, s.shape[0]):
                # don't do upsampling at end of strokes or end of digits
                if all((s[t, 2] == 0, s[t, 3] == 0)):
                    # Repeat original stroke "upsample_factor" times, such
                    # that the relative stroke length is identical if
                    # images are normalized to the same resolution.
                    for k in range(upsample_factor):
                        upsample = np.vstack((upsample, s[t, :]))
                else:
                    upsample = np.vstack((upsample, s[t, :]))
            samples[i] = upsample
    # structure output data
    out_data = labels.reshape(-1, 1)
    if use_one_hot:
        n_classes = 2 ** seq_len
        if self.two_class:
            n_classes = 2
        # FIXME We shouldn't call this method if the validation set size is
        # zero.
        if out_data.size == 0:
            out_data = np.matlib.repmat(out_data, 1, n_classes)
        else:
            # FIXME use internal method `_to_one_hot` and set required class
            # attributes beforehand.
            one_hot_encoder = OneHotEncoder(categories=[range(n_classes)])
            one_hot_encoder.fit(npm.repmat(np.arange(n_classes), 1, 1).T)
            out_data = one_hot_encoder.transform(out_data).toarray()
        if self.target_per_timestep:
            out_data = np.matlib.repmat(np.asarray(out_data), 1, max_seq_len)
    # structure input data
    in_data = np.zeros((n_samples, max_seq_len, 4))
    sample_lengths = np.zeros(n_samples)
    for i, s in enumerate(samples):
        in_data[i, :s.shape[0], :] = s
        sample_lengths[i] = s.shape[0]
    in_data = self._flatten_array(in_data)
    return in_data, out_data, sample_lengths
0
Source : hypothesis_test.py
with MIT License
from davidglavas
def test_dot_product_with_rep_padding(self, N, M, pad_value, gc, dc):
    K = 2 * M
    X = np.random.rand(N, M).astype(np.float32) - 0.5
    Y = np.random.rand(N, K).astype(np.float32) - 0.5
    op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
                             replicate=True,
                             pad_value=pad_value)

    def dotproduct(X, Y):
        import numpy.matlib as npm
        if M < K:
            Z = npm.repmat(X, 1, K // M)
            return (np.sum(Z * Y, axis=1), )
        else:
            Z = npm.repmat(Y, 1, M // K)
            return (np.sum(Z * X, axis=1), )

    self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
    self.assertDeviceChecks(dc, op, [X, Y], [0])
    self.assertGradientChecks(gc, op, [X, Y], 0, [0])
    self.assertGradientChecks(gc, op, [X, Y], 1, [0])
0
Source : PlotHandler.py
with MIT License
from f1tenth
def plot_vehicle(self,
                 pos: np.ndarray,
                 heading: float,
                 width: float,
                 length: float,
                 zorder: int = 10,
                 color_str: str = 'blue',
                 id_in: str = 'default') -> None:
    """
    Highlight a vehicle pose with a scaled bounding box.
    :param pos:       position of the vehicle's center of gravity
    :param heading:   heading of the vehicle (0.0 being north) [in rad]
    :param width:     width of the vehicle [in m]
    :param length:    length of the vehicle [in m]
    :param zorder:    z-order of the plotted object (layer in the plot)
    :param color_str: string specifying color (use default color strings)
    :param id_in:     string with a unique object ID (previously plotted objects with the same ID will be removed)
    """
    # delete highlighted positions with handle
    if id_in in self.__veh_patch.keys():
        self.__veh_patch[id_in].remove()
        del self.__veh_patch[id_in]
    theta = heading - np.pi / 2
    bbox = (npm.repmat([[pos[0]], [pos[1]]], 1, 4)
            + np.matmul([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]],
                        [[-length / 2, length / 2, length / 2, -length / 2],
                         [-width / 2, -width / 2, width / 2, width / 2]]))
    patch = np.array(bbox).transpose()
    patch = np.vstack((patch, patch[0, :]))
    plt_patch = ptch.Polygon(patch, facecolor=color_str, zorder=zorder)
    self.__veh_patch[id_in] = self.__main_ax.add_artist(plt_patch)
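A hedged mini-sketch of the bounding-box idiom above: repmat tiles the center across four corner columns, then rotated half-length/half-width offsets are added; toy pose values are assumptions:

import numpy as np
import numpy.matlib as npm

pos, theta, length, width = (1.0, 2.0), 0.0, 4.0, 2.0
centers = npm.repmat([[pos[0]], [pos[1]]], 1, 4)   # shape (2, 4): one column per corner
R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
corners = centers + R @ np.array([[-length/2, length/2, length/2, -length/2],
                                  [-width/2, -width/2, width/2, width/2]])
print(corners.T)   # the four box corners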
0
Source : util.py
with MIT License
from fanglinpu
def __init__(self, dm=None, dms=None, skel=None, com2D=None, flag=None):
    if not isinstance(com2D, np.ndarray):
        (self.crop_dm, self.trans, self.com3D) = dm.Detector()
    else:
        (self.crop_dm, self.trans, self.com3D) = dm.cropArea3D(dm.dmData, com2D)
        (self.crop_dms, self.transs, self.com3Ds) = dms.cropArea3D(dms.dmData, com2D)
    self.normDm()
    self.dm = dm.dmData
    try:
        self.normDms()
        self.dms = dms.dmData
    except:
        pass
    if isinstance(skel, np.ndarray):
        if len(skel) % 3 != 0:
            raise ValueError('invalid length of the skeleton mat')
        jntNum = len(skel) // 3
        self.skel = skel.astype(np.float32)
        # crop_skel is the training label for the neural network, normalized w.r.t. com3D
        self.crop_skel = (self.skel - repmat(self.com3D, 1, jntNum))[0]
        self.crop_skel = self.crop_skel.astype(np.float32)
        self.normSkel()
        self.skel2 = np.flipud(self.crop2D())
0
Source : brain_model_test.py
with Apache License 2.0
from google
def create_simulated_impulse_responses(self, num_input_channels,
                                       simulated_unattended_gain=0.10):
    """Create the basic impulse responses for this dataset.
    Generate random responses, one for the attended signal and the other for
    the unattended signal. Give them a realistic overall shape over 250ms.
    Do this once for a dataset (so we can generate multiple examples later
    using the same impulse responses).
    Args:
        num_input_channels: How many input channels to synthesize
        simulated_unattended_gain: How much lower is the unattended channel?
    Creates:
        attended_impulse_response, unattended_impulse_response:
            Two 0.25s x num_input_channel arrays representing the impulse
            response from the input sound (i.e. speech) to the system
            (i.e. EEG/MEG) response.
    """
    self.impulse_length = .25  # length of the TRF
    self.impulse_times = np.arange(self.impulse_length * self.fs) / self.fs
    self.envelope = 30 * self.impulse_times * np.exp(-self.impulse_times * 30,
                                                     dtype=np.float32)
    self.envelope_all_chans = numpy.matlib.repmat(np.reshape(self.envelope, (-1, 1)),
                                                  1, num_input_channels)
    self.attended_impulse_response = np.random.randn(
        self.impulse_times.shape[0],
        num_input_channels) * self.envelope_all_chans
    self.unattended_impulse_response = np.random.randn(
        self.impulse_times.shape[0],
        num_input_channels) * self.envelope_all_chans
    # Cut the magnitude of the unattended impulse response so that, as noise,
    # it will have a smaller or bigger effect.
    logging.info('SimulatedData:initialize_dataset: Setting unattended gain '
                 'to %s', simulated_unattended_gain)
    self.unattended_impulse_response *= simulated_unattended_gain
0
Source : decoding_test.py
with Apache License 2.0
from google
def create_simulated_impulse_responses(self, num_input_channels,
                                       simulated_unattended_gain=0.10):
    """Create the basic impulse responses for this dataset.
    Generate random responses, one for the attended signal and the other for
    the unattended signal. Give them a realistic overall shape over 250ms.
    Do this once for a dataset (so we can generate multiple examples later
    using the same impulse responses).
    Args:
        num_input_channels: How many input channels to synthesize
        simulated_unattended_gain: How much lower is the unattended channel?
    Creates:
        attended_impulse_response, unattended_impulse_response:
            Two 0.25s x num_input_channel arrays representing the impulse
            response from the input sound (i.e. speech) to the system
            (i.e. EEG/MEG) response.
    """
    self.impulse_length = .25  # length of the TRF
    self.impulse_times = np.arange(self.impulse_length * self.fs) / self.fs
    self.envelope = 30 * self.impulse_times * np.exp(-self.impulse_times * 30,
                                                     dtype=np.float32)
    self.envelope_all_chans = numpy.matlib.repmat(np.reshape(self.envelope, (-1, 1)),
                                                  1, num_input_channels)
    self.attended_impulse_response = np.random.randn(
        self.impulse_times.shape[0],
        num_input_channels) * self.envelope_all_chans
    self.unattended_impulse_response = np.random.randn(
        self.impulse_times.shape[0],
        num_input_channels) * self.envelope_all_chans
    # Cut the magnitude of the unattended impulse response so that, as noise,
    # it will have a smaller or bigger effect.
    logging.info('SimulatedData:initialize_dataset: Setting unattended ' +
                 'gain to %s', simulated_unattended_gain)
    self.unattended_impulse_response *= simulated_unattended_gain
0
Source : rpca_decomposition.py
with MIT License
from hsipl
def GA(data):
    X = data.transpose()
    K = 1
    epsilon = 10 * np.finfo(float).eps
    N, D = X.shape
    vectors = np.zeros([D, K])
    vectors[:] = np.NAN
    for k in range(K):
        mu = np.random.rand(D, 1) - 0.5
        mu = mu / np.linalg.norm(mu)
        for iterate in range(3):
            dots = np.dot(X, mu)
            mu = (np.dot(dots.transpose(), X)).transpose()
            mu = mu / np.linalg.norm(mu)
        for iterate in range(N):
            prev_mu = mu.copy()
            dot_signs = np.sign(np.dot(X, mu))
            mu = np.dot(dot_signs.transpose(), X)
            mu = (mu / np.linalg.norm(mu)).transpose()
            if np.max(abs(mu - prev_mu)) < epsilon:
                break
        if k == 0:
            vectors[:, k] = mu.reshape(D)
        X = X - np.dot(np.dot(X, mu), mu.transpose())
    new_min = np.min(data[:])
    new_max = np.max(data[:])
    L = nma_rescale(vectors, new_min, new_max)
    L = mb.repmat(L, 1, data.shape[1])
    S = data - L
    return L, S
0
Source : rpca_decomposition.py
with MIT License
from hsipl
def GM(data):
    X = data.transpose()
    K = 1
    epsilon = 1e-5
    N, D = X.shape
    vectors = np.zeros([D, K])
    vectors[:] = np.NAN
    for k in range(K):
        mu = np.random.rand(D, 1) - 0.5
        mu = mu / np.linalg.norm(mu)
        for iterate in range(3):
            dots = np.dot(X, mu)
            mu = (np.dot(dots.transpose(), X)).transpose()
            mu = mu / np.linalg.norm(mu)
        for iterate in range(N):
            prev_mu = mu.copy()
            dot_signs = np.sign(np.dot(X, mu))
            mu = (np.median(X / dot_signs, 0)).reshape(D, 1)
            mu = mu[:] / np.linalg.norm(mu)
            if np.max(abs(mu - prev_mu)) < epsilon:
                break
        if k == 0:
            vectors[:, k] = mu.reshape(D)
        X = X - np.dot(np.dot(X, mu), mu.transpose())
    new_min = np.min(data[:])
    new_max = np.max(data[:])
    L = nma_rescale(vectors, new_min, new_max)
    L = mb.repmat(L, 1, data.shape[1])
    S = data - L
    return L, S
0
Source : rpca_decomposition.py
with MIT License
from hsipl
def TGA(data):
    X = data.transpose()
    percent = 0.5
    K = 1
    N, D = X.shape
    vectors = np.zeros([D, K])
    vectors[:] = np.NAN
    epsilon = 1e-5
    for k in range(K):
        mu = np.random.rand(D, 1) - 0.5
        mu = mu / np.linalg.norm(mu)
        for iterate in range(3):
            dots = np.dot(X, mu)
            mu = (np.dot(dots.transpose(), X)).transpose()
            mu = mu / np.linalg.norm(mu)
        for iterate in range(N):
            prev_mu = mu.copy()
            dot_signs = np.sign(np.dot(X, mu))
            mu = trim_mean(X * dot_signs.reshape(N, 1), percent)
            mu = (mu[:] / np.linalg.norm(mu)).reshape(D, 1)
            if np.max(abs(mu - prev_mu)) < epsilon:
                break
        if k == 0:
            vectors[:, k] = mu.reshape(D)
        X = X - np.dot(np.dot(X, mu), mu.transpose())
    new_min = np.min(data[:])
    new_max = np.max(data[:])
    L = nma_rescale(vectors, new_min, new_max)
    L = mb.repmat(L, 1, data.shape[1])
    S = data - L
    return L, S
0
Source : KinematicModel.py
with MIT License
from intelligent-control-lab
def reset(self, dT, goals):
    """This function resets the robot state to initial and sets the goals to the given goals.
    This function is useful when the user needs to make sure all the robots are tested under
    the same goal sequence.
    Args:
        dT (float): the separation between two control outputs
        goals (ndarray): n*6 array of goal specifications. [x y z 0 0 0]
    """
    self.dT = dT
    self.set_goals(goals)
    self.init_x(self.init_state)
    self.x_his = repmat(self.x, 1, 50)
    self.n = np.shape(self.x)[0]
    self.H = matrix(eye(self.n))
    self.kalman_P = matrix(eye(self.n)) * (self.measure_noise ** 2)
    self.x_est = self.observe(self.x)
    self.m = matrix(zeros((6, 1)))
    self.m_his = repmat(self.m, 1, 50)
    self.x_pred = zeros((self.n, 1))
    self.trace = repmat(self.get_P(), 1, 100)
    self.goal_achieved = 0
    self.time = 0
    self.last_collision_time = 0
    self.score = dict()
    self.score['collision_cnt'] = 0
    self.score['safety'] = 0
    self.score['nearest_dis'] = 1e9
    self.score['efficiency'] = 0
    self.predictability = 0
    self.get_closest_X(np.vstack([10, 10, 10, 0, 0, 0]))
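A minimal sketch of the history-buffer idiom above: repmat replicates the current state column 50 times to seed a rolling history:

import numpy as np
from numpy.matlib import repmat

x = np.array([[0.0], [1.0], [2.0]])   # current state as a column vector
x_his = repmat(x, 1, 50)              # shape (3, 50): 50 identical columns
print(x_his.shape)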
0
Source : Kinetic.py
with MIT License
from jmcelve2
def create(self):
    """
    Add the "kinetic" attribute to the Kinetic2D object.
    Returns:
        A numpy.array grid of kinetic values with units of Hartree energy.
    """
    d = abs(self.x[0][1] - self.x[0][0])
    a_xy = np_matlib.repmat(np.ones((1, self.n_x - 1)), self.n_y, 1).flatten()
    dx2 = -2 * np.diag(np.ones((1, self.n_y * self.n_x))[0]) \
        + 1 * np.diag(a_xy, k=-self.n_y) \
        + 1 * np.diag(a_xy, k=self.n_y)
    dx2 = dx2 / d ** 2
    dy22 = -2 * np.diag(np.ones((1, self.n_y))[0]) \
        + 1 * np.diag(np.ones((1, self.n_y - 1))[0], -1) \
        + 1 * np.diag(np.ones((1, self.n_y - 1))[0], 1)
    dy2 = np.kron(np.eye(self.n_x), dy22)
    dy2 = dy2 / d ** 2
    kinetic = (-HBAR ** 2 / (2 * MASS_E * MASS)) * (dx2 + dy2)
    return Kinetic(dict(**self.params, **dict(kinetic=kinetic)))
0
Source : DeepESN.py
with BSD 3-Clause "New" or "Revised" License
from lucapedrelli
def __init__(self, Nu, Nr, Nl, configs, verbose=0):
    # initialize the DeepESN model
    if verbose:
        sys.stdout.write('init DeepESN...')
        sys.stdout.flush()
    rhos = np.array(configs.rhos)  # spectral radius (maximum absolute eigenvalue)
    lis = np.array(configs.lis)    # leaky rate
    iss = np.array(configs.iss)    # input scale
    IPconf = configs.IPconf        # configuration for Deep Intrinsic Plasticity
    reservoirConf = configs.reservoirConf  # reservoir configurations
    if len(rhos.shape) == 0:
        rhos = npm.repmat(rhos, 1, Nl)[0]
    if len(lis.shape) == 0:
        lis = npm.repmat(lis, 1, Nl)[0]
    if len(iss.shape) == 0:
        iss = npm.repmat(iss, 1, Nl)[0]
    self.W = {}      # recurrent weights
    self.Win = {}    # input weights
    self.Gain = {}   # activation function gain
    self.Bias = {}   # activation function bias
    self.Nu = Nu     # number of inputs
    self.Nr = Nr     # number of units per layer
    self.Nl = Nl     # number of layers
    self.rhos = rhos.tolist()  # list of spectral radii
    self.lis = lis   # list of leaky rates
    self.iss = iss   # list of input scales
    self.IPconf = IPconf
    self.readout = configs.readout
    # sparse recurrent weights init
    if reservoirConf.connectivity < 1:
        for layer in range(Nl):
            self.W[layer] = np.zeros((Nr, Nr))
            for row in range(Nr):
                number_row_elements = round(reservoirConf.connectivity * Nr)
                row_elements = random.sample(range(Nr), number_row_elements)
                self.W[layer][row, row_elements] = np.random.uniform(-1, +1, size=(1, number_row_elements))
    # fully-connected recurrent weights init
    else:
        for layer in range(Nl):
            self.W[layer] = np.random.uniform(-1, +1, size=(Nr, Nr))
    # layers init
    for layer in range(Nl):
        target_li = lis[layer]
        target_rho = rhos[layer]
        input_scale = iss[layer]
        if layer == 0:
            self.Win[layer] = np.random.uniform(-input_scale, input_scale, size=(Nr, Nu + 1))
        else:
            self.Win[layer] = np.random.uniform(-input_scale, input_scale, size=(Nr, Nr + 1))
        Ws = (1 - target_li) * np.eye(self.W[layer].shape[0], self.W[layer].shape[1]) + target_li * self.W[layer]
        eig_value, eig_vector = np.linalg.eig(Ws)
        actual_rho = np.max(np.absolute(eig_value))
        Ws = (Ws * target_rho) / actual_rho
        self.W[layer] = (target_li ** -1) * (Ws - (1. - target_li) * np.eye(self.W[layer].shape[0], self.W[layer].shape[1]))
        self.Gain[layer] = np.ones((Nr, 1))
        self.Bias[layer] = np.zeros((Nr, 1))
    if verbose:
        print('done.')
        sys.stdout.flush()
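A minimal sketch of the scalar-broadcast idiom above: a 0-D config value is tiled into a per-layer vector with repmat:

import numpy as np
import numpy.matlib as npm

Nl = 4
rhos = np.array(0.9)                 # scalar config value, a 0-D array
if len(rhos.shape) == 0:
    rhos = npm.repmat(rhos, 1, Nl)[0]
print(rhos)                          # [0.9 0.9 0.9 0.9]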
0
Source : DeepESN.py
with BSD 3-Clause "New" or "Revised" License
from lucapedrelli
def computeLayerState(self, input, layer, initialStatesLayer=None, DeepIP=0):
    # compute the state of a layer, with pre-training if DeepIP == 1
    state = np.zeros((self.Nr, input.shape[1]))
    if initialStatesLayer is None:
        initialStatesLayer = np.zeros(state[:, 0:1].shape)
    input = self.Win[layer][:, 0:-1].dot(input) + np.expand_dims(self.Win[layer][:, -1], 1)
    if DeepIP:
        state_net = np.zeros((self.Nr, input.shape[1]))
        state_net[:, 0:1] = input[:, 0:1]
        state[:, 0:1] = self.lis[layer] * np.tanh(np.multiply(self.Gain[layer], state_net[:, 0:1]) + self.Bias[layer])
    else:
        state[:, 0:1] = (1 - self.lis[layer]) * initialStatesLayer + self.lis[layer] * np.tanh(
            np.multiply(self.Gain[layer], self.W[layer].dot(initialStatesLayer) + input[:, 0:1]) + self.Bias[layer])
    for t in range(1, state.shape[1]):
        if DeepIP:
            state_net[:, t:t + 1] = self.W[layer].dot(state[:, t - 1:t]) + input[:, t:t + 1]
            state[:, t:t + 1] = (1 - self.lis[layer]) * state[:, t - 1:t] + self.lis[layer] * np.tanh(
                np.multiply(self.Gain[layer], state_net[:, t:t + 1]) + self.Bias[layer])
            eta = self.IPconf.eta
            mu = self.IPconf.mu
            sigma2 = self.IPconf.sigma ** 2
            # IP learning rule
            deltaBias = -eta * ((-mu / sigma2) + np.multiply(state[:, t:t + 1],
                                (2 * sigma2 + 1 - (state[:, t:t + 1] ** 2) + mu * state[:, t:t + 1]) / sigma2))
            deltaGain = eta / npm.repmat(self.Gain[layer], 1, state_net[:, t:t + 1].shape[1]) + deltaBias * state_net[:, t:t + 1]
            # update gain and bias of the activation function
            self.Gain[layer] = self.Gain[layer] + deltaGain
            self.Bias[layer] = self.Bias[layer] + deltaBias
        else:
            state[:, t:t + 1] = (1 - self.lis[layer]) * state[:, t - 1:t] + self.lis[layer] * np.tanh(
                np.multiply(self.Gain[layer], self.W[layer].dot(state[:, t - 1:t]) + input[:, t:t + 1]) + self.Bias[layer])
    return state
0
Source : seq_smnist.py
with Apache License 2.0
from mariacer
def _generate_data(self, x_data, y_data, max_seq_len, digits, seq_len,
                   n_samples, use_one_hot, class_partition):
    """Generates data for a single sequence stroke MNIST task with
    specified length and digits to use.
    Args:
        x_data (list): Original stroke mnist input data, with every list
            entry being a numpy.ndarray of shape ``[4, stroke_seq_len]``
        y_data (list): Original stroke mnist labels, with every list
            entry being a numpy.ndarray of shape ``[10]``
        max_seq_len (int): The maximum length of a sequence (i.e. number of
            timesteps of a sample)
        digits (tuple): The two digits used to build the sequence
        seq_len (int): The length of the sequence of digits to build
        n_samples (int): The number of samples that should be generated
        use_one_hot (bool): Whether or not to use one hot encodings
        class_partition (list): If sequences should be grouped into 2
            different classes this list specifies the class partition.
    Returns:
        (tuple): Tuple containing:
        - **in_data** (numpy.ndarray): Numpy array of shape
          ``[n_samples, max_num_time_steps * 4]``.
        - **out_data** (numpy.ndarray): Numpy array of shape
          ``[n_samples, max_num_time_steps]``.
        - **sample_lengths** (numpy.ndarray): The original unpadded sequence
          lengths.
    """
    # construct all possible classes
    classes = ["".join(seq) for seq in
               itertools.product("01", repeat=seq_len)]
    # get the right number of samples per class to get a balanced data set
    # with the desired n_samples.
    num = n_samples
    div = len(classes)
    n_samples_per_class = [num // div + (1 if x < num % div else 0)
                           for x in range(div)]
    # find indices of samples with the wanted digit class
    y_data = [np.argmax(y) for y in y_data]
    digit_idx = []
    digit_idx.append(np.where(np.asarray(y_data) == digits[0])[0])
    digit_idx.append(np.where(np.asarray(y_data) == digits[1])[0])
    # generate samples for every class
    samples = []
    labels = []
    for i, c in enumerate(classes):
        this_label = i
        digits_to_sample = [int(c[i]) for i in range(len(c))]
        for s in range(n_samples_per_class[i]):
            this_sample = None
            for d in digits_to_sample:
                rand_idx = self._rstate.randint(len(digit_idx[d]))
                sample_idx = digit_idx[d][rand_idx]
                digit_sample = x_data[sample_idx]
                if this_sample is None:
                    this_sample = digit_sample
                else:
                    this_sample = np.vstack((this_sample, digit_sample))
            samples.append(this_sample)
            labels.append(this_label)
    # if configured, sort labels into 2 classes
    labels = np.asarray(labels)
    if self.two_class:
        lbl_mask = np.isin(labels, class_partition)
        labels[~lbl_mask] = 0
        labels[lbl_mask] = 1
    # structure output data
    out_data = labels.reshape(-1, 1)
    if use_one_hot:
        n_classes = 2 ** seq_len
        if self.two_class:
            n_classes = 2
        # FIXME We shouldn't call this method if the validation set size is
        # zero.
        if out_data.size == 0:
            out_data = np.matlib.repmat(out_data, 1, n_classes)
        else:
            # FIXME use internal method `_to_one_hot` and set required class
            # attributes beforehand.
            one_hot_encoder = OneHotEncoder(categories=[range(n_classes)])
            one_hot_encoder.fit(npm.repmat(np.arange(n_classes), 1, 1).T)
            out_data = one_hot_encoder.transform(out_data).toarray()
        if self.target_per_timestep:
            out_data = np.matlib.repmat(np.asarray(out_data), 1, max_seq_len)
    # structure input data
    in_data = np.zeros((n_samples, max_seq_len, 4))
    sample_lengths = np.zeros(n_samples)
    for i, s in enumerate(samples):
        in_data[i, :s.shape[0], :] = s
        sample_lengths[i] = s.shape[0]
    in_data = self._flatten_array(in_data)
    return in_data, out_data, sample_lengths
0
Source : util.py
with GNU General Public License v3.0
from masabdi
def __init__(self, dm=None, dms=None, skel=None, com2D=None, flag=None):
    if not isinstance(com2D, np.ndarray):
        (self.crop_dm, self.trans, self.com3D) = dm.Detector()
    else:
        (self.crop_dm, self.trans, self.com3D) = dm.cropArea3D(dm.dmData, com2D)
        (self.crop_dms, self.transs, self.com3Ds) = dms.cropArea3D(dms.dmData, com2D)
    self.normDm()
    self.dm = dm.dmData
    try:
        self.normDms()
        self.dms = dms.dmData
    except:
        pass
    if isinstance(skel, np.ndarray):
        if len(skel) % 3 != 0:
            raise ValueError('invalid length of the skeleton mat')
        jntNum = len(skel) // 3
        self.skel = skel.astype(np.float32)
        # crop_skel is the training label for the neural network, normalized w.r.t. com3D
        self.crop_skel = (self.skel - repmat(self.com3D, 1, jntNum))[0]
        self.crop_skel = self.crop_skel.astype(np.float32)
        self.normSkel()
        self.skel2 = np.flipud(self.crop2D())
0
Source : mtm_functions.py
with GNU General Public License v3.0
from mathildejutras
def mtm_svd_lfv(ts2d, nw, kk, dt):
    # Compute spectrum at each grid point
    p, n = ts2d.shape
    # Remove the mean and divide by std
    vm = np.nanmean(ts2d, axis=0)  # mean
    vmrep = repmat(vm, ts2d.shape[0], 1)
    ts2d = ts2d - vmrep
    vs = np.nanstd(ts2d, axis=0)   # standard deviation
    vsrep = repmat(vs, ts2d.shape[0], 1)
    ts2d = np.divide(ts2d, vsrep)
    ts2d = np.nan_to_num(ts2d)
    # Slepian tapers
    psi = dpss(p, nw, kk)
    npad = 2 ** int(np.ceil(np.log2(abs(p))) + 2)
    nf = int(npad / 2)
    ddf = 1. / (npad * dt)
    fr = np.arange(0, nf) * ddf
    # Get the matrix of spectra
    psimats = []
    for k in range(kk):
        psimat2 = np.transpose(repmat(psi[k, :], ts2d.shape[1], 1))
        psimat2 = np.multiply(psimat2, ts2d)
        psimats.append(psimat2)
    psimats = np.array(psimats)
    nev = np.fft.fft(psimats, n=npad, axis=1)
    nev = np.fft.fftshift(nev, axes=(1))
    nev = nev[:, nf:, :]
    # Calculate SVD for each frequency
    lfvs = np.zeros(nf) * np.nan
    for j in range(nf):
        U, S, V = np.linalg.svd(nev[:, j, :], full_matrices=False)
        lfvs[j] = S[0] ** 2 / (np.nansum(S[1:]) ** 2)
    return fr, lfvs
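A minimal standalone sketch of the standardization step shared by the mtm_svd_* functions (column means and stds tiled with repmat); broadcasting is the usual alternative:

import numpy as np
from numpy.matlib import repmat

ts2d = np.random.randn(100, 4) * 3 + 5
vm = np.nanmean(ts2d, axis=0)
z = ts2d - repmat(vm, ts2d.shape[0], 1)
vs = np.nanstd(z, axis=0)
z = z / repmat(vs, ts2d.shape[0], 1)
print(np.allclose(z.mean(0), 0), np.allclose(z.std(0), 1))  # True True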
0
Source : mtm_functions.py
with GNU General Public License v3.0
from mathildejutras
def mtm_svd_conf(ts2d, nw, kk, dt, niter, sl):
    # Compute spectrum at each grid point
    p, n = ts2d.shape
    npad = 2 ** int(np.ceil(np.log2(abs(p))) + 2)
    nf = int(npad / 2)
    ddf = 1. / (npad * dt)
    fr = np.arange(0, nf) * ddf
    # range of frequencies
    fran = [0, 0.5 / dt]
    nfmin = (np.abs(fr - fran[0])).argmin()
    nfmax = (np.abs(fr - fran[1])).argmin()
    fr = fr[nfmin:nfmax]
    q = [int(niter * each) for each in sl]
    # Remove the mean and divide by std
    vm = np.nanmean(ts2d, axis=0)  # mean
    vmrep = repmat(vm, ts2d.shape[0], 1)
    ts2d = ts2d - vmrep
    vs = np.nanstd(ts2d, axis=0)   # standard deviation
    vsrep = repmat(vs, ts2d.shape[0], 1)
    ts2d = np.divide(ts2d, vsrep)
    ts2d = np.nan_to_num(ts2d)
    # Slepian tapers
    psi = dpss(p, nw, kk)
    partvar = np.ones((niter, len(fr) + 1)) * np.nan
    for it in range(niter):
        print('Iter %i' % it)
        shr = np.random.permutation(ts2d)  # random permutation of each time series
        # Spectral estimation
        psimats = []
        for k in range(kk):
            psimat2 = np.transpose(repmat(psi[k, :], shr.shape[1], 1))
            psimat2 = np.multiply(psimat2, shr)
            psimats.append(psimat2)
        psimats = np.array(psimats)
        nevconf = np.fft.fft(psimats, n=npad, axis=1)
        nevconf = np.fft.fftshift(nevconf, axes=(1))
        nevconf = nevconf[:, nf:, :]
        # Calculate SVD for each frequency
        for j in range(nfmin, nfmax):
            U, S, V = np.linalg.svd(nevconf[:, j, :], full_matrices=False)
            partvar[it, j] = S[0] ** 2 / (np.nansum(S[1:]) ** 2)
    np.sort(partvar, axis=1)
    freq_sec = nw / (p * dt)
    ibs = (np.abs(fr - freq_sec)).argmin()
    ibs = range(ibs)
    ibns = range(ibs[-1] + 1, len(fr))
    fray = 1. / (p * dt)
    fbw = 2 * nw * fray
    ifbw = int(round(fbw / (fr[2] - fr[1])))
    evalper = np.zeros((len(q), len(fr)))
    for i in range(len(q)):
        y2 = np.zeros(len(fr))
        y = partvar[q[i], :]  # right order indices?
        y1 = signal.medfilt(y, ifbw)
        y2[ibs] = np.mean(y[ibs])
        a = np.polyfit(fr[ibns], y1[ibns], 10)
        y2[ibns] = np.polyval(a, fr[ibns])
        evalper[i, :] = y2
    return fr, evalper
0
Source : mtm_functions.py
with GNU General Public License v3.0
from mathildejutras
def mtm_svd_recon(ts2d, nw, kk, dt, fo):
    imode = 0
    lan = 0
    vw = 0
    # Compute spectrum at each grid point
    p, n = ts2d.shape
    # Remove the mean and divide by std
    vm = np.nanmean(ts2d, axis=0)  # mean
    vmrep = repmat(vm, ts2d.shape[0], 1)
    ts2d = ts2d - vmrep
    vs = np.nanstd(ts2d, axis=0)   # standard deviation
    vsrep = repmat(vs, ts2d.shape[0], 1)
    ts2d = np.divide(ts2d, vsrep)
    ts2d = np.nan_to_num(ts2d)
    # Slepian tapers
    psi = dpss(p, nw, kk)
    npad = 2 ** int(np.ceil(np.log2(abs(p))) + 2)
    nf = int(npad / 2)
    ddf = 1. / (npad * dt)
    fr = np.arange(0, nf) * ddf
    # Get the matrix of spectra
    psimats = []
    for k in range(kk):
        psimat2 = np.transpose(repmat(psi[k, :], ts2d.shape[1], 1))
        psimat2 = np.multiply(psimat2, ts2d)
        psimats.append(psimat2)
    psimats = np.array(psimats)
    nev = np.fft.fft(psimats, n=npad, axis=1)
    nev = np.fft.fftshift(nev, axes=(1))
    nev = nev[:, nf:, :]
    # Initialize the output matrices
    S = np.ones((kk, len(fo))) * np.nan
    vexp = []
    totvarexp = []
    iis = []
    D = vsrep
    envmax = np.zeros(len(fo)) * np.nan
    for i1 in range(len(fo)):
        # closest frequency
        iif = (np.abs(fr - fo[i1])).argmin()
        iis.append(iif)
        ff0 = fr[iif]
        print('( %i ) %.2f cycles/yr | %.2f yr' % (iif, ff0, 1 / ff0))
        U, S0, Vh = np.linalg.svd(nev[:, iif, :].T, full_matrices=False)
        # V = Vh.T.conj()
        V = Vh
        S[:, i1] = S0
        env1 = envel(ff0, iif, fr, dt, ddf, p, kk, psi, V)  # condition 1
        cs = [1]
        sn = [0]
        c = np.cos(2 * np.pi * ff0 * dt)
        s = np.sin(2 * np.pi * ff0 * dt)
        for i2 in range(1, p):
            cs.append(cs[i2 - 1] * c - sn[i2 - 1] * s)
            sn.append(cs[i2 - 1] * s + sn[i2 - 1] * c)
        CS = [complex(cs[i], sn[i]) for i in range(len(cs))]
        CS = np.conj(CS)
        # Reconstructions
        R = np.real(D * S[imode, i1] * np.outer(U[:, imode], CS * env1).T)
        vsr = np.var(R, axis=0)
        vexp.append(vsr / (vs ** 2) * 100)
        totvarexp.append(np.nansum(vsr) / np.nansum(vs ** 2) * 100)
    return vexp, totvarexp, iis
0
Source : useful.py
with BSD 3-Clause "New" or "Revised" License
from MICA-MNI
def zscore_matrix(data, group, controlCode):
    """
    Z-score data relative to a given group (author: @saratheriver)
    Parameters
    ----------
    data : pandas.DataFrame
        Data matrix (e.g. thickness data), shape = (n_subject, n_region)
    group : list
        Group assignment (e.g. [0, 0, 0, 1, 1, 1]), same length as n_subject.
    controlCode : int
        Value that corresponds to the "baseline" group
    Returns
    -------
    Z : pandas.DataFrame
        Z-scored data relative to the control code
    """
    C = [i for i, x in enumerate(group) if x == controlCode]
    n = len(group)
    z1 = data - npm.repmat(np.mean(data.iloc[C, ]), n, 1)
    z2 = np.std(data.iloc[C, ], ddof=1)
    return z1 / z2
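A hedged usage sketch (assuming the function above is in scope, with pandas as pd and numpy.matlib as npm): z-scoring six subjects against the three controlCode == 0 rows; the DataFrame contents are toy values:

import numpy as np
import numpy.matlib as npm
import pandas as pd

data = pd.DataFrame(np.random.randn(6, 3) + 10.0)
group = [0, 0, 0, 1, 1, 1]
Z = zscore_matrix(data, group, controlCode=0)
print(Z.shape)   # (6, 3): each column z-scored against the control rows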
0
Source : initial_alignment.py
with Apache License 2.0
from MWod
with Apache License 2.0
from MWod
def ransac_rigid(source_points, target_points, confidence, threshold):
    try:
        max_iters = 25000
        transform, inliers = cv2.estimateAffinePartial2D(source_points, target_points, 0,
                                                         ransacReprojThreshold=threshold,
                                                         maxIters=max_iters,
                                                         confidence=confidence)
        source_points = np.squeeze(source_points, None)
        target_points = np.squeeze(target_points, None)
        transform = cv2.estimateRigidTransform(
            np.resize(source_points[matlib.repmat(inliers.astype(bool), 1, 2)],
                      (np.sum(inliers), 2)),
            np.resize(target_points[matlib.repmat(inliers.astype(bool), 1, 2)],
                      (np.sum(inliers), 2)),
            0)
        if transform is not None:
            t_transform = transform
            transform = np.eye(3)
            transform[0:2, 0:3] = t_transform
            failed = False
        else:
            transform = np.eye(3)
            failed = True
    except:
        transform = np.eye(3)
        failed = True
    return transform, failed