numpy.inner

Here are examples of the Python API numpy.inner taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

146 Examples
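
A minimal standalone sketch (not taken from any of the projects below) of what numpy.inner computes: for 1-D arrays it is the ordinary dot product, and for higher-dimensional arrays it sums products over the last axes.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([4.0, 5.0, 6.0])
print(np.inner(a, b))        # 32.0, same as np.dot(a, b)

A = np.arange(6.0).reshape(2, 3)
B = np.arange(12.0).reshape(4, 3)
print(np.inner(A, B).shape)  # (2, 4), equivalent to A @ B.T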

3 Source : kernels.py
with MIT License
from akhilvasvani

def linear_kernel(**kwargs):
    def f(x1, x2):
        return np.inner(x1, x2)
    return f


def polynomial_kernel(power, coef, **kwargs):

3 Source : kernels.py
with MIT License
from akhilvasvani

def polynomial_kernel(power, coef, **kwargs):
    def f(x1, x2):
        return (np.inner(x1, x2) + coef)**power
    return f


def rbf_kernel(gamma, **kwargs):
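
A hedged usage sketch for the two kernels above, with made-up vectors and assuming the functions are defined exactly as shown:

import numpy as np

x1 = np.array([1.0, 2.0])
x2 = np.array([3.0, 4.0])
print(linear_kernel()(x1, x2))                      # np.inner(x1, x2) = 11.0
print(polynomial_kernel(power=2, coef=1)(x1, x2))   # (11.0 + 1)**2 = 144.0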

3 Source : house_prices.py
with MIT License
from ayush194

def partial(data, theta, i):
	temp = 0.0
	for row in data:
		temp += (np.inner(row[:-1], theta) - row[-1]) * row[i]
	temp /= data.shape[0]
	return temp

def J(data, theta):

3 Source : house_prices.py
with MIT License
from ayush194

def J(data, theta):
	j = 0.0;
	for row in data:
		#print(theta.transpose())
		#print(np.multiply(row[:-1], theta.transpose()))
		j += (np.inner(row[:-1], theta) - row[-1]) ** 2
	j /= (2 * data.shape[0])
	return j

def J_alt(data, theta):

3 Source : svm_implemented.py
with MIT License
from ayush194

def gaussian(xi, xj, σ):
	tmp = xi - xj
	norm = (np.inner(tmp, tmp)).sum()
	k = math.exp(-norm / np.float64(2 * (σ ** 2)))
	return k

#print(gaussian(y, y, 1))

svc = svm.SVC(C=100, gamma=10, probability=True, kernel=gaussian)

3 Source : markov_switching.py
with MIT License
from birforce

    def cov_params_opg(self):
        """
        (array) The variance / covariance matrix. Computed using the outer
        product of gradients method.
        """
        score_obs = self.model.score_obs(self.params, transformed=True).T
        cov_params, singular_values = pinv_extended(
            np.inner(score_obs, score_obs))

        if self._rank is None:
            self._rank = np.linalg.matrix_rank(np.diag(singular_values))

        return cov_params

    @cache_readonly
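
As a hedged aside on the np.inner call above: for a 2-D score matrix of shape (n_params, n_obs), np.inner(score_obs, score_obs) is the Gram matrix score_obs @ score_obs.T, which the outer-product-of-gradients estimator then (pseudo-)inverts. A quick check with hypothetical data:

import numpy as np

score_obs = np.random.default_rng(0).standard_normal((3, 100))  # hypothetical (n_params, n_obs) scores
gram = np.inner(score_obs, score_obs)                            # shape (3, 3)
assert np.allclose(gram, score_obs @ score_obs.T)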

3 Source : imitator.py
with MIT License
from CalciferZh

  def compute_rodrigues(self, x, y):
    """
    Compute rotation matrix R such that y = Rx.

    Parameters
    ----------
    x: Ndarray to be rotated.
    y: Ndarray after rotation.

    """
    theta = np.arccos(np.inner(x, y) / (np.linalg.norm(x) * np.linalg.norm(y)))
    axis = np.squeeze(np.cross(x, y))
    return transforms3d.axangles.axangle2mat(axis, theta)

  def map_R_asf_smpl(self):

3 Source : N-Charlie.py
with GNU General Public License v3.0
from chbpku

def emergency_index(info):  # consider time, distance
    time = np.inner(info[0], info[1]) / (np.inner(info[1], info[1]) + 1e-12)  # < 0: approaching, > 0: leaving
    dist = np.linalg.norm(info[0] - time * info[1])
    dist -= (info[2] + 1)
    time_index = 0 if time > 0 else 1 - np.tanh(-time / 50)
    dist_index = 1 / (np.exp(dist / 1.5))
    # print(time,time_index)
    return time_index * dist_index


def value_index(info):

3 Source : N-Charlie.py
with GNU General Public License v3.0
from chbpku

def value_index(info):
    time = np.inner(info[0], info[1]) / (np.inner(info[1], info[1]) + 1e-12)  # < 0: approaching, > 0: leaving
    dist = np.linalg.norm(info[0] - time * info[1])
    dist -= (info[2] + 1)
    time_index = 0 if time < 0 else 1 - np.tanh(-time / 50)
    dist_index = 2 / (1 + np.exp(dist / 5.))
    return time_index * dist_index


def chase(info):

3 Source : N-Charlie.py
with GNU General Public License v3.0
from chbpku

def chase(info):
    # dv = Consts['DELTA_VELOC']*Consts['EJECT_MASS_RATIO']/(1.5*Consts['DEFAULT_RADIUS'])
    A = np.tan(np.angle(info[0][0] + 1j * info[0][1]))  # A and D are two parameters to calculate x, y
    D = (info[1][0] - A * info[1][1]) / (Consts["DELTA_VELOC"] * Consts["EJECT_MASS_RATIO"])
    if A ** 2 - D ** 2 + 1 >= 0:
        y = (-A * D + np.sqrt(A ** 2 - D ** 2 + 1)) / (A ** 2 + 1)
        x = -info[0][0] / abs(info[0][0]) * np.sqrt(1 - y ** 2)
        z = x + 1j * y
    else:
        if np.inner(info[0], info[1]) > 0:
            z = info[1][0] + 1j * (info[1][1])
            z = -z / abs(z)
        else:
            z = info[1][0] + 1j * info[1][1]
            z = z / abs(z)
    return z


def dodge(info):

3 Source : ephemeris.py
with MIT License
from commaai

  def get_sat_info(self, time):
    if not self.healthy:
      return None
    dt = time - self.data['t0']
    deg = self.data['deg']
    deg_t = self.data['deg_t']
    sat_pos = np.array([sum((dt**p)*self.data['x'][deg-p] for p in range(deg+1)),
                        sum((dt**p)*self.data['y'][deg-p] for p in range(deg+1)),
                        sum((dt**p)*self.data['z'][deg-p] for p in range(deg+1))])
    sat_vel = np.array([sum(p*(dt**(p-1))*self.data['x'][deg-p] for p in range(1,deg+1)),
                        sum(p*(dt**(p-1))*self.data['y'][deg-p] for p in range(1,deg+1)),
                        sum(p*(dt**(p-1))*self.data['z'][deg-p] for p in range(1,deg+1))])
    time_err = sum((dt**p)*self.data['clock'][deg_t-p] for p in range(deg_t+1))
    time_err_rate = sum(p*(dt**(p-1))*self.data['clock'][deg_t-p] for p in range(1,deg_t+1))
    time_err_with_rel = time_err - 2*np.inner(sat_pos, sat_vel)/SPEED_OF_LIGHT**2
    return sat_pos, sat_vel, time_err_with_rel, time_err_rate


class GPSEphemeris(Ephemeris):

3 Source : utils.py
with MIT License
from cpinte

def rotate_vec(u,v,angle):
    '''
      rotate a vector (u) around an axis defined by another vector (v)
      by an angle (theta) using the Rodrigues rotation formula
    '''
    k = v/np.sqrt(np.inner(v,v))
    w = np.cross(k,u)
    k_dot_u = np.inner(k,u)
    for i,uval in enumerate(u):
        u[i] = u[i]*np.cos(angle) + w[i]*np.sin(angle) + k[i]*k_dot_u*(1.-np.cos(angle))
    return u

def rotate_coords(x,y,z,inc,PA):
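
A hedged usage sketch of rotate_vec above (hypothetical inputs; note that it modifies u in place): rotating the x unit vector around the z axis by 90 degrees should give approximately the y unit vector.

import numpy as np

u = np.array([1.0, 0.0, 0.0])        # vector to rotate
v = np.array([0.0, 0.0, 1.0])        # rotation axis
print(rotate_vec(u, v, np.pi / 2))   # approximately [0., 1., 0.]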

3 Source : pwr.py
with MIT License
from dmparker0

    def combine(self):
        self.combined = self.systems[0].values[['Team']]
        for system in self.systems:
            self.combined = pd.merge(self.combined, system.values, on='Team', suffixes=('','_'))
            self.combined = self.combined[[x for x in self.combined if x != 'Games Played_']]
            new_z = stats.zscore(self.combined[system.pwrcol].values)
            new_weights = [system.weight] * self.combined.shape[0]
            if 'z_scores' not in self.combined:    
                self.combined['z_scores'] = [[x] for x in new_z]
                self.combined['weights'] = [[x] for x in new_weights]
            else:
                self.combined['z_scores'] = [x[0] + [x[1]] for x in list(zip(self.combined['z_scores'].values, new_z))]
                self.combined['weights'] = [x[0] + [x[1]] for x in list(zip(self.combined['weights'].values, new_weights))]
        zipped = zip(self.combined['z_scores'].values, self.combined['weights'].values)
        self.combined['Avg_z'] = [np.inner(x, y) / np.sum(y) for x, y in zipped]
        self.combined['PWR'] = self.combined['Avg_z'].values * 5
        return PWR(regress_to=self.regress_to, values=self.combined[['Team','PWR','Games Played']]).calculate()

3 Source : solvers.py
with MIT License
from ExcitedStates

        def __call__(self):
            if not self.initialized:
                self.initialize()

            self._solution = cvxopt.solvers.qp(
                self._quad_obj, self._lin_obj,
                self._le_constraints, self._le_bounds
            )
            self.obj_value = 2 * self._solution['primal objective'] + np.inner(self._target, self._target)
            self.weights = np.asarray(self._solution['x']).ravel()

    class CPLEX_MIQPSolver(_Base_QPSolver):

3 Source : solvers.py
with MIT License
from ExcitedStates

        def initialize(self):
            self._quad_obj = np.inner(self._models, self._models)
            self._lin_obj = -np.inner(self._models, self._target)

            self.initialized = True

        def __call__(self, cardinality=None, exact=False, threshold=None):

3 Source : extract_from_single_split.py
with GNU General Public License v3.0
from gonenhila

def similarity(w1, w2, space):
    # dot product of vectors corresponding to two words (w1 and w2) in a space
    i1 = w2i[space][w1]
    i2 = w2i[space][w2]
    vec1 = wv[space][i1, :]
    vec2 = wv[space][i2, :]
    return np.inner(vec1, vec2)

def extract_freqs(filename, vocab):

3 Source : geometry.py
with MIT License
from huawei-noah

    def getcorners(self):
        corners = self._getunrotatedegocorners()

        if abs(self.yaw) > 1e-30 or abs(self.pitch) > 1e-30 or abs(self.roll) > 1e-30:
            corners = np.inner(corners, self.rotation)

        corners += self.translation

        return corners

    def getvolume(self):

3 Source : geometry.py
with MIT License
from huawei-noah

def yaw_rotate(yaw, vectors):
    rotation = yaw_rotation(yaw)
    return np.inner(vectors, rotation)


# In webots yaw is CC around the y-axis!
# In webots pitch is CC around the z-axis!
# In webots roll is CC around the x-axis!
def yawrollpitch_rotation(yaw, pitch, roll):

3 Source : geometry.py
with MIT License
from huawei-noah

def rotate(yaw, pitch, roll, vectors):
    rotation = yawrollpitch_rotation(yaw, pitch, roll)
    return np.inner(vectors, rotation)


def visualize_boxes(ax, boxes, markers=None, colors=None, faces_color=None):
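
As a hedged note on this pattern: with row vectors of shape (n, 3) and a rotation matrix R, np.inner(vectors, R) equals vectors @ R.T, i.e. it applies R to every row. A small check with a plain z-axis rotation (illustrative only, not the Webots axis convention described in the comments above):

import numpy as np

theta = np.pi / 2
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
vectors = np.array([[1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0]])
rotated = np.inner(vectors, R)
assert np.allclose(rotated, vectors @ R.T)
assert np.allclose(rotated[0], [0.0, 1.0, 0.0])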

3 Source : exp_family.py
with Apache License 2.0
from hyperion-ml

    def elbo(self, x, u_x=None, N=1, log_h=None, sample_weight=None, batch_size=None):
        assert self.is_init
        if u_x is None:
            N, u_x = self.accum_suff_stats(
                x, sample_weight=sample_weight, batch_size=batch_size
            )
        if log_h is None:
            log_h = self.accum_log_h(x, sample_weight=sample_weight)
        return log_h + np.inner(u_x, self.eta) - N * self.A

    def log_prob(self, x, u_x=None, method="nat"):

3 Source : exp_family.py
with Apache License 2.0
from hyperion-ml

    def log_prob_nat(self, x, u_x=None):
        assert self.is_init
        if u_x is None:
            u_x = self.compute_suff_stats(x)
        return self.log_h(x) + np.inner(u_x, self.eta) - self.A

    @staticmethod

3 Source : exp_family_mixture.py
with Apache License 2.0
from hyperion-ml

    def elbo(self, x, u_x=None, N=1, log_h=None, sample_weight=None, batch_size=None):
        if u_x is None:
            N, u_x = self.accum_suff_stats(
                x, sample_weight=sample_weight, batch_size=batch_size
            )
        if log_h is None:
            log_h = self.accum_log_h(x, sample_weight=sample_weight)
        return log_h + np.sum(u_x * self.eta) + np.inner(N, self.log_pi - self.A)

    def log_prob(self, x, u_x=None, mode="nat"):

3 Source : math.py
with Apache License 2.0
from hyperion-ml

def fisher_ratio(mu1, Sigma1, mu2, Sigma2):
    """Computes the Fisher ratio between two classes
    from the class means and covariances.
    """
    S = Sigma1 + Sigma2
    L = invert_pdmat(S)[0]
    delta = mu1 - mu2
    return np.inner(delta, L(delta))


def fisher_ratio_with_precs(mu1, Lambda1, mu2, Lambda2):

3 Source : exponential_family.py
with Apache License 2.0
from hyperion-ml

    def elbo(self, x, u_x=None, N=1, logh=None, sample_weight=None, batch_size=None):
        if u_x is None:
            N, u_x = self.accum_suff_stats(
                x, sample_weight=sample_weight, batch_size=batch_size
            )
        if logh is None:
            logh = self.accum_logh(x, sample_weight=sample_weight)
        return logh + np.inner(u_x, self.eta) - N * self.A

    def eval_llk(self, x, u_x=None, mode="nat"):

3 Source : exponential_family.py
with Apache License 2.0
from hyperion-ml

    def eval_llk_nat(self, x, u_x=None):
        if u_x is None:
            u_x = self.compute_suff_stats(x)
        return self.logh(x) + np.inner(u_x, self.eta) - self.A

    @staticmethod

3 Source : vector.py
with MIT License
from Jaseci-Labs

def dot_product(vec_a: list, vec_b: list, meta):
    """
    Calculate the dot product of two given vectors
    Param 1 - First vector
    Param 2 - Second vector

    Return - float between 0 and 1
    """

    return np.inner(vec_a, vec_b).tolist()


@jaseci_action()

3 Source : exp_family.py
with Apache License 2.0
from jsalt2019-diadet

    def elbo(self, x, u_x=None, N=1, log_h=None, sample_weight=None, batch_size=None):
        assert self.is_init
        if u_x is None:
            N, u_x = self.accum_suff_stats(x, sample_weight=sample_weight,
                                           batch_size=batch_size)
        if log_h is None:
            log_h = self.accum_log_h(x, sample_weight=sample_weight)
        return log_h + np.inner(u_x, self.eta) - N*self.A


    
    def log_prob(self, x, u_x=None, method='nat'):

3 Source : exp_family.py
with Apache License 2.0
from jsalt2019-diadet

    def log_prob_nat(self, x, u_x = None):
        assert self.is_init
        if u_x is None:
            u_x = self.compute_suff_stats(x)
        return self.log_h(x) + np.inner(u_x, self.eta) - self.A

    
    
    @staticmethod

3 Source : exp_family_mixture.py
with Apache License 2.0
from jsalt2019-diadet

    def elbo(self, x, u_x=None, N=1, log_h=None, sample_weight=None, batch_size=None):
        if u_x is None:
            N, u_x = self.accum_suff_stats(x, sample_weight=sample_weight,
                                           batch_size=batch_size)
        if log_h is None:
            log_h = self.accum_log_h(x, sample_weight=sample_weight)
        return log_h + np.sum(u_x * self.eta) + np.inner(N, self.log_pi - self.A)

    
    def log_prob(self, x, u_x=None, mode='nat'):

3 Source : math.py
with Apache License 2.0
from jsalt2019-diadet

def fisher_ratio(mu1, Sigma1, mu2, Sigma2):
    """Computes the Fisher ratio between two classes
       from the class means and covariances.
    """
    S=Sigma1+Sigma2
    L=invert_pdmat(S)[0]
    delta=mu1-mu2
    return np.inner(delta, L(delta))



def fisher_ratio_with_precs(mu1, Lambda1, mu2, Lambda2):

3 Source : exponential_family.py
with Apache License 2.0
from jsalt2019-diadet

    def elbo(self, x, u_x=None, N=1, logh=None, sample_weight=None, batch_size=None):
        if u_x is None:
            N, u_x = self.accum_suff_stats(x, sample_weight=sample_weight,
                                        batch_size=batch_size)
        if logh is None:
            logh = self.accum_logh(x, sample_weight=sample_weight)
        return logh + np.inner(u_x, self.eta) - N*self.A

    
    def eval_llk(self, x, u_x=None, mode='nat'):

3 Source : exponential_family.py
with Apache License 2.0
from jsalt2019-diadet

    def eval_llk_nat(self, x, u_x = None):
        if u_x is None:
            u_x = self.compute_suff_stats(x)
        return self.logh(x) + np.inner(u_x, self.eta) - self.A

    
    
    @staticmethod

3 Source : geometric.py
with BSD 2-Clause "Simplified" License
from krematas

def ray_plane_intersection(ray_origin, ray_dir, plane_origin, plane_normal):
    n_rays = ray_dir.shape[0]
    denom = np.inner(plane_normal, ray_dir)
    p0l0 = plane_origin - ray_origin
    t = np.divide(np.inner(p0l0, plane_normal), denom)
    point3d = np.tile(ray_origin, (n_rays, 1)) + np.multiply(np.tile(t, (3, 1)).T, ray_dir)
    return point3d


def ray_triangle_intersection_vec(ray_origin, ray_dir, v1, v2, v3):

3 Source : KNN_classification.py
with Apache License 2.0
from lijinmin1

def cal_distance(vec1,vec2,cos=False,n=2):
    '''
    Compute the distance between two vectors. Defaults to Euclidean distance; the order n can be adjusted. If cos is True, compute cosine similarity instead.
    :return: a distance value
    '''
    if not cos:
        return np.sum((vec1-vec2)**2)**0.5
    else:
        num = np.inner(vec1,vec2)   # for row vectors this is A * B.T
        denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
        if denom==0:
            denom=0.001
        cos = num / denom  # cosine value
        return 1-cos

def KNN_predict(k,word_list,train_met,test_met,train_df):

3 Source : KNN_regression.py
with Apache License 2.0
from lijinmin1

def cal_distance(vec1,vec2,cos=False,n=2):
    '''
    Compute the distance between two vectors. Defaults to Euclidean distance; the order can be adjusted, or cosine similarity can be used instead.
    :return: a distance value
    '''
    if not cos:
        return np.sum((vec1-vec2)**2)**0.5
    else:
        num = np.inner(vec1,vec2)  # for row vectors this is A * B.T
        denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
        if denom==0:
            denom=0.001
        cos = num / denom  # cosine value
        return 1-cos


def normalize(target_list):

3 Source : RT_transform.py
with Apache License 2.0
from liyi14

def calc_rt_dist_q(Rq_src, Rq_tgt, T_src, T_tgt):

    rd_rad = np.arccos(np.inner(Rq_src, Rq_tgt) ** 2 * 2 - 1)
    rd_deg = rd_rad / pi * 180
    td = LA.norm(T_tgt - T_src)
    return rd_deg, td


def calc_rt_dist_m(pose_src, pose_tgt):

3 Source : filter_item_by_word_similarity.py
with MIT License
from m3dev

    def run(self):
        word2items = self.load('word2items')
        word2embedding = self.load('word2embedding')
        item2title_embedding = self.load('item2title_embedding')

        filtered_word2items = defaultdict(list)
        for word, items in word2items.items():
            word_embedding = word2embedding[word]
            for item in items:
                title_embedding = item2title_embedding[item]
                if np.inner(word_embedding, title_embedding) > self.no_below:
                    filtered_word2items[word].append(item)

        self.dump(dict(filtered_word2items))

3 Source : spline.py
with GNU Lesser General Public License v3.0
from meco-group

    def __call__(self, x):
        """Evaluate TensorBSpline
        There still seems to be something wrong here...
        """
        s = np.inner(self.basis[-1](x[-1]).toarray(), self.coeffs)
        for i in reversed(list(range(self.dims() - 1))):
            s = np.inner(self.basis[i](x[i]).toarray(), s)
        return s

    def __add__(self, other):

3 Source : spline.py
with GNU Lesser General Public License v3.0
from meco-group

    def integral(self):
        """Returns the value of the integral over the support.

        This is a literal implementation of formula X.33 from deBoor and
        assumes that at x = knots[-1], only the last basis function is active
        """
        knots = [b.knots for b in self.basis]
        coeffs = self.coeffs
        deg = [b.degree for b in self.basis]
        K = [(k[d + 1:] - k[:-(d + 1)]) / (d + 1)
             for (k, d) in zip(knots, deg)]
        if self.dims() == 2: #get_module(self.coeffs) in ['cvxpy', 'cvxopt']:
            i = cvxopt.matrix(K[0]).T * self.coeffs * cvxopt.matrix(K[1])
            return i
        i = np.inner(K[-1], coeffs)
        for ki in K[:-1]:
            i = np.inner(ki, i)
        return i

3 Source : _linalg.py
with MIT License
from mhostetter

def inner(a, b):
    """
    https://numpy.org/doc/stable/reference/generated/numpy.inner.html#numpy.inner
    """
    if not type(a) is type(b):
        raise TypeError(f"Operation 'inner' requires both arrays be in the same Galois field, not {type(a)} and {type(b)}.")

    if type(a).is_prime_field:
        return _lapack_linalg(a, b, np.inner)

    if a.ndim == 0 or b.ndim == 0:
        return a * b
    if not a.shape[-1] == b.shape[-1]:
        raise ValueError(f"Operation 'inner' requires `a` and `b` to have the same last dimension, not {a.shape} and {b.shape}.")

    return np.sum(a * b, axis=-1)


def outer(a, b, out=None):  # pylint: disable=unused-argument
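
For ordinary 1-D integer vectors (outside the Galois-field types above), the fallback expression np.sum(a * b, axis=-1) agrees with numpy.inner; a small hedged check:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
assert np.inner(a, b) == np.sum(a * b, axis=-1) == 32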

3 Source : test_linalg.py
with MIT License
from mhostetter

def test_inner_exceptions():
    with pytest.raises(TypeError):
        a = galois.GF(2**4).Random(5)
        b = galois.GF(2**5).Random(5)
        np.inner(a, b)
    with pytest.raises(ValueError):
        a = galois.GF(2**4).Random((3,4))
        b = galois.GF(2**4).Random((3,5))
        np.inner(a, b)


def test_inner_scalar_scalar(field):

3 Source : test_linalg.py
with MIT License
from mhostetter

def test_inner_scalar_scalar(field):
    dtype = random.choice(field.dtypes)
    a = field.Random(dtype=dtype)
    b = field.Random(dtype=dtype)
    c = np.inner(a, b)
    assert type(c) is field
    assert c.dtype == dtype
    assert c == a*b


def test_inner_vector_vector(field):

3 Source : test_linalg.py
with MIT License
from mhostetter

def test_inner_vector_vector(field):
    dtype = random.choice(field.dtypes)
    a = field.Random(3, dtype=dtype)
    b = field.Random(3, dtype=dtype)
    c = np.inner(a, b)
    assert type(c) is field
    assert c.dtype == dtype
    assert array_equal(c, np.sum(a * b))


def test_outer_exceptions():

3 Source : nonce.py
with MIT License
from NLPrinceton

def rank_nonces(w2v, nonces, vectors):
  ranks = []
  SRR = 0.0
  for nonce, vector in zip(nonces, vectors):
    vector /= norm(vector)
    sim = np.inner(w2v[nonce], vector)
    r = sum(np.inner(v, vector)>sim for v in w2v.values())+1
    SRR += 1.0/r
    ranks.append(r)
  med = np.median(ranks)
  write('\rMedian='+str(med)+'; MRR='+str(SRR/len(nonces))+'\n')


def nonces(model, w2v, C, X, words, counts):

3 Source : nonce.py
with MIT License
from NLPrinceton

def eval_chimeras(w2v, probelists, ratlists, vectors):
  rhos = []
  for probes, ratings, vector in zip(probelists, ratlists, vectors):
    sims = []
    vector /= norm(vector)
    for i, probe in enumerate(probes):
      try:
        sims.append(np.inner(w2v[probe]/norm(w2v[probe]), vector))
      except KeyError:
        ratings = ratings[:i]+ratings[i+1:]
    rhos.append(spearmanr(ratings, sims))
  write('\ravg rho='+str(np.mean(rhos))+'\n')


def chimeras(model, w2v, C, X, counts):

3 Source : synset.py
with MIT License
from NLPrinceton

def cossim(u, v):
  normu = norm(u)
  if normu:
    normv = norm(v)
    if normv:
      return np.inner(u, v)/normu/normv
  return 0.0


def wordnet():
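
A hedged usage sketch of cossim above (assuming norm is numpy.linalg.norm, as the snippet implies): parallel vectors give 1.0, orthogonal vectors give 0.0, and a zero vector falls through to 0.0.

import numpy as np
from numpy.linalg import norm

print(cossim(np.array([1.0, 0.0]), np.array([2.0, 0.0])))  # 1.0
print(cossim(np.array([1.0, 0.0]), np.array([0.0, 3.0])))  # 0.0
print(cossim(np.zeros(2), np.array([1.0, 1.0])))           # 0.0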

3 Source : GSASIIlattice.py
with MIT License
from pedrobcst

def prodMGMT(G,Mat):
    '''Transform metric tensor by matrix
    
    :param G: array metric tensor
    :param Mat: array transformation matrix
    :return: array new metric tensor
    
    '''
    return np.inner(np.inner(Mat,G),Mat)        #right
#    return np.inner(Mat,np.inner(Mat,G))       #right
#    return np.inner(np.inner(G,Mat).T,Mat)      #right
#    return np.inner(Mat,np.inner(G,Mat).T)     #right
    
def TransformCell(cell,Trans):
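
A hedged check of the identity behind prodMGMT above: for a symmetric metric tensor G, np.inner(np.inner(Mat, G), Mat) equals the congruence transform Mat @ G @ Mat.T (hypothetical values):

import numpy as np

G = np.array([[2.0, 0.5, 0.0],
              [0.5, 3.0, 0.0],
              [0.0, 0.0, 1.0]])     # symmetric metric tensor
Mat = np.array([[1.0, 1.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0]])   # transformation matrix
assert np.allclose(np.inner(np.inner(Mat, G), Mat), Mat @ G @ Mat.T)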

3 Source : GSASIIlattice.py
with MIT License
from pedrobcst

def HKL2SpAng(H,cell,SGData):
    """Computes spherical coords for hkls; view along 001

    :param array H: arrays of hkl
    :param tuple cell: a,b,c, alpha, beta, gamma (degrees)
    :param dict SGData: space group dictionary
    :returns: arrays of r,phi,psi (radius,inclination,azimuth) about 001 
    """
    A,B = cell2AB(cell)
    xH = np.inner(B.T,H)
    r = np.sqrt(np.sum(xH**2,axis=0))
    phi = acosd(xH[2]/r)
    psi = atan2d(xH[1],xH[0])
    phi = np.where(phi>90.,180.-phi,phi)
#    GSASIIpath.IPyBreak()
    return r,phi,psi
    
def U6toUij(U6):

3 Source : GSASIIlattice.py
with MIT License
from pedrobcst

def CosAngle(U,V,G):
    """ calculate cos of angle between U & V in generalized coordinates 
    defined by metric tensor G

    :param U: 3-vectors assume numpy arrays, can be multiple reflections as (N,3) array
    :param V: 3-vectors assume numpy arrays, only as (3) vector
    :param G: metric tensor for U & V defined space assume numpy array
    :returns:
        cos(phi)
    """
    u = (U.T/np.sqrt(np.sum(np.inner(U,G)*U,axis=1))).T
    v = V/np.sqrt(np.inner(V,np.inner(G,V)))
    cosP = np.inner(u,np.inner(G,v))
    return cosP

def CosSinAngle(U,V,G):

3 Source : GSASIIlattice.py
with MIT License
from pedrobcst

def CosSinAngle(U,V,G):
    """ calculate sin & cos of angle between U & V in generalized coordinates 
    defined by metric tensor G

    :param U: 3-vectors assume numpy arrays
    :param V: 3-vectors assume numpy arrays
    :param G: metric tensor for U & V defined space assume numpy array
    :returns:
        cos(phi) & sin(phi)
    """
    u = U/np.sqrt(np.inner(U,np.inner(G,U)))
    v = V/np.sqrt(np.inner(V,np.inner(G,V)))
    cosP = np.inner(u,np.inner(G,v))
    sinP = np.sqrt(max(0.0,1.0-cosP**2))
    return cosP,sinP
    
def criticalEllipse(prob):
