numpy.testing.assert_array_almost_equal

Here are examples of the Python API numpy.testing.assert_array_almost_equal taken from open source projects.

174 Examples
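
Before the project examples, here is a minimal, self-contained sketch of the API itself (not taken from any of the projects below): assert_array_almost_equal compares two arrays element-wise and raises an AssertionError when any element differs beyond the requested precision; the documented criterion is abs(desired - actual) < 1.5 * 10**(-decimal), with decimal defaulting to 6.

import numpy as np
from numpy.testing import assert_array_almost_equal

actual = np.array([1.0, 2.33333, 3.141590])
desired = np.array([1.0, 2.33339, 3.141593])

# passes: every element agrees to within 1.5e-4
assert_array_almost_equal(actual, desired, decimal=4)

# would raise AssertionError: element 1 differs by 6e-5 > 1.5e-5
# assert_array_almost_equal(actual, desired, decimal=5)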

Example 151

Project: FaST-LMM Source File: lmm.py
    def nLLeval_test(self, y_test, beta, h2=0.0, logdelta=None, delta=None, sigma2=1.0, Kstar_star=None, robust=False):
        """
        compute out-of-sample log-likelihood

        robust: boolean
                indicates whether eigenvalues are truncated at 1E-9 or 1E-4. The former (the default) was used in
                FastLMMC, but may lead to numerically unstable solutions.
        """
        assert y_test.ndim == 1, "y_test should have 1 dimension"
        mu = self.predictMean(beta, h2=h2, logdelta=logdelta, delta=delta)
        res = y_test - mu

        sigma = self.predictVariance(h2=h2, logdelta=logdelta, delta=delta, sigma2=sigma2, Kstar_star=Kstar_star)

        #TODO: benchmark, record speed difference
        """
        # efficient computation of: (y - mu)^T sigma2^{-1} (y - mu)
        # Solve the linear system x = (L L^T)^-1 res

        try:
            L = SP.linalg.cho_factor(sigma)
            res_sig = SP.linalg.cho_solve(L, res)
            logdetK = NP.linalg.slogdet(sigma)[1]

        except Exception as detail:
            print("Cholesky failed, using eigen-value decomposition!")
        """

        [S_,U_] = LA.eigh(sigma)

        if robust:
            S_nonz=(S_>1E-4)
        else:
            S_nonz=(S_>1E-9)
        assert sum(S_nonz) > 0, "Some eigenvalues should be nonzero"
        S = S_[S_nonz]
        U = U_[:, S_nonz]
        Sdi = 1 / S

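        # efficient computation of (sigma^{-1} res)^T using the truncated
        # eigendecomposition sigma = U * diag(S) * U^T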
        res_sig = res.T.dot(Sdi * U).dot(U.T)
        logdetK = SP.log(S).sum()

        # some sanity checks
        if False:
            res_sig3 = SP.linalg.pinv(sigma).dot(res)
            NP.testing.assert_array_almost_equal(res_sig, res_sig3, decimal=2)

        # see Carl Rasmussen's book on GPs, equation 5.10, or 
        term1 = -0.5 * logdetK
        term2 = -0.5 * SP.dot(res_sig.reshape(-1).T, res.reshape(-1)) #Change the inputs to the functions so that these are vectors, not 1xn,nx1
        term3 = -0.5 * len(res) * SP.log(2 * SP.pi)

        if term2 < -10000:
            logging.warning("looks like nLLeval_test is running into numerical difficulties")

            SC = S.copy()
            SC.sort()

            logging.warning(["delta:", delta, "log det", logdetK, "term 2", term2, "term 3:", term3 ])
            logging.warning(["largest eigv:", SC[-1], "second largest eigv:", SC[-2], "smallest eigv:", SC[0] ])
            logging.warning(["ratio 1large/2large:", SC[-1]/SC[-2], "ratio lrg/small:", SC[-1]/SC[0] ])
        
        neg_log_likelihood = -(term1 + term2 + term3)

        return neg_log_likelihood

Example 152

Project: neural-network-animation Source File: test_triangulation.py
def test_triinterp():
    # Test points within triangles of masked triangulation.
    x, y = np.meshgrid(np.arange(4), np.arange(4))
    x = x.ravel()
    y = y.ravel()
    z = 1.23*x - 4.79*y
    triangles = [[0, 1, 4], [1, 5, 4], [1, 2, 5], [2, 6, 5], [2, 3, 6],
                 [3, 7, 6], [4, 5, 8], [5, 9, 8], [5, 6, 9], [6, 10, 9],
                 [6, 7, 10], [7, 11, 10], [8, 9, 12], [9, 13, 12], [9, 10, 13],
                 [10, 14, 13], [10, 11, 14], [11, 15, 14]]
    mask = np.zeros(len(triangles))
    mask[8:10] = 1
    triang = mtri.Triangulation(x, y, triangles, mask)
    linear_interp = mtri.LinearTriInterpolator(triang, z)
    cubic_min_E = mtri.CubicTriInterpolator(triang, z)
    cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')

    xs = np.linspace(0.25, 2.75, 6)
    ys = [0.25, 0.75, 2.25, 2.75]
    xs, ys = np.meshgrid(xs, ys)  # Testing arrays with array.ndim = 2
    for interp in (linear_interp, cubic_min_E, cubic_geom):
        zs = interp(xs, ys)
        assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))

    # Test points outside triangulation.
    xs = [-0.25, 1.25, 1.75, 3.25]
    ys = xs
    xs, ys = np.meshgrid(xs, ys)
    for interp in (linear_interp, cubic_min_E, cubic_geom):
        zs = interp(xs, ys)
        assert_array_equal(zs.mask, [[True]*4]*4)

    # Test mixed configuration (outside / inside).
    xs = np.linspace(0.25, 1.75, 6)
    ys = [0.25, 0.75, 1.25, 1.75]
    xs, ys = np.meshgrid(xs, ys)
    for interp in (linear_interp, cubic_min_E, cubic_geom):
        zs = interp(xs, ys)
        matest.assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
        mask = (xs >= 1) * (xs <= 2) * (ys >= 1) * (ys <= 2)
        assert_array_equal(zs.mask, mask)

    # 2nd order patch test: on a grid with an 'arbitrary shaped' triangle, the
    # patch test shall be exact for quadratic functions with the cubic
    # interpolator if *kind* = 'user'
    (a, b, c) = (1.23, -4.79, 0.6)

    def quad(x, y):
        return a*(x-0.5)**2 + b*(y-0.5)**2 + c*x*y

    def gradient_quad(x, y):
        return (2*a*(x-0.5) + c*y, 2*b*(y-0.5) + c*x)

    x = np.array([0.2, 0.33367, 0.669, 0., 1., 1., 0.])
    y = np.array([0.3, 0.80755, 0.4335, 0., 0., 1., 1.])
    triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
                          [1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
    triang = mtri.Triangulation(x, y, triangles)
    z = quad(x, y)
    dz = gradient_quad(x, y)
    # test points for 2nd order patch test
    xs = np.linspace(0., 1., 5)
    ys = np.linspace(0., 1., 5)
    xs, ys = np.meshgrid(xs, ys)
    cubic_user = mtri.CubicTriInterpolator(triang, z, kind='user', dz=dz)
    interp_zs = cubic_user(xs, ys)
    assert_array_almost_equal(interp_zs, quad(xs, ys))
    (interp_dzsdx, interp_dzsdy) = cubic_user.gradient(x, y)
    (dzsdx, dzsdy) = gradient_quad(x, y)
    assert_array_almost_equal(interp_dzsdx, dzsdx)
    assert_array_almost_equal(interp_dzsdy, dzsdy)

    # Cubic improvement: cubic interpolation shall perform better than linear
    # on a sufficiently dense mesh for a quadratic function.
    n = 11
    x, y = np.meshgrid(np.linspace(0., 1., n+1), np.linspace(0., 1., n+1))
    x = x.ravel()
    y = y.ravel()
    z = quad(x, y)
    triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1))
    xs, ys = np.meshgrid(np.linspace(0.1, 0.9, 5), np.linspace(0.1, 0.9, 5))
    xs = xs.ravel()
    ys = ys.ravel()
    linear_interp = mtri.LinearTriInterpolator(triang, z)
    cubic_min_E = mtri.CubicTriInterpolator(triang, z)
    cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
    zs = quad(xs, ys)
    diff_lin = np.abs(linear_interp(xs, ys) - zs)
    for interp in (cubic_min_E, cubic_geom):
        diff_cubic = np.abs(interp(xs, ys) - zs)
        assert np.max(diff_lin) >= 10. * np.max(diff_cubic)
        assert (np.dot(diff_lin, diff_lin) >=
                100. * np.dot(diff_cubic, diff_cubic))

Example 153

Project: neural-network-animation Source File: test_triangulation.py
def test_triinterpcubic_C1_continuity():
    # Below are the 4 tests which demonstrate C1 continuity of the
    # TriCubicInterpolator (testing the cubic shape functions on an arbitrary
    # triangle):
    #
    # 1) Testing continuity of function & derivatives at corner for all 9
    #    shape functions. Testing also function values at same location.
    # 2) Testing C1 continuity along each edge (as gradient is polynomial of
    #    2nd order, it is sufficient to test at the middle).
    # 3) Testing C1 continuity at triangle barycenter (where the 3 subtriangles
    #    meet)
    # 4) Testing C1 continuity at median 1/3 points (midside between 2
    #    subtriangles)

    # Utility test function check_continuity
    def check_continuity(interpolator, loc, values=None):
        """
        Checks the continuity of interpolator (and its derivatives) near
        location loc. Can check the value at loc itself if *values* is
        provided.

        *interpolator* TriInterpolator
        *loc* location to test (x0, y0)
        *values* (optional) array [z0, dzx0, dzy0] to check the value at *loc*
        """
        n_star = 24       # Number of continuity check points around loc
        epsilon = 1.e-10  # Radius of the circle of check points around loc
        k = 100.          # Continuity coefficient
        (loc_x, loc_y) = loc
        star_x = loc_x + epsilon*np.cos(np.linspace(0., 2*np.pi, n_star))
        star_y = loc_y + epsilon*np.sin(np.linspace(0., 2*np.pi, n_star))
        z = interpolator([loc_x], [loc_y])[0]
        (dzx, dzy) = interpolator.gradient([loc_x], [loc_y])
        if values is not None:
            assert_array_almost_equal(z, values[0])
            assert_array_almost_equal(dzx[0], values[1])
            assert_array_almost_equal(dzy[0], values[2])
        diff_z = interpolator(star_x, star_y) - z
        (tab_dzx, tab_dzy) = interpolator.gradient(star_x, star_y)
        diff_dzx = tab_dzx - dzx
        diff_dzy = tab_dzy - dzy
        assert_array_less(diff_z, epsilon*k)
        assert_array_less(diff_dzx, epsilon*k)
        assert_array_less(diff_dzy, epsilon*k)

    # Drawing arbitrary triangle (a, b, c) inside a unit square.
    (ax, ay) = (0.2, 0.3)
    (bx, by) = (0.33367, 0.80755)
    (cx, cy) = (0.669, 0.4335)
    x = np.array([ax, bx, cx, 0., 1., 1., 0.])
    y = np.array([ay, by, cy, 0., 0., 1., 1.])
    triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
                          [1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
    triang = mtri.Triangulation(x, y, triangles)

    for idof in range(9):
        z = np.zeros(7, dtype=np.float64)
        dzx = np.zeros(7, dtype=np.float64)
        dzy = np.zeros(7, dtype=np.float64)
        values = np.zeros([3, 3], dtype=np.float64)
        case = idof//3
        values[case, idof % 3] = 1.0
        if case == 0:
            z[idof] = 1.0
        elif case == 1:
            dzx[idof % 3] = 1.0
        elif case == 2:
            dzy[idof % 3] = 1.0
        interp = mtri.CubicTriInterpolator(triang, z, kind='user',
                                           dz=(dzx, dzy))
        # Test 1) Checking values and continuity at nodes
        check_continuity(interp, (ax, ay), values[:, 0])
        check_continuity(interp, (bx, by), values[:, 1])
        check_continuity(interp, (cx, cy), values[:, 2])
        # Test 2) Checking continuity at midside nodes
        check_continuity(interp, ((ax+bx)*0.5, (ay+by)*0.5))
        check_continuity(interp, ((ax+cx)*0.5, (ay+cy)*0.5))
        check_continuity(interp, ((cx+bx)*0.5, (cy+by)*0.5))
        # Test 3) Checking continuity at barycenter
        check_continuity(interp, ((ax+bx+cx)/3., (ay+by+cy)/3.))
        # Test 4) Checking continuity at median 1/3-point
        check_continuity(interp, ((4.*ax+bx+cx)/6., (4.*ay+by+cy)/6.))
        check_continuity(interp, ((ax+4.*bx+cx)/6., (ay+4.*by+cy)/6.))
        check_continuity(interp, ((ax+bx+4.*cx)/6., (ay+by+4.*cy)/6.))

Example 154

Project: neural-network-animation Source File: test_triangulation.py
def test_triinterpcubic_cg_solver():
    # Now 3 basic tests of the Sparse CG solver, used for
    # TriCubicInterpolator with *kind* = 'min_E'
    # 1) A commonly used test involves a 2d Poisson matrix.
    def poisson_sparse_matrix(n, m):
        """
        Sparse Poisson matrix.

        Returns the sparse matrix in coo format resulting from the
        discretisation of the 2-dimensional Poisson equation according to a
        finite difference numerical scheme on a uniform (n, m) grid.
        Size of the matrix: (n*m, n*m)
        """
        l = m*n
        rows = np.concatenate([
            np.arange(l, dtype=np.int32),
            np.arange(l-1, dtype=np.int32), np.arange(1, l, dtype=np.int32),
            np.arange(l-n, dtype=np.int32), np.arange(n, l, dtype=np.int32)])
        cols = np.concatenate([
            np.arange(l, dtype=np.int32),
            np.arange(1, l, dtype=np.int32), np.arange(l-1, dtype=np.int32),
            np.arange(n, l, dtype=np.int32), np.arange(l-n, dtype=np.int32)])
        vals = np.concatenate([
            4*np.ones(l, dtype=np.float64),
            -np.ones(l-1, dtype=np.float64), -np.ones(l-1, dtype=np.float64),
            -np.ones(l-n, dtype=np.float64), -np.ones(l-n, dtype=np.float64)])
        # In fact +1 and -1 diags have some zeros
        vals[l:2*l-1][m-1::m] = 0.
        vals[2*l-1:3*l-2][m-1::m] = 0.
        return vals, rows, cols, (n*m, n*m)

    # Instantiating a sparse Poisson matrix of size 48 x 48:
    (n, m) = (12, 4)
    mat = mtri.triinterpolate._Sparse_Matrix_coo(*poisson_sparse_matrix(n, m))
    mat.compress_csc()
    mat_dense = mat.to_dense()
    # Testing a sparse solve for all 48 basis vectors
    for itest in range(n*m):
        b = np.zeros(n*m, dtype=np.float64)
        b[itest] = 1.
        x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.zeros(n*m),
                                       tol=1.e-10)
        assert_array_almost_equal(np.dot(mat_dense, x), b)

    # 2) Same matrix after inserting 2 rows/cols with null diagonal terms
    # (but still linked to the rest of the matrix by extra-diagonal terms)
    (i_zero, j_zero) = (12, 49)
    vals, rows, cols, _ = poisson_sparse_matrix(n, m)
    rows = rows + 1*(rows >= i_zero) + 1*(rows >= j_zero)
    cols = cols + 1*(cols >= i_zero) + 1*(cols >= j_zero)
    # adding extra-diag terms
    rows = np.concatenate([rows, [i_zero, i_zero-1, j_zero, j_zero-1]])
    cols = np.concatenate([cols, [i_zero-1, i_zero, j_zero-1, j_zero]])
    vals = np.concatenate([vals, [1., 1., 1., 1.]])
    mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols,
                                                 (n*m + 2, n*m + 2))
    mat.compress_csc()
    mat_dense = mat.to_dense()
    # Testing a sparse solve for all 50 basis vectors
    for itest in range(n*m + 2):
        b = np.zeros(n*m + 2, dtype=np.float64)
        b[itest] = 1.
        x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.ones(n*m + 2),
                                       tol=1.e-10)
        assert_array_almost_equal(np.dot(mat_dense, x), b)

    # 3) Now a simple test that duplicate entries (i.e. with same row,
    # same col) are summed when compressed.
    vals = np.ones(17, dtype=np.float64)
    rows = np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1],
                    dtype=np.int32)
    cols = np.array([0, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
                    dtype=np.int32)
    dim = (3, 3)
    mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols, dim)
    mat.compress_csc()
    mat_dense = mat.to_dense()
    assert_array_almost_equal(mat_dense, np.array([
        [1., 2., 0.], [2., 1., 5.], [0., 5., 1.]], dtype=np.float64))

Example 155

Project: mne-python Source File: test_spectral.py
@slow_test
def test_spectral_connectivity():
    """Test frequency-domain connectivity methods"""
    # Use a case known to have no spurious correlations (it would be bad if
    # nosetests could randomly fail):
    np.random.seed(0)

    sfreq = 50.
    n_signals = 3
    n_epochs = 8
    n_times = 256

    tmin = 0.
    tmax = (n_times - 1) / sfreq
    data = np.random.randn(n_epochs, n_signals, n_times)
    times_data = np.linspace(tmin, tmax, n_times)
    # simulate connectivity from 5Hz..15Hz
    fstart, fend = 5.0, 15.0
    for i in range(n_epochs):
        data[i, 1, :] = filter_data(data[i, 0, :], sfreq, fstart, fend,
                                    **filt_kwargs)
        # add some noise, so the spectrum is not exactly zero
        data[i, 1, :] += 1e-2 * np.random.randn(n_times)

    # First we test some invalid parameters:
    assert_raises(ValueError, spectral_connectivity, data, method='notamethod')
    assert_raises(ValueError, spectral_connectivity, data,
                  mode='notamode')

    # test invalid fmin fmax settings
    assert_raises(ValueError, spectral_connectivity, data, fmin=10,
                  fmax=10 + 0.5 * (sfreq / float(n_times)))
    assert_raises(ValueError, spectral_connectivity, data, fmin=10, fmax=5)
    assert_raises(ValueError, spectral_connectivity, data, fmin=(0, 11),
                  fmax=(5, 10))
    assert_raises(ValueError, spectral_connectivity, data, fmin=(11,),
                  fmax=(12, 15))

    methods = ['coh', 'cohy', 'imcoh', ['plv', 'ppc', 'pli', 'pli2_unbiased',
               'wpli', 'wpli2_debiased', 'coh']]

    modes = ['multitaper', 'fourier', 'cwt_morlet']

    # define some frequencies for cwt
    cwt_frequencies = np.arange(3, 24.5, 1)

    for mode in modes:
        for method in methods:
            if method == 'coh' and mode == 'multitaper':
                # only check adaptive estimation for coh to reduce test time
                check_adaptive = [False, True]
            else:
                check_adaptive = [False]

            if method == 'coh' and mode == 'cwt_morlet':
                # so we also test using an array for num cycles
                cwt_n_cycles = 7. * np.ones(len(cwt_frequencies))
            else:
                cwt_n_cycles = 7.

            for adaptive in check_adaptive:

                if adaptive:
                    mt_bandwidth = 1.
                else:
                    mt_bandwidth = None

                con, freqs, times, n, _ = spectral_connectivity(
                    data, method=method, mode=mode, indices=None, sfreq=sfreq,
                    mt_adaptive=adaptive, mt_low_bias=True,
                    mt_bandwidth=mt_bandwidth, cwt_frequencies=cwt_frequencies,
                    cwt_n_cycles=cwt_n_cycles)

                assert_true(n == n_epochs)
                assert_array_almost_equal(times_data, times)

                if mode == 'multitaper':
                    upper_t = 0.95
                    lower_t = 0.5
                elif mode == 'fourier':
                    # other estimates have higher variance
                    upper_t = 0.8
                    lower_t = 0.75
                else:  # cwt_morlet
                    upper_t = 0.64
                    lower_t = 0.63

                # test the simulated signal
                if method == 'coh':
                    idx = np.searchsorted(freqs, (fstart + trans_bandwidth,
                                                  fend - trans_bandwidth))
                    # we see something for zero-lag
                    assert_true(np.all(con[1, 0, idx[0]:idx[1]] > upper_t),
                                con[1, 0, idx[0]:idx[1]].min())

                    if mode != 'cwt_morlet':
                        idx = np.searchsorted(freqs,
                                              (fstart - trans_bandwidth * 2,
                                               fend + trans_bandwidth * 2))
                        assert_true(np.all(con[1, 0, :idx[0]] < lower_t))
                        assert_true(np.all(con[1, 0, idx[1]:] < lower_t),
                                    con[1, 0, idx[1]:].max())
                elif method == 'cohy':
                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
                    # imaginary coh will be zero
                    check = np.imag(con[1, 0, idx[0]:idx[1]])
                    assert_true(np.all(check < lower_t), check.max())
                    # we see something for zero-lag
                    assert_true(np.all(np.abs(con[1, 0, idx[0]:idx[1]]) >
                                upper_t))

                    idx = np.searchsorted(freqs, (fstart - trans_bandwidth * 2,
                                                  fend + trans_bandwidth * 2))
                    if mode != 'cwt_morlet':
                        assert_true(np.all(np.abs(con[1, 0, :idx[0]]) <
                                    lower_t))
                        assert_true(np.all(np.abs(con[1, 0, idx[1]:]) <
                                    lower_t))
                elif method == 'imcoh':
                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
                    # imaginary coh will be zero
                    assert_true(np.all(con[1, 0, idx[0]:idx[1]] < lower_t))
                    idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
                    assert_true(np.all(con[1, 0, :idx[0]] < lower_t))
                    assert_true(np.all(con[1, 0, idx[1]:] < lower_t),
                                con[1, 0, idx[1]:].max())

                # compute same connections using indices and 2 jobs
                indices = np.tril_indices(n_signals, -1)

                if not isinstance(method, list):
                    test_methods = (method, _CohEst)
                else:
                    test_methods = method

                stc_data = _stc_gen(data, sfreq, tmin)
                con2, freqs2, times2, n2, _ = spectral_connectivity(
                    stc_data, method=test_methods, mode=mode, indices=indices,
                    sfreq=sfreq, mt_adaptive=adaptive, mt_low_bias=True,
                    mt_bandwidth=mt_bandwidth, tmin=tmin, tmax=tmax,
                    cwt_frequencies=cwt_frequencies,
                    cwt_n_cycles=cwt_n_cycles, n_jobs=2)

                assert_true(isinstance(con2, list))
                assert_true(len(con2) == len(test_methods))

                if method == 'coh':
                    assert_array_almost_equal(con2[0], con2[1])

                if not isinstance(method, list):
                    con2 = con2[0]  # only keep the first method

                    # we get the same result for the probed connections
                    assert_array_almost_equal(freqs, freqs2)
                    assert_array_almost_equal(con[indices], con2)
                    assert_true(n == n2)
                    assert_array_almost_equal(times_data, times2)
                else:
                    # we get the same result for the probed connections
                    assert_true(len(con) == len(con2))
                    for c, c2 in zip(con, con2):
                        assert_array_almost_equal(freqs, freqs2)
                        assert_array_almost_equal(c[indices], c2)
                        assert_true(n == n2)
                        assert_array_almost_equal(times_data, times2)

                # compute same connections for two bands, fskip=1, and f. avg.
                fmin = (5., 15.)
                fmax = (15., 30.)
                con3, freqs3, times3, n3, _ = spectral_connectivity(
                    data, method=method, mode=mode, indices=indices,
                    sfreq=sfreq, fmin=fmin, fmax=fmax, fskip=1, faverage=True,
                    mt_adaptive=adaptive, mt_low_bias=True,
                    mt_bandwidth=mt_bandwidth, cwt_frequencies=cwt_frequencies,
                    cwt_n_cycles=cwt_n_cycles)

                assert_true(isinstance(freqs3, list))
                assert_true(len(freqs3) == len(fmin))
                for i in range(len(freqs3)):
                    assert_true(np.all((freqs3[i] >= fmin[i]) &
                                       (freqs3[i] <= fmax[i])))

                # average con2 "manually" and we get the same result
                if not isinstance(method, list):
                    for i in range(len(freqs3)):
                        freq_idx = np.searchsorted(freqs2, freqs3[i])
                        con2_avg = np.mean(con2[:, freq_idx], axis=1)
                        assert_array_almost_equal(con2_avg, con3[:, i])
                else:
                    for j in range(len(con2)):
                        for i in range(len(freqs3)):
                            freq_idx = np.searchsorted(freqs2, freqs3[i])
                            con2_avg = np.mean(con2[j][:, freq_idx], axis=1)
                            assert_array_almost_equal(con2_avg, con3[j][:, i])

Example 156

Project: mne-python Source File: test_ica.py
@slow_test
@requires_sklearn
def test_ica_additional():
    """Test additional ICA functionality."""
    import matplotlib.pyplot as plt
    tempdir = _TempDir()
    stop2 = 500
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    # XXX This breaks the tests :(
    # raw.info['bads'] = [raw.ch_names[1]]
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # test if n_components=None works
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=None,
                  max_pca_components=None,
                  n_pca_components=None, random_state=0)
        ica.fit(epochs, picks=picks, decim=3)
    # for testing eog functionality
    picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=True, exclude='bads')
    epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
                        baseline=(None, 0), preload=True)

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_true(ica.info is None)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5])
    assert_true(isinstance(ica.info, Info))
    assert_true(ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_raises(RuntimeError, ica.save, '')
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
    assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert_true(0 in ica.labels_["blinks"])
    template = _get_ica_map(ica)[0]
    corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])
    plt.close('all')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
        ica.save(ica_badname)
        read_ica(ica_badname)
    assert_naming(w, 'test_ica.py', 2)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_equal(raw_._data.shape[1], n_samples)

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4,
              n_pca_components=4)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_true(ica.n_components_ == 4)
    ica_var = _ica_explained_variance(ica, raw, normalize=True)
    assert_true(np.all(ica_var[:-1] >= ica_var[1:]))

    # test ica sorting
    ica.exclude = [0]
    ica.labels_ = dict(blink=[0], think=[1])
    ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
    assert_equal(ica_sorted.exclude, [3])
    assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))

    # epochs extraction from raw fit
    assert_raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
                  n_pca_components=4)
        with warnings.catch_warnings(record=True):  # ICA does not converge
            ica.fit(raw, picks=picks, start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert_true(ica.mixing_matrix_.shape == (2, 2))
        assert_true(ica.unmixing_matrix_.shape == (2, 2))
        assert_true(ica.pca_components_.shape == (4, len(picks)))
        assert_true(sources.shape[1] == ica.n_components_)

        for exclude in [[], [0]]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert_true(ica.exclude == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
                                        ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        ica_raw.filter(4, 20, l_trans_bandwidth='auto',
                       h_trans_bandwidth='auto', filter_length='auto',
                       phase='zero', fir_window='hamming')
        assert_equal(ica_raw.info['lowpass'], 20.)
        assert_equal(ica_raw.info['highpass'], 4.)
        assert_true((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        ica_raw.notch_filter([10], filter_length='auto', trans_bandwidth=10,
                             phase='zero', fir_window='hamming')
        assert_true((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert_true(ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ _pre_whitener')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                     'pca_mean_', 'pca_explained_variance_',
                     '_pre_whitener']:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert_true(ica.ch_names == ica_read.ch_names)
        assert_true(isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw, target='EOG 061', score_func=func,
                                   start=0, stop=10)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, score_func=stats.skew)
    # check exception handling
    assert_raises(ValueError, ica.score_sources, raw,
                  target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
                             eog_ch=ch_name, skew_criterion=idx,
                             var_criterion=idx, kurt_criterion=idx)
    with warnings.catch_warnings(record=True):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_eog(raw)
        assert_equal(len(scores), ica.n_components_)

        ica.labels_ = None
        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
                      method='ctps')
        assert_raises(ValueError, ica.find_bads_ecg, raw,
                      method='crazy-coupling')

        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
        idx, scores = ica.find_bads_eog(raw)
        assert_true(isinstance(scores, list))
        assert_equal(len(scores[0]), ica.n_components_)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog, target='EOG 061',
                                   score_func=func)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, epochs,
                  target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw, target='MEG 1531',
                                   score_func='pearsonr')

    with warnings.catch_warnings(record=True):  # filter attenuation warning
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])

    assert_true(ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw, target='EOG 061',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])

    assert_true(eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_true(len(ica_raw._filenames) == 0)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert_true(ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
    assert_true(ica_epochs._raw is None)
    assert_true(ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert_true(ncomps_ == expected)

Example 157

Project: mne-python Source File: test_cluster_level.py
def test_cluster_permutation_with_connectivity():
    """Test cluster level permutations with connectivity matrix
    """
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        return
    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
        _get_conditions()

    n_pts = condition1_1d.shape[1]
    # we don't care about p-values in any of these, so do fewer permutations
    args = dict(seed=None, max_step=1, exclude=None,
                step_down_p=0, t_power=1, threshold=1.67,
                check_disjoint=False, n_permutations=50)

    did_warn = False
    for X1d, X2d, func, spatio_temporal_func in \
            [(condition1_1d, condition1_2d,
              permutation_cluster_1samp_test,
              spatio_temporal_cluster_1samp_test),
             ([condition1_1d, condition2_1d],
              [condition1_2d, condition2_2d],
              permutation_cluster_test,
              spatio_temporal_cluster_test)]:
        out = func(X1d, **args)
        connectivity = grid_to_graph(1, n_pts)
        out_connectivity = func(X1d, connectivity=connectivity, **args)
        assert_array_equal(out[0], out_connectivity[0])
        for a, b in zip(out_connectivity[1], out[1]):
            assert_array_equal(out[0][a], out[0][b])
            assert_true(np.all(a[b]))

        # test spatio-temporal w/o time connectivity (repeat spatial pattern)
        connectivity_2 = sparse.coo_matrix(
            linalg.block_diag(connectivity.asfptype().todense(),
                              connectivity.asfptype().todense()))

        if isinstance(X1d, list):
            X1d_2 = [np.concatenate((x, x), axis=1) for x in X1d]
        else:
            X1d_2 = np.concatenate((X1d, X1d), axis=1)

        out_connectivity_2 = func(X1d_2, connectivity=connectivity_2, **args)
        # make sure we were operating on the same values
        split = len(out[0])
        assert_array_equal(out[0], out_connectivity_2[0][:split])
        assert_array_equal(out[0], out_connectivity_2[0][split:])

        # make sure we really got 2x the number of original clusters
        n_clust_orig = len(out[1])
        assert_true(len(out_connectivity_2[1]) == 2 * n_clust_orig)

        # Make sure that we got the old ones back
        data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
        data_2 = set([np.sum(out_connectivity_2[0][a]) for a in
                     out_connectivity_2[1][:]])
        assert_true(len(data_1.intersection(data_2)) == len(data_1))

        # now use the other algorithm
        if isinstance(X1d, list):
            X1d_3 = [np.reshape(x, (-1, 2, n_space)) for x in X1d_2]
        else:
            X1d_3 = np.reshape(X1d_2, (-1, 2, n_space))

        out_connectivity_3 = spatio_temporal_func(X1d_3, n_permutations=50,
                                                  connectivity=connectivity,
                                                  max_step=0, threshold=1.67,
                                                  check_disjoint=True)
        # make sure we were operating on the same values
        split = len(out[0])
        assert_array_equal(out[0], out_connectivity_3[0][0])
        assert_array_equal(out[0], out_connectivity_3[0][1])

        # make sure we really got 2x the number of original clusters
        assert_true(len(out_connectivity_3[1]) == 2 * n_clust_orig)

        # Make sure that we got the old ones back
        data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
        data_2 = set([np.sum(out_connectivity_3[0][a[0], a[1]]) for a in
                     out_connectivity_3[1]])
        assert_true(len(data_1.intersection(data_2)) == len(data_1))

        # test new versus old method
        out_connectivity_4 = spatio_temporal_func(X1d_3, n_permutations=50,
                                                  connectivity=connectivity,
                                                  max_step=2, threshold=1.67)
        out_connectivity_5 = spatio_temporal_func(X1d_3, n_permutations=50,
                                                  connectivity=connectivity,
                                                  max_step=1, threshold=1.67)

        # clusters could be in a different order
        sums_4 = [np.sum(out_connectivity_4[0][a])
                  for a in out_connectivity_4[1]]
        sums_5 = [np.sum(out_connectivity_5[0][a])
                  for a in out_connectivity_5[1]]
        sums_4 = np.sort(sums_4)
        sums_5 = np.sort(sums_5)
        assert_array_almost_equal(sums_4, sums_5)

        if not _force_serial:
            assert_raises(ValueError, spatio_temporal_func, X1d_3,
                          n_permutations=1, connectivity=connectivity,
                          max_step=1, threshold=1.67, n_jobs=-1000)

        # not enough TFCE params
        assert_raises(KeyError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, threshold=dict(me='hello'))

        # too extreme a start threshold
        with warnings.catch_warnings(record=True) as w:
            spatio_temporal_func(X1d_3, connectivity=connectivity,
                                 threshold=dict(start=10, step=1))
        if not did_warn:
            assert_true(len(w) == 1)
            did_warn = True

        # too extreme a start threshold
        assert_raises(ValueError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, tail=-1,
                      threshold=dict(start=1, step=-1))
        assert_raises(ValueError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, tail=-1,
                      threshold=dict(start=-1, step=1))

        # wrong type for threshold
        assert_raises(TypeError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, threshold=[])

        # wrong value for tail
        assert_raises(ValueError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, tail=2)

        # make sure it actually found a significant point
        out_connectivity_6 = spatio_temporal_func(X1d_3, n_permutations=50,
                                                  connectivity=connectivity,
                                                  max_step=1,
                                                  threshold=dict(start=1,
                                                                 step=1))
        assert_true(np.min(out_connectivity_6[2]) < 0.05)

Example 158

Project: mne-python Source File: test_event.py
def test_find_events():
    """Test find events in raw file."""
    events = read_events(fname)
    raw = read_raw_fif(raw_fname, preload=True)
    # let's test the defaulting behavior while we're at it
    extra_ends = ['', '_1']
    orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
    os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
    if 'MNE_STIM_CHANNEL_1' in os.environ:
        del os.environ['MNE_STIM_CHANNEL_1']
    events2 = find_events(raw)
    assert_array_almost_equal(events, events2)
    # now test with mask
    events11 = find_events(raw, mask=3, mask_type='not_and')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        events22 = read_events(fname, mask=3)
        assert_true(sum('events masked' in str(ww.message) for ww in w) == 1)
    assert_array_equal(events11, events22)

    # Reset some data for ease of comparison
    raw._first_samps[0] = 0
    raw.info['sfreq'] = 1000
    raw._update_times()

    stim_channel = 'STI 014'
    stim_channel_idx = pick_channels(raw.info['ch_names'],
                                     include=[stim_channel])

    # test digital masking
    raw._data[stim_channel_idx, :5] = np.arange(5)
    raw._data[stim_channel_idx, 5:] = 0
    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'

    assert_raises(TypeError, find_events, raw, mask="0")
    assert_raises(ValueError, find_events, raw, mask=0, mask_type='blah')
    # testing mask_type. default = 'not_and'
    assert_array_equal(find_events(raw, shortest_event=1, mask=1,
                                   mask_type='not_and'),
                       [[2, 0, 2], [4, 2, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=2,
                                   mask_type='not_and'),
                       [[1, 0, 1], [3, 0, 1], [4, 1, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=3,
                                   mask_type='not_and'),
                       [[4, 0, 4]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=4,
                                   mask_type='not_and'),
                       [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    # testing with mask_type = 'and'
    assert_array_equal(find_events(raw, shortest_event=1, mask=1,
                       mask_type='and'),
                       [[1, 0, 1], [3, 0, 1]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=2,
                       mask_type='and'),
                       [[2, 0, 2]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=3,
                       mask_type='and'),
                       [[1, 0, 1], [2, 1, 2], [3, 2, 3]])
    assert_array_equal(find_events(raw, shortest_event=1, mask=4,
                       mask_type='and'),
                       [[4, 0, 4]])

    # test empty events channel
    raw._data[stim_channel_idx, :] = 0
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, :4] = 1
    assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))

    raw._data[stim_channel_idx, -1:] = 9
    assert_array_equal(find_events(raw), [[14399, 0, 9]])

    # Test that we can handle consecutive events with no gap
    raw._data[stim_channel_idx, 10:20] = 5
    raw._data[stim_channel_idx, 20:30] = 6
    raw._data[stim_channel_idx, 30:32] = 5
    raw._data[stim_channel_idx, 40] = 6

    assert_array_equal(find_events(raw, consecutive=False),
                       [[10, 0, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=True),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [30, 6, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False),
                       [[31, 0, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, output='offset', consecutive=True),
                       [[19, 6, 5],
                        [29, 5, 6],
                        [31, 0, 5],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_raises(ValueError, find_events, raw, output='step',
                  consecutive=True)
    assert_array_equal(find_events(raw, output='step', consecutive=True,
                                   shortest_event=1),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [30, 6, 5],
                        [32, 5, 0],
                        [40, 0, 6],
                        [41, 6, 0],
                        [14399, 0, 9],
                        [14400, 9, 0]])
    assert_array_equal(find_events(raw, output='offset'),
                       [[19, 6, 5],
                        [31, 0, 6],
                        [40, 0, 6],
                        [14399, 0, 9]])
    assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
                       [[10, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
                       [[10, 0, 5],
                        [20, 5, 6],
                        [30, 6, 5]])
    assert_array_equal(find_events(raw, output='offset', consecutive=False,
                                   min_duration=0.002),
                       [[31, 0, 5]])
    assert_array_equal(find_events(raw, output='offset', consecutive=True,
                                   min_duration=0.002),
                       [[19, 6, 5],
                        [29, 5, 6],
                        [31, 0, 5]])
    assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
                       [[10, 0, 5],
                        [20, 5, 6]])

    # test find_stim_steps merge parameter
    raw._data[stim_channel_idx, :] = 0
    raw._data[stim_channel_idx, 0] = 1
    raw._data[stim_channel_idx, 10] = 4
    raw._data[stim_channel_idx, 11:20] = 5
    assert_array_equal(find_stim_steps(raw, pad_start=0, merge=0,
                                       stim_channel=stim_channel),
                       [[0, 0, 1],
                        [1, 1, 0],
                        [10, 0, 4],
                        [11, 4, 5],
                        [20, 5, 0]])
    assert_array_equal(find_stim_steps(raw, merge=-1,
                                       stim_channel=stim_channel),
                       [[1, 1, 0],
                        [10, 0, 5],
                        [20, 5, 0]])
    assert_array_equal(find_stim_steps(raw, merge=1,
                                       stim_channel=stim_channel),
                       [[1, 1, 0],
                        [11, 0, 5],
                        [20, 5, 0]])

    # put back the env vars we trampled on
    for s, o in zip(extra_ends, orig_envs):
        if o is not None:
            os.environ['MNE_STIM_CHANNEL%s' % s] = o

Example 159

Project: mne-python Source File: test_tfr.py
def test_time_frequency():
    """Test the to-be-deprecated time-frequency transform (PSD and ITC)."""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing

    # Setup for reading the raw data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=include, exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax)

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    # Now compute evoked
    evoked = epochs.average()
    power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
                              return_itc=False)
    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                              use_fft=True, return_itc=True, decim=slice(0, 2))
    # Test picks argument and average parameter
    assert_raises(ValueError, tfr_morlet, epochs, freqs=freqs,
                  n_cycles=n_cycles, return_itc=True, average=False)

    power_picks, itc_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=True, picks=picks, average=True)

    epochs_power_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=False, picks=picks, average=False)
    power_picks_avg = epochs_power_picks.average()
    # the actual data arrays here are equivalent, too...
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(power.data, power_picks_avg.data)
    assert_array_almost_equal(itc.data, itc_picks.data)
    assert_array_almost_equal(power.data, power_evoked.data)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test sub

    power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)

    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(power_.data.shape == (len(picks), len(freqs), 2))
    assert_true(power_.data.shape == itc_.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    # grand average
    itc2 = itc.copy()
    itc2.info['bads'] = [itc2.ch_names[0]]  # test channel drop
    gave = grand_average([itc2, itc])
    assert_equal(gave.data.shape, (itc2.data.shape[0] - 1,
                                   itc2.data.shape[1],
                                   itc2.data.shape[2]))
    assert_equal(itc2.ch_names[1:], gave.ch_names)
    assert_equal(gave.nave, 2)
    itc2.drop_channels(itc2.info["bads"])
    assert_array_almost_equal(gave.data, itc2.data)
    itc2.data = np.ones(itc2.data.shape)
    itc.data = np.zeros(itc.data.shape)
    itc2.nave = 2
    itc.nave = 1
    itc.drop_channels([itc.ch_names[0]])
    combined_itc = combine_tfr([itc2, itc])
    assert_array_almost_equal(combined_itc.data,
                              np.ones(combined_itc.data.shape) * 2 / 3)

    # more tests
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
                            return_itc=True)

    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False,
                     return_itc=False).data[0]
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))
    tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2,
                      decim=slice(0, 2), average=False,
                      return_itc=False).data[0]
    assert_true(tfr2.shape == (len(picks), len(freqs), 2))

    single_power = tfr_morlet(epochs, freqs, 2, average=False,
                              return_itc=False).data
    single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2),
                               average=False, return_itc=False).data
    single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3),
                               average=False, return_itc=False).data
    single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4),
                               average=False, return_itc=False).data

    assert_array_almost_equal(np.mean(single_power, axis=0), power.data)
    assert_array_almost_equal(np.mean(single_power2, axis=0),
                              power.data[:, :, :2])
    assert_array_almost_equal(np.mean(single_power3, axis=0),
                              power.data[:, :, 1:3])
    assert_array_almost_equal(np.mean(single_power4, axis=0),
                              power.data[:, :, 2:4])

    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)

    # Test decimation:
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in [2, 3, 8, 9]:
        for use_fft in [True, False]:
            power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,
                                    use_fft=use_fft, return_itc=True,
                                    decim=decim)
            assert_equal(power.data.shape[2],
                         np.ceil(float(len(times)) / decim))
    freqs = list(range(50, 55))
    decim = 2
    _, n_chan, n_time = data.shape
    tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False,
                     return_itc=False).data[0]
    assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))

    # Test cwt modes
    Ws = morlet(512, [10, 20], n_cycles=2)
    assert_raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
    for use_fft in [True, False]:
        for mode in ['same', 'valid', 'full']:
            # XXX JRK: full wavelet decomposition needs to be implemented
            if (not use_fft) and mode == 'full':
                assert_raises(ValueError, cwt, data[0, :, :], Ws,
                              use_fft=use_fft, mode=mode)
                continue
            cwt(data[0, :, :], Ws, use_fft=use_fft, mode=mode)

    # Test decim parameter checks
    assert_raises(TypeError, tfr_morlet, epochs, freqs=freqs,
                  n_cycles=n_cycles, use_fft=True, return_itc=True,
                  decim='decim')
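
The comparisons above lean on the elementwise criterion of assert_array_almost_equal: it raises unless abs(desired - actual) < 1.5 * 10**(-decimal), with decimal=6 by default. A minimal sketch:

import numpy as np
from numpy.testing import assert_array_almost_equal

a = np.array([1.0, 2.0, 3.0])
assert_array_almost_equal(a, a + 1e-7)             # passes: 1e-7 < 1.5e-6
assert_array_almost_equal(a, a + 1e-3, decimal=2)  # passes: 1e-3 < 1.5e-2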

Example 160

Project: elephant Source File: test_icsd.py
    def test_SplineiCSD_00(self):
        '''test using standard SI units'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
        
        #contact point coordinates
        z_j = np.arange(21)*1E-4*pq.m
        
        #source coordinates
        z_i = z_j
        
        #current source density magnitude
        C_i = np.zeros(z_i.size)*pq.A/pq.m**3
        C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
                
        #source radius (delta, step)
        R_i = np.ones(z_i.size)*1E-3*pq.m
        
        #source height (cylinder)
        h_i = np.ones(z_i.size)*1E-4*pq.m
        
        #conductivity, use same conductivity for top layer (z_j < 0)
        sigma = 0.3*pq.S/pq.m
        sigma_top = sigma

        #construct interpolators; the spline method assumes an underlying
        #source pattern generating LFPs that is a cubic spline interpolation
        #between contacts, so we generate CSD data relying on the same
        #assumption
        f_C = interp1d(z_i, C_i, kind='cubic')
        f_R = interp1d(z_i, R_i)
        num_steps = 201
        z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units
        C_i_i = f_C(np.asarray(z_i_i))*C_i.units
        R_i_i = f_R(z_i_i)*R_i.units

        h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min()
        
        #flag for debug plots
        plot = False

        #get LFP and CSD at contacts
        phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
                                          sigma, plot)

        spline_input = {
            'lfp' : phi_j,
            'coord_electrode' : z_j,
            'diam' : R_i*2,
            'sigma' : sigma,
            'sigma_top' : sigma,
            'num_steps' : num_steps,
            'tol' : 1E-12,          # Tolerance in numerical integration
            'f_type' : 'gaussian',
            'f_order' : (3, 1),
        }
        spline_icsd = icsd.SplineiCSD(**spline_input)
        csd = spline_icsd.get_csd()
        
        self.assertEqual(C_i.units, csd.units)
        nt.assert_array_almost_equal(C_i, csd, decimal=3)
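
The assertion pattern above checks units and magnitudes separately: assertEqual guards the units while assert_array_almost_equal compares the values to three decimals (tolerance 1.5e-3). A minimal sketch of the same pattern, assuming the quantities package (pq) that elephant builds on:

import numpy as np
import quantities as pq
from numpy.testing import assert_array_almost_equal

a = np.array([-0.5, 1.0, -0.5]) * pq.A / pq.m**3
b = a + 4e-4 * pq.A / pq.m**3  # offset within the decimal=3 tolerance

assert a.units == b.units
assert_array_almost_equal(np.asarray(a), np.asarray(b), decimal=3)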

Example 161

Project: elephant Source File: test_icsd.py
    def test_SplineiCSD_02(self):
        '''test using non-standard SI units'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
        
        #contact point coordinates
        z_j = np.arange(21)*1E-4*pq.m
        
        #source coordinates
        z_i = z_j
        
        #current source density magnitude
        C_i = np.zeros(z_i.size)*pq.A/pq.m**3
        C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
                
        #source radius (delta, step)
        R_i = np.ones(z_i.size)*1E-3*pq.m
        
        #source height (cylinder)
        h_i = np.ones(z_i.size)*1E-4*pq.m
        
        #conductivity, use same conductivity for top layer (z_j < 0)
        sigma = 0.3*pq.S/pq.m
        sigma_top = sigma

        #construct interpolators; the spline method assumes an underlying
        #source pattern generating LFPs that is a cubic spline interpolation
        #between contacts, so we generate CSD data relying on the same
        #assumption
        f_C = interp1d(z_i, C_i, kind='cubic')
        f_R = interp1d(z_i, R_i)
        num_steps = 201
        z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units
        C_i_i = f_C(np.asarray(z_i_i))*C_i.units
        R_i_i = f_R(z_i_i)*R_i.units

        h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min()
        
        #flag for debug plots
        plot = False

        #get LFP and CSD at contacts
        phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
                                          sigma, plot)

        spline_input = {
            'lfp' : phi_j*1E3*pq.mV/pq.V,
            'coord_electrode' : z_j,
            'diam' : R_i*2,
            'sigma' : sigma,
            'sigma_top' : sigma,
            'num_steps' : num_steps,
            'tol' : 1E-12,          # Tolerance in numerical integration
            'f_type' : 'gaussian',
            'f_order' : (3, 1),
        }
        spline_icsd = icsd.SplineiCSD(**spline_input)
        csd = spline_icsd.get_csd()
        
        self.assertEqual(C_i.units, csd.units)
        nt.assert_array_almost_equal(C_i, csd, decimal=3)

Example 162

Project: elephant Source File: test_icsd.py
    def test_SplineiCSD_03(self):
        '''test using non-standard SI units'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
        
        #contact point coordinates
        z_j = np.arange(21)*1E-4*pq.m
        
        #source coordinates
        z_i = z_j
        
        #current source density magnitude
        C_i = np.zeros(z_i.size)*pq.A/pq.m**3
        C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
                
        #source radius (delta, step)
        R_i = np.ones(z_i.size)*1E-3*pq.m
        
        #source height (cylinder)
        h_i = np.ones(z_i.size)*1E-4*pq.m
        
        #conductivity, use same conductivity for top layer (z_j < 0)
        sigma = 0.3*pq.S/pq.m
        sigma_top = sigma

        #construct interpolators; the spline method assumes an underlying
        #source pattern generating LFPs that is a cubic spline interpolation
        #between contacts, so we generate CSD data relying on the same
        #assumption
        f_C = interp1d(z_i, C_i, kind='cubic')
        f_R = interp1d(z_i, R_i)
        num_steps = 201
        z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units
        C_i_i = f_C(np.asarray(z_i_i))*C_i.units
        R_i_i = f_R(z_i_i)*R_i.units

        h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min()
        
        #flag for debug plots
        plot = False

        #get LFP and CSD at contacts
        phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
                                          sigma, plot)

        spline_input = {
            'lfp' : phi_j,
            'coord_electrode' : z_j*1E3*pq.mm/pq.m,
            'diam' : R_i*2*1E3*pq.mm/pq.m,
            'sigma' : sigma,
            'sigma_top' : sigma,
            'num_steps' : num_steps,
            'tol' : 1E-12,          # Tolerance in numerical integration
            'f_type' : 'gaussian',
            'f_order' : (3, 1),
        }
        spline_icsd = icsd.SplineiCSD(**spline_input)
        csd = spline_icsd.get_csd()
        
        self.assertEqual(C_i.units, csd.units)
        nt.assert_array_almost_equal(C_i, csd, decimal=3)

Example 163

Project: elephant Source File: test_icsd.py
    def test_SplineiCSD_04(self):
        '''test using non-standard SI units'''
        #set some parameters for ground truth csd and csd estimates, e.g.,
        #we will use the same source diameter as in the ground truth
        
        #contact point coordinates
        z_j = np.arange(21)*1E-4*pq.m
        
        #source coordinates
        z_i = z_j
        
        #current source density magnitude
        C_i = np.zeros(z_i.size)*pq.A/pq.m**3
        C_i[7:12:2] += np.array([-.5, 1., -.5])*pq.A/pq.m**3
                
        #source radius (delta, step)
        R_i = np.ones(z_i.size)*1E-3*pq.m
        
        #source height (cylinder)
        h_i = np.ones(z_i.size)*1E-4*pq.m
        
        #conductivity, use same conductivity for top layer (z_j < 0)
        sigma = 0.3*pq.S/pq.m
        sigma_top = sigma

        #construct interpolators; the spline method assumes an underlying
        #source pattern generating LFPs that is a cubic spline interpolation
        #between contacts, so we generate CSD data relying on the same
        #assumption
        f_C = interp1d(z_i, C_i, kind='cubic')
        f_R = interp1d(z_i, R_i)
        num_steps = 201
        z_i_i = np.linspace(float(z_i[0]), float(z_i[-1]), num_steps)*z_i.units
        C_i_i = f_C(np.asarray(z_i_i))*C_i.units
        R_i_i = f_R(z_i_i)*R_i.units

        h_i_i = np.ones(z_i_i.size)*np.diff(z_i_i).min()
        
        #flag for debug plots
        plot = False

        #get LFP and CSD at contacts
        phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
                                          sigma, plot)

        spline_input = {
            'lfp' : phi_j,
            'coord_electrode' : z_j,
            'diam' : R_i*2,
            'sigma' : sigma*1E3*pq.mS/pq.S,
            'sigma_top' : sigma*1E3*pq.mS/pq.S,
            'num_steps' : num_steps,
            'tol' : 1E-12,          # Tolerance in numerical integration
            'f_type' : 'gaussian',
            'f_order' : (3, 1),
        }
        spline_icsd = icsd.SplineiCSD(**spline_input)
        csd = spline_icsd.get_csd()
        
        self.assertEqual(C_i.units, csd.units)
        nt.assert_array_almost_equal(C_i, csd, decimal=3)

Example 164

Project: dipy Source File: test_expectmax.py
def test_compute_em_demons_step_2d():
    r"""
    Compares the output of the demons step in 2d against an analytical
    step. The fixed image is given by $F(x) = \frac{1}{2}||x - c_f||^2$, the
    moving image is given by $G(x) = \frac{1}{2}||x - c_g||^2$,
    $x, c_f, c_g \in R^{2}$

    References
    ----------
    [Vercauteren09] Vercauteren, T., Pennec, X., Perchant, A., & Ayache, N.
                    (2009). Diffeomorphic demons: efficient non-parametric
                    image registration. NeuroImage, 45(1 Suppl), S61-72.
                    doi:10.1016/j.neuroimage.2008.10.040
    """
    # Select arbitrary images' shape (same shape for both images)
    sh = (30, 20)

    # Select arbitrary centers
    c_f = np.asarray(sh) / 2
    c_g = c_f + 0.5

    # Compute the identity vector field I(x) = x in R^2
    x_0 = np.asarray(range(sh[0]))
    x_1 = np.asarray(range(sh[1]))
    X = np.ndarray(sh + (2,), dtype=np.float64)
    O = np.ones(sh)
    X[..., 0] = x_0[:, None] * O
    X[..., 1] = x_1[None, :] * O

    # Compute the gradient fields of F and G
    grad_F = X - c_f
    grad_G = X - c_g

    # The squared norm of grad_G to be used later
    sq_norm_grad_G = np.sum(grad_G**2, -1)

    # Compute F and G
    F = 0.5 * np.sum(grad_F**2, -1)
    G = 0.5 * sq_norm_grad_G
    delta_field = G - F

    # Now select an arbitrary parameter for
    # $\sigma_x$ (eq 4 in [Vercauteren09])
    sigma_x_sq = 1.5

    # Set arbitrary values for $\sigma_i$ (eq. 4 in [Vercauteren09])
    # The original Demons algorithm used simply |F(x) - G(x)| as an
    # estimator, so let's use it as well
    sigma_i_sq = (F - G)**2

    # Select some pixels to have special values
    np.random.seed(1346491)
    random_labels = np.random.randint(0, 5, sh[0] * sh[1])
    random_labels = random_labels.reshape(sh)

    # this label is used to set sigma_i_sq == 0 below
    random_labels[sigma_i_sq == 0] = 2
    # this label is used to set gradient == 0 below
    random_labels[sq_norm_grad_G == 0] = 2

    expected = np.zeros_like(grad_G)
    # Pixels with sigma_i_sq = inf
    sigma_i_sq[random_labels == 0] = np.inf
    expected[random_labels == 0, ...] = 0

    # Pixels with gradient!=0 and sigma_i_sq=0
    sqnrm = sq_norm_grad_G[random_labels == 1]
    sigma_i_sq[random_labels == 1] = 0
    expected[random_labels == 1, 0] = (delta_field[random_labels == 1] *
                                       grad_G[random_labels == 1, 0] / sqnrm)
    expected[random_labels == 1, 1] = (delta_field[random_labels == 1] *
                                       grad_G[random_labels == 1, 1] / sqnrm)

    # Pixels with gradient=0 and sigma_i_sq=0
    sigma_i_sq[random_labels == 2] = 0
    grad_G[random_labels == 2, ...] = 0
    expected[random_labels == 2, ...] = 0

    # Pixels with gradient=0 and sigma_i_sq!=0
    grad_G[random_labels == 3, ...] = 0

    # Directly compute the demons step according to eq. 4 in [Vercauteren09]
    num = (sigma_x_sq * (F - G))[random_labels >= 3]
    den = (sigma_x_sq * sq_norm_grad_G + sigma_i_sq)[random_labels >= 3]

    # This is $J^{P}$ in eq. 4 [Vercauteren09]
    expected[random_labels >= 3] = -1 * np.array(grad_G[random_labels >= 3])
    expected[random_labels >= 3, ...] *= (num / den)[..., None]

    # Now compute it using the implementation under test

    actual = np.empty_like(expected, dtype=floating)
    em.compute_em_demons_step_2d(np.array(delta_field, dtype=floating),
                                 np.array(sigma_i_sq, dtype=floating),
                                 np.array(grad_G, dtype=floating),
                                 sigma_x_sq,
                                 actual)

    # Test sigma_i_sq == inf
    try:
        assert_array_almost_equal(actual[random_labels == 0],
                                  expected[random_labels == 0])
    except AssertionError:
        raise AssertionError("Failed for sigma_i_sq == inf")

    # Test sigma_i_sq == 0 and gradient != 0
    try:
        assert_array_almost_equal(actual[random_labels == 1],
                                  expected[random_labels == 1])
    except AssertionError:
        raise AssertionError("Failed for sigma_i_sq == 0 and gradient != 0")

    # Test sigma_i_sq == 0 and gradient == 0
    try:
        assert_array_almost_equal(actual[random_labels == 2],
                                  expected[random_labels == 2])
    except AssertionError:
        raise AssertionError("Failed for sigma_i_sq == 0 and gradient == 0")

    # Test sigma_i_sq != 0 and gradient == 0
    try:
        assert_array_almost_equal(actual[random_labels == 3],
                                  expected[random_labels == 3])
    except AssertionError:
        raise AssertionError("Failed for sigma_i_sq != 0 and gradient == 0 ")

    # Test sigma_i_sq != 0 and gradient != 0
    try:
        assert_array_almost_equal(actual[random_labels == 4],
                                  expected[random_labels == 4])
    except AssertionError:
        raise AssertionError("Failed for sigma_i_sq != 0 and gradient != 0")

Example 165

Project: dipy Source File: test_expectmax.py
def test_compute_em_demons_step_3d():
    r"""
    Compares the output of the demons step in 3d against an analytical
    step. The fixed image is given by $F(x) = \frac{1}{2}||x - c_f||^2$, the
    moving image is given by $G(x) = \frac{1}{2}||x - c_g||^2$,
    $x, c_f, c_g \in R^{3}$

    References
    ----------
    [Vercauteren09] Vercauteren, T., Pennec, X., Perchant, A., & Ayache, N.
                    (2009). Diffeomorphic demons: efficient non-parametric
                    image registration. NeuroImage, 45(1 Suppl), S61-72.
                    doi:10.1016/j.neuroimage.2008.10.040
    """

    # Select arbitrary images' shape (same shape for both images)
    sh = (20, 15, 10)

    # Select arbitrary centers
    c_f = np.asarray(sh) / 2
    c_g = c_f + 0.5

    # Compute the identity vector field I(x) = x in R^3
    x_0 = np.asarray(range(sh[0]))
    x_1 = np.asarray(range(sh[1]))
    x_2 = np.asarray(range(sh[2]))
    X = np.ndarray(sh + (3,), dtype=np.float64)
    O = np.ones(sh)
    X[..., 0] = x_0[:, None, None] * O
    X[..., 1] = x_1[None, :, None] * O
    X[..., 2] = x_2[None, None, :] * O

    # Compute the gradient fields of F and G
    grad_F = X - c_f
    grad_G = X - c_g

    # The squared norm of grad_G to be used later
    sq_norm_grad_G = np.sum(grad_G**2, -1)

    # Compute F and G
    F = 0.5 * np.sum(grad_F**2, -1)
    G = 0.5 * sq_norm_grad_G
    delta_field = G - F

    # Now select an arbitrary parameter for
    # $\sigma_x$ (eq 4 in [Vercauteren09])
    sigma_x_sq = 1.5

    # Set arbitrary values for $\sigma_i$ (eq. 4 in [Vercauteren09])
    # The original Demons algorithm used simply |F(x) - G(x)| as an
    # estimator, so let's use it as well
    sigma_i_sq = (F - G)**2

    # Select some pixels to have special values
    np.random.seed(1346491)
    random_labels = np.random.randint(0, 5, sh[0] * sh[1] * sh[2])
    random_labels = random_labels.reshape(sh)

    # this label is used to set sigma_i_sq == 0 below
    random_labels[sigma_i_sq == 0] = 2
    # this label is used to set gradient == 0 below
    random_labels[sq_norm_grad_G == 0] = 2

    expected = np.zeros_like(grad_G)
    # Pixels with sigma_i_sq = inf
    sigma_i_sq[random_labels == 0] = np.inf
    expected[random_labels == 0, ...] = 0

    # Pixels with gradient!=0 and sigma_i_sq=0
    sqnrm = sq_norm_grad_G[random_labels == 1]
    sigma_i_sq[random_labels == 1] = 0
    expected[random_labels == 1, 0] = (delta_field[random_labels == 1] *
                                       grad_G[random_labels == 1, 0] / sqnrm)
    expected[random_labels == 1, 1] = (delta_field[random_labels == 1] *
                                       grad_G[random_labels == 1, 1] / sqnrm)
    expected[random_labels == 1, 2] = (delta_field[random_labels == 1] *
                                       grad_G[random_labels == 1, 2] / sqnrm)

    # Pixels with gradient=0 and sigma_i_sq=0
    sigma_i_sq[random_labels == 2] = 0
    grad_G[random_labels == 2, ...] = 0
    expected[random_labels == 2, ...] = 0

    # Pixels with gradient=0 and sigma_i_sq!=0
    grad_G[random_labels == 3, ...] = 0

    # Directly compute the demons step according to eq. 4 in [Vercauteren09]
    num = (sigma_x_sq * (F - G))[random_labels >= 3]
    den = (sigma_x_sq * sq_norm_grad_G + sigma_i_sq)[random_labels >= 3]

    # This is $J^{P}$ in eq. 4 [Vercauteren09]
    expected[random_labels >= 3] = -1 * np.array(grad_G[random_labels >= 3])
    expected[random_labels >= 3, ...] *= (num / den)[..., None]

    # Now compute it using the implementation under test
    actual = np.empty_like(expected, dtype=floating)
    em.compute_em_demons_step_3d(np.array(delta_field, dtype=floating),
                                 np.array(sigma_i_sq, dtype=floating),
                                 np.array(grad_G, dtype=floating),
                                 sigma_x_sq,
                                 actual)

    # Test sigma_i_sq == inf
    try:
        assert_array_almost_equal(actual[random_labels == 0],
                                  expected[random_labels == 0])
    except AssertionError:
        raise AssertionError("Failed for sigma_i_sq == inf")

    # Test sigma_i_sq == 0 and gradient != 0
    try:
        assert_array_almost_equal(actual[random_labels == 1],
                                  expected[random_labels == 1])
    except AssertionError:
        raise AssertionError("Failed for sigma_i_sq == 0 and gradient != 0")

    # Test sigma_i_sq == 0 and gradient == 0
    try:
        assert_array_almost_equal(actual[random_labels == 2],
                                  expected[random_labels == 2])
    except AssertionError:
        raise AssertionError("Failed for sigma_i_sq == 0 and gradient == 0")

    # Test sigma_i_sq != 0 and gradient == 0
    try:
        assert_array_almost_equal(actual[random_labels == 3],
                                  expected[random_labels == 3])
    except AssertionError:
        raise AssertionError("Failed for sigma_i_sq != 0 and gradient == 0 ")

    # Test sigma_i_sq != 0 and gradient != 0
    try:
        assert_array_almost_equal(actual[random_labels == 4],
                                  expected[random_labels == 4])
    except AssertionError:
        raise AssertionError("Failed for sigma_i_sq != 0 and gradient != 0")

Example 166

Project: dipy Source File: test_imaffine.py
def test_affine_map():
    np.random.seed(2112927)
    dom_shape = np.array([64, 64, 64], dtype=np.int32)
    cod_shape = np.array([80, 80, 80], dtype=np.int32)
    nx = dom_shape[0]
    ny = dom_shape[1]
    nz = dom_shape[2]
    # Radius of the circle/sphere (testing image)
    radius = 16
    # Rotation axis (used for 3D transforms only)
    rot_axis = np.array([.5, 2.0, 1.5])
    # Arbitrary transform parameters
    t = 0.15
    rotations = [-1 * np.pi / 10.0, 0.0, np.pi / 10.0]
    scales = [0.9, 1.0, 1.1]
    for dim in [2, 3]:
        # Setup current dimension
        if dim == 2:
            # Create image of a circle
            img = vf.create_circle(cod_shape[0], cod_shape[1], radius)
            oracle_linear = vf.transform_2d_affine
            oracle_nn = vf.transform_2d_affine_nn
        else:
            # Create image of a sphere
            img = vf.create_sphere(cod_shape[0], cod_shape[1], cod_shape[2],
                                   radius)
            oracle_linear = vf.transform_3d_affine
            oracle_nn = vf.transform_3d_affine_nn
        img = np.array(img)
        # Translation is the only parameter differing for 2D and 3D
        translations = [t * dom_shape[:dim]]
        # Generate affine transforms
        gt_affines = create_affine_transforms(dim, translations, rotations,
                                              scales, rot_axis)
        # Include the None case
        gt_affines.append(None)

        for affine in gt_affines:

            # make both domain point to the same physical region
            # It's ok to use the same transform, we just want to test
            # that this information is actually being considered
            domain_grid2world = affine
            codomain_grid2world = affine
            grid2grid_transform = affine

            # Evaluate the transform with vector_fields module (already tested)
            expected_linear = oracle_linear(img, dom_shape[:dim],
                                            grid2grid_transform)
            expected_nn = oracle_nn(img, dom_shape[:dim], grid2grid_transform)

            # Evaluate the transform with the implementation under test
            affine_map = imaffine.AffineMap(affine,
                                            dom_shape[:dim], domain_grid2world,
                                            cod_shape[:dim],
                                            codomain_grid2world)
            actual_linear = affine_map.transform(img, interp='linear')
            actual_nn = affine_map.transform(img, interp='nearest')
            assert_array_almost_equal(actual_linear, expected_linear)
            assert_array_almost_equal(actual_nn, expected_nn)

            # Test set_affine with valid matrix
            affine_map.set_affine(affine)
            if affine is None:
                assert(affine_map.affine is None)
                assert(affine_map.affine_inv is None)
            else:
                assert_array_equal(affine, affine_map.affine)
                actual = affine_map.affine.dot(affine_map.affine_inv)
                assert_array_almost_equal(actual, np.eye(dim + 1))

            # Evaluate via the inverse transform

            # AffineMap will use the inverse of the input matrix when we call
            # `transform_inverse`. Since the inverse of the inverse of a matrix
            # is not exactly equal to the original matrix (numerical
            #  limitations) we need to invert the matrix twice to make sure
            # the oracle and the implementation under test apply the same
            # transform
            aff_inv = None if affine is None else npl.inv(affine)
            aff_inv_inv = None if aff_inv is None else npl.inv(aff_inv)
            expected_linear = oracle_linear(img, dom_shape[:dim],
                                            aff_inv_inv)
            expected_nn = oracle_nn(img, dom_shape[:dim], aff_inv_inv)

            affine_map = imaffine.AffineMap(aff_inv,
                                            cod_shape[:dim],
                                            codomain_grid2world,
                                            dom_shape[:dim], domain_grid2world)
            actual_linear = affine_map.transform_inverse(img, interp='linear')
            actual_nn = affine_map.transform_inverse(img, interp='nearest')
            assert_array_almost_equal(actual_linear, expected_linear)
            assert_array_almost_equal(actual_nn, expected_nn)

        # Verify AffineMap cannot be created with a non-invertible matrix
        invalid_nan = np.zeros((dim + 1, dim + 1), dtype=np.float64)
        invalid_nan[1, 1] = np.nan
        invalid_zeros = np.zeros((dim + 1, dim + 1), dtype=np.float64)
        assert_raises(
            imaffine.AffineInversionError,
            imaffine.AffineMap,
            invalid_nan)
        assert_raises(
            imaffine.AffineInversionError,
            imaffine.AffineMap,
            invalid_zeros)

        # Test exception is raised when the affine transform matrix is not
        # valid
        invalid_shape = np.eye(dim)
        affmap_invalid_shape = imaffine.AffineMap(invalid_shape,
                                                  dom_shape[:dim], None,
                                                  cod_shape[:dim], None)
        assert_raises(ValueError, affmap_invalid_shape.transform, img)
        assert_raises(ValueError, affmap_invalid_shape.transform_inverse, img)

        # Verify exception is raised when sampling info is not provided
        valid = np.eye(3)
        affmap_invalid_shape = imaffine.AffineMap(valid)
        assert_raises(ValueError, affmap_invalid_shape.transform, img)
        assert_raises(ValueError, affmap_invalid_shape.transform_inverse, img)

        # Verify exception is raised when requesting an invalid interpolation
        assert_raises(ValueError, affine_map.transform, img, 'invalid')
        assert_raises(ValueError, affine_map.transform_inverse, img, 'invalid')

        # Verify exception is raised when attempting to warp an image of
        # invalid dimension
        for dim in [2, 3]:
            affine_map = imaffine.AffineMap(np.eye(dim),
                                            cod_shape[:dim], None,
                                            dom_shape[:dim], None)
            for sh in [(2,), (2, 2, 2, 2)]:
                img = np.zeros(sh)
                assert_raises(ValueError, affine_map.transform, img)
                assert_raises(ValueError, affine_map.transform_inverse, img)
            aff_sing = np.zeros((dim + 1, dim + 1))
            aff_nan = np.zeros((dim + 1, dim + 1))
            aff_nan[...] = np.nan
            aff_inf = np.zeros((dim + 1, dim + 1))
            aff_inf[...] = np.inf

            assert_raises(
                AffineInversionError,
                affine_map.set_affine,
                aff_sing)
            assert_raises(AffineInversionError, affine_map.set_affine, aff_nan)
            assert_raises(AffineInversionError, affine_map.set_affine, aff_inf)
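
The double inversion in this test matters because matrix inversion is not exactly involutive in floating point; the round trip agrees only approximately, which is also why the comparisons use assert_array_almost_equal rather than exact equality. A minimal sketch:

import numpy as np
from numpy.testing import assert_array_almost_equal

A = np.array([[1.1, 0.2, 0.0],
              [0.0, 0.9, 0.3],
              [0.0, 0.0, 1.0]])
# inv(inv(A)) equals A only up to floating-point error
assert_array_almost_equal(np.linalg.inv(np.linalg.inv(A)), A)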

Example 167

Project: dipy Source File: test_imwarp.py
def test_diffeomorphic_map_2d():
    r""" Test 2D DiffeomorphicMap

    Creates a random displacement field that exactly maps pixels from an
    input image to an output image. First a discrete random assignment
    between the images is generated, then each pair of mapped points are
    transformed to the physical space by assigning a pair of arbitrary,
    fixed affine matrices to input and output images, and finaly the
    difference between their positions is taken as the displacement vector.
    The resulting displacement, although operating in physical space,
    maps the points exactly (up to numerical precision).
    """
    np.random.seed(2022966)
    domain_shape = (10, 10)
    codomain_shape = (10, 10)
    # create a simple affine transformation
    nr = domain_shape[0]
    nc = domain_shape[1]
    s = 1.1
    t = 0.25
    trans = np.array([[1, 0, -t * nr],
                      [0, 1, -t * nc],
                      [0, 0, 1]])
    trans_inv = np.linalg.inv(trans)
    scale = np.array([[1 * s, 0, 0],
                      [0, 1 * s, 0],
                      [0, 0, 1]])
    gt_affine = trans_inv.dot(scale.dot(trans))

    # create the random displacement field
    domain_grid2world = gt_affine
    codomain_grid2world = gt_affine
    disp, assign = vfu.create_random_displacement_2d(
        np.array(domain_shape, dtype=np.int32),
        domain_grid2world, np.array(codomain_shape, dtype=np.int32),
        codomain_grid2world)
    disp = np.array(disp, dtype=floating)
    assign = np.array(assign)
    # create a random image (with decimal digits) to warp
    moving_image = np.ndarray(codomain_shape, dtype=floating)
    ns = np.size(moving_image)
    moving_image[...] = np.random.randint(0, 10, ns).reshape(codomain_shape)
    # set boundary values to zero so we don't test wrong interpolation due
    # to floating point precision
    moving_image[0, :] = 0
    moving_image[-1, :] = 0
    moving_image[:, 0] = 0
    moving_image[:, -1] = 0

    # warp the moving image using the (exact) assignments
    expected = moving_image[(assign[..., 0], assign[..., 1])]

    # warp using a DiffeomorphicMap instance
    diff_map = imwarp.DiffeomorphicMap(2, domain_shape, domain_grid2world,
                                       domain_shape, domain_grid2world,
                                       codomain_shape, codomain_grid2world,
                                       None)
    diff_map.forward = disp

    # Verify that the transform method accepts different image types (note
    # that the actual image contains integer values; we don't want to test
    # rounding)
    for dtype in [floating, np.float64, np.int64, np.int32]:
        moving_image = moving_image.astype(dtype)

        # warp using linear interpolation
        warped = diff_map.transform(moving_image, 'linear')
        # compare the images (the linear interpolation may introduce slight
        # precision errors)
        assert_array_almost_equal(warped, expected, decimal=5)

        # Now test the nearest neighbor interpolation
        warped = diff_map.transform(moving_image, 'nearest')
        # compare the images (now we don't have to worry about precision,
        # it is nearest neighbour)
        assert_array_almost_equal(warped, expected)

        # verify the is_inverse flag
        inv = diff_map.inverse()
        warped = inv.transform_inverse(moving_image, 'linear')
        assert_array_almost_equal(warped, expected, decimal=5)

        warped = inv.transform_inverse(moving_image, 'nearest')
        assert_array_almost_equal(warped, expected)

    # Now test the inverse functionality
    diff_map = imwarp.DiffeomorphicMap(2, codomain_shape, codomain_grid2world,
                                       codomain_shape, codomain_grid2world,
                                       domain_shape, domain_grid2world, None)
    diff_map.backward = disp
    for dtype in [floating, np.float64, np.int64, np.int32]:
        moving_image = moving_image.astype(dtype)

        # warp using linear interpolation
        warped = diff_map.transform_inverse(moving_image, 'linear')
        # compare the images (the linear interpolation may introduce slight
        # precision errors)
        assert_array_almost_equal(warped, expected, decimal=5)

        # Now test the nearest neighbor interpolation
        warped = diff_map.transform_inverse(moving_image, 'nearest')
        # compare the images (now we don't have to worry about precision,
        # it is nearest neighbour)
        assert_array_almost_equal(warped, expected)

    # Verify that DiffeomorphicMap raises the appropriate exceptions when
    # the sampling information is undefined
    diff_map = imwarp.DiffeomorphicMap(2, domain_shape, domain_grid2world,
                                       domain_shape, domain_grid2world,
                                       codomain_shape, codomain_grid2world,
                                       None)
    diff_map.forward = disp
    diff_map.domain_shape = None
    # If we don't provide the sampling info, it should try to use the map's
    # info, but it's None...
    assert_raises(ValueError, diff_map.transform, moving_image, 'linear')

    # Same test for diff_map.transform_inverse
    diff_map = imwarp.DiffeomorphicMap(2, domain_shape, domain_grid2world,
                                       domain_shape, domain_grid2world,
                                       codomain_shape, codomain_grid2world,
                                       None)
    diff_map.forward = disp
    diff_map.codomain_shape = None
    # If we don't provide the sampling info, it should try to use the map's
    # info, but it's None...
    assert_raises(ValueError, diff_map.transform_inverse,
                  moving_image, 'linear')

    # We must provide, at least, the reference grid shape
    assert_raises(ValueError, imwarp.DiffeomorphicMap, 2, None)

    # Verify that matrices are correctly interpreted from string
    non_array_obj = diff_map
    array_obj = np.ones((3, 3))
    assert_raises(ValueError, diff_map.interpret_matrix, 'a different string')
    assert_raises(ValueError, diff_map.interpret_matrix, non_array_obj)
    assert(diff_map.interpret_matrix('identity') is None)
    assert(diff_map.interpret_matrix(None) is None)
    assert_array_equal(diff_map.interpret_matrix(array_obj), array_obj)
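
The decimal=5 tolerance used for the linear-interpolation comparisons above reflects single precision: the floating dtype keeps roughly seven significant digits, so values of order one agree with their float64 counterparts well within 1.5e-5. A minimal sketch of that relationship:

import numpy as np
from numpy.testing import assert_array_almost_equal

rng = np.random.RandomState(0)
img64 = rng.rand(10, 10)
img32 = img64.astype(np.float32)  # ~1e-8 rounding error for values in [0, 1)
assert_array_almost_equal(img32, img64, decimal=5)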

Example 168

Project: dipy Source File: test_dti.py
def test_tensor_model():
    fdata, fbval, fbvec = get_data('small_25')
    data1 = nib.load(fdata).get_data()
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)

        # Check that it works on signal that has already been normalized to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0]/np.mean(data[0, 0, 0,
                                                        gtab.b0s_mask]))

            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0], dtifit_to_relative.fa,
                                    decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = (3 * np.sqrt(6) * np.linalg.det(A_squiggle /
            np.linalg.norm(A_squiggle)))
    evals_eigh, evecs_eigh = np.linalg.eigh(tensor)
    # Sort according to eigen-value from large to small:
    evecs = evecs_eigh[:, np.argsort(evals_eigh)[::-1]]
    # Check that eigenvalues and eigenvectors are properly sorted through
    # that previous operation:
    for i in range(3):
        assert_array_almost_equal(np.dot(tensor, evecs[:, i]),
                                  evals[i] * evecs[:, i])
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        # Test that the eigenvectors are correct, one-by-one:
        for i in range(3):
            # Eigenvectors have intrinsic sign ambiguity
            # (see
            # http://prod.sandia.gov/techlib/access-control.cgi/2007/076422.pdf)
            # so we need to allow for sign flips. One of the following should
            # always be true:
            assert_(
                    np.all(np.abs(tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6) or
                    np.all(np.abs(-tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6))
            # We set a fixed tolerance of 10e-6, similar to array_almost_equal

        err_msg = "Calculation of tensor from Y does not compare to "
        err_msg += "analytical solution"
        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=err_msg)

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')

    # Test custom fit tensor method
    try:
        model = dti.TensorModel(gtab, fit_method=lambda *args, **kwargs: 42)
        fit = model.fit_method()
    except Exception as exc:
        assert False, "TensorModel should accept custom fit methods: %s" % exc
    assert fit == 42, "Custom fit method for TensorModel returned %s." % fit

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
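
The sign-flip check in this test generalizes: eigenvectors are defined only up to sign, so a comparison against an analytical eigenvector must accept either orientation. A minimal sketch with a symmetric 2x2 matrix whose dominant eigenvector is known:

import numpy as np

A = np.array([[2.0, 1.0],
              [1.0, 2.0]])
evals, evecs = np.linalg.eigh(A)
v = evecs[:, 1]                         # eigenvector of the largest eigenvalue
w = np.array([1.0, 1.0]) / np.sqrt(2)  # analytical counterpart, up to sign
assert np.allclose(v, w) or np.allclose(-v, w)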

Example 169

Project: nipy Source File: test_array_coords.py
def test_array_coord_map():
    # array coord map recreates the affine when you slice an image.  In
    # general, if you take an integer slice in some dimension, the
    # corresponding column of the affine will go, leaving a row for the
    # lost dimension, with all zeros, except for the translation in the
    # now-removed dimension, encoding the position of that particular
    # slice
    xz = 1.1; yz = 2.3; zz = 3.5
    xt = 10.0; yt = 11; zt = 12
    aff = np.diag([xz, yz, zz, 1])
    aff[:3,3] = [xt, yt, zt]
    shape = (2,3,4)
    cmap = AffineTransform.from_params('ijk', 'xyz', aff)
    acm = acs.ArrayCoordMap(cmap, shape)
    # slice the coordinate map for the first axis
    sacm = acm[1]
    # The affine has lost the first column, but has a remaining row (the
    # first) encoding the translation to get to this slice
    assert_array_almost_equal(sacm.coordmap.affine,
                              np.array([
                                  [0, 0, xz+xt],
                                  [yz, 0, yt],
                                  [0, zz, zt],
                                  [0, 0, 1]]))
    sacm = acm[:,1]
    # lost second column, remaining second row with translation
    assert_array_almost_equal(sacm.coordmap.affine,
                              np.array([
                                  [xz, 0, xt],
                                  [0, 0, yz+yt],
                                  [0, zz, zt],
                                  [0, 0, 1]]))
    sacm = acm[:,:,2]
    # ditto third column and row
    assert_array_almost_equal(sacm.coordmap.affine,
                              np.array([
                                  [xz, 0, xt],
                                  [0, yz, yt],
                                  [0, 0, 2*zz+zt],
                                  [0, 0, 1]]))
    # check that ellipsis slicing is the same as [:, :, 2]
    sacm = acm[...,2]
    assert_array_almost_equal(sacm.coordmap.affine,
                              np.array([
                                  [xz, 0, xt],
                                  [0, yz, yt],
                                  [0, 0, 2*zz+zt],
                                  [0, 0, 1]]))
    # that ellipsis can follow other slice types
    sacm = acm[:,...,2]
    assert_array_almost_equal(sacm.coordmap.affine,
                              np.array([
                                  [xz, 0, xt],
                                  [0, yz, yt],
                                  [0, 0, 2*zz+zt],
                                  [0, 0, 1]]))
    # that there can be only one ellipsis
    assert_raises(ValueError, acm.__getitem__, (
        (Ellipsis, Ellipsis,2)))
    # that you can integer slice in all three dimensions, leaving only
    # the translation column
    sacm = acm[1,0,2]
    assert_array_almost_equal(sacm.coordmap.affine,
                              np.array([
                                  [xz+xt],
                                  [yt],
                                  [2*zz+zt],
                                  [1]]))
    # that anything other than an int, slice or Ellipsis is an error
    assert_raises(ValueError, acm.__getitem__, ([0,2],))
    assert_raises(ValueError, acm.__getitem__, (np.array([0,2]),))
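
The claim in the comments above, that an integer slice drops a column of the affine and folds its contribution into the translation, can be checked directly with plain numpy. A minimal sketch for the k = 2 slice:

import numpy as np
from numpy.testing import assert_array_almost_equal

xz, yz, zz = 1.1, 2.3, 3.5
xt, yt, zt = 10.0, 11.0, 12.0
aff = np.diag([xz, yz, zz, 1.0])
aff[:3, 3] = [xt, yt, zt]
# reduced affine for the k = 2 slice: the z column is gone and 2*zz
# moves into the translation
sliced = np.array([[xz, 0.0, xt],
                   [0.0, yz, yt],
                   [0.0, 0.0, 2 * zz + zt],
                   [0.0, 0.0, 1.0]])
assert_array_almost_equal(sliced @ np.array([1.0, 0.0, 1.0]),
                          aff @ np.array([1.0, 0.0, 2.0, 1.0]))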

Example 170

Project: nipy Source File: test_utils.py
def test_convolve_functions():
    # replicate convolution
    # This is a square wave on [0,1]
    f1 = (t > 0) * (t < 1)
    # ff1 is the numerical implementation of same
    ff1 = lambdify(t, f1)
    # Time delta
    dt = 1e-3
    # Numerical convolution to test against
    # The convolution of ``f1`` with itself is a triangular wave on [0, 2],
    # peaking at 1 with height 1
    time, value = numerical_convolve(ff1, ff1, [0, 2], dt)
    # thin wrappers around the kernel-based (TimeConvolver) version
    def kern_conv1(f1, f2, f1_interval, f2_interval, dt, fill=0, name=None):
        kern = TimeConvolver(f1, f1_interval, dt, fill)
        return kern.convolve(f2, f2_interval, name=name)
    def kern_conv2(f1, f2, f1_interval, f2_interval, dt, fill=0, name=None):
        kern = TimeConvolver(f2, f2_interval, dt, fill)
        return kern.convolve(f1, f1_interval, name=name)
    for cfunc in (convolve_functions, kern_conv1, kern_conv2):
        tri = cfunc(f1, f1, [0, 2], [0, 2], dt, name='conv')
        assert_equal(str(tri), 'conv(t)')
        ftri = lambdify(t, tri)
        y = ftri(time)
        # numerical convolve about the same as ours
        assert_array_almost_equal(value, y)
        # peak is at 1
        assert_array_almost_equal(time[np.argmax(y)], 1)
        # Flip the interval and get the same result
        for seq1, seq2 in (((0, 2), (2, 0)),
                        ((2, 0), (0, 2)),
                        ((2, 0), (2, 0))):
            tri = cfunc(f1, f1, seq1, seq2, dt)
            ftri = lambdify(t, tri)
            y = ftri(time)
            assert_array_almost_equal(value, y)
        # offset square wave by 1 - offset triangle by 1
        f2 = (t > 1) * (t < 2)
        tri = cfunc(f1, f2, [0, 3], [0, 3], dt)
        ftri = lambdify(t, tri)
        o1_time = np.arange(0, 3, dt)
        z1s = np.zeros(int(np.round(1. / dt)))
        assert_array_almost_equal(ftri(o1_time), np.r_[z1s, value])
        # Same for input function
        tri = cfunc(f2, f1, [0, 3], [0, 3], dt)
        ftri = lambdify(t, tri)
        assert_array_almost_equal(ftri(o1_time), np.r_[z1s, value])
        # 2 seconds for both
        tri = cfunc(f2, f2, [0, 4], [0, 4], dt)
        ftri = lambdify(t, tri)
        o2_time = np.arange(0, 4, dt)
        assert_array_almost_equal(ftri(o2_time), np.r_[z1s, z1s, value])
        # offset by -0.5 - offset triangle by -0.5
        f3 = (t > -0.5) * (t < 0.5)
        tri = cfunc(f1, f3, [0, 2], [-0.5, 1.5], dt)
        ftri = lambdify(t, tri)
        o1_time = np.arange(-0.5, 1.5, dt)
        assert_array_almost_equal(ftri(o1_time), value)
        # Same for input function
        tri = cfunc(f3, f1, [-0.5, 1.5], [0, 2], dt)
        ftri = lambdify(t, tri)
        assert_array_almost_equal(ftri(o1_time), value)
        # -1 second for both
        tri = cfunc(f3, f3, [-0.5, 1.5], [-0.5, 1.5], dt)
        ftri = lambdify(t, tri)
        o2_time = np.arange(-1, 1, dt)
        assert_array_almost_equal(ftri(o2_time), value)
        # Check it's OK to be off the dt grid
        tri = cfunc(f1, f1, [dt/2, 2 + dt/2], [0, 2], dt, name='conv')
        ftri = lambdify(t, tri)
        assert_array_almost_equal(ftri(time), value, 3)
        # Check fill value
        nan_tri = cfunc(f1, f1, [0, 2], [0, 2], dt, fill=np.nan)
        nan_ftri = lambdify(t, nan_tri)
        y = nan_ftri(time)
        assert_array_equal(y, value)
        assert_true(np.all(np.isnan(nan_ftri(np.arange(-2, 0)))))
        assert_true(np.all(np.isnan(nan_ftri(np.arange(4, 6)))))
        # The original fill value was 0
        assert_array_equal(ftri(np.arange(-2, 0)), 0)
        assert_array_equal(ftri(np.arange(4, 6)), 0)
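
The numerical fact behind this test, that a unit square wave convolved with itself is a triangle on [0, 2] peaking at height 1 at t = 1, is easy to confirm with a direct discrete convolution. A minimal sketch:

import numpy as np
from numpy.testing import assert_array_almost_equal

dt = 1e-3
sq = np.ones(int(round(1.0 / dt)))  # square wave sampled on [0, 1)
tri = np.convolve(sq, sq) * dt      # triangle on [0, 2]
peak = np.argmax(tri)
assert_array_almost_equal(tri[peak], 1.0, decimal=2)  # peak height 1
assert_array_almost_equal(peak * dt, 1.0, decimal=2)  # peak position t = 1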

Example 171

Project: nitime Source File: test_spectral.py
def test_get_spectra_complex():
    """

    Testing spectral estimation

    """

    methods = (None,
           {"this_method": 'welch', "NFFT": 256, "Fs": 2 * np.pi},
           {"this_method": 'welch', "NFFT": 1024, "Fs": 2 * np.pi})

    for method in methods:
        avg_pwr1 = []
        avg_pwr2 = []
        est_pwr1 = []
        est_pwr2 = []

        # Make complex signals:
        r, _, _ = utils.ar_generator(N=2 ** 16)  # It needs to be that long for
                                                 # the answers to converge
        c, _, _ = utils.ar_generator(N=2 ** 16)
        arsig1 = r + c * 1j  # complex-valued AR signal

        r, _, _ = utils.ar_generator(N=2 ** 16)
        c, _, _ = utils.ar_generator(N=2 ** 16)

        arsig2 = r + c * 1j  # complex-valued AR signal
        avg_pwr1.append((arsig1 * arsig1.conjugate()).mean())
        avg_pwr2.append((arsig2 * arsig2.conjugate()).mean())

        tseries = np.vstack([arsig1, arsig2])

        f, c = tsa.get_spectra(tseries, method=method)

        # \sum_{\omega} psd d\omega:
        est_pwr1.append(np.sum(c[0, 0]) * (f[1] - f[0]))
        est_pwr2.append(np.sum(c[1, 1]) * (f[1] - f[0]))

        # Get it right within the order of magnitude:
        npt.assert_array_almost_equal(est_pwr1, avg_pwr1, decimal=-1)
        npt.assert_array_almost_equal(est_pwr2, avg_pwr2, decimal=-1)
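
Note the negative decimal: the elementwise criterion abs(desired - actual) < 1.5 * 10**(-decimal) also works for decimal < 0, so decimal=-1 only requires agreement to within 15, i.e. the right order of magnitude. A minimal sketch:

import numpy as np
from numpy.testing import assert_array_almost_equal

# decimal=-1 tolerates differences up to 1.5 * 10**1
assert_array_almost_equal(np.array([100.0]), np.array([108.0]), decimal=-1)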

Example 172

Project: nitime Source File: test_spectral.py
def test_mtm_cross_spectrum():
    """

    Test the multi-taper cross-spectral estimation. Based on the example in
    doc/examples/multi_taper_coh.py

    """
    NW = 4
    K = 2 * NW - 1

    N = 2 ** 10
    n_reps = 10
    n_freqs = N

    tapers, eigs = tsa.dpss_windows(N, NW, 2 * NW - 1)

    est_psd = []
    for k in range(n_reps):
        data, nz, alpha = utils.ar_generator(N=N)
        fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha], n_freqs=n_freqs)
        # 'one-sided', so multiply by 2:
        psd = 2 * (hz * hz.conj()).real

        tdata = tapers * data

        tspectra = fftpack.fft(tdata)

        L = N // 2 + 1
        sides = 'onesided'
        w, _ = utils.adaptive_weights(tspectra, eigs, sides=sides)

        sxx = tsa.mtm_cross_spectrum(tspectra, tspectra, w, sides=sides)
        est_psd.append(sxx)

    fxx = np.mean(est_psd, 0)

    psd_ratio = np.mean(fxx / psd)

    # This is a rather lenient test, making sure that the average ratio is
    # close to 1 (decimal=1, i.e. within 0.15). That is, that they are equal
    # on average:
    npt.assert_array_almost_equal(psd_ratio, 1, decimal=1)

    # Test raising of error in case the inputs don't make sense:
    npt.assert_raises(ValueError,
                      tsa.mtm_cross_spectrum,
                      tspectra, np.r_[tspectra, tspectra],
                      (w, w))

Example 173

Project: nitime Source File: test_spectral.py
@dec.slow
def test_multi_taper_psd_csd():
    """

    Test the multi taper psd and csd estimation functions.
    Based on the example in
    doc/examples/multi_taper_spectral_estimation.py

    """

    N = 2 ** 10
    n_reps = 10

    psd = []
    est_psd = []
    est_csd = []
    for jk in [True, False]:
        for k in range(n_reps):
            for adaptive in [True, False]:
                ar_seq, nz, alpha = utils.ar_generator(N=N, drop_transients=10)
                ar_seq -= ar_seq.mean()
                fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha],
                                              n_freqs=N)
                psd.append(2 * (hz * hz.conj()).real)
                f, psd_mt, nu = tsa.multi_taper_psd(ar_seq, adaptive=adaptive,
                                                    jackknife=jk)
                est_psd.append(psd_mt)
                f, csd_mt = tsa.multi_taper_csd(np.vstack([ar_seq, ar_seq]),
                                               adaptive=adaptive)
                # Symmetrical in this case, so take one element out:
                est_csd.append(csd_mt[0][1])

        fxx = np.mean(psd, axis=0)
        fxx_est1 = np.mean(est_psd, axis=0)
        fxx_est2 = np.mean(est_csd, axis=0)

        # Tests the psd:
        psd_ratio1 = np.mean(fxx_est1 / fxx)
        npt.assert_array_almost_equal(psd_ratio1, 1, decimal=-1)
        # Tests the csd:
        psd_ratio2 = np.mean(fxx_est2 / fxx)
        npt.assert_array_almost_equal(psd_ratio2, 1, decimal=-1)

Example 174

Project: openpathsampling Source File: test_snapshot_modifier.py
    def test_with_openmm_snapshot(self):
        # note: this is only a smoke test; correctness depends on OpenMM's
        # tests of its constraint approaches.
        test_system = omt.testsystems.AlanineDipeptideVacuum()
        template = omm_engine.snapshot_from_testsystem(test_system)
        engine = omm_engine.Engine(
            topology=template.topology,
            system=test_system.system,
            integrator=omt.integrators.VVVRIntegrator()
        )
        beta = 1.0 / (300.0 * u.kelvin * u.BOLTZMANN_CONSTANT_kB)
        
        # when the engine doesn't have an existing snapshot
        randomizer = RandomVelocities(beta=beta, engine=engine)
        new_snap = randomizer(template)
        # coordinates stayed the same
        assert_array_almost_equal(template.coordinates,
                                  new_snap.coordinates)
        # velocities changed
        assert_equal(np.isclose(template.velocities,
                                new_snap.velocities).all(),
                     False)
        engine.generate(new_snap, [lambda x, foo: len(x) <= 4])

        # when the engine does have an existing snapshot
        zeros = np.zeros((engine.n_atoms, engine.n_spatial))
        zero_snap = paths.engines.openmm.Snapshot.construct(
            coordinates=zeros * u.nanometer,
            velocities=zeros * u.nanometer / u.picosecond,
            box_vectors=template.box_vectors,
            engine=engine
        )
        engine.current_snapshot = zero_snap
        randomizer = RandomVelocities(beta=beta, engine=engine)
        new_snap = randomizer(template)
        # coordinates stayed the same
        assert_array_almost_equal(template.coordinates,
                                  new_snap.coordinates)
        # velocities changed
        assert_equal(np.isclose(template.velocities,
                                new_snap.velocities).all(),
                     False)
        # internal snapshot unchanged
        assert_equal(engine.current_snapshot, zero_snap)
        engine.generate(new_snap, [lambda x, foo: len(x) <= 4])