nose.tools.assert_equal

Here are examples of the Python API nose.tools.assert_equal taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

158 Examples
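Before the project examples, here is a minimal, self-contained sketch of the call itself. nose.tools.assert_equal mirrors unittest's assertEqual: it takes two values plus an optional failure message, and raises AssertionError when the values differ.

from nose.tools import assert_equal

def test_assert_equal_basics():
    # Passes silently when both values compare equal.
    assert_equal(2 + 2, 4)
    # The optional message is attached to the AssertionError on failure.
    assert_equal(sorted([3, 1, 2]), [1, 2, 3], "sort order is wrong")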

Example 101

Project: seaborn Source File: test_utils.py
Function: test_categorical_order
def test_categorical_order():
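    # As the assertions below indicate, categorical_order keeps the order of
    # appearance for string data, sorts numeric data, ignores missing values,
    # and defers to an explicit order argument or a Categorical's own
    # categories.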

    x = ["a", "c", "c", "b", "a", "d"]
    y = [3, 2, 5, 1, 4]
    order = ["a", "b", "c", "d"]

    out = utils.categorical_order(x)
    nt.assert_equal(out, ["a", "c", "b", "d"])

    out = utils.categorical_order(x, order)
    nt.assert_equal(out, order)

    out = utils.categorical_order(x, ["b", "a"])
    nt.assert_equal(out, ["b", "a"])

    out = utils.categorical_order(np.array(x))
    nt.assert_equal(out, ["a", "c", "b", "d"])

    out = utils.categorical_order(pd.Series(x))
    nt.assert_equal(out, ["a", "c", "b", "d"])

    out = utils.categorical_order(y)
    nt.assert_equal(out, [1, 2, 3, 4, 5])

    out = utils.categorical_order(np.array(y))
    nt.assert_equal(out, [1, 2, 3, 4, 5])

    out = utils.categorical_order(pd.Series(y))
    nt.assert_equal(out, [1, 2, 3, 4, 5])

    if pandas_has_categoricals:
        x = pd.Categorical(x, order)
        out = utils.categorical_order(x)
        nt.assert_equal(out, list(x.categories))

        x = pd.Series(x)
        out = utils.categorical_order(x)
        nt.assert_equal(out, list(x.cat.categories))

        out = utils.categorical_order(x, ["b", "a"])
        nt.assert_equal(out, ["b", "a"])

    x = ["a", np.nan, "c", "c", "b", "a", "d"]
    out = utils.categorical_order(x)
    nt.assert_equal(out, ["a", "c", "b", "d"])

Example 102

Project: dipy Source File: gqsampling_stats.py
def test_gqiodf():

    #read bvals, gradients and data
    bvals=np.load(opj(os.path.dirname(__file__), \
                          'data','small_64D.bvals.npy'))
    gradients=np.load(opj(os.path.dirname(__file__), \
                              'data','small_64D.gradients.npy'))    
    img =ni.load(os.path.join(os.path.dirname(__file__),\
                                  'data','small_64D.nii'))
    data=img.get_data()    

    #print(bvals.shape)
    #print(gradients.shape)
    #print(data.shape)


    t1=time.clock()
    
    gqs = gq.GeneralizedQSampling(data,bvals,gradients)
    ten = dt.Tensor(data,bvals,gradients,thresh=50)

    
    fa=ten.fa()

    x,y,z,a,b=ten.evecs.shape
    evecs=ten.evecs
    xyz=x*y*z
    evecs = evecs.reshape(xyz,3,3)
    #vs = np.sign(evecs[:,2,:])
    #print vs.shape
    #print np.hstack((vs,vs,vs)).reshape(1000,3,3).shape
    #evecs = np.hstack((vs,vs,vs)).reshape(1000,3,3)
    #print evecs.shape
    evals=ten.evals
    evals = evals.reshape(xyz,3)
    #print evals.shape

    

    t2=time.clock()
    #print('GQS in %d' %(t2-t1))
        
    eds=np.load(opj(os.path.dirname(__file__),\
                        '..','matrices',\
                        'evenly_distributed_sphere_362.npz'))

    
    odf_vertices=eds['vertices']
    odf_faces=eds['faces']

    #Yeh et al., IEEE TMI, 2010
    #calculate the odf using GQI

    scaling=np.sqrt(bvals*0.01506) # 0.01506 = 6*D where D is the free
    #water diffusion coefficient; the l_values are sqrt(6*D*tau), with the
    #free-water diffusion coefficient D and the diffusion time tau both
    #included in the b-value

    tmp=np.tile(scaling,(3,1))
    b_vector=gradients.T*tmp
    Lambda = 1.2 # smoothing parameter - diffusion sampling length
    
    q2odf_params=np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi)
    #implements equation no. 9 from Yeh et al.

    S=data.copy()

    x,y,z,g=S.shape
    S=S.reshape(x*y*z,g)
    QA = np.zeros((x*y*z,5))
    IN = np.zeros((x*y*z,5))

    fwd = 0
    
    #Calculate Quantitative Anisotropy and find the peaks and the indices
    #for every voxel

    summary = {}

    summary['vertices'] = odf_vertices
    v = odf_vertices.shape[0]
    summary['faces'] = odf_faces
    f = odf_faces.shape[0]

    '''
    If e is the number of edges, the Euler formula says f - e + v = 2 for a
    mesh on a sphere. Here, assuming we have a healthy triangulation, every
    face is a triangle and each of its 3 edges belongs to exactly two faces,
    so 2*e = 3*f. To avoid division we test whether 2*f - 3*f + 2*v == 4,
    or equivalently 2*v - f == 4.
    '''

    assert_equal(2*v-f, 4,'Direct Euler test fails')
    assert_true(meshes.euler_characteristic_check(odf_vertices, odf_faces,chi=2),'euler_characteristic_check fails')
    
    coarse = meshes.coarseness(odf_faces)
    print('coarseness: ', coarse)

    for (i,s) in enumerate(S):

        #print 'Volume %d' % i

        istr = str(i)

        summary[istr] = {}

        odf = Q2odf(s,q2odf_params)
        peaks,inds=rp.peak_finding(odf,odf_faces)
        fwd=max(np.max(odf),fwd)
        peaks = peaks - np.min(odf)
        l=min(len(peaks),5)
        QA[i][:l] = peaks[:l]
        IN[i][:l] = inds[:l]

        summary[istr]['odf'] = odf
        summary[istr]['peaks'] = peaks
        summary[istr]['inds'] = inds
        summary[istr]['evecs'] = evecs[i,:,:]
        summary[istr]['evals'] = evals[i,:]
   
    QA/=fwd
    QA=QA.reshape(x,y,z,5)    
    IN=IN.reshape(x,y,z,5)
    
    #print('Old %d secs' %(time.clock() - t2))
    # assert_equal((gqs.QA-QA).max(),0.,'Frank QA different than our QA')

    # assert_equal((gqs.QA.shape),QA.shape, 'Frank QA shape is different')
       
    # assert_equal((gqs.QA-QA).max(), 0.)

    #import dipy.core.track_propagation as tp

    #tp.FACT_Delta(QA,IN)

    #return tp.FACT_Delta(QA,IN,seeds_no=10000).tracks

    peaks_1 = [i for i in range(1000) if len(summary[str(i)]['inds'])==1]
    peaks_2 = [i for i in range(1000) if len(summary[str(i)]['inds'])==2]
    peaks_3 = [i for i in range(1000) if len(summary[str(i)]['inds'])==3]

    # correct numbers of voxels with respectively 1,2,3 ODF/QA peaks
    assert_array_equal((len(peaks_1),len(peaks_2),len(peaks_3)), (790,196,14),
                       'error in numbers of QA/ODF peaks')

    # correct indices of odf directions for voxels 0,10,44
    # with respectively 1,2,3 ODF/QA peaks
    assert_array_equal(summary['0']['inds'],[116],
                       'wrong peak indices for voxel 0')
    assert_array_equal(summary['10']['inds'],[105, 78],
                       'wrong peak indices for voxel 10')
    assert_array_equal(summary['44']['inds'],[95, 84, 108],
                       'wrong peak indices for voxel 44')

    assert_equal(np.argmax(summary['0']['odf']), 116)
    assert_equal(np.argmax(summary['10']['odf']), 105)
    assert_equal(np.argmax(summary['44']['odf']), 95)

    pole_1 = summary['vertices'][116]
    #print 'pole_1', pole_1
    pole_2 = summary['vertices'][105]
    #print 'pole_2', pole_2
    pole_3 = summary['vertices'][95]
    #print 'pole_3', pole_3

    vertices = summary['vertices']

    width = 0.02#0.3 #0.05
    
    '''
    print 'pole_1 equator contains:', len([i for i,v in enumerate(vertices) if np.abs(np.dot(v,pole_1)) < width])
    print 'pole_2 equator contains:', len([i for i,v in enumerate(vertices) if np.abs(np.dot(v,pole_2)) < width])
    print 'pole_3 equator contains:', len([i for i,v in enumerate(vertices) if np.abs(np.dot(v,pole_3)) < width])
    '''
    
    #print 'pole_1 equator contains:', len(meshes.equatorial_vertices(vertices,pole_1,width))
    #print 'pole_2 equator contains:', len(meshes.equatorial_vertices(vertices,pole_2,width))
    #print 'pole_3 equator contains:', len(meshes.equatorial_vertices(vertices,pole_3,width))

    #print triple_odf_maxima(vertices,summary['0']['odf'],width)
    #print triple_odf_maxima(vertices,summary['10']['odf'],width)
    #print triple_odf_maxima(vertices,summary['44']['odf'],width)
    #print summary['0']['evals']
    '''

    pole=np.array([0,0,1])

    from dipy.viz import fos
    r=fos.ren()
    fos.add(r,fos.point(pole,fos.green))
    for i,ev in enumerate(vertices):        
        if np.abs(np.dot(ev,pole))<width:
            fos.add(r,fos.point(ev,fos.red))
    fos.show(r)

    '''

    triple = triple_odf_maxima(vertices, summary['10']['odf'], width)
    
    indmax1, odfmax1 = triple[0]
    indmax2, odfmax2 = triple[1]
    indmax3, odfmax3 = triple[2] 

    '''
    from dipy.viz import fos
    r=fos.ren()
    for v in vertices:
        fos.add(r,fos.point(v,fos.cyan))
    fos.add(r,fos.sphere(upper_hemi_map(vertices[indmax1]),radius=0.1,color=fos.red))
    #fos.add(r,fos.line(np.array([0,0,0]),vertices[indmax1]))
    fos.add(r,fos.sphere(upper_hemi_map(vertices[indmax2]),radius=0.05,color=fos.green))
    fos.add(r,fos.sphere(upper_hemi_map(vertices[indmax3]),radius=0.025,color=fos.blue))
    fos.add(r,fos.sphere(upper_hemi_map(summary['0']['evecs'][:,0]),radius=0.1,color=fos.red,opacity=0.7))
    fos.add(r,fos.sphere(upper_hemi_map(summary['0']['evecs'][:,1]),radius=0.05,color=fos.green,opacity=0.7))
    fos.add(r,fos.sphere(upper_hemi_map(summary['0']['evecs'][:,2]),radius=0.025,color=fos.blue,opacity=0.7))
    fos.add(r,fos.sphere([0,0,0],radius=0.01,color=fos.white))
    fos.show(r)
    '''
    
    mat = np.vstack([vertices[indmax1],vertices[indmax2],vertices[indmax3]])

    print(np.dot(mat, np.transpose(mat)))
    # this is to assess how orthogonal the triple is
    print(np.dot(summary['0']['evecs'], np.transpose(mat)))

Example 103

Project: delocate Source File: test_delocating.py
def test_copy_recurse():
    # Function to find / copy needed libraries recursively
    with InTemporaryDirectory():
        # Get some fixed up libraries to play with
        os.makedirs('libcopy')
        test_lib, liba, libb, libc = _copy_fixpath(
            [TEST_LIB, LIBA, LIBB, LIBC], 'libcopy')
        # Set execute permissions
        os.chmod(test_lib, 0o744)
        # Check system finds libraries
        back_tick(['./libcopy/test-lib'])
        # One library, depends only on system libs, system libs filtered
        def filt_func(libname):
            return not libname.startswith('/usr/lib')
        os.makedirs('subtree')
        _copy_fixpath([LIBA], 'subtree')
        # Nothing copied therefore
        assert_equal(copy_recurse('subtree', copy_filt_func=filt_func), {})
        assert_equal(set(os.listdir('subtree')), set(['liba.dylib']))
        # shortcut
        _rp = realpath
        # An object that depends on a library that depends on two libraries
        # test_lib depends on libc, libc depends on liba and libb. libc gets
        # copied first, then liba, libb
        def _st(fname):
            return _rp(pjoin('subtree2', basename(fname)))
        os.makedirs('subtree2')
        shutil.copy2(test_lib, 'subtree2')
        assert_equal(copy_recurse('subtree2', filt_func),
                     {_rp(libc): {_st(test_lib): libc},
                      _rp(libb): {_rp(libc): libb},
                      _rp(liba): {_rp(libb): liba,
                                  _rp(libc): liba}})
        assert_equal(set(os.listdir('subtree2')),
                     set(('liba.dylib',
                          'libb.dylib',
                          'libc.dylib',
                          'test-lib')))
        # A circular set of libraries
        os.makedirs('libcopy2')
        libw = _copy_to(LIBA, 'libcopy2', 'libw.dylib')
        libx = _copy_to(LIBA, 'libcopy2', 'libx.dylib')
        liby = _copy_to(LIBA, 'libcopy2', 'liby.dylib')
        libz = _copy_to(LIBA, 'libcopy2', 'libz.dylib')
        # targets and dependencies.  A copy of libw starts in the directory,
        # first pass should install libx and liby (dependencies of libw),
        # second pass should install libz, libw (dependencies of liby, libx
        # respectively)
        t_dep1_dep2 = (
            (libw, libx, liby), # libw depends on libx, liby
            (libx, libw, liby), # libx depends on libw, liby
            (liby, libw, libz), # liby depends on libw, libz
            (libz, libw, libx)) # libz depends on libw, libx
        for tlib, dep1, dep2 in t_dep1_dep2:
            set_install_name(tlib, EXT_LIBS[0], dep1)
            set_install_name(tlib, EXT_LIBS[1], dep2)
        os.makedirs('subtree3')
        seed_path = pjoin('subtree3', 'seed')
        shutil.copy2(libw, seed_path)
        assert_equal(copy_recurse('subtree3'), # not filtered
                     # First pass, libx, liby get copied
                     {_rp(libx): {_rp(seed_path): libx,
                                  _rp(libw): libx,
                                  _rp(libz): libx},
                      _rp(liby): {_rp(seed_path): liby,
                                  _rp(libw): liby,
                                  _rp(libx): liby},
                      _rp(libw): {_rp(libx): libw,
                                  _rp(liby): libw,
                                  _rp(libz): libw},
                      _rp(libz): {_rp(liby): libz}})
        assert_equal(set(os.listdir('subtree3')),
                     set(('seed',
                          'libw.dylib',
                          'libx.dylib',
                          'liby.dylib',
                          'libz.dylib')))
        for tlib, dep1, dep2 in t_dep1_dep2:
            out_lib = pjoin('subtree3', basename(tlib))
            assert_equal(set(get_install_names(out_lib)),
                         set(('@loader_path/' + basename(dep1),
                              '@loader_path/' + basename(dep2))))
        # Check case of not-empty copied_libs
        os.makedirs('subtree4')
        shutil.copy2(libw, 'subtree4')
        copied_libs = {_rp(libw): {_rp(libx): libw,
                                   _rp(liby): libw,
                                   _rp(libz): libw}}
        copied_copied = copied_libs.copy()
        assert_equal(copy_recurse('subtree4', None, copied_libs),
                     {_rp(libw): {_rp(libx): libw,
                                  _rp(liby): libw,
                                  _rp(libz): libw},
                      _rp(libx): {_rp(libw): libx,
                                  _rp(libz): libx},
                      _rp(liby): {_rp(libw): liby,
                                  _rp(libx): liby},
                      _rp(libz): {_rp(liby): libz}})
        # Not modified in-place
        assert_equal(copied_libs, copied_copied)

Example 104

Project: workload-automation Source File: test_execution.py
    def test_bad_workload_status(self):
        workloads = [
            WorkloadRunSpec(id='1', number_of_iterations=2, instrumentation=['Signal Catcher']),
            WorkloadRunSpec(id='2', number_of_iterations=2, instrumentation=['Signal Catcher']),
            WorkloadRunSpec(id='3', number_of_iterations=2, instrumentation=['Signal Catcher']),
            WorkloadRunSpec(id='4', number_of_iterations=2, instrumentation=['Signal Catcher']),
            WorkloadRunSpec(id='5', number_of_iterations=2, instrumentation=['Signal Catcher'])
        ]

        workloads[0]._workload = BadWorkload(Exception, ["setup"])
        workloads[1]._workload = BadWorkload(Exception, ["run"])
        workloads[2]._workload = BadWorkload(Exception, ["update_result"])
        workloads[3]._workload = BadWorkload(Exception, ["teardown"])
        workloads[4]._workload = Mock()

        context = Mock()
        context.reboot_policy = RebootPolicy("never")
        context.config.workload_specs = workloads

        runner = BySpecRunner(Mock(), context, Mock())
        runner.init_queue(context.config.workload_specs)

        instrument = _instantiate(SignalCatcher)
        instrumentation.install(instrument)

        try:
            runner.run()
        finally:
            instrumentation.uninstall(instrument)

        #Check queue was handled correctly
        assert_equal(len(runner.completed_jobs), 10)
        assert_equal(len(runner.job_queue), 0)

        #Check job statuses
        expected_status = [
            IterationResult.FAILED, IterationResult.SKIPPED,
            IterationResult.FAILED, IterationResult.FAILED,
            IterationResult.PARTIAL, IterationResult.PARTIAL,
            IterationResult.NONCRITICAL, IterationResult.NONCRITICAL,
            IterationResult.OK, IterationResult.OK
        ]
        for i in range(0, len(runner.completed_jobs)):
            assert_equal(runner.completed_jobs[i].result.status, expected_status[i])

        #Check signals were sent correctly
        expected_signals = [
            signal.RUN_START.name,
            signal.RUN_INIT.name,
            signal.WORKLOAD_SPEC_START.name,  # Fail Setup
                signal.ITERATION_START.name,
                    signal.BEFORE_WORKLOAD_SETUP.name,
                    signal.AFTER_WORKLOAD_SETUP.name,
                signal.ITERATION_END.name,
                #Skipped iteration
            signal.WORKLOAD_SPEC_END.name,
            signal.WORKLOAD_SPEC_START.name,  # Fail Run
                signal.ITERATION_START.name,
                    signal.BEFORE_WORKLOAD_SETUP.name,
                    signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                    signal.AFTER_WORKLOAD_SETUP.name,
                    signal.BEFORE_WORKLOAD_EXECUTION.name,
                    signal.AFTER_WORKLOAD_EXECUTION.name,
                    signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
                    #signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name, - not sent because run failed
                    signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
                    signal.BEFORE_WORKLOAD_TEARDOWN.name,
                    signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
                    signal.AFTER_WORKLOAD_TEARDOWN.name,
                signal.ITERATION_END.name,
                signal.ITERATION_START.name,
                    signal.BEFORE_WORKLOAD_SETUP.name,
                    signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                    signal.AFTER_WORKLOAD_SETUP.name,
                    signal.BEFORE_WORKLOAD_EXECUTION.name,
                    signal.AFTER_WORKLOAD_EXECUTION.name,
                    signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
                    #signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name, - not sent because run failed
                    signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
                    signal.BEFORE_WORKLOAD_TEARDOWN.name,
                    signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
                    signal.AFTER_WORKLOAD_TEARDOWN.name,
                signal.ITERATION_END.name,
            signal.WORKLOAD_SPEC_END.name,
            signal.WORKLOAD_SPEC_START.name,  # Fail Result Update
                signal.ITERATION_START.name,
                    signal.BEFORE_WORKLOAD_SETUP.name,
                    signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                    signal.AFTER_WORKLOAD_SETUP.name,
                    signal.BEFORE_WORKLOAD_EXECUTION.name,
                    signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
                    signal.AFTER_WORKLOAD_EXECUTION.name,
                    signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
                    signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
                    signal.BEFORE_WORKLOAD_TEARDOWN.name,
                    signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
                    signal.AFTER_WORKLOAD_TEARDOWN.name,
                signal.ITERATION_END.name,
                signal.ITERATION_START.name,
                    signal.BEFORE_WORKLOAD_SETUP.name,
                    signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                    signal.AFTER_WORKLOAD_SETUP.name,
                    signal.BEFORE_WORKLOAD_EXECUTION.name,
                    signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
                    signal.AFTER_WORKLOAD_EXECUTION.name,
                    signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
                    signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
                    signal.BEFORE_WORKLOAD_TEARDOWN.name,
                    signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
                    signal.AFTER_WORKLOAD_TEARDOWN.name,
                signal.ITERATION_END.name,
            signal.WORKLOAD_SPEC_END.name,
            signal.WORKLOAD_SPEC_START.name,  # Fail Teardown
                signal.ITERATION_START.name,
                    signal.BEFORE_WORKLOAD_SETUP.name,
                    signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                    signal.AFTER_WORKLOAD_SETUP.name,
                    signal.BEFORE_WORKLOAD_EXECUTION.name,
                    signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
                    signal.AFTER_WORKLOAD_EXECUTION.name,
                    signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
                    signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
                    signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
                    signal.BEFORE_WORKLOAD_TEARDOWN.name,
                    signal.AFTER_WORKLOAD_TEARDOWN.name,
                signal.ITERATION_END.name,
                signal.ITERATION_START.name,
                    signal.BEFORE_WORKLOAD_SETUP.name,
                    signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                    signal.AFTER_WORKLOAD_SETUP.name,
                    signal.BEFORE_WORKLOAD_EXECUTION.name,
                    signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
                    signal.AFTER_WORKLOAD_EXECUTION.name,
                    signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
                    signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
                    signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
                    signal.BEFORE_WORKLOAD_TEARDOWN.name,
                    signal.AFTER_WORKLOAD_TEARDOWN.name,
                signal.ITERATION_END.name,
            signal.WORKLOAD_SPEC_END.name,
            signal.WORKLOAD_SPEC_START.name,  # OK
                signal.ITERATION_START.name,
                    signal.BEFORE_WORKLOAD_SETUP.name,
                    signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                    signal.AFTER_WORKLOAD_SETUP.name,
                    signal.BEFORE_WORKLOAD_EXECUTION.name,
                    signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
                    signal.AFTER_WORKLOAD_EXECUTION.name,
                    signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
                    signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
                    signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
                    signal.BEFORE_WORKLOAD_TEARDOWN.name,
                    signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
                    signal.AFTER_WORKLOAD_TEARDOWN.name,
                signal.ITERATION_END.name,
                signal.ITERATION_START.name,
                    signal.BEFORE_WORKLOAD_SETUP.name,
                    signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                    signal.AFTER_WORKLOAD_SETUP.name,
                    signal.BEFORE_WORKLOAD_EXECUTION.name,
                    signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
                    signal.AFTER_WORKLOAD_EXECUTION.name,
                    signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
                    signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
                    signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
                    signal.BEFORE_WORKLOAD_TEARDOWN.name,
                    signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
                    signal.AFTER_WORKLOAD_TEARDOWN.name,
                signal.ITERATION_END.name,
            signal.WORKLOAD_SPEC_END.name,
            signal.RUN_FIN.name,
            signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
            signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
            signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
            signal.RUN_END.name
        ]

        assert_equal(expected_signals, instrument.signals_received)

Example 105

Project: megaman Source File: test_validation.py
@ignore_warnings
def test_check_array():
    # accept_sparse == None
    # raise error on sparse inputs
    X = [[1, 2], [3, 4]]
    X_csr = sp.csr_matrix(X)
    assert_raises(TypeError, check_array, X_csr)
    # ensure_2d
    assert_warns(DeprecationWarning, check_array, [0, 1, 2])
    X_array = check_array([0, 1, 2])
    assert_equal(X_array.ndim, 2)
    X_array = check_array([0, 1, 2], ensure_2d=False)
    assert_equal(X_array.ndim, 1)
    # don't allow ndim > 3
    X_ndim = np.arange(8).reshape(2, 2, 2)
    assert_raises(ValueError, check_array, X_ndim)
    check_array(X_ndim, allow_nd=True)  # doesn't raise
    # force_all_finite
    X_inf = np.arange(4).reshape(2, 2).astype(np.float)
    X_inf[0, 0] = np.inf
    assert_raises(ValueError, check_array, X_inf)
    check_array(X_inf, force_all_finite=False)  # no raise
    # nan check
    X_nan = np.arange(4).reshape(2, 2).astype(np.float)
    X_nan[0, 0] = np.nan
    assert_raises(ValueError, check_array, X_nan)
    check_array(X_inf, force_all_finite=False)  # no raise

    # dtype and order enforcement.
    X_C = np.arange(4).reshape(2, 2).copy("C")
    X_F = X_C.copy("F")
    X_int = X_C.astype(np.int)
    X_float = X_C.astype(np.float)
    Xs = [X_C, X_F, X_int, X_float]
    dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
    orders = ['C', 'F', None]
    copys = [True, False]

    for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
        X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
        if dtype is not None:
            assert_equal(X_checked.dtype, dtype)
        else:
            assert_equal(X_checked.dtype, X.dtype)
        if order == 'C':
            assert_true(X_checked.flags['C_CONTIGUOUS'])
            assert_false(X_checked.flags['F_CONTIGUOUS'])
        elif order == 'F':
            assert_true(X_checked.flags['F_CONTIGUOUS'])
            assert_false(X_checked.flags['C_CONTIGUOUS'])
        if copy:
            assert_false(X is X_checked)
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and
                    X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
                    and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
                assert_true(X is X_checked)

    # allowed sparse != None
    X_csc = sp.csc_matrix(X_C)
    X_coo = X_csc.tocoo()
    X_dok = X_csc.todok()
    X_int = X_csc.astype(np.int)
    X_float = X_csc.astype(np.float)

    Xs = [X_csc, X_coo, X_dok, X_int, X_float]
    accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
    for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
                                                 copys):
        with warnings.catch_warnings(record=True) as w:
            X_checked = check_array(X, dtype=dtype,
                                    accept_sparse=accept_sparse, copy=copy)
        if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
            message = str(w[0].message)
            messages = ["object dtype is not supported by sparse matrices",
                        "Can't check dok sparse matrix for nan or inf."]
            assert_true(message in messages)
        else:
            assert_equal(len(w), 0)
        if dtype is not None:
            assert_equal(X_checked.dtype, dtype)
        else:
            assert_equal(X_checked.dtype, X.dtype)
        if X.format in accept_sparse:
            # no change if allowed
            assert_equal(X.format, X_checked.format)
        else:
            # got converted
            assert_equal(X_checked.format, accept_sparse[0])
        if copy:
            assert_false(X is X_checked)
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and X.format == X_checked.format):
                assert_true(X is X_checked)

    # other input formats
    # convert lists to arrays
    X_dense = check_array([[1, 2], [3, 4]])
    assert_true(isinstance(X_dense, np.ndarray))
    # raise on too deep lists
    assert_raises(ValueError, check_array, X_ndim.tolist())
    check_array(X_ndim.tolist(), allow_nd=True)  # doesn't raise

Example 106

Project: allura Source File: test_repository.py
    def test_diffs_file_renames(self):
        def open_blob(blob):
            blobs = {
                u'a': u'Leia',
                u'/b/a/a': u'Darth Vader',
                u'/b/a/b': u'Luke Skywalker',
                u'/b/b': u'Death Star will destroy you',
                u'/b/c': u'Luke Skywalker',  # moved from /b/a/b
                # moved from /b/b and modified
                u'/b/a/z': u'Death Star will destroy you\nALL',
            }
            from cStringIO import StringIO
            return StringIO(blobs.get(blob.path(), ''))
        self.repo._impl.open_blob = open_blob

        self.repo._impl.commit = mock.Mock(return_value=self.ci)
        self.repo._impl.paged_diffs.return_value = {
            'added': ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'],
            'changed': [],
            'copied': [],
            'renamed': [],
            'removed': [],
            'total': 5,
        }
        M.repo_refresh.refresh_commit_trees(self.ci, {})
        assert_equal(self.ci.diffs.added,
                     ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
        assert (self.ci.diffs.copied
                == self.ci.diffs.changed
                == self.ci.diffs.removed
                == [])

        ci, isnew = self._make_commit(
            'bar',
            b=dict(
                a=dict(
                    a='',
                    b='',),
                b=''))
        ci.parent_ids = ['foo']
        self._make_log(ci)
        self.repo._impl.paged_diffs.return_value = {
            'added': ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'],
            'renamed': [],
            'copied': [],
            'changed': [],
            'removed': ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'],
            'total': 10,
        }
        M.repo_refresh.refresh_commit_trees(ci, {})
        assert_equal(ci.diffs.added, ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'])
        assert_equal(ci.diffs.removed, ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
        assert (ci.diffs.copied
                == ci.diffs.changed
                == [])

        ci, isnew = self._make_commit(
            'baz',
            b=dict(
                a=dict(
                    z=''),
                c=''))
        ci.parent_ids = ['bar']
        self._make_log(ci)
        self.repo._impl.paged_diffs.return_value = {
            'added': [u'b/c', u'b/a/z'],
            'removed': [u'/b/a/b', u'b/b'],
            'changed': [],
            'copied': [
                {
                    'new': u'b/c',
                    'old': u'b/a/b',
                    'ratio': 1,
                    'diff': '',
                },
                {
                    'new': u'b/a/z',
                    'old': u'b/b',
                    'ratio': 1,
                    'diff': '',
                },
            ],
            'renamed': [],
            'total': 2
        }
        M.repo_refresh.refresh_commit_trees(ci, {})
        assert_equal(ci.diffs.added, [u'b/a/z', u'b/c'])
        assert_equal(ci.diffs.changed, [])
        assert_equal(ci.diffs.removed, [u'/b/a/b', u'b/b'])
        # see mock for open_blob
        assert_equal(len(ci.diffs.copied), 2)
        assert_equal(ci.diffs.copied[1]['old'], 'b/a/b')
        assert_equal(ci.diffs.copied[1]['new'], 'b/c')
        assert_equal(ci.diffs.copied[1]['ratio'], 1)
        assert_equal(ci.diffs.copied[1]['diff'], '')
        assert_equal(ci.diffs.copied[0]['old'], 'b/b')
        assert_equal(ci.diffs.copied[0]['new'], 'b/a/z')

Example 107

Project: nideep Source File: test_net_merge.py
    @staticmethod
    def test_duplicate_hdf5data():

        fpath = os.path.join(os.path.dirname(ROOT_PKG_PATH),
                             TEST_DATA_DIRNAME, TEST_NET_HDF5DATA_FILENAME)

        n1 = Parser().from_net_params_file(fpath)
        n2 = Parser().from_net_params_file(fpath)

        n1_tmp = NetParameter(); n1_tmp.CopyFrom(n1)
        n2_tmp = NetParameter(); n2_tmp.CopyFrom(n2)
        s = mrg.merge_indep_net_spec([n1_tmp, n2_tmp])

        assert_is_not_none(s)
        assert_is_instance(s, str)
        assert_greater(len(s), 0)

        n = NetParameter()
        text_format.Merge(s, n)
        assert_is_not_none(n)

        # Data Layer from first network
        for l in n.layer:
            if l.type.lower() == 'hdf5data':
                for l1 in n1.layer:
                    if l1.type.lower() == 'hdf5data':

                        dat_phase = [x.phase for x in l.include]
                        # compare test with test and train with train
                        if dat_phase == [x.phase for x in l1.include]:

                            assert_is_not(l.top, l1.top)
                            assert_list_equal(list(l.top), list(l1.top))
                            assert_equal(l.data_param.source, l1.data_param.source)
                            assert_equal(l.data_param.backend, l1.data_param.backend)
                            assert_equal(l.data_param.batch_size, l1.data_param.batch_size)
                            assert_equal(l.transform_param.scale, l1.transform_param.scale)
        # For non-data layers

        # back up merged net
        for ni in [n1, n2]:
            for l1 in ni.layer:
                found = False
                if l1.type.lower() != 'hdf5data':

                    for l in n.layer:
                        if l.type.lower() == l1.type.lower() and \
                           [t.split('_nidx')[0] for t in l.top] == list(l1.top) and \
                           [b.split('_nidx')[0] for b in l.bottom] == list(l1.bottom):

                            assert_true(l.name.startswith(l1.name))

                            fnames1 = [f.name for f in l1.DESCRIPTOR.fields]
                            fnames = [f.name for f in l.DESCRIPTOR.fields]
                            assert_list_equal(fnames, fnames1)

                            l.ClearField('name')
                            l.ClearField('top')
                            l.ClearField('bottom')
                            l1.ClearField('name')
                            l1.ClearField('top')
                            l1.ClearField('bottom')

                            assert_equal(text_format.MessageToString(l), text_format.MessageToString(l1))

                            found = True
                else:
                    continue  # skip for data layers
                assert_true(found, "Failed to find %s in merged network!" % (l1.name,))

Example 108

Project: claripy Source File: test_vsa.py
def test_vsa():
    # Set backend
    b = claripy.backends.vsa

    SI = claripy.SI
    VS = claripy.ValueSet
    BVV = claripy.BVV

    # Disable the use of DiscreteStridedIntervalSet
    claripy.vsa.strided_interval.allow_dsis = False

    def is_equal(ast_0, ast_1):
        return claripy.backends.vsa.identical(ast_0, ast_1)

    si1 = claripy.TSI(32, name="foo", explicit_name=True)
    nose.tools.assert_equal(vsa_model(si1).name, "foo")

    # Normalization
    si1 = SI(bits=32, stride=1, lower_bound=10, upper_bound=10)
    nose.tools.assert_equal(vsa_model(si1).stride, 0)

    # Integers
    si1 = claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)
    si2 = claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)
    si3 = claripy.SI(bits=32, stride=0, lower_bound=28, upper_bound=28)
    # Strided intervals
    si_a = claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=20)
    si_b = claripy.SI(bits=32, stride=2, lower_bound=-100, upper_bound=200)
    si_c = claripy.SI(bits=32, stride=3, lower_bound=-100, upper_bound=200)
    si_d = claripy.SI(bits=32, stride=2, lower_bound=50, upper_bound=60)
    si_e = claripy.SI(bits=16, stride=1, lower_bound=0x2000, upper_bound=0x3000)
    si_f = claripy.SI(bits=16, stride=1, lower_bound=0, upper_bound=255)
    si_g = claripy.SI(bits=16, stride=1, lower_bound=0, upper_bound=0xff)
    si_h = claripy.SI(bits=32, stride=0, lower_bound=0x80000000, upper_bound=0x80000000)

    nose.tools.assert_true(is_equal(si1, claripy.SI(bits=32, to_conv=10)))
    nose.tools.assert_true(is_equal(si2, claripy.SI(bits=32, to_conv=10)))
    nose.tools.assert_true(is_equal(si1, si2))
    # __add__
    si_add_1 = si1 + si2
    nose.tools.assert_true(is_equal(si_add_1, claripy.SI(bits=32, stride=0, lower_bound=20, upper_bound=20)))
    si_add_2 = si1 + si_a
    nose.tools.assert_true(is_equal(si_add_2, claripy.SI(bits=32, stride=2, lower_bound=20, upper_bound=30)))
    si_add_3 = si_a + si_b
    nose.tools.assert_true(is_equal(si_add_3, claripy.SI(bits=32, stride=2, lower_bound=-90, upper_bound=220)))
    si_add_4 = si_b + si_c
    nose.tools.assert_true(is_equal(si_add_4, claripy.SI(bits=32, stride=1, lower_bound=-200, upper_bound=400)))
    # __add__ with overflow
    si_add_5 = si_h + 0xffffffff
    nose.tools.assert_true(is_equal(si_add_5, claripy.SI(bits=32, stride=0, lower_bound=0x7fffffff, upper_bound=0x7fffffff)))
    # __sub__
    si_minus_1 = si1 - si2
    nose.tools.assert_true(is_equal(si_minus_1, claripy.SI(bits=32, stride=0, lower_bound=0, upper_bound=0)))
    si_minus_2 = si_a - si_b
    nose.tools.assert_true(is_equal(si_minus_2, claripy.SI(bits=32, stride=2, lower_bound=-190, upper_bound=120)))
    si_minus_3 = si_b - si_c
    nose.tools.assert_true(is_equal(si_minus_3, claripy.SI(bits=32, stride=1, lower_bound=-300, upper_bound=300)))
    # __neg__ / __invert__ / bitwise not
    si_neg_1 = ~si1
    nose.tools.assert_true(is_equal(si_neg_1, claripy.SI(bits=32, to_conv=-11)))
    si_neg_2 = ~si_b
    nose.tools.assert_true(is_equal(si_neg_2, claripy.SI(bits=32, stride=2, lower_bound=-201, upper_bound=99)))
    # __or__
    si_or_1 = si1 | si3
    nose.tools.assert_true(is_equal(si_or_1, claripy.SI(bits=32, to_conv=30)))
    si_or_2 = si1 | si2
    nose.tools.assert_true(is_equal(si_or_2, claripy.SI(bits=32, to_conv=10)))
    si_or_3 = si1 | si_a # An integer | a strided interval
    nose.tools.assert_true(is_equal(si_or_3 , claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=30)))
    si_or_3 = si_a | si1 # Exchange the operands
    nose.tools.assert_true(is_equal(si_or_3, claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=30)))
    si_or_4 = si_a | si_d # A strided interval | another strided interval
    nose.tools.assert_true(is_equal(si_or_4, claripy.SI(bits=32, stride=2, lower_bound=50, upper_bound=62)))
    si_or_4 = si_d | si_a # Exchange the operands
    nose.tools.assert_true(is_equal(si_or_4, claripy.SI(bits=32, stride=2, lower_bound=50, upper_bound=62)))
    si_or_5 = si_e | si_f #
    nose.tools.assert_true(is_equal(si_or_5, claripy.SI(bits=16, stride=1, lower_bound=0x2000, upper_bound=0x30ff)))
    si_or_6 = si_e | si_g #
    nose.tools.assert_true(is_equal(si_or_6, claripy.SI(bits=16, stride=1, lower_bound=0x2000, upper_bound=0x30ff)))
    # Shifting
    si_shl_1 = si1 << 3
    nose.tools.assert_equal(si_shl_1.size(), 32)
    nose.tools.assert_true(is_equal(si_shl_1, claripy.SI(bits=32, stride=0, lower_bound=80, upper_bound=80)))
    # Multiplication
    si_mul_1 = si1 * 3
    nose.tools.assert_equal(si_mul_1.size(), 32)
    nose.tools.assert_true(is_equal(si_mul_1, claripy.SI(bits=32, stride=0, lower_bound=30, upper_bound=30)))
    si_mul_2 = si_a * 3
    nose.tools.assert_equal(si_mul_2.size(), 32)
    nose.tools.assert_true(is_equal(si_mul_2, claripy.SI(bits=32, stride=6, lower_bound=30, upper_bound=60)))
    si_mul_3 = si_a * si_b
    nose.tools.assert_equal(si_mul_3.size(), 32)
    nose.tools.assert_true(is_equal(si_mul_3, claripy.SI(bits=32, stride=2, lower_bound=-2000, upper_bound=4000)))
    # Division
    si_div_1 = si1 / 3
    nose.tools.assert_equal(si_div_1.size(), 32)
    nose.tools.assert_true(is_equal(si_div_1, claripy.SI(bits=32, stride=0, lower_bound=3, upper_bound=3)))
    si_div_2 = si_a / 3
    nose.tools.assert_equal(si_div_2.size(), 32)
    nose.tools.assert_true(is_equal(si_div_2, claripy.SI(bits=32, stride=1, lower_bound=3, upper_bound=6)))
    # Modulo
    si_mo_1 = si1 % 3
    nose.tools.assert_equal(si_mo_1.size(), 32)
    nose.tools.assert_true(is_equal(si_mo_1, claripy.SI(bits=32, stride=0, lower_bound=1, upper_bound=1)))
    si_mo_2 = si_a % 3
    nose.tools.assert_equal(si_mo_2.size(), 32)
    nose.tools.assert_true(is_equal(si_mo_2, claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=2)))

    #
    # Extracting the sign bit
    #

    # a negative integer
    si = claripy.SI(bits=64, stride=0, lower_bound=-1, upper_bound=-1)
    sb = si[63: 63]
    nose.tools.assert_true(is_equal(sb, claripy.SI(bits=1, to_conv=1)))

    # non-positive integers
    si = claripy.SI(bits=64, stride=1, lower_bound=-1, upper_bound=0)
    sb = si[63: 63]
    nose.tools.assert_true(is_equal(sb, claripy.SI(bits=1, stride=1, lower_bound=0, upper_bound=1)))

    # Extracting an integer
    si = claripy.SI(bits=64, stride=0, lower_bound=0x7fffffffffff0000, upper_bound=0x7fffffffffff0000)
    part1 = si[63 : 32]
    part2 = si[31 : 0]
    nose.tools.assert_true(is_equal(part1, claripy.SI(bits=32, stride=0, lower_bound=0x7fffffff, upper_bound=0x7fffffff)))
    nose.tools.assert_true(is_equal(part2, claripy.SI(bits=32, stride=0, lower_bound=0xffff0000, upper_bound=0xffff0000)))

    # Concatenating two integers
    si_concat = part1.concat(part2)
    nose.tools.assert_true(is_equal(si_concat, si))

    # Extracting a claripy.SI
    si = claripy.SI(bits=64, stride=0x9, lower_bound=0x1, upper_bound=0xa)
    part1 = si[63 : 32]
    part2 = si[31 : 0]
    nose.tools.assert_true(is_equal(part1, claripy.SI(bits=32, stride=0, lower_bound=0x0, upper_bound=0x0)))
    nose.tools.assert_true(is_equal(part2, claripy.SI(bits=32, stride=9, lower_bound=1, upper_bound=10)))

    # Concatenating two claripy.SIs
    si_concat = part1.concat(part2)
    nose.tools.assert_true(is_equal(si_concat, si))

    # Concatenating two SIs that are of different sizes
    si_1 = SI(bits=64, stride=1, lower_bound=0, upper_bound=0xffffffffffffffff)
    si_2 = SI(bits=32, stride=1, lower_bound=0, upper_bound=0xffffffff)
    si_concat = si_1.concat(si_2)
    nose.tools.assert_true(is_equal(si_concat, SI(bits=96, stride=1,
                                                  lower_bound=0,
                                                  upper_bound=0xffffffffffffffffffffffff)))

    # Zero-Extend the low part
    si_zeroextended = part2.zero_extend(32)
    nose.tools.assert_true(is_equal(si_zeroextended, claripy.SI(bits=64, stride=9, lower_bound=1, upper_bound=10)))

    # Sign-extension
    si_signextended = part2.sign_extend(32)
    nose.tools.assert_true(is_equal(si_signextended, claripy.SI(bits=64, stride=9, lower_bound=1, upper_bound=10)))

    # Extract from the result above
    si_extracted = si_zeroextended[31:0]
    nose.tools.assert_true(is_equal(si_extracted, claripy.SI(bits=32, stride=9, lower_bound=1, upper_bound=10)))

    # Union
    si_union_1 = si1.union(si2)
    nose.tools.assert_true(is_equal(si_union_1, claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)))
    si_union_2 = si1.union(si3)
    nose.tools.assert_true(is_equal(si_union_2, claripy.SI(bits=32, stride=18, lower_bound=10, upper_bound=28)))
    si_union_3 = si1.union(si_a)
    nose.tools.assert_true(is_equal(si_union_3, claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=20)))
    si_union_4 = si_a.union(si_b)
    nose.tools.assert_true(is_equal(si_union_4, claripy.SI(bits=32, stride=2, lower_bound=-100, upper_bound=200)))
    si_union_5 = si_b.union(si_c)
    nose.tools.assert_true(is_equal(si_union_5, claripy.SI(bits=32, stride=1, lower_bound=-100, upper_bound=200)))

    # Intersection
    si_intersection_1 = si1.intersection(si1)
    nose.tools.assert_true(is_equal(si_intersection_1, si2))
    si_intersection_2 = si1.intersection(si2)
    nose.tools.assert_true(is_equal(si_intersection_2, claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)))
    si_intersection_3 = si1.intersection(si_a)
    nose.tools.assert_true(is_equal(si_intersection_3, claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)))

    si_intersection_4 = si_a.intersection(si_b)

    nose.tools.assert_true(is_equal(si_intersection_4, claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=20)))
    si_intersection_5 = si_b.intersection(si_c)
    nose.tools.assert_true(is_equal(si_intersection_5, claripy.SI(bits=32, stride=6, lower_bound=-100, upper_bound=200)))

    # More intersections
    t0 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=0x27)
    t1 = claripy.SI(bits=32, stride=0x7fffffff, lower_bound=0x80000002, upper_bound=1)

    si_is_6 = t0.intersection(t1)
    nose.tools.assert_true(is_equal(si_is_6, claripy.SI(bits=32, stride=0, lower_bound=1, upper_bound=1)))

    t2 = claripy.SI(bits=32, stride=5, lower_bound=20, upper_bound=30)
    t3 = claripy.SI(bits=32, stride=1, lower_bound=27, upper_bound=0xffffffff)

    si_is_7 = t2.intersection(t3)
    nose.tools.assert_true(is_equal(si_is_7, claripy.SI(bits=32, stride=0, lower_bound=30, upper_bound=30)))

    t4 = claripy.SI(bits=32, stride=5, lower_bound=-400, upper_bound=400)
    t5 = claripy.SI(bits=32, stride=1, lower_bound=395, upper_bound=-395)
    si_is_8 = t4.intersection(t5)
    nose.tools.assert_true(is_equal(si_is_8, claripy.SI(bits=32, stride=5, lower_bound=-400, upper_bound=400)))

    # Sign-extension
    si = claripy.SI(bits=1, stride=0, lower_bound=1, upper_bound=1)
    si_signextended = si.sign_extend(31)
    nose.tools.assert_true(is_equal(si_signextended, claripy.SI(bits=32, stride=0, lower_bound=0xffffffff, upper_bound=0xffffffff)))

    # Comparison between claripy.SI and BVV
    si = claripy.SI(bits=32, stride=1, lower_bound=-0x7f, upper_bound=0x7f)
    si._model_vsa.uninitialized = True
    bvv = BVV(0x30, 32)
    comp = (si < bvv)
    nose.tools.assert_true(vsa_model(comp).identical(MaybeResult()))

    # Better extraction
    # si = <32>0x1000000[0xcffffff, 0xdffffff]R
    si = claripy.SI(bits=32, stride=0x1000000, lower_bound=0xcffffff, upper_bound=0xdffffff)
    si_byte0 = si[7: 0]
    si_byte1 = si[15: 8]
    si_byte2 = si[23: 16]
    si_byte3 = si[31: 24]
    nose.tools.assert_true(is_equal(si_byte0, claripy.SI(bits=8, stride=0, lower_bound=0xff, upper_bound=0xff)))
    nose.tools.assert_true(is_equal(si_byte1, claripy.SI(bits=8, stride=0, lower_bound=0xff, upper_bound=0xff)))
    nose.tools.assert_true(is_equal(si_byte2, claripy.SI(bits=8, stride=0, lower_bound=0xff, upper_bound=0xff)))
    nose.tools.assert_true(is_equal(si_byte3, claripy.SI(bits=8, stride=1, lower_bound=0xc, upper_bound=0xd)))

    # Optimization on bitwise-and
    si_1 = claripy.SI(bits=32, stride=1, lower_bound=0x0, upper_bound=0xffffffff)
    si_2 = claripy.SI(bits=32, stride=0, lower_bound=0x80000000, upper_bound=0x80000000)
    si = si_1 & si_2
    nose.tools.assert_true(is_equal(si, claripy.SI(bits=32, stride=0x80000000, lower_bound=0, upper_bound=0x80000000)))

    si_1 = claripy.SI(bits=32, stride=1, lower_bound=0x0, upper_bound=0x7fffffff)
    si_2 = claripy.SI(bits=32, stride=0, lower_bound=0x80000000, upper_bound=0x80000000)
    si = si_1 & si_2
    nose.tools.assert_true(is_equal(si, claripy.SI(bits=32, stride=0, lower_bound=0, upper_bound=0)))

    # Concatenation: concat with zeros only increases the stride
    si_1 = claripy.SI(bits=8, stride=0xff, lower_bound=0x0, upper_bound=0xff)
    si_2 = claripy.SI(bits=8, stride=0, lower_bound=0, upper_bound=0)
    si = si_1.concat(si_2)
    nose.tools.assert_true(is_equal(si, claripy.SI(bits=16, stride=0xff00, lower_bound=0, upper_bound=0xff00)))

    # Extract from a reversed value
    si_1 = claripy.SI(bits=64, stride=0xff, lower_bound=0x0, upper_bound=0xff)
    si_2 = si_1.reversed[63 : 56]
    nose.tools.assert_true(is_equal(si_2, claripy.SI(bits=8, stride=0xff, lower_bound=0x0, upper_bound=0xff)))

    #
    # ValueSet
    #

    def VS(name=None, bits=None, region=None, val=None):
        region = 'foobar' if region is None else region
        return claripy.ValueSet(bits, region=region, region_base_addr=0, value=val, name=name)

    vs_1 = VS(bits=32, val=0)
    vs_1 = vs_1.intersection(VS(bits=32, val=1))
    nose.tools.assert_true(vsa_model(vs_1).is_empty)
    # Test merging two addresses
    vsa_model(vs_1)._merge_si('global', 0, vsa_model(si1))
    vsa_model(vs_1)._merge_si('global', 0, vsa_model(si3))
    nose.tools.assert_true(vsa_model(vs_1).get_si('global').identical(vsa_model(SI(bits=32, stride=18, lower_bound=10, upper_bound=28))))
    # Length of this ValueSet
    nose.tools.assert_equal(len(vsa_model(vs_1)), 32)

    vs_1 = VS(name='boo', bits=32, val=0).intersection(VS(name='makeitempty', bits=32, val=1))
    vs_2 = VS(name='foo', bits=32, val=0).intersection(VS(name='makeitempty', bits=32, val=1))
    nose.tools.assert_true(claripy.backends.vsa.identical(vs_1, vs_1))
    nose.tools.assert_true(claripy.backends.vsa.identical(vs_2, vs_2))
    vsa_model(vs_1)._merge_si('global', 0, vsa_model(si1))
    nose.tools.assert_false(claripy.backends.vsa.identical(vs_1, vs_2))
    vsa_model(vs_2)._merge_si('global', 0, vsa_model(si1))
    nose.tools.assert_true(claripy.backends.vsa.identical(vs_1, vs_2))
    nose.tools.assert_true(claripy.backends.vsa.is_true((vs_1 & vs_2) == vs_1))
    vsa_model(vs_1)._merge_si('global', 0, vsa_model(si3))
    nose.tools.assert_false(claripy.backends.vsa.identical(vs_1, vs_2))

    # Subtraction
    # Subtraction of two pointers yields a concrete value

    vs_1 = VS(name='foo', region='global', bits=32, val=0x400010)
    vs_2 = VS(name='bar', region='global', bits=32, val=0x400000)
    si = vs_1 - vs_2
    nose.tools.assert_is(type(vsa_model(si)), StridedInterval)
    nose.tools.assert_true(claripy.backends.vsa.identical(si, claripy.SI(bits=32, stride=0, lower_bound=0x10, upper_bound=0x10)))

    #
    # IfProxy
    #

    si = claripy.SI(bits=32, stride=1, lower_bound=10, upper_bound=0xffffffff)
    if_0 = claripy.If(si == 0, si, si - 1)
    nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_0))
    nose.tools.assert_false(claripy.backends.vsa.identical(if_0, si))

    # max and min on IfProxy
    si = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=0xffffffff)
    if_0 = claripy.If(si == 0, si, si - 1)
    max_val = b.max(if_0)
    min_val = b.min(if_0)
    nose.tools.assert_equal(max_val, 0xffffffff)
    nose.tools.assert_equal(min_val, 0x00000000)

    # identical
    nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_0))
    nose.tools.assert_true(claripy.backends.vsa.identical(if_0, si))
    if_0_copy = claripy.If(si == 0, si, si - 1)
    nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_0_copy))
    if_1 = claripy.If(si == 1, si, si - 1)
    nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_1))

    si = SI(bits=32, stride=0, lower_bound=1, upper_bound=1)
    if_0 = claripy.If(si == 0, si, si - 1)
    if_0_copy = claripy.If(si == 0, si, si - 1)
    nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_0_copy))
    if_1 = claripy.If(si == 1, si, si - 1)
    nose.tools.assert_false(claripy.backends.vsa.identical(if_0, if_1))
    if_1 = claripy.If(si == 0, si + 1, si - 1)
    nose.tools.assert_true(claripy.backends.vsa.identical(if_0, if_1))
    if_1 = claripy.If(si == 0, si, si)
    nose.tools.assert_false(claripy.backends.vsa.identical(if_0, if_1))

    # if_1 = And(VS_2, IfProxy(si == 0, 0, 1))
    vs_2 = VS(region='global', bits=32, val=0xFA7B00B)
    si = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=1)
    if_1 = (vs_2 & claripy.If(si == 0, claripy.SI(bits=32, stride=0, lower_bound=0, upper_bound=0), claripy.SI(bits=32, stride=0, lower_bound=0xffffffff, upper_bound=0xffffffff)))
    nose.tools.assert_true(claripy.backends.vsa.is_true(vsa_model(if_1.ite_excavated.args[1]) == vsa_model(VS(region='global', bits=32, val=0))))
    nose.tools.assert_true(claripy.backends.vsa.is_true(vsa_model(if_1.ite_excavated.args[2]) == vsa_model(vs_2)))

    # if_2 = And(VS_3, IfProxy(si != 0, 0, 1))
    vs_3 = VS(region='global', bits=32, val=0xDEADCA7)
    si = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=1)
    if_2 = (vs_3 & claripy.If(si != 0, claripy.SI(bits=32, stride=0, lower_bound=0, upper_bound=0), claripy.SI(bits=32, stride=0, lower_bound=0xffffffff, upper_bound=0xffffffff)))
    nose.tools.assert_true(claripy.backends.vsa.is_true(vsa_model(if_2.ite_excavated.args[1]) == vsa_model(VS(region='global', bits=32, val=0))))
    nose.tools.assert_true(claripy.backends.vsa.is_true(vsa_model(if_2.ite_excavated.args[2]) == vsa_model(vs_3)))

Example 109

Project: nitime Source File: test_utils.py
def test_information_criteria():
    """

    Test the implementation of information criteria:

    """
    a1 = np.array([[0.9, 0],
                   [0.16, 0.8]])

    a2 = np.array([[-0.5, 0],
                  [-0.2, -0.5]])

    am = np.array([-a1, -a2])

    x_var = 1
    y_var = 0.7
    xy_cov = 0.4
    cov = np.array([[x_var, xy_cov],
                    [xy_cov, y_var]])

    #Number of realizations of the process
    N = 500
    #Length of each realization:
    L = 1024

    order = am.shape[0]
    n_process = am.shape[-1]

    z = np.empty((N, n_process, L))
    nz = np.empty((N, n_process, L))

    for i in range(N):
        z[i], nz[i] = utils.generate_mar(am, cov, L)

    AIC = []
    BIC = []
    AICc = []

    # The total number of data points available for estimation:
    Ntotal = L * n_process

    for n_lags in range(1, 10):

        Rxx = np.empty((N, n_process, n_process, n_lags))

        for i in range(N):
            Rxx[i] = utils.autocov_vector(z[i], nlags=n_lags)

        Rxx = Rxx.mean(axis=0)
        Rxx = Rxx.transpose(2, 0, 1)

        a, ecov = alg.lwr_recursion(Rxx)

        IC = utils.akaike_information_criterion(ecov, n_process, n_lags, Ntotal)
        AIC.append(IC)

        IC = utils.akaike_information_criterion(ecov, n_process, n_lags, Ntotal, corrected=True)
        AICc.append(IC)

        IC = utils.bayesian_information_criterion(ecov, n_process, n_lags, Ntotal)
        BIC.append(IC)

    # The model has order 2, so this should be minimized at 2:

    # We do not test this for AIC/AICc, because these sometimes do not minimize
    # (see Ding and Bressler)
    # nt.assert_equal(np.argmin(AIC), 2)
    # nt.assert_equal(np.argmin(AICc), 2)
    nt.assert_equal(np.argmin(BIC), 2)

Example 110

Project: landlab Source File: test_lake_mapper.py
def test_composite_pits():
    """
    A test to ensure the component correctly handles cases where there are
    multiple pits, inset into each other.
    """
    mg = RasterModelGrid(10, 10, 1.)
    z = mg.add_field('node', 'topographic__elevation', mg.node_x.copy())
    # a sloping plane
    #np.random.seed(seed=0)
    #z += np.random.rand(100)/10000.
    # punch one big hole
    z.reshape((10,10))[3:8,3:8] = 0.
    # dig a couple of inset holes
    z[57] = -1.
    z[44] = -2.
    z[54] = -10.
    
    # make an outlet
    z[71] = 0.9

    fr = FlowRouter(mg)
    lf = DepressionFinderAndRouter(mg)
    fr.route_flow()
    lf.map_depressions()
    
    flow_sinks_target = np.zeros(100, dtype=bool)
    flow_sinks_target[mg.boundary_nodes] = True
    # no internal sinks now:
    assert_array_equal(mg.at_node['flow__sink_flag'], flow_sinks_target)
    
    # test conservation of mass:
    assert_almost_equal(mg.at_node['drainage_area'
                                       ].reshape((10,10))[1:-1,1].sum(), 8.**2)
    # ^all the core nodes
    
    # test the actual flow field:
#    nA = np.array([  0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.,
#                     8.,   8.,   7.,   6.,   5.,   4.,   3.,   2.,   1.,   0.,
#                     1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   0.,
#                     1.,   1.,   1.,   4.,   2.,   2.,   8.,   4.,   1.,   0.,
#                     1.,   1.,   1.,   8.,   3.,  15.,   3.,   2.,   1.,   0.,
#                     1.,   1.,   1.,  13.,  25.,   6.,   3.,   2.,   1.,   0.,
#                     1.,   1.,   1.,  45.,   3.,   3.,   5.,   2.,   1.,   0.,
#                    50.,  50.,  49.,   3.,   2.,   2.,   2.,   4.,   1.,   0.,
#                     1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   0.,
#                     0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.])
    nA = np.array([  0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.,
                     8.,   8.,   7.,   6.,   5.,   4.,   3.,   2.,   1.,   0.,
                     1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   0.,
                     1.,   1.,   1.,   4.,   2.,   2.,   6.,   4.,   1.,   0.,
                     1.,   1.,   1.,   6.,   3.,  12.,   3.,   2.,   1.,   0.,
                     1.,   1.,   1.,   8.,  20.,   4.,   3.,   2.,   1.,   0.,
                     1.,   1.,   1.,  35.,   5.,   4.,   3.,   2.,   1.,   0.,
                    50.,  50.,  49.,  13.,  10.,   8.,   6.,   4.,   1.,   0.,
                     1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   0.,
                     0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.,   0.])
    assert_array_equal(mg.at_node['drainage_area'], nA)
    
    # the lake code map:
    lc = np.array([XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
                   XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
                   XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
                   XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
                   XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
                   XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
                   XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
                   XX, XX, XX, 57, 57, 57, 57, 57, XX, XX,
                   XX, XX, XX, XX, XX, XX, XX, XX, XX, XX,
                   XX, XX, XX, XX, XX, XX, XX, XX, XX, XX])
    
    # test the remaining properties:
    assert_equal(lf.lake_outlets.size, 1)
    assert_equal(lf.lake_outlets[0], 72)
    outlets_in_map = np.unique(lf.depression_outlet_map)
    assert_equal(outlets_in_map.size, 2)
    assert_equal(outlets_in_map[1], 72)
    assert_equal(lf.number_of_lakes, 1)
    assert_equal(lf.lake_codes[0], 57)
    assert_array_equal(lf.lake_map, lc)
    assert_almost_equal(lf.lake_areas[0], 25.)
    assert_almost_equal(lf.lake_volumes[0], 63.)
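
A side note on the assertion mix above: assert_equal is used for exact,
integer-valued properties and assert_almost_equal for floating-point areas
and volumes. A minimal sketch of that split with toy values (not the
landlab component output):

from nose.tools import assert_equal, assert_almost_equal

n_lakes = 1             # toy stand-in for lf.number_of_lakes
lake_area = 25.0000001  # float result carrying round-off

assert_equal(n_lakes, 1)                       # exact match for integers
assert_almost_equal(lake_area, 25., places=5)  # tolerant match for floats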

Example 111

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_text.py
def test_vectorizer():
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1

    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)

    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)

    # check that the two vectorizers give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()

        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)

        # stop word from the fixed list
        assert_false("the" in vocabulary)

        # stop word found automatically by the vectorizer's DF thresholding:
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction artifacts)
        assert_false("copyright" in vocabulary)

        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)

    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))

    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))

    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert_equal(t2.idf_, None)

    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)

    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)

    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)

    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')

    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert_false(tv.fixed_vocabulary_)
    assert_array_almost_equal(tfidf, tfidf2)

    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)

    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError, v3.transform, train_data)

    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)

    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)

    # error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
    assert_raises(ValueError, v3.build_analyzer)
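
A self-contained sketch of the counting assertion at the top of this test,
using a toy two-document corpus rather than ALL_FOOD_DOCS:

from nose.tools import assert_equal
from sklearn.feature_extraction.text import CountVectorizer

docs = ["pizza pizza salad", "water salad"]  # toy corpus
v = CountVectorizer()
counts = v.fit_transform(docs).tocsr()
assert_equal(counts[0, v.vocabulary_["pizza"]], 2)
assert_equal(counts[1, v.vocabulary_["water"]], 1)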

Example 112

Project: mne-python Source File: test_reference.py
@testing.requires_testing_data
def test_add_reference():
    """Test adding a reference."""
    raw = read_raw_fif(fif_fname, preload=True)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # check if channel already exists
    assert_raises(ValueError, add_reference_channels,
                  raw, raw.info['ch_names'][0])
    # add reference channel to Raw
    raw_ref = add_reference_channels(raw, 'Ref', copy=True)
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # for Neuromag fif's, the reference electrode location is placed in
    # elements [3:6] of each "data" electrode location
    assert_allclose(raw.info['chs'][-1]['loc'][:3],
                    raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6)

    ref_idx = raw.ch_names.index('Ref')
    ref_data, _ = raw[ref_idx]
    assert_array_equal(ref_data, 0)

    # add reference channel to Raw when no digitization points exist
    raw = read_raw_fif(fif_fname).crop(0, 1).load_data()
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    del raw.info['dig']

    raw_ref = add_reference_channels(raw, 'Ref', copy=True)

    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # Test adding an existing channel as reference channel
    assert_raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])

    # add two reference channels to Raw
    raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
    _check_channel_names(raw_ref, ['M1', 'M2'])
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    assert_array_equal(raw_ref._data[-2:, :], 0)

    raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
    _check_channel_names(raw, ['M1', 'M2'])
    ref_idx = raw.ch_names.index('M1')
    ref_idy = raw.ch_names.index('M2')
    ref_data, _ = raw[[ref_idx, ref_idy]]
    assert_array_equal(ref_data, 0)

    # add reference channel to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True)
    # default: proj=True, after which adding a Ref channel is prohibited
    assert_raises(RuntimeError, add_reference_channels, epochs, 'Ref')

    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True, proj='delayed')
    epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)
    # CAR after custom reference is an Error
    assert_raises(RuntimeError, epochs_ref.set_eeg_reference)

    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
    _check_channel_names(epochs_ref, 'Ref')
    ref_idx = epochs_ref.ch_names.index('Ref')
    ref_data = epochs_ref.get_data()[:, ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add two reference channels to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True, proj='delayed')
    with warnings.catch_warnings(record=True):  # multiple set zero
        epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
    _check_channel_names(epochs_ref, ['M1', 'M2'])
    ref_idx = epochs_ref.ch_names.index('M1')
    ref_idy = epochs_ref.ch_names.index('M2')
    assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')
    assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')
    ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add reference channel to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True, proj='delayed')
    evoked = epochs.average()
    evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
    _check_channel_names(evoked_ref, 'Ref')
    ref_idx = evoked_ref.ch_names.index('Ref')
    ref_data = evoked_ref.data[ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # add two reference channels to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-reffing
    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks_eeg, preload=True, proj='delayed')
    evoked = epochs.average()
    with warnings.catch_warnings(record=True):  # multiple set zero
        evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
    _check_channel_names(evoked_ref, ['M1', 'M2'])
    ref_idx = evoked_ref.ch_names.index('M1')
    ref_idy = evoked_ref.ch_names.index('M2')
    ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # Test invalid inputs
    raw_np = read_raw_fif(fif_fname, preload=False)
    assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
    assert_raises(ValueError, add_reference_channels, raw, 1)
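
The shape bookkeeping above reduces to a simple pattern: stack a zero row
onto the data and assert on the new channel count and its contents. A
numpy-only sketch (no FIF files required):

import numpy as np
from nose.tools import assert_equal
from numpy.testing import assert_array_equal

data = np.random.randn(4, 100)  # toy stand-in for raw._data
data_ref = np.vstack([data, np.zeros((1, data.shape[1]))])

assert_equal(data_ref.shape[0], data.shape[0] + 1)  # one reference channel added
assert_array_equal(data_ref[-1], 0)                 # and its data is all zero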

Example 113

Project: flanker Source File: google_test.py
def test_google_fail():
    with patch.object(validate, 'mail_exchanger_lookup') as mock_method:
        mock_method.side_effect = mock_exchanger_lookup

        # invalid single character (must be alphanum, underscore, or apostrophe)
        invalid_chars = string.punctuation
        invalid_chars = invalid_chars.replace('_', '')
        invalid_chars = invalid_chars.replace('\'', '')
        for i in invalid_chars:
            localpart = str(i)
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)

        # invalid length range
        for i in list(range(0)) + list(range(65, 80)):
            localpart = ''.join(random.choice(string.ascii_letters) for x in range(i))
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)

        # invalid start char (must start with alphanum, underscore, dash, or apostrophe)
        invalid_chars = string.punctuation
        invalid_chars = invalid_chars.replace('_', '')
        invalid_chars = invalid_chars.replace('-', '')
        invalid_chars = invalid_chars.replace('\'', '')
        for i in invalid_chars:
            localpart = str(i) + 'aaaaa'
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)

        # invalid end char (must end with alphanum, underscore, dash, or apostrophe)
        invalid_chars = string.punctuation
        invalid_chars = invalid_chars.replace('_', '')
        invalid_chars = invalid_chars.replace('-', '')
        invalid_chars = invalid_chars.replace('\'', '')
        invalid_chars = invalid_chars.replace('+', '')
        for i in invalid_chars:
            localpart = 'aaaaa' + str(i)
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)

        # invalid chars (must be alphanum, underscore, dash, apostrophe, dots)
        invalid_chars = string.punctuation
        invalid_chars = invalid_chars.replace('_', '')
        invalid_chars = invalid_chars.replace('-', '')
        invalid_chars = invalid_chars.replace('\'', '')
        invalid_chars = invalid_chars.replace('.', '')
        invalid_chars = invalid_chars.replace('+', '')
        for i in invalid_chars:
            localpart = 'aaa' + str(i) + '000'
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)

        # dots (.) are NOT ignored
        addr1 = address.validate_address('aa..aa' + DOMAIN)
        addr2 = address.validate_address('aa.aa' + DOMAIN)
        assert_not_equal(addr1, addr2)

        # everything after plus (+) is ignored, but something must be in front of it
        for localpart in ['+t1', '+' + ATOM_STR]:
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)
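
The recurring pattern here is assert_equal(addr, None) for inputs the
validator must reject. A toy validator sketch (not flanker's actual
grammar):

from nose.tools import assert_equal, assert_not_equal

def validate_localpart(localpart):
    # toy rule standing in for address.validate_address: returns the input
    # when it looks valid, None otherwise
    return localpart if localpart.replace('_', '').isalnum() else None

assert_equal(validate_localpart('!'), None)       # invalid -> None
assert_not_equal(validate_localpart('aa'), None)  # valid -> non-None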

Example 114

Project: CoilSnake Source File: test_graphics.py
def test_read_4bpp_graphic_from_block():
    source = Block()
    source.from_list([0b01010110,
                      0b00001011,

                      0b11001110,
                      0b10010110,

                      0b01110001,
                      0b00111011,

                      0b00001011,
                      0b10011110,

                      0b00011000,
                      0b00000011,

                      0b10000001,
                      0b11101011,

                      0b00000100,
                      0b01000101,

                      0b01010110,
                      0b10001111,

                      0b00101100,
                      0b10110000,

                      0b01010110,
                      0b10110010,

                      0b01010000,
                      0b11000000,

                      0b00111000,
                      0b10010111,

                      0b00101101,
                      0b11111100,

                      0b01111101,
                      0b11101010,

                      0b10101111,
                      0b10110111,

                      0b01100000,
                      0b11101110])
    target = [[0 for x in range(8)] for y in range(8)]
    assert_equal(32, read_4bpp_graphic_from_block(target=target, source=source, offset=0, x=0, y=0, bit_offset=0))
    assert_list_equal(target,
                      [[8, 1, 12, 9, 6, 5, 3, 2],
                       [11, 5, 8, 14, 1, 7, 15, 0],
                       [8, 13, 3, 7, 2, 0, 2, 3],
                       [10, 0, 4, 14, 7, 10, 11, 9],
                       [8, 8, 12, 9, 13, 12, 2, 6],
                       [11, 14, 14, 4, 14, 4, 10, 7],
                       [12, 2, 12, 8, 4, 15, 12, 14],
                       [10, 13, 12, 1, 10, 11, 11, 2]])
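
For reference, the low two bits of the first expected row can be recovered
from the first byte pair alone, assuming a SNES-style planar layout in
which each row's two low bitplanes are stored as consecutive bytes:

from nose.tools import assert_list_equal

low, high = 0b01010110, 0b00001011  # first byte pair of the block above
row = [(((high >> (7 - i)) & 1) << 1) | ((low >> (7 - i)) & 1) for i in range(8)]
assert_list_equal(row, [0, 1, 0, 1, 2, 1, 3, 2])  # == [v & 3 for v in target[0]]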

Example 115

Project: claripy Source File: test_expression.py
Function: test_expression
def test_expression():
    bc = claripy.backends.concrete

    e = claripy.BVV(0x01020304, 32)
    nose.tools.assert_equal(len(e), 32)
    r = e.reversed
    nose.tools.assert_equal(bc.convert(r), 0x04030201)
    nose.tools.assert_equal(len(r), 32)

    nose.tools.assert_equal([ bc.convert(i) for i in r.chop(8) ], [ 4, 3, 2, 1 ] )

    e1 = r[31:24]
    nose.tools.assert_equal(bc.convert(e1), 0x04)
    nose.tools.assert_equal(len(e1), 8)
    nose.tools.assert_equal(bc.convert(e1[2]), 1)
    nose.tools.assert_equal(bc.convert(e1[1]), 0)

    ee1 = e1.zero_extend(8)
    nose.tools.assert_equal(bc.convert(ee1), 0x0004)
    nose.tools.assert_equal(len(ee1), 16)

    ee1 = claripy.BVV(0xfe, 8).sign_extend(8)
    nose.tools.assert_equal(bc.convert(ee1), 0xfffe)
    nose.tools.assert_equal(len(ee1), 16)

    xe1 = [ bc.convert(i) for i in e1.chop(1) ]
    nose.tools.assert_equal(xe1, [ 0, 0, 0, 0, 0, 1, 0, 0 ])

    a = claripy.BVV(1, 1)
    nose.tools.assert_equal(bc.convert(a+a), 2)

    x = claripy.BVV(1, 32)
    nose.tools.assert_equal(x.length, 32)
    y = claripy.LShR(x, 10)
    nose.tools.assert_equal(y.length, 32)

    r = claripy.BVV(0x01020304, 32)
    rr = r.reversed
    rrr = rr.reversed
    #nose.tools.assert_is(bc.convert(r), bc.convert(rrr))
    #nose.tools.assert_is(type(bc.convert(rr)), claripy.A)
    nose.tools.assert_equal(bc.convert(rr), 0x04030201)
    nose.tools.assert_is(r.concat(rr), claripy.Concat(r, rr))

    rsum = r+rr
    nose.tools.assert_equal(bc.convert(rsum), 0x05050505)

    r = claripy.BVS('x', 32)
    rr = r.reversed
    rrr = rr.reversed
    nose.tools.assert_is(r, rrr)

    # test identity
    nose.tools.assert_is(r, rrr)
    nose.tools.assert_is_not(r, rr)
    ii = claripy.BVS('ii', 32)
    ij = claripy.BVS('ij', 32)
    nose.tools.assert_is(ii, ii)
    nose.tools.assert_is_not(ii, ij)

    si = claripy.SI(bits=32, stride=2, lower_bound=20, upper_bound=100)
    sj = claripy.SI(bits=32, stride=2, lower_bound=10, upper_bound=10)
    sk = claripy.SI(bits=32, stride=2, lower_bound=20, upper_bound=100)
    nose.tools.assert_true(claripy.backends.vsa.identical(si, si))
    nose.tools.assert_false(claripy.backends.vsa.identical(si, sj))
    nose.tools.assert_true(claripy.backends.vsa.identical(si, sk))
    nose.tools.assert_is_not(si, sj)
    nose.tools.assert_is_not(sj, sk)
    nose.tools.assert_is_not(sk, si)

    # test hash cache
    nose.tools.assert_is(a+a, a+a)

    # test replacement
    old = claripy.BVS('old', 32, explicit_name=True)
    new = claripy.BVS('new', 32, explicit_name=True)
    ooo = claripy.BVV(0, 32)

    old_formula = claripy.If((old + 1)%256 == 0, old+10, old+20)
    print(old_formula.dbg_repr())
    new_formula = old_formula.replace(old, new)
    print(new_formula.dbg_repr())
    ooo_formula = new_formula.replace(new, ooo)

    print(ooo_formula.dbg_repr())

    nose.tools.assert_not_equal(hash(old_formula), hash(new_formula))
    nose.tools.assert_not_equal(hash(old_formula), hash(ooo_formula))
    nose.tools.assert_not_equal(hash(new_formula), hash(ooo_formula))

    nose.tools.assert_equal(old_formula.variables, { 'old' })
    nose.tools.assert_equal(new_formula.variables, { 'new' })
    nose.tools.assert_equal(ooo_formula.variables, ooo.variables)

    nose.tools.assert_true(old_formula.symbolic)
    nose.tools.assert_true(new_formula.symbolic)
    nose.tools.assert_false(ooo_formula.symbolic)

    nose.tools.assert_equal(str(old_formula).replace('old', 'new'), str(new_formula))
    nose.tools.assert_equal(bc.convert(ooo_formula), 20)

    # test dict replacement
    old = claripy.BVS('old', 32, explicit_name=True)
    new = claripy.BVS('new', 32, explicit_name=True)
    c = (old + 10) - (old + 20)
    d = (old + 1) - (old + 2)
    cr = c.replace_dict({(old+10).cache_key: (old+1), (old+20).cache_key: (old+2)})
    nose.tools.assert_is(cr, d)

    # test AST collapse
    s = claripy.SI(bits=32, stride=0, lower_bound=10, upper_bound=10)
    b = claripy.BVV(20, 32)

    sb = s+b
    nose.tools.assert_is_instance(sb.args[0], claripy.ast.Base)

    bb = b+b
    # this was broken previously -- it was checking if type(bb.args[0]) == A,
    # and it wasn't, but was instead a subclass. leaving this out for now
    # nose.tools.assert_not_is_instance(bb.args[0], claripy.ast.Base)

    # ss = s+s
    # (see above)
    # nose.tools.assert_not_is_instance(ss.args[0], claripy.ast.Base)

    sob = s|b
    # for now, this is collapsed. Presumably, Fish will make it not collapse at some point
    nose.tools.assert_is_instance(sob.args[0], claripy.ast.Base)

    # make sure the AST collapses for delayed ops like reversing
    rb = b.reversed
    #nose.tools.assert_is_instance(rb.args[0], claripy.ast.Base)
    # TODO: Properly delay reversing: should not be eager

    nose.tools.assert_is_not(rb, bb)
    nose.tools.assert_is(rb, rb)

    # test some alternate bvv creation methods
    nose.tools.assert_is(claripy.BVV('AAAA'), claripy.BVV(0x41414141, 32))
    nose.tools.assert_is(claripy.BVV('AAAA', 32), claripy.BVV(0x41414141, 32))
    nose.tools.assert_is(claripy.BVV('AB'), claripy.BVV(0x4142, 16))
    nose.tools.assert_is(claripy.BVV('AB', 16), claripy.BVV(0x4142, 16))
    nose.tools.assert_raises(claripy.errors.ClaripyValueError, claripy.BVV, 'AB', 8)
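
The reversed/chop behaviour asserted at the top is plain byte swapping; a
claripy-free sketch of the same expectations on a concrete integer:

from nose.tools import assert_equal

value = 0x01020304
swapped = int.from_bytes(value.to_bytes(4, 'big'), 'little')  # byte-reverse
assert_equal(swapped, 0x04030201)
assert_equal(list(swapped.to_bytes(4, 'big')), [0x04, 0x03, 0x02, 0x01])  # chop(8) analogue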

Example 116

Project: networkx Source File: test_pydot.py
    def pydot_checks(self, G, prog):
        '''
        Validate :mod:`pydot`-based usage of the passed NetworkX graph with the
        passed basename of an external GraphViz command (e.g., `dot`, `neato`).
        '''

        # Set the name of this graph to... "G". Failing to do so will
        # subsequently trip an assertion expecting this name.
        G.graph['name'] = 'G'

        # Add arbitrary nodes and edges to the passed empty graph.
        G.add_edges_from([('A','B'),('A','C'),('B','C'),('A','D')])
        G.add_node('E')

        # Validate layout of this graph with the passed GraphViz command.
        graph_layout = nx.nx_pydot.pydot_layout(G, prog=prog)
        assert_is_instance(graph_layout, dict)

        # Convert this graph into a "pydot.Dot" instance.
        P = nx.nx_pydot.to_pydot(G)

        # Convert this "pydot.Dot" instance back into a graph of the same type.
        G2 = G.__class__(nx.nx_pydot.from_pydot(P))

        # Validate the original and resulting graphs to be the same.
        assert_graphs_equal(G, G2)

        # Serialize this "pydot.Dot" instance to a temporary file in dot format.
        fname = tempfile.mktemp()
        assert_true(P.write_raw(fname))

        # Deserialize a list of new "pydot.Dot" instances back from this file.
        Pin_list = pydot.graph_from_dot_file(path=fname, encoding='utf-8')

        # Validate this file to contain only one graph.
        assert_equal(len(Pin_list), 1)

        # The single "pydot.Dot" instance deserialized from this file.
        Pin = Pin_list[0]

        # Sorted list of all nodes in the original "pydot.Dot" instance.
        n1 = sorted([p.get_name() for p in P.get_node_list()])

        # Sorted list of all nodes in the deserialized "pydot.Dot" instance.
        n2 = sorted([p.get_name() for p in Pin.get_node_list()])

        # Validate these instances to contain the same nodes.
        assert_equal(n1, n2)

        # Sorted list of all edges in the original "pydot.Dot" instance.
        e1 = sorted([
            (e.get_source(), e.get_destination()) for e in P.get_edge_list()])

        # Sorted list of all edges in the deserialized "pydot.Dot" instance.
        e2 = sorted([
            (e.get_source(), e.get_destination()) for e in Pin.get_edge_list()])

        # Validate these instances to contain the same edges.
        assert_equal(e1, e2)

        # Deserialize a new graph of the same type back from this file.
        Hin = nx.nx_pydot.read_dot(fname)
        Hin = G.__class__(Hin)

        # Validate the original and resulting graphs to be the same.
        assert_graphs_equal(G, Hin)
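
A compact sketch of the round-trip comparison pattern, rebuilding a graph
from its own edge and node lists instead of going through pydot:

import networkx as nx
from nose.tools import assert_equal

G = nx.Graph([('A', 'B'), ('A', 'C'), ('B', 'C'), ('A', 'D')])
G.add_node('E')
H = nx.Graph(G.edges())      # round-trip through an edge list
H.add_nodes_from(G.nodes())  # isolated nodes must be carried over separately
assert_equal(sorted(G.nodes()), sorted(H.nodes()))
assert_equal(sorted(G.edges()), sorted(H.edges()))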

Example 117

Project: hwrt Source File: handwritten_data_test.py
def get_sorted_pointlist_test():
    a = testhelper.get_symbol_as_handwriting(97705)
    nose.tools.assert_equal(a.get_sorted_pointlist(),
                            [[
                             {'y': 223.125, 'x': 328.5, 'time': 1377173554837},
                             {'y': 225.125, 'x': 326.5, 'time': 1377173554868},
                             {'y': 226.125, 'x': 322.5, 'time': 1377173554876},
                             {'y': 229.125, 'x': 319.5, 'time': 1377173554885},
                             {'y': 231.125, 'x': 315.5, 'time': 1377173554895},
                             {'y': 237.125, 'x': 304.5, 'time': 1377173554912},
                             {'y': 245.125, 'x': 291.5, 'time': 1377173554928},
                             {'y': 253.125, 'x': 274.5, 'time': 1377173554945},
                             {'y': 261.125, 'x': 256.5, 'time': 1377173554964},
                             {'y': 267.125, 'x': 243.5, 'time': 1377173554978},
                             {'y': 276.125, 'x': 222.5, 'time': 1377173554995},
                             {'y': 279.125, 'x': 211.5, 'time': 1377173555012},
                             {'y': 280.125, 'x': 204.5, 'time': 1377173555031},
                             {'y': 281.125, 'x': 196.5, 'time': 1377173555045},
                             {'y': 281.125, 'x': 183.5, 'time': 1377173555061},
                             {'y': 281.125, 'x': 172.5, 'time': 1377173555078},
                             {'y': 280.125, 'x': 163.5, 'time': 1377173555095},
                             {'y': 274.125, 'x': 149.5, 'time': 1377173555128},
                             {'y': 270.125, 'x': 144.5, 'time': 1377173555147},
                             {'y': 266.125, 'x': 142.5, 'time': 1377173555180},
                             {'y': 258.125, 'x': 142.5, 'time': 1377173555216},
                             {'y': 226.125, 'x': 143.5, 'time': 1377173555264},
                             {'y': 212.125, 'x': 148.5, 'time': 1377173555281},
                             {'y': 204.125, 'x': 156.5, 'time': 1377173555296},
                             {'y': 182.125, 'x': 176.5, 'time': 1377173555330},
                             {'y': 167.125, 'x': 189.5, 'time': 1377173555363},
                             {'y': 152.125, 'x': 201.5, 'time': 1377173555397},
                             {'y': 148.125, 'x': 206.5, 'time': 1377173555417},
                             {'y': 132.125, 'x': 223.5, 'time': 1377173555463},
                             {'y': 121.125, 'x': 231.5, 'time': 1377173555496},
                             {'y': 106.125, 'x': 241.5, 'time': 1377173555530},
                             {'y':  94.125, 'x': 243.5, 'time': 1377173555563},
                             {'y':  81.125, 'x': 241.5, 'time': 1377173555596},
                             {'y':  75.125, 'x': 236.5, 'time': 1377173555630},
                             {'y':  62.125, 'x': 222.5, 'time': 1377173555663},
                             {'y':  54.125, 'x': 208.5, 'time': 1377173555696},
                             {'y':  50.125, 'x': 198.5, 'time': 1377173555730},
                             {'y':  50.125, 'x': 182.5, 'time': 1377173555763},
                             {'y':  50.125, 'x': 174.5, 'time': 1377173555795},
                             {'y':  57.125, 'x': 168.5, 'time': 1377173555830},
                             {'y':  67.125, 'x': 166.5, 'time': 1377173555862},
                             {'y':  80.125, 'x': 166.5, 'time': 1377173555896},
                             {'y': 102.125, 'x': 173.5, 'time': 1377173555930},
                             {'y': 130.125, 'x': 184.5, 'time': 1377173555962},
                             {'y': 157.125, 'x': 195.5, 'time': 1377173555996},
                             {'y': 176.125, 'x': 207.5, 'time': 1377173556030},
                             {'y': 194.125, 'x': 217.5, 'time': 1377173556061},
                             {'y': 207.125, 'x': 225.5, 'time': 1377173556094},
                             {'y': 215.125, 'x': 231.5, 'time': 1377173556115},
                             {'y': 229.125, 'x': 239.5, 'time': 1377173556147},
                             {'y': 242.125, 'x': 246.5, 'time': 1377173556180},
                             {'y': 259.125, 'x': 258.5, 'time': 1377173556216},
                             {'y': 274.125, 'x': 269.5, 'time': 1377173556247},
                             {'y': 282.125, 'x': 275.5, 'time': 1377173556280},
                             {'y': 290.125, 'x': 280.5, 'time': 1377173556315},
                             {'y': 292.125, 'x': 281.5, 'time': 1377173556349},
                             {'y': 292.125, 'x': 282.5, 'time': 1377173556395},
                             {'y': 293.125, 'x': 283.5, 'time': 1377173556499},
                             {'y': 295.125, 'x': 286.5, 'time': 1377173556652},
                             {'y': 300.125, 'x': 290.5, 'time': 1377173556660},
                             {'y': 304.125, 'x': 294.5, 'time': 1377173556668},
                             {'y': 307.125, 'x': 297.5, 'time': 1377173556682},
                             {'y': 311.125, 'x': 300.5, 'time': 1377173556697},
                             {'y': 313.125, 'x': 302.5, 'time': 1377173556713}]
                             ])
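
assert_equal compares nested structures (here a list of point dicts)
element by element, which is why a single assertion covers the whole
stroke. A toy two-point version of the same idea:

from nose.tools import assert_equal

stroke = [{'x': 328.5, 'y': 223.125, 'time': 1377173554837},
          {'x': 326.5, 'y': 225.125, 'time': 1377173554868}]  # toy stroke
assert_equal(sorted(stroke, key=lambda p: p['time']), stroke)  # already time-ordered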

Example 118

Project: dipy Source File: test_metrics.py
def test_downsample():

    t = np.array([[  82.20181274,   91.3650589 ,   43.15737152],
       [  82.3844223 ,   91.79336548,   43.87036514],
       [  82.48710632,   92.27861023,   44.56298065],
       [  82.53310394,   92.7854538 ,   45.24635315],
       [  82.53793335,   93.26902008,   45.94785309],
       [  82.48797607,   93.75003815,   46.6493988 ],
       [  82.35533142,   94.2518158 ,   47.32533264],
       [  82.15484619,   94.76634216,   47.97451019],
       [  81.90982819,   95.28792572,   48.6024437 ],
       [  81.63336945,   95.78153229,   49.23971176],
       [  81.35479736,   96.24868011,   49.89558792],
       [  81.08713531,   96.69807434,   50.56812668],
       [  80.81504822,   97.14285278,   51.24193192],
       [  80.52591705,   97.56719971,   51.92168427],
       [  80.26599884,   97.98269653,   52.61848068],
       [  80.0463562 ,   98.38131714,   53.3385582 ],
       [  79.8469162 ,   98.77052307,   54.06955338],
       [  79.57667542,   99.13599396,   54.78985596],
       [  79.23351288,   99.4320755 ,   55.51065063],
       [  78.84815979,   99.64141846,   56.24016571],
       [  78.47383881,   99.77347565,   56.9929924 ],
       [  78.12837219,   99.81330872,   57.76969528],
       [  77.80438995,   99.85082245,   58.55574799],
       [  77.4943924 ,   99.88065338,   59.34777069],
       [  77.21414185,   99.85343933,   60.15090561],
       [  76.96416473,   99.82772827,   60.96406937],
       [  76.74712372,   99.80519104,   61.78676605],
       [  76.52263641,   99.79122162,   62.60765076],
       [  76.03757477,  100.08692169,   63.24152374],
       [  75.44867706,  100.3526535 ,   63.79513168],
       [  74.78033447,  100.57255554,   64.272789  ],
       [  74.11605835,  100.7733078 ,   64.76428986],
       [  73.51222992,  100.98779297,   65.32373047],
       [  72.97387695,  101.23387146,   65.93502045],
       [  72.47355652,  101.49151611,   66.57343292],
       [  71.99834442,  101.72480774,   67.2397995 ],
       [  71.5690918 ,  101.98665619,   67.92664337],
       [  71.18083191,  102.29483795,   68.61888123],
       [  70.81879425,  102.63343048,   69.31127167],
       [  70.47422791,  102.98672485,   70.00532532],
       [  70.10092926,  103.28502655,   70.70999908],
       [  69.69512177,  103.51667023,   71.42147064],
       [  69.27423096,  103.71351624,   72.13452911],
       [  68.91260529,  103.81676483,   72.89796448],
       [  68.60788727,  103.81982422,   73.69258118],
       [  68.34162903,  103.7661972 ,   74.49915314],
       [  68.08542633,  103.70635223,   75.30856323],
       [  67.83590698,  103.60187531,   76.11553955],
       [  67.56822968,  103.4482193 ,   76.90870667],
       [  67.28399658,  103.25878906,   77.68825531],
       [  67.00117493,  103.03740692,   78.45989227],
       [  66.72718048,  102.80329895,   79.23099518],
       [  66.4619751 ,  102.54130554,   79.99622345],
       [  66.20803833,  102.22305298,   80.7438736 ],
       [  65.96872711,  101.88980865,   81.48987579],
       [  65.72864532,  101.59316254,   82.25085449],
       [  65.47808075,  101.33383942,   83.02194214],
       [  65.21841431,  101.11295319,   83.80186462],
       [  64.95678711,  100.94080353,   84.59326935],
       [  64.71759033,  100.82022095,   85.40114594],
       [  64.48053741,  100.73490143,   86.21411896],
       [  64.24304199,  100.65074158,   87.02709198],
       [  64.01773834,  100.55318451,   87.84204865],
       [  63.83801651,  100.41996765,   88.66333008],
       [  63.70982361,  100.25119019,   89.48779297],
       [  63.60707855,  100.06730652,   90.31262207],
       [  63.46164322,   99.91001892,   91.13648224],
       [  63.26287842,   99.78648376,   91.95485687],
       [  63.03713226,   99.68377686,   92.76905823],
       [  62.81192398,   99.56619263,   93.58140564],
       [  62.57145309,   99.42708588,   94.38592529],
       [  62.32259369,   99.25592804,   95.18167114],
       [  62.07497787,   99.05770111,   95.97154236],
       [  61.82253647,   98.83877563,   96.7543869 ],
       [  61.59536743,   98.59293365,   97.5370636 ],
       [  61.46530151,   98.30503845,   98.32772827],
       [  61.39904785,   97.97928619,   99.11172485],
       [  61.33279419,   97.65353394,   99.89572906],
       [  61.26067352,   97.30914307,  100.67123413],
       [  61.19459534,   96.96743011,  101.44847107],
       [  61.1958046 ,   96.63417053,  102.23215485],
       [  61.26572037,   96.2988739 ,  103.01185608],
       [  61.39840698,   95.96297455,  103.78307343],
       [  61.5720787 ,   95.6426239 ,  104.55268097],
       [  61.78163528,   95.35540771,  105.32629395],
       [  62.06700134,   95.09746552,  106.08564758],
       [  62.39427185,   94.8572464 ,  106.83369446],
       [  62.74076462,   94.62278748,  107.57482147],
       [  63.11461639,   94.40107727,  108.30641937],
       [  63.53397751,   94.20418549,  109.02002716],
       [  64.00019836,   94.03809357,  109.71183777],
       [  64.43580627,   93.87523651,  110.42416382],
       [  64.84857941,   93.69993591,  111.14715576],
       [  65.26740265,   93.51858521,  111.86515808],
       [  65.69511414,   93.3671875 ,  112.58474731],
       [  66.10470581,   93.22719574,  113.31711578],
       [  66.45891571,   93.06028748,  114.07256317],
       [  66.78582001,   92.90560913,  114.84281921],
       [  67.11138916,   92.79004669,  115.6204071 ],
       [  67.44729614,   92.75711823,  116.40135193],
       [  67.75688171,   92.98265076,  117.16111755],
       [  68.02041626,   93.28012848,  117.91371155],
       [  68.25725555,   93.53466797,  118.69052124],
       [  68.46047974,   93.63263702,  119.51107788],
       [  68.62039948,   93.62007141,  120.34690094],
       [  68.76782227,   93.56475067,  121.18331909],
       [  68.90222168,   93.46326447,  122.01765442],
       [  68.99872589,   93.30039978,  122.84759521],
       [  69.04119873,   93.05428314,  123.66156769],
       [  69.05086517,   92.74394989,  124.45450592],
       [  69.02742004,   92.40427399,  125.23509979],
       [  68.95466614,   92.09059143,  126.02339935],
       [  68.84975433,   91.7967453 ,  126.81564331],
       [  68.72673798,   91.53726196,  127.61715698],
       [  68.6068573 ,   91.3030014 ,  128.42681885],
       [  68.50636292,   91.12481689,  129.25317383],
       [  68.39311218,   91.01572418,  130.08976746],
       [  68.25946808,   90.94654083,  130.92756653]], dtype=np.float32)

    pts = 12
    td = tm.downsample(t, pts)
    # print td
    assert_equal(len(td), pts)

    res = []
    t = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4')
    for pts in range(3, 200):
        td = tm.downsample(t, pts)
        res.append(pts-len(td))
    assert_equal(np.sum(res), 0)
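
The second loop checks a single invariant: len(downsample(t, pts)) == pts
for every requested pts. A sketch of that invariant with a toy
linear-interpolation downsampler standing in for tm.downsample:

import numpy as np
from nose.tools import assert_equal

def toy_downsample(t, pts):
    # linear interpolation along the row index; not dipy's implementation
    s = np.linspace(0, len(t) - 1, pts)
    lo = np.floor(s).astype(int)
    hi = np.minimum(lo + 1, len(t) - 1)
    w = (s - lo)[:, None]
    return t[lo] * (1 - w) + t[hi] * w

t = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4')
for pts in range(3, 200):
    assert_equal(len(toy_downsample(t, pts)), pts)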

    """

Example 119

Project: hamms Source File: test_endpoints.py
def test_5512():
    url = 'http://127.0.0.1:{port}?tries=foo'.format(port=BASE_PORT+12)
    r = requests.get(url)
    assert_equal(r.status_code, 400)
    d = r.json()
    assert_true('integer' in d['error'])

    url = 'http://127.0.0.1:{port}?key=hamms-test'.format(port=BASE_PORT+12)
    r = requests.get(url)
    assert_equal(r.status_code, 500)
    d = r.json()
    assert_equal(d['tries_remaining'], 2)
    assert_equal(d['key'], 'hamms-test')

    r = requests.get(url)
    assert_equal(r.status_code, 500)
    d = r.json()
    assert_equal(d['tries_remaining'], 1)

    otherkey_url = 'http://127.0.0.1:{port}?key=other-key'.format(port=BASE_PORT+12)
    r = requests.get(otherkey_url)
    assert_equal(r.status_code, 500)
    d = r.json()
    assert_equal(d['tries_remaining'], 2)

    url = 'http://127.0.0.1:{port}?key=hamms-test'.format(port=BASE_PORT+12)
    r = requests.get(url)
    assert_equal(r.status_code, 200)
    d = r.json()
    assert_equal(d['tries_remaining'], 0)

    url = 'http://127.0.0.1:{port}/counters?key=hamms-test&tries=7'.format(port=BASE_PORT+12)
    r = requests.post(url)
    assert_equal(r.status_code, 200)
    d = r.json()
    assert_equal(d['key'], 'hamms-test')
    assert_equal(d['tries_remaining'], 7)

    url = 'http://127.0.0.1:{port}/counters'.format(port=BASE_PORT+12)
    r = requests.post(url, data={'key': 'hamms-test', 'tries': 7})
    assert_equal(r.status_code, 200)
    d = r.json()
    assert_equal(d['key'], 'hamms-test')
    assert_equal(d['tries_remaining'], 7)

    url = 'http://127.0.0.1:{port}?key=hamms-test'.format(port=BASE_PORT+12)
    r = requests.get(url)
    assert_equal(r.status_code, 500)
    d = r.json()
    assert_equal(d['tries_remaining'], 6)

    url = 'http://127.0.0.1:{port}'.format(port=BASE_PORT+12)
    r = requests.get(url)
    assert_equal(r.status_code, 500)
    d = r.json()
    assert_equal(d['key'], 'default')

    url = 'http://127.0.0.1:{port}?key=foo&tries=1'.format(port=BASE_PORT+12)
    r = requests.get(url)
    assert_equal(r.status_code, 200)
    d = r.json()
    assert_equal(d['key'], 'foo')
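
The server behaviour being exercised (respond 500 while tries remain, then
200 on the final try) can be modelled without a live endpoint. A toy model
of the semantics, not the hamms implementation:

from nose.tools import assert_equal

class TriesCounter(object):
    # toy model: each hit decrements the counter; the last try succeeds
    def __init__(self, tries):
        self.tries_remaining = tries

    def hit(self):
        self.tries_remaining -= 1
        return 500 if self.tries_remaining > 0 else 200

c = TriesCounter(tries=3)
assert_equal(c.hit(), 500)
assert_equal(c.hit(), 500)
assert_equal(c.hit(), 200)
assert_equal(c.tries_remaining, 0)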

Example 120

Project: nipy Source File: test_utils.py
def test_convolve_functions():
    # replicate convolution
    # This is a square wave on [0,1]
    f1 = (t > 0) * (t < 1)
    # ff1 is the numerical implementation of same
    ff1 = lambdify(t, f1)
    # Time delta
    dt = 1e-3
    # Numerical convolution to test against
    # The convolution of ``f1`` with itself is a triangular wave on [0, 2],
    # peaking at 1 with height 1
    time, value = numerical_convolve(ff1, ff1, [0, 2], dt)
    # shim functions wrapping the kernel-based convolution version
    def kern_conv1(f1, f2, f1_interval, f2_interval, dt, fill=0, name=None):
        kern = TimeConvolver(f1, f1_interval, dt, fill)
        return kern.convolve(f2, f2_interval, name=name)
    def kern_conv2(f1, f2, f1_interval, f2_interval, dt, fill=0, name=None):
        kern = TimeConvolver(f2, f2_interval, dt, fill)
        return kern.convolve(f1, f1_interval, name=name)
    for cfunc in (convolve_functions, kern_conv1, kern_conv2):
        tri = cfunc(f1, f1, [0, 2], [0, 2], dt, name='conv')
        assert_equal(str(tri), 'conv(t)')
        ftri = lambdify(t, tri)
        y = ftri(time)
        # numerical convolve about the same as ours
        assert_array_almost_equal(value, y)
        # peak is at 1
        assert_array_almost_equal(time[np.argmax(y)], 1)
        # Flip the interval and get the same result
        for seq1, seq2 in (((0, 2), (2, 0)),
                        ((2, 0), (0, 2)),
                        ((2, 0), (2, 0))):
            tri = cfunc(f1, f1, seq1, seq2, dt)
            ftri = lambdify(t, tri)
            y = ftri(time)
            assert_array_almost_equal(value, y)
        # offset square wave by 1 - offset triangle by 1
        f2 = (t > 1) * (t < 2)
        tri = cfunc(f1, f2, [0, 3], [0, 3], dt)
        ftri = lambdify(t, tri)
        o1_time = np.arange(0, 3, dt)
        z1s = np.zeros(int(np.round(1. / dt)))
        assert_array_almost_equal(ftri(o1_time), np.r_[z1s, value])
        # Same for input function
        tri = cfunc(f2, f1, [0, 3], [0, 3], dt)
        ftri = lambdify(t, tri)
        assert_array_almost_equal(ftri(o1_time), np.r_[z1s, value])
        # 2 seconds for both
        tri = cfunc(f2, f2, [0, 4], [0, 4], dt)
        ftri = lambdify(t, tri)
        o2_time = np.arange(0, 4, dt)
        assert_array_almost_equal(ftri(o2_time), np.r_[z1s, z1s, value])
        # offset by -0.5 - offset triangle by -0.5
        f3 = (t > -0.5) * (t < 0.5)
        tri = cfunc(f1, f3, [0, 2], [-0.5, 1.5], dt)
        ftri = lambdify(t, tri)
        o1_time = np.arange(-0.5, 1.5, dt)
        assert_array_almost_equal(ftri(o1_time), value)
        # Same for input function
        tri = cfunc(f3, f1, [-0.5, 1.5], [0, 2], dt)
        ftri = lambdify(t, tri)
        assert_array_almost_equal(ftri(o1_time), value)
        # -1 second for both
        tri = cfunc(f3, f3, [-0.5, 1.5], [-0.5, 1.5], dt)
        ftri = lambdify(t, tri)
        o2_time = np.arange(-1, 1, dt)
        assert_array_almost_equal(ftri(o2_time), value)
        # Check it's OK to be off the dt grid
        tri = cfunc(f1, f1, [dt/2, 2 + dt/2], [0, 2], dt, name='conv')
        ftri = lambdify(t, tri)
        assert_array_almost_equal(ftri(time), value, 3)
        # Check fill value
        nan_tri = cfunc(f1, f1, [0, 2], [0, 2], dt, fill=np.nan)
        nan_ftri = lambdify(t, nan_tri)
        y = nan_ftri(time)
        assert_array_equal(y, value)
        assert_true(np.all(np.isnan(nan_ftri(np.arange(-2, 0)))))
        assert_true(np.all(np.isnan(nan_ftri(np.arange(4, 6)))))
        # The original fill value was 0
        assert_array_equal(ftri(np.arange(-2, 0)), 0)
        assert_array_equal(ftri(np.arange(4, 6)), 0)
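
The analytic fact the test leans on (a unit square wave convolved with
itself is a unit-height triangle peaking at t=1) checks out numerically
with plain np.convolve:

import numpy as np
from numpy.testing import assert_array_almost_equal

dt = 1e-3
f1 = np.ones(int(1. / dt))      # square wave sampled on [0, 1)
tri = np.convolve(f1, f1) * dt  # triangle on [0, 2)
time = np.arange(len(tri)) * dt
assert_array_almost_equal(time[np.argmax(tri)], 1, decimal=2)
assert_array_almost_equal(tri.max(), 1, decimal=2)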

Example 121

Project: neural-network-animation Source File: test_dates.py
def test_auto_date_locator():
    def _create_auto_date_locator(date1, date2):
        locator = mdates.AutoDateLocator()
        locator.create_dummy_axis()
        locator.set_view_interval(mdates.date2num(date1),
                                  mdates.date2num(date2))
        return locator

    d1 = datetime.datetime(1990, 1, 1)
    results = ([datetime.timedelta(weeks=52 * 200),
                ['1990-01-01 00:00:00+00:00', '2010-01-01 00:00:00+00:00',
                 '2030-01-01 00:00:00+00:00', '2050-01-01 00:00:00+00:00',
                 '2070-01-01 00:00:00+00:00', '2090-01-01 00:00:00+00:00',
                 '2110-01-01 00:00:00+00:00', '2130-01-01 00:00:00+00:00',
                 '2150-01-01 00:00:00+00:00', '2170-01-01 00:00:00+00:00']
                ],
               [datetime.timedelta(weeks=52),
                ['1990-01-01 00:00:00+00:00', '1990-02-01 00:00:00+00:00',
                 '1990-03-01 00:00:00+00:00', '1990-04-01 00:00:00+00:00',
                 '1990-05-01 00:00:00+00:00', '1990-06-01 00:00:00+00:00',
                 '1990-07-01 00:00:00+00:00', '1990-08-01 00:00:00+00:00',
                 '1990-09-01 00:00:00+00:00', '1990-10-01 00:00:00+00:00',
                 '1990-11-01 00:00:00+00:00', '1990-12-01 00:00:00+00:00']
                ],
               [datetime.timedelta(days=140),
                ['1990-01-06 00:00:00+00:00', '1990-01-27 00:00:00+00:00',
                 '1990-02-17 00:00:00+00:00', '1990-03-10 00:00:00+00:00',
                 '1990-03-31 00:00:00+00:00', '1990-04-21 00:00:00+00:00',
                 '1990-05-12 00:00:00+00:00']
                ],
               [datetime.timedelta(days=40),
                ['1990-01-03 00:00:00+00:00', '1990-01-10 00:00:00+00:00',
                 '1990-01-17 00:00:00+00:00', '1990-01-24 00:00:00+00:00',
                 '1990-01-31 00:00:00+00:00', '1990-02-07 00:00:00+00:00']
                ],
               [datetime.timedelta(hours=40),
                ['1990-01-01 00:00:00+00:00', '1990-01-01 04:00:00+00:00',
                 '1990-01-01 08:00:00+00:00', '1990-01-01 12:00:00+00:00',
                 '1990-01-01 16:00:00+00:00', '1990-01-01 20:00:00+00:00',
                 '1990-01-02 00:00:00+00:00', '1990-01-02 04:00:00+00:00',
                 '1990-01-02 08:00:00+00:00', '1990-01-02 12:00:00+00:00',
                 '1990-01-02 16:00:00+00:00']
                ],
               [datetime.timedelta(minutes=20),
                ['1990-01-01 00:00:00+00:00', '1990-01-01 00:05:00+00:00',
                 '1990-01-01 00:10:00+00:00', '1990-01-01 00:15:00+00:00',
                 '1990-01-01 00:20:00+00:00']

                ],
               [datetime.timedelta(seconds=40),
                ['1990-01-01 00:00:00+00:00', '1990-01-01 00:00:05+00:00',
                 '1990-01-01 00:00:10+00:00', '1990-01-01 00:00:15+00:00',
                 '1990-01-01 00:00:20+00:00', '1990-01-01 00:00:25+00:00',
                 '1990-01-01 00:00:30+00:00', '1990-01-01 00:00:35+00:00',
                 '1990-01-01 00:00:40+00:00']
                ],
               [datetime.timedelta(microseconds=1500),
                ['1989-12-31 23:59:59.999507+00:00',
                 '1990-01-01 00:00:00+00:00',
                 '1990-01-01 00:00:00.000502+00:00',
                 '1990-01-01 00:00:00.001005+00:00',
                 '1990-01-01 00:00:00.001508+00:00']
                ],
               )

    for t_delta, expected in results:
        d2 = d1 + t_delta
        locator = _create_auto_date_locator(d1, d2)
        assert_equal(list(map(str, mdates.num2date(locator()))),
                     expected)
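
The comparison itself is just stringified datetimes against an expected
list; a locator-free sketch of that pattern:

import datetime
from nose.tools import assert_equal

d1 = datetime.datetime(1990, 1, 1)
ticks = [d1 + datetime.timedelta(hours=4 * i) for i in range(3)]  # toy tick positions
assert_equal(list(map(str, ticks)),
             ['1990-01-01 00:00:00', '1990-01-01 04:00:00',
              '1990-01-01 08:00:00'])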

Example 122

Project: neural-network-animation Source File: test_triangulation.py
def test_trifinder():
    # Test points within triangles of masked triangulation.
    x, y = np.meshgrid(np.arange(4), np.arange(4))
    x = x.ravel()
    y = y.ravel()
    triangles = [[0, 1, 4], [1, 5, 4], [1, 2, 5], [2, 6, 5], [2, 3, 6],
                 [3, 7, 6], [4, 5, 8], [5, 9, 8], [5, 6, 9], [6, 10, 9],
                 [6, 7, 10], [7, 11, 10], [8, 9, 12], [9, 13, 12], [9, 10, 13],
                 [10, 14, 13], [10, 11, 14], [11, 15, 14]]
    mask = np.zeros(len(triangles))
    mask[8:10] = 1
    triang = mtri.Triangulation(x, y, triangles, mask)
    trifinder = triang.get_trifinder()

    xs = [0.25, 1.25, 2.25, 3.25]
    ys = [0.25, 1.25, 2.25, 3.25]
    xs, ys = np.meshgrid(xs, ys)
    xs = xs.ravel()
    ys = ys.ravel()
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [0, 2, 4, -1, 6, -1, 10, -1,
                              12, 14, 16, -1, -1, -1, -1, -1])
    tris = trifinder(xs-0.5, ys-0.5)
    assert_array_equal(tris, [-1, -1, -1, -1, -1, 1, 3, 5,
                              -1, 7, -1, 11, -1, 13, 15, 17])

    # Test points exactly on boundary edges of masked triangulation.
    xs = [0.5, 1.5, 2.5, 0.5, 1.5, 2.5, 1.5, 1.5, 0.0, 1.0, 2.0, 3.0]
    ys = [0.0, 0.0, 0.0, 3.0, 3.0, 3.0, 1.0, 2.0, 1.5, 1.5, 1.5, 1.5]
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [0, 2, 4, 13, 15, 17, 3, 14, 6, 7, 10, 11])

    # Test points exactly on boundary corners of masked triangulation.
    xs = [0.0, 3.0]
    ys = [0.0, 3.0]
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [0, 17])

    # Test triangles with horizontal collinear points.  These are not valid
    # triangulations, but we try to deal with the simplest violations.
    delta = 0.0  # If +ve, triangulation is OK, if -ve triangulation invalid,
                 # if zero have collinear points but should pass tests anyway.
    x = [1.5, 0,  1,  2, 3, 1.5,   1.5]
    y = [-1,  0,  0,  0, 0, delta, 1]
    triangles = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [1, 2, 5], [2, 3, 5],
                 [3, 4, 5], [1, 5, 6], [4, 6, 5]]
    triang = mtri.Triangulation(x, y, triangles)
    trifinder = triang.get_trifinder()

    xs = [-0.1, 0.4, 0.9, 1.4, 1.9, 2.4, 2.9]
    ys = [-0.1, 0.1]
    xs, ys = np.meshgrid(xs, ys)
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [[-1, 0, 0, 1, 1, 2, -1],
                              [-1, 6, 6, 6, 7, 7, -1]])

    # Test triangles with vertical collinear points.  These are not valid
    # triangulations, but we try to deal with the simplest violations.
    delta = 0.0  # If +ve, triangulation is OK, if -ve triangulation invalid,
                 # if zero have collinear points but should pass tests anyway.
    x = [-1, -delta, 0,  0,  0, 0, 1]
    y = [1.5, 1.5,   0,  1,  2, 3, 1.5]
    triangles = [[0, 1, 2], [0, 1, 5], [1, 2, 3], [1, 3, 4], [1, 4, 5],
                 [2, 6, 3], [3, 6, 4], [4, 6, 5]]
    triang = mtri.Triangulation(x, y, triangles)
    trifinder = triang.get_trifinder()

    xs = [-0.1, 0.1]
    ys = [-0.1, 0.4, 0.9, 1.4, 1.9, 2.4, 2.9]
    xs, ys = np.meshgrid(xs, ys)
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [[-1, -1], [0, 5], [0, 5], [0, 6], [1, 6], [1, 7],
                              [-1, -1]])

    # Test that changing triangulation by setting a mask causes the trifinder
    # to be reinitialised.
    x = [0, 1, 0, 1]
    y = [0, 0, 1, 1]
    triangles = [[0, 1, 2], [1, 3, 2]]
    triang = mtri.Triangulation(x, y, triangles)
    trifinder = triang.get_trifinder()

    xs = [-0.2, 0.2, 0.8, 1.2]
    ys = [ 0.5, 0.5, 0.5, 0.5]
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [-1, 0, 1, -1])

    triang.set_mask([1, 0])
    assert_equal(trifinder, triang.get_trifinder())
    tris = trifinder(xs, ys)
    assert_array_equal(tris, [-1, -1, 1, -1])
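
Trimmed to the final two-triangle square above, the trifinder check fits
in a few lines (same geometry, toy query points):

import matplotlib.tri as mtri
from numpy.testing import assert_array_equal

triang = mtri.Triangulation([0, 1, 0, 1], [0, 0, 1, 1], [[0, 1, 2], [1, 3, 2]])
trifinder = triang.get_trifinder()
assert_array_equal(trifinder([0.2, 0.8, -0.2], [0.5, 0.5, 0.5]), [0, 1, -1])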

Example 123

Project: kivy-designer Source File: test_kv_lang_area.py
    def test_get_widget_path(self):
        p = self.kv.get_widget_path
        # level 0
        float_layout = FloatLayout()
        assert_equal(p(float_layout), [])

        # level 1
        btn1 = Button()
        float_layout.add_widget(btn1)
        assert_equal(p(float_layout), [])
        assert_equal(p(btn1), [0])

        btn2 = Button()
        float_layout.add_widget(btn2)
        assert_equal(p(float_layout), [])
        assert_equal(p(btn1), [0])
        assert_equal(p(btn2), [1])

        # level 2
        btn1_btn1 = Button()
        btn1.add_widget(btn1_btn1)
        assert_equal(p(btn1_btn1), [0, 0])

        btn2_btn1 = Button()
        btn2.add_widget(btn2_btn1)
        assert_equal(p(btn2_btn1), [0, 1])

        btn2_btn2 = Button()
        btn2.add_widget(btn2_btn2)
        assert_equal(p(btn2_btn2), [1, 1])

        btn2_btn3 = Button()
        btn2.add_widget(btn2_btn3)
        assert_equal(p(btn2_btn3), [2, 1])

        # level 3

        btn2_btn3_btn1 = Button()
        btn2_btn3.add_widget(btn2_btn3_btn1)
        assert_equal(p(btn2_btn3_btn1), [0, 2, 1])
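
The path convention being tested (index within each ancestor's children,
leaf first) can be sketched with a toy tree in place of Kivy widgets:

from nose.tools import assert_equal

class Node(object):
    # toy widget standing in for a Kivy widget
    def __init__(self):
        self.children = []
        self.parent = None

    def add(self, child):
        child.parent = self
        self.children.append(child)

def widget_path(w):
    # index of the widget in each ancestor's children, leaf first
    path = []
    while w.parent is not None:
        path.append(w.parent.children.index(w))
        w = w.parent
    return path

root, a, b = Node(), Node(), Node()
root.add(a)
root.add(b)
leaf = Node()
b.add(leaf)
assert_equal(widget_path(root), [])
assert_equal(widget_path(leaf), [0, 1])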

Example 124

Project: mne-python Source File: test_time_gen.py
@slow_test
@requires_sklearn_0_15
def test_generalization_across_time():
    """Test time generalization decoding
    """
    from sklearn.svm import SVC
    from sklearn.base import is_classifier
    # KernelRidge is used for testing 1) regression analyses 2) n-dimensional
    # predictions.
    from sklearn.kernel_ridge import KernelRidge
    from sklearn.preprocessing import LabelEncoder
    from sklearn.metrics import roc_auc_score, mean_squared_error

    epochs = make_epochs()
    y_4classes = np.hstack((epochs.events[:7, 2], epochs.events[7:, 2] + 1))
    if check_version('sklearn', '0.18'):
        from sklearn.model_selection import (KFold, StratifiedKFold,
                                             ShuffleSplit, LeaveOneGroupOut)
        cv = LeaveOneGroupOut()
        cv_shuffle = ShuffleSplit()
        # XXX we cannot pass any other parameters than X and y to cv.split
        # so we have to build it beforehand
        cv_lolo = [(train, test) for train, test in cv.split(
                   y_4classes, y_4classes, y_4classes)]

        # With sklearn >= 0.17, `clf` can be identified as a regressor, and
        # the scoring metrics can therefore be automatically assigned.
        scorer_regress = None
    else:
        from sklearn.cross_validation import (KFold, StratifiedKFold,
                                              ShuffleSplit, LeaveOneLabelOut)
        cv_shuffle = ShuffleSplit(len(epochs))
        cv_lolo = LeaveOneLabelOut(y_4classes)

        # With sklearn < 0.17, `clf` cannot be identified as a regressor, and
        # therefore the scoring metrics cannot be automatically assigned.
        scorer_regress = mean_squared_error
    # Test default running
    gat = GeneralizationAcrossTime(picks='foo')
    assert_equal("<GAT | no fit, no prediction, no score>", "%s" % gat)
    assert_raises(ValueError, gat.fit, epochs)
    with warnings.catch_warnings(record=True):
        # check classic fit + check manual picks
        gat.picks = [0]
        gat.fit(epochs)
        # check optional y as array
        gat.picks = None
        gat.fit(epochs, y=epochs.events[:, 2])
        # check optional y as list
        gat.fit(epochs, y=epochs.events[:, 2].tolist())
    assert_equal(len(gat.picks_), len(gat.ch_names))
    assert_equal(len(gat.ch_names), 1)
    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), no "
                 "prediction, no score>", '%s' % gat)
    assert_equal(gat.ch_names, epochs.ch_names)
    # test different predict function:
    gat = GeneralizationAcrossTime(predict_method='decision_function')
    gat.fit(epochs)
    # With classifier, the default cv is StratifiedKFold
    assert_true(gat.cv_.__class__ == StratifiedKFold)
    gat.predict(epochs)
    assert_array_equal(np.shape(gat.y_pred_), (15, 15, 14, 1))
    gat.predict_method = 'predict_proba'
    gat.predict(epochs)
    assert_array_equal(np.shape(gat.y_pred_), (15, 15, 14, 2))
    gat.predict_method = 'foo'
    assert_raises(NotImplementedError, gat.predict, epochs)
    gat.predict_method = 'predict'
    gat.predict(epochs)
    assert_array_equal(np.shape(gat.y_pred_), (15, 15, 14, 1))
    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), "
                 "predicted 14 epochs, no score>",
                 "%s" % gat)
    gat.score(epochs)
    assert_true(gat.scorer_.__name__ == 'accuracy_score')
    # check clf / predict_method combinations for which the scoring metrics
    # cannot be inferred.
    gat.scorer = None
    gat.predict_method = 'decision_function'
    assert_raises(ValueError, gat.score, epochs)
    # Check specifying y manually
    gat.predict_method = 'predict'
    gat.score(epochs, y=epochs.events[:, 2])
    gat.score(epochs, y=epochs.events[:, 2].tolist())
    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), "
                 "predicted 14 epochs,\n scored "
                 "(accuracy_score)>", "%s" % gat)
    with warnings.catch_warnings(record=True):
        gat.fit(epochs, y=epochs.events[:, 2])

    old_mode = gat.predict_mode
    gat.predict_mode = 'super-foo-mode'
    assert_raises(ValueError, gat.predict, epochs)
    gat.predict_mode = old_mode

    gat.score(epochs, y=epochs.events[:, 2])
    assert_true("accuracy_score" in '%s' % gat.scorer_)
    epochs2 = epochs.copy()

    # check _DecodingTime class
    assert_equal("<DecodingTime | start: -0.200 (s), stop: 0.499 (s), step: "
                 "0.050 (s), length: 0.050 (s), n_time_windows: 15>",
                 "%s" % gat.train_times_)
    assert_equal("<DecodingTime | start: -0.200 (s), stop: 0.499 (s), step: "
                 "0.050 (s), length: 0.050 (s), n_time_windows: 15 x 15>",
                 "%s" % gat.test_times_)

    # the y-check
    gat.predict_mode = 'mean-prediction'
    epochs2.events[:, 2] += 10
    gat_ = copy.deepcopy(gat)
    with use_log_level('error'):
        assert_raises(ValueError, gat_.score, epochs2)
    gat.predict_mode = 'cross-validation'

    # Test basics
    # --- number of trials
    assert_true(gat.y_train_.shape[0] ==
                gat.y_true_.shape[0] ==
                len(gat.y_pred_[0][0]) == 14)
    # ---  number of folds
    assert_true(np.shape(gat.estimators_)[1] == gat.cv)
    # ---  length training size
    assert_true(len(gat.train_times_['slices']) == 15 ==
                np.shape(gat.estimators_)[0])
    # ---  length testing sizes
    assert_true(len(gat.test_times_['slices']) == 15 ==
                np.shape(gat.scores_)[0])
    assert_true(len(gat.test_times_['slices'][0]) == 15 ==
                np.shape(gat.scores_)[1])

    # Test score_mode
    gat.score_mode = 'foo'
    assert_raises(ValueError, gat.score, epochs)
    gat.score_mode = 'fold-wise'
    scores = gat.score(epochs)
    assert_array_equal(np.shape(scores), [15, 15, 5])
    gat.score_mode = 'mean-sample-wise'
    scores = gat.score(epochs)
    assert_array_equal(np.shape(scores), [15, 15])
    gat.score_mode = 'mean-fold-wise'
    scores = gat.score(epochs)
    assert_array_equal(np.shape(scores), [15, 15])
    gat.predict_mode = 'mean-prediction'
    with warnings.catch_warnings(record=True) as w:
        gat.score(epochs)
        assert_true(any("score_mode changed from " in str(ww.message)
                        for ww in w))

    # Test longer time window
    gat = GeneralizationAcrossTime(train_times={'length': .100})
    with warnings.catch_warnings(record=True):
        gat2 = gat.fit(epochs)
    assert_true(gat is gat2)  # return self
    assert_true(hasattr(gat2, 'cv_'))
    assert_true(gat2.cv_ != gat.cv)
    with warnings.catch_warnings(record=True):  # not vectorizing
        scores = gat.score(epochs)
    assert_true(isinstance(scores, np.ndarray))  # type check
    assert_equal(len(scores[0]), len(scores))  # shape check
    assert_equal(len(gat.test_times_['slices'][0][0]), 2)
    # Decim training steps
    gat = GeneralizationAcrossTime(train_times={'step': .100})
    with warnings.catch_warnings(record=True):
        gat.fit(epochs)
    gat.score(epochs)
    assert_true(len(gat.scores_) == len(gat.estimators_) == 8)  # training time
    assert_equal(len(gat.scores_[0]), 15)  # testing time

    # Test start/stop training & test cv without n_fold params
    y_4classes = np.hstack((epochs.events[:7, 2], epochs.events[7:, 2] + 1))
    train_times = dict(start=0.090, stop=0.250)
    gat = GeneralizationAcrossTime(cv=cv_lolo, train_times=train_times)
    # predict without fit
    assert_raises(RuntimeError, gat.predict, epochs)
    with warnings.catch_warnings(record=True):
        gat.fit(epochs, y=y_4classes)
    gat.score(epochs)
    assert_equal(len(gat.scores_), 4)
    assert_equal(gat.train_times_['times'][0], epochs.times[6])
    assert_equal(gat.train_times_['times'][-1], epochs.times[9])

    # Test score without passing epochs & test diagonal decoding
    gat = GeneralizationAcrossTime(test_times='diagonal')
    with warnings.catch_warnings(record=True):  # not vectorizing
        gat.fit(epochs)
    assert_raises(RuntimeError, gat.score)
    with warnings.catch_warnings(record=True):  # not vectorizing
        gat.predict(epochs)
    scores = gat.score()
    assert_true(scores is gat.scores_)
    assert_equal(np.shape(gat.scores_), (15, 1))
    assert_array_equal([tim for ttime in gat.test_times_['times']
                        for tim in ttime], gat.train_times_['times'])
    # Test generalization across conditions
    gat = GeneralizationAcrossTime(predict_mode='mean-prediction', cv=2)
    with warnings.catch_warnings(record=True):
        gat.fit(epochs[0:6])
    with warnings.catch_warnings(record=True):
        # There are some empty test folds because of n_trials
        gat.predict(epochs[7:])
        gat.score(epochs[7:])

    # Test training time parameters
    gat_ = copy.deepcopy(gat)
    # --- start stop outside time range
    gat_.train_times = dict(start=-999.)
    with use_log_level('error'):
        assert_raises(ValueError, gat_.fit, epochs)
    gat_.train_times = dict(start=999.)
    assert_raises(ValueError, gat_.fit, epochs)
    # --- impossible slices
    gat_.train_times = dict(step=.000001)
    assert_raises(ValueError, gat_.fit, epochs)
    gat_.train_times = dict(length=.000001)
    assert_raises(ValueError, gat_.fit, epochs)
    gat_.train_times = dict(length=999.)
    assert_raises(ValueError, gat_.fit, epochs)

    # Test testing time parameters
    # --- outside time range
    gat.test_times = dict(start=-999.)
    with warnings.catch_warnings(record=True):  # no epochs in fold
        assert_raises(ValueError, gat.predict, epochs)
    gat.test_times = dict(start=999.)
    with warnings.catch_warnings(record=True):  # no test epochs
        assert_raises(ValueError, gat.predict, epochs)
    # --- impossible slices
    gat.test_times = dict(step=.000001)
    with warnings.catch_warnings(record=True):  # no test epochs
        assert_raises(ValueError, gat.predict, epochs)
    gat_ = copy.deepcopy(gat)
    gat_.train_times_['length'] = .000001
    gat_.test_times = dict(length=.000001)
    with warnings.catch_warnings(record=True):  # no test epochs
        assert_raises(ValueError, gat_.predict, epochs)
    # --- test time region of interest
    gat.test_times = dict(step=.150)
    with warnings.catch_warnings(record=True):  # not vectorizing
        gat.predict(epochs)
    assert_array_equal(np.shape(gat.y_pred_), (15, 5, 14, 1))
    # --- silly value
    gat.test_times = 'foo'
    with warnings.catch_warnings(record=True):  # no test epochs
        assert_raises(ValueError, gat.predict, epochs)
    assert_raises(RuntimeError, gat.score)
    # --- unmatched length between training and testing time
    gat.test_times = dict(length=.150)
    assert_raises(ValueError, gat.predict, epochs)
    # --- irregular length training and testing times
    # 2 estimators, the first one is trained on two successive time samples
    # whereas the second one is trained on a single time sample.
    train_times = dict(slices=[[0, 1], [1]])
    # The first estimator is tested once, the second estimator is tested on
    # two successive time samples.
    test_times = dict(slices=[[[0, 1]], [[0], [1]]])
    gat = GeneralizationAcrossTime(train_times=train_times,
                                   test_times=test_times)
    gat.fit(epochs)
    with warnings.catch_warnings(record=True):  # not vectorizing
        gat.score(epochs)
    assert_array_equal(np.shape(gat.y_pred_[0]), [1, len(epochs), 1])
    assert_array_equal(np.shape(gat.y_pred_[1]), [2, len(epochs), 1])
    # check that testing times cannot be inferred automatically for ad hoc
    # training times
    gat.test_times = None
    assert_raises(ValueError, gat.predict, epochs)

    svc = SVC(C=1, kernel='linear', probability=True)
    gat = GeneralizationAcrossTime(clf=svc, predict_mode='mean-prediction')
    with warnings.catch_warnings(record=True):
        gat.fit(epochs)

    # sklearn needs it: cf.
    # https://github.com/scikit-learn/scikit-learn/issues/2723
    # and http://bit.ly/1u7t8UT
    with use_log_level('error'):
        assert_raises(ValueError, gat.score, epochs2)
        gat.score(epochs)
    assert_true(0.0 <= np.min(scores) <= 1.0)
    assert_true(0.0 <= np.max(scores) <= 1.0)

    # Test that an error is raised if we train on one dataset, test on
    # another, and don't specify an appropriate cv:
    gat = GeneralizationAcrossTime(cv=cv_shuffle)
    gat.fit(epochs)
    with warnings.catch_warnings(record=True):
        gat.fit(epochs)

    gat.predict(epochs)
    assert_raises(ValueError, gat.predict, epochs[:10])

    # Make CV with some empty train and test folds:
    # --- empty test fold(s) should warn when gat.predict()
    gat._cv_splits[0] = [gat._cv_splits[0][0], np.empty(0)]
    with warnings.catch_warnings(record=True) as w:
        gat.predict(epochs)
        assert_true(len(w) > 0)
        assert_true(any('do not have any test epochs' in str(ww.message)
                        for ww in w))
    # --- empty train fold(s) should raise when gat.fit()
    gat = GeneralizationAcrossTime(cv=[([0], [1]), ([], [0])])
    assert_raises(ValueError, gat.fit, epochs[:2])

    # Check that still works with classifier that output y_pred with
    # shape = (n_trials, 1) instead of (n_trials,)
    if check_version('sklearn', '0.17'):  # no is_regressor before v0.17
        gat = GeneralizationAcrossTime(clf=KernelRidge(), cv=2)
        epochs.crop(None, epochs.times[2])
        gat.fit(epochs)
        # With regression the default cv is KFold and not StratifiedKFold
        assert_true(gat.cv_.__class__ == KFold)
        gat.score(epochs)
        # with regression the default scoring metric is mean squared error
        assert_true(gat.scorer_.__name__ == 'mean_squared_error')

    # Test combinations of complex scenarios
    # 2 or more distinct classes
    n_classes = [2, 4]  # 4 tested
    # nicely ordered labels or not
    le = LabelEncoder()
    y = le.fit_transform(epochs.events[:, 2])
    y[len(y) // 2:] += 2
    ys = (y, y + 1000)
    # Univariate and multivariate prediction
    svc = SVC(C=1, kernel='linear', probability=True)
    reg = KernelRidge()

    def scorer_proba(y_true, y_pred):
        return roc_auc_score(y_true, y_pred[:, 0])

    # We're testing 3 scenarios: default, classifier + predict_proba, regressor
    scorers = [None, scorer_proba, scorer_regress]
    predict_methods = [None, 'predict_proba', None]
    clfs = [svc, svc, reg]
    # Test all combinations
    for clf, predict_method, scorer in zip(clfs, predict_methods, scorers):
        for y in ys:
            for n_class in n_classes:
                for predict_mode in ['cross-validation', 'mean-prediction']:
                    # Cannot use AUC for n_class > 2
                    if (predict_method == 'predict_proba' and n_class != 2):
                        continue

                    y_ = y % n_class

                    with warnings.catch_warnings(record=True):
                        gat = GeneralizationAcrossTime(
                            cv=2, clf=clf, scorer=scorer,
                            predict_mode=predict_mode)
                        gat.fit(epochs, y=y_)
                        gat.score(epochs, y=y_)

                    # Check that scorer is correctly defined manually and
                    # automatically.
                    scorer_name = gat.scorer_.__name__
                    if scorer is None:
                        if is_classifier(clf):
                            assert_equal(scorer_name, 'accuracy_score')
                        else:
                            assert_equal(scorer_name, 'mean_squared_error')
                    else:
                        assert_equal(scorer_name, scorer.__name__)
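
The long test above keeps returning to three idioms: comparing an object's repr via "%s" % obj, identifying the auto-selected scorer by its __name__, and asserting error paths with assert_raises. A minimal, self-contained sketch of the same idioms; the Dummy class is hypothetical and merely stands in for the fitted GAT object:

from nose.tools import assert_equal, assert_raises

class Dummy(object):
    """Hypothetical stand-in for a decoder with a repr and a scorer."""
    def __init__(self):
        self.scorer_ = max  # any named callable will do

    def __repr__(self):
        return "<Dummy | no fit, no prediction, no score>"

    def fit(self, X):
        if len(X) == 0:
            raise ValueError("X must be non-empty")
        return self

d = Dummy()
assert_equal("<Dummy | no fit, no prediction, no score>", "%s" % d)  # repr check
assert_equal(d.scorer_.__name__, 'max')  # scorer identified by its name
assert_raises(ValueError, d.fit, [])     # invalid input raises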

Example 125

Project: codespell Source File: test_basic.py
def test_interactivity():
    """Test interaction"""
    with tempfile.NamedTemporaryFile('w') as f:
        assert_equal(main(f.name), 0)  # empty file
        f.write('abandonned\n')
        f.flush()
        assert_equal(main('-i', '-1', f.name), 1)  # bad
        with FakeStdin('y\n'):
            assert_equal(main('-i', '3', f.name), 1)
        with CaptureStdout() as sio:
            with FakeStdin('n\n'):
                assert_equal(main('-w', '-i', '3', f.name), 0)
        assert_true('==>' in sio[0])
        with CaptureStdout():
            with FakeStdin('x\ny\n'):
                assert_equal(main('-w', '-i', '3', f.name), 0)
        assert_equal(main(f.name), 0)
    with tempfile.NamedTemporaryFile('w') as f:
        f.write('abandonned\n')
        f.flush()
        assert_equal(main(f.name), 1)
        with CaptureStdout():
            with FakeStdin(' '):  # blank input -> Y
                assert_equal(main('-w', '-i', '3', f.name), 0)
        assert_equal(main(f.name), 0)
    # multiple options
    with tempfile.NamedTemporaryFile('w') as f:
        f.write('ackward\n')
        f.flush()
        assert_equal(main(f.name), 1)
        with CaptureStdout():
            with FakeStdin(' \n'):  # blank input -> nothing
                assert_equal(main('-w', '-i', '3', f.name), 0)
        assert_equal(main(f.name), 1)
        with CaptureStdout():
            with FakeStdin('0\n'):  # option 0 -> first suggestion ('awkward')
                assert_equal(main('-w', '-i', '3', f.name), 0)
        assert_equal(main(f.name), 0)
        with open(f.name, 'r') as f_read:
            assert_equal(f_read.read(), 'awkward\n')
        f.seek(0)
        f.write('ackward\n')
        f.flush()
        assert_equal(main(f.name), 1)
        with CaptureStdout() as sio:
            with FakeStdin('x\n1\n'):  # invalid option, then option 1 ('backward')
                assert_equal(main('-w', '-i', '3', f.name), 0)
        assert_true('a valid option' in sio[0])
        assert_equal(main(f.name), 0)
        with open(f.name, 'r') as f:
            assert_equal(f.read(), 'backward\n')
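
The backbone of this test is asserting a CLI entry point's return code before and after edits to a temporary file. A stripped-down sketch of that pattern, assuming a POSIX platform (the test reopens a NamedTemporaryFile by name, which does not work on Windows); count_misspellings is a hypothetical stand-in for codespell's main():

from nose.tools import assert_equal
import tempfile

def count_misspellings(path, known=('abandonned',)):
    """Hypothetical stand-in for main(): count flagged words in a file."""
    with open(path) as fid:
        return sum(line.strip() in known for line in fid)

with tempfile.NamedTemporaryFile('w') as f:
    assert_equal(count_misspellings(f.name), 0)  # empty file -> 0
    f.write('abandonned\n')
    f.flush()                                    # flush before re-reading by name
    assert_equal(count_misspellings(f.name), 1)  # one misspelling -> 1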

Example 126

Project: mne-python Source File: test_topomap.py
@slow_test
@testing.requires_testing_data
def test_plot_topomap():
    """Test topomap plotting."""
    import matplotlib.pyplot as plt
    from matplotlib.patches import Circle
    # evoked
    warnings.simplefilter('always')
    res = 16
    evoked = read_evokeds(evoked_fname, 'Left Auditory',
                          baseline=(None, 0))

    # Test animation
    _, anim = evoked.animate_topomap(ch_type='grad', times=[0, 0.1],
                                     butterfly=False)
    anim._func(1)  # _animate has to be tested separately on 'Agg' backend.
    plt.close('all')

    ev_bad = evoked.copy().pick_types(meg=False, eeg=True)
    ev_bad.pick_channels(ev_bad.ch_names[:2])
    ev_bad.plot_topomap(times=ev_bad.times[:2] - 1e-6)  # auto, plots EEG
    assert_raises(ValueError, ev_bad.plot_topomap, ch_type='mag')
    assert_raises(TypeError, ev_bad.plot_topomap, head_pos='foo')
    assert_raises(KeyError, ev_bad.plot_topomap, head_pos=dict(foo='bar'))
    assert_raises(ValueError, ev_bad.plot_topomap, head_pos=dict(center=0))
    assert_raises(ValueError, ev_bad.plot_topomap, times=[-100])  # bad time
    assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]])  # bad time

    evoked.plot_topomap(0.1, layout=layout, scale=dict(mag=0.1))
    plt.close('all')
    axes = [plt.subplot(221), plt.subplot(222)]
    evoked.plot_topomap(axes=axes, colorbar=False)
    plt.close('all')
    evoked.plot_topomap(times=[-0.1, 0.2])
    plt.close('all')
    mask = np.zeros_like(evoked.data, dtype=bool)
    mask[[1, 5], :] = True
    evoked.plot_topomap(ch_type='mag', outlines=None)
    times = [0.1]
    evoked.plot_topomap(times, ch_type='eeg', res=res, scale=1)
    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
    evoked.plot_topomap(times, ch_type='planar1', res=res)
    evoked.plot_topomap(times, ch_type='planar2', res=res)
    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
                        show_names=True, mask_params={'marker': 'x'})
    plt.close('all')
    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
                  res=res, average=-1000)
    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
                  res=res, average='hahahahah')

    p = evoked.plot_topomap(times, ch_type='grad', res=res,
                            show_names=lambda x: x.replace('MEG', ''),
                            image_interp='bilinear')
    subplot = [x for x in p.get_children() if
               isinstance(x, matplotlib.axes.Subplot)][0]
    assert_true(all('MEG' not in x.get_text()
                    for x in subplot.get_children()
                    if isinstance(x, matplotlib.text.Text)))

    # Plot array
    for ch_type in ('mag', 'grad'):
        evoked_ = evoked.copy().pick_types(eeg=False, meg=ch_type)
        plot_topomap(evoked_.data[:, 0], evoked_.info)
    # fail with multiple channel types
    assert_raises(ValueError, plot_topomap, evoked.data[0, :], evoked.info)

    # Test title
    def get_texts(p):
        return [x.get_text() for x in p.get_children() if
                isinstance(x, matplotlib.text.Text)]

    p = evoked.plot_topomap(times, ch_type='eeg', res=res, average=0.01)
    assert_equal(len(get_texts(p)), 0)
    p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
    texts = get_texts(p)
    assert_equal(len(texts), 1)
    assert_equal(texts[0], 'Custom')
    plt.close('all')

    # delaunay triangulation warning
    with warnings.catch_warnings(record=True):  # can't show
        warnings.simplefilter('always')
        evoked.plot_topomap(times, ch_type='mag', layout=None, res=res)
    assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
                  proj='interactive')  # projs have already been applied

    # change to no-proj mode
    evoked = read_evokeds(evoked_fname, 'Left Auditory',
                          baseline=(None, 0), proj=False)
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        evoked.plot_topomap(0.1, 'mag', proj='interactive', res=res)
    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
                  np.repeat(.1, 50))
    assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])

    with warnings.catch_warnings(record=True):  # file conventions
        warnings.simplefilter('always')
        projs = read_proj(ecg_fname)
    projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
    plot_projs_topomap(projs, res=res, colorbar=True)
    plt.close('all')
    ax = plt.subplot(111)
    plot_projs_topomap(projs[:1], res=res, axes=ax)  # test axes param
    plt.close('all')
    plot_projs_topomap(read_info(triux_fname)['projs'][-1:])  # grads
    plt.close('all')
    # XXX This one fails due to grads being combined but this proj having
    # all zeros in the grad values -> matplotlib contour error
    # plot_projs_topomap(read_info(triux_fname)['projs'][:1])  # mags
    # plt.close('all')
    for ch in evoked.info['chs']:
        if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
            ch['loc'].fill(0)

    # Remove extra digitization point, so EEG digitization points
    # correspond with the EEG electrodes
    del evoked.info['dig'][85]

    pos = make_eeg_layout(evoked.info).pos[:, :2]
    pos, outlines = _check_outlines(pos, 'head')
    assert_true('head' in outlines.keys())
    assert_true('nose' in outlines.keys())
    assert_true('ear_left' in outlines.keys())
    assert_true('ear_right' in outlines.keys())
    assert_true('autoshrink' in outlines.keys())
    assert_true(outlines['autoshrink'])
    assert_true('clip_radius' in outlines.keys())
    assert_array_equal(outlines['clip_radius'], 0.5)

    pos, outlines = _check_outlines(pos, 'skirt')
    assert_true('head' in outlines.keys())
    assert_true('nose' in outlines.keys())
    assert_true('ear_left' in outlines.keys())
    assert_true('ear_right' in outlines.keys())
    assert_true('autoshrink' in outlines.keys())
    assert_true(not outlines['autoshrink'])
    assert_true('clip_radius' in outlines.keys())
    assert_array_equal(outlines['clip_radius'], 0.625)

    pos, outlines = _check_outlines(pos, 'skirt',
                                    head_pos={'scale': [1.2, 1.2]})
    assert_array_equal(outlines['clip_radius'], 0.75)

    # Plot skirt
    evoked.plot_topomap(times, ch_type='eeg', outlines='skirt')

    # Pass custom outlines without patch
    evoked.plot_topomap(times, ch_type='eeg', outlines=outlines)
    plt.close('all')

    # Test interactive cmap
    fig = plot_evoked_topomap(evoked, times=[0., 0.1], ch_type='eeg',
                              cmap=('Reds', True), title='title')
    fig.canvas.key_press_event('up')
    fig.canvas.key_press_event(' ')
    fig.canvas.key_press_event('down')
    cbar = fig.get_axes()[0].CB  # Fake dragging with mouse.
    ax = cbar.cbar.ax
    _fake_click(fig, ax, (0.1, 0.1))
    _fake_click(fig, ax, (0.1, 0.2), kind='motion')
    _fake_click(fig, ax, (0.1, 0.3), kind='release')

    _fake_click(fig, ax, (0.1, 0.1), button=3)
    _fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion')
    _fake_click(fig, ax, (0.1, 0.3), kind='release')

    fig.canvas.scroll_event(0.5, 0.5, -0.5)  # scroll down
    fig.canvas.scroll_event(0.5, 0.5, 0.5)  # scroll up

    plt.close('all')

    # Pass custom outlines with patch callable
    def patch():
        return Circle((0.5, 0.4687), radius=.46,
                      clip_on=True, transform=plt.gca().transAxes)
    outlines['patch'] = patch
    plot_evoked_topomap(evoked, times, ch_type='eeg', outlines=outlines)

    # Remove digitization points. Now topomap should fail
    evoked.info['dig'] = None
    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
                  times, ch_type='eeg')
    plt.close('all')

    # Error for missing names
    n_channels = len(pos)
    data = np.ones(n_channels)
    assert_raises(ValueError, plot_topomap, data, pos, show_names=True)

    # Test error messages for invalid pos parameter
    pos_1d = np.zeros(n_channels)
    pos_3d = np.zeros((n_channels, 2, 2))
    assert_raises(ValueError, plot_topomap, data, pos_1d)
    assert_raises(ValueError, plot_topomap, data, pos_3d)
    assert_raises(ValueError, plot_topomap, data, pos[:3, :])

    pos_x = pos[:, :1]
    pos_xyz = np.c_[pos, np.zeros(n_channels)[:, np.newaxis]]
    assert_raises(ValueError, plot_topomap, data, pos_x)
    assert_raises(ValueError, plot_topomap, data, pos_xyz)

    # An n_channels x 4 matrix should work though. In this case (x, y, width,
    # height) is assumed.
    pos_xywh = np.c_[pos, np.zeros((n_channels, 2))]
    plot_topomap(data, pos_xywh)
    plt.close('all')

    # Test peak finder
    axes = [plt.subplot(131), plt.subplot(132)]
    with warnings.catch_warnings(record=True):  # rightmost column
        evoked.plot_topomap(times='peaks', axes=axes)
    plt.close('all')
    evoked.data = np.zeros(evoked.data.shape)
    evoked.data[50][1] = 1
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[1])
    evoked.data[80][100] = 1
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 100]])
    evoked.data[2][95] = 2
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 95]])
    assert_array_equal(_find_peaks(evoked, 1), evoked.times[95])
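
Much of this test is negative testing: every malformed pos array must raise ValueError. The same shape-validation idiom in isolation; check_pos is a hypothetical validator, not mne-python's:

from nose.tools import assert_raises
import numpy as np
from numpy.testing import assert_array_equal

def check_pos(pos):
    """Hypothetical validator: require an n_channels x 2 (or x 4) array."""
    pos = np.asarray(pos)
    if pos.ndim != 2 or pos.shape[1] not in (2, 4):
        raise ValueError("pos must be n_channels x 2 or n_channels x 4")
    return pos[:, :2]

pos = np.zeros((5, 2))
assert_array_equal(check_pos(pos), pos)                    # valid shape passes
assert_raises(ValueError, check_pos, np.zeros(5))          # 1D input
assert_raises(ValueError, check_pos, np.zeros((5, 2, 2)))  # 3D input
assert_raises(ValueError, check_pos, np.zeros((5, 3)))     # wrong column count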

Example 127

Project: mne-python Source File: test_ica.py
@slow_test
@requires_sklearn
def test_ica_additional():
    """Test additional ICA functionality."""
    import matplotlib.pyplot as plt
    tempdir = _TempDir()
    stop2 = 500
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    # XXX This breaks the tests :(
    # raw.info['bads'] = [raw.ch_names[1]]
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # test if n_components=None works
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=None,
                  max_pca_components=None,
                  n_pca_components=None, random_state=0)
        ica.fit(epochs, picks=picks, decim=3)
    # for testing eog functionality
    picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=True, exclude='bads')
    epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
                        baseline=(None, 0), preload=True)

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_true(ica.info is None)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5])
    assert_true(isinstance(ica.info, Info))
    assert_true(ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_raises(RuntimeError, ica.save, '')
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
    assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert_true(0 in ica.labels_["blinks"])
    template = _get_ica_map(ica)[0]
    corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])
    plt.close('all')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
        ica.save(ica_badname)
        read_ica(ica_badname)
    assert_naming(w, 'test_ica.py', 2)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_equal(raw_._data.shape[1], n_samples)

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4,
              n_pca_components=4)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_true(ica.n_components_ == 4)
    ica_var = _ica_explained_variance(ica, raw, normalize=True)
    assert_true(np.all(ica_var[:-1] >= ica_var[1:]))

    # test ica sorting
    ica.exclude = [0]
    ica.labels_ = dict(blink=[0], think=[1])
    ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
    assert_equal(ica_sorted.exclude, [3])
    assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))

    # epochs extraction from raw fit
    assert_raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
                  n_pca_components=4)
        with warnings.catch_warnings(record=True):  # ICA does not converge
            ica.fit(raw, picks=picks, start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert_true(ica.mixing_matrix_.shape == (2, 2))
        assert_true(ica.unmixing_matrix_.shape == (2, 2))
        assert_true(ica.pca_components_.shape == (4, len(picks)))
        assert_true(sources.shape[1] == ica.n_components_)

        for exclude in [[], [0]]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert_true(ica.exclude == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
                                        ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        ica_raw.filter(4, 20, l_trans_bandwidth='auto',
                       h_trans_bandwidth='auto', filter_length='auto',
                       phase='zero', fir_window='hamming')
        assert_equal(ica_raw.info['lowpass'], 20.)
        assert_equal(ica_raw.info['highpass'], 4.)
        assert_true((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        ica_raw.notch_filter([10], filter_length='auto', trans_bandwidth=10,
                             phase='zero', fir_window='hamming')
        assert_true((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert_true(ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ _pre_whitener')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                     'pca_mean_', 'pca_explained_variance_',
                     '_pre_whitener']:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert_true(ica.ch_names == ica_read.ch_names)
        assert_true(isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw, target='EOG 061', score_func=func,
                                   start=0, stop=10)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, score_func=stats.skew)
    # check exception handling
    assert_raises(ValueError, ica.score_sources, raw,
                  target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
                             eog_ch=ch_name, skew_criterion=idx,
                             var_criterion=idx, kurt_criterion=idx)
    with warnings.catch_warnings(record=True):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_eog(raw)
        assert_equal(len(scores), ica.n_components_)

        ica.labels_ = None
        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
                      method='ctps')
        assert_raises(ValueError, ica.find_bads_ecg, raw,
                      method='crazy-coupling')

        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
        idx, scores = ica.find_bads_eog(raw)
        assert_true(isinstance(scores, list))
        assert_equal(len(scores[0]), ica.n_components_)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog, target='EOG 061',
                                   score_func=func)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, epochs,
                  target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw, target='MEG 1531',
                                   score_func='pearsonr')

    with warnings.catch_warnings(record=True):  # filter attenuation warning
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])

    assert_true(ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw, target='EOG 061',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])

    assert_true(eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_true(len(ica_raw._filenames) == 0)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert_true(ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
    assert_true(ica_epochs._raw is None)
    assert_true(ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert_true(ncomps_ == expected)
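
The read/write checks boil down to a round-trip: save, reload, then compare each attribute with assert_equal for exact values and assert_array_almost_equal for float arrays. A minimal sketch of that round-trip using pickle in place of the FIF format; the Model class is hypothetical:

import os
import pickle
import tempfile

import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_equal

class Model(object):
    """Hypothetical model with scalar and array attributes."""
    def __init__(self):
        self.n_components = 3
        self.mixing_ = np.random.RandomState(0).randn(3, 3)

model = Model()
tmp = tempfile.NamedTemporaryFile(suffix='.pkl', delete=False)
tmp.close()
with open(tmp.name, 'wb') as fid:
    pickle.dump(model, fid)
with open(tmp.name, 'rb') as fid:
    model_read = pickle.load(fid)
os.remove(tmp.name)

assert_equal(model.n_components, model_read.n_components)     # exact for ints
assert_array_almost_equal(model.mixing_, model_read.mixing_)  # tolerant for floats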

Example 128

Project: flanker Source File: parser_address_list_test.py
def test_simple_valid():
    s = '''http://foo.com:8080; "Ev K." <[email protected]>, "Alex K" [email protected], "Tom, S" "tom+[a]"@s.com'''
    addrs = address.parse_list(s)

    assert_equal(4, len(addrs))

    assert_equal(addrs[0].addr_type, 'url')
    assert_equal(addrs[0].address, 'http://foo.com:8080')
    assert_equal(addrs[0].full_spec(), 'http://foo.com:8080')

    assert_equal(addrs[1].addr_type, 'email')
    assert_equal(addrs[1].display_name, 'Ev K.')
    assert_equal(addrs[1].address, '[email protected]')
    assert_equal(addrs[1].full_spec(), '"Ev K." <[email protected]>')

    assert_equal(addrs[2].addr_type, 'email')
    assert_equal(addrs[2].display_name, 'Alex K')
    assert_equal(addrs[2].address, '[email protected]')
    assert_equal(addrs[2].full_spec(), 'Alex K <[email protected]>')

    assert_equal(addrs[3].addr_type, 'email')
    assert_equal(addrs[3].display_name, 'Tom, S')
    assert_equal(addrs[3].address, '"tom+[a]"@s.com')
    assert_equal(addrs[3].full_spec(), '"Tom, S" <"tom+[a]"@s.com>')


    s = '''"Allan G\'o"  <[email protected]>, "Os Wi" <[email protected]>'''
    addrs = address.parse_list(s)

    assert_equal(2, len(addrs))

    assert_equal(addrs[0].addr_type, 'email')
    assert_equal(addrs[0].display_name, 'Allan G\'o')
    assert_equal(addrs[0].address, '[email protected]')
    assert_equal(addrs[0].full_spec(), 'Allan G\'o <[email protected]>')

    assert_equal(addrs[1].addr_type, 'email')
    assert_equal(addrs[1].display_name, 'Os Wi')
    assert_equal(addrs[1].address, '[email protected]')
    assert_equal(addrs[1].full_spec(), 'Os Wi <[email protected]>')


    s = u'''I am also A <[email protected]>, Zeka <[email protected]> ;Gonzalo Bañuelos<[email protected]>'''
    addrs = address.parse_list(s)

    assert_equal(3, len(addrs))

    assert_equal(addrs[0].addr_type, 'email')
    assert_equal(addrs[0].display_name, 'I am also A')
    assert_equal(addrs[0].address, '[email protected]')
    assert_equal(addrs[0].full_spec(), 'I am also A <[email protected]>')

    assert_equal(addrs[1].addr_type, 'email')
    assert_equal(addrs[1].display_name, 'Zeka')
    assert_equal(addrs[1].address, '[email protected]')
    assert_equal(addrs[1].full_spec(), 'Zeka <[email protected]>')

    assert_equal(addrs[2].addr_type, 'email')
    assert_equal(addrs[2].display_name, u'Gonzalo Bañuelos')
    assert_equal(addrs[2].address, '[email protected]')
    assert_equal(addrs[2].full_spec(), '=?utf-8?q?Gonzalo_Ba=C3=B1uelos?= <[email protected]>')


    s = r'''"Escaped" "\e\s\c\a\p\e\d"@sld.com; http://userid:[email protected]:8080, "Dmitry" <my|'`!#_~%$&{}?^+-*@host.com>'''
    addrs = address.parse_list(s)

    assert_equal(3, len(addrs))

    assert_equal(addrs[0].addr_type, 'email')
    assert_equal(addrs[0].display_name, 'Escaped')
    assert_equal(addrs[0].address, '"\e\s\c\\a\p\e\d"@sld.com')
    assert_equal(addrs[0].full_spec(), 'Escaped <"\e\s\c\\a\p\e\d"@sld.com>')

    assert_equal(addrs[1].addr_type, 'url')
    assert_equal(addrs[1].address, 'http://userid:[email protected]:8080')
    assert_equal(addrs[1].full_spec(), 'http://userid:[email protected]:8080')

    assert_equal(addrs[2].addr_type, 'email')
    assert_equal(addrs[2].display_name, 'Dmitry')
    assert_equal(addrs[2].address, 'my|\'`!#_~%$&{}?^+-*@host.com')
    assert_equal(addrs[2].full_spec(), 'Dmitry <my|\'`!#_~%$&{}?^+-*@host.com>')


    s = "http://foo.com/blah_blah_(wikipedia)"
    addrs = address.parse_list(s)

    assert_equal(1, len(addrs))

    assert_equal(addrs[0].addr_type, 'url')
    assert_equal(addrs[0].address, 'http://foo.com/blah_blah_(wikipedia)')
    assert_equal(addrs[0].full_spec(), 'http://foo.com/blah_blah_(wikipedia)')


    s = "Sasha Klizhentas <[email protected]>"
    addrs = address.parse_list(s)

    assert_equal(1, len(addrs))

    assert_equal(addrs[0].addr_type, 'email')
    assert_equal(addrs[0].display_name, 'Sasha Klizhentas')
    assert_equal(addrs[0].address, '[email protected]')
    assert_equal(addrs[0].full_spec(), 'Sasha Klizhentas <[email protected]>')


    s = "[email protected],[email protected]"
    addrs = address.parse_list(s)

    assert_equal(2, len(addrs))

    assert_equal(addrs[0].addr_type, 'email')
    assert_equal(addrs[0].display_name, '')
    assert_equal(addrs[0].address, '[email protected]')
    assert_equal(addrs[0].full_spec(), '[email protected]')

    assert_equal(addrs[1].addr_type, 'email')
    assert_equal(addrs[1].display_name, '')
    assert_equal(addrs[1].address, '[email protected]')
    assert_equal(addrs[1].full_spec(), '[email protected]')
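
The structure worth noting: assert the parsed count first, then check each field of each address. A toy version of the same flow; parse_list here is a deliberately naive stand-in, not flanker's parser:

from nose.tools import assert_equal

def parse_list(s):
    """Toy parser (not flanker's): split 'Name <addr>' entries on commas."""
    addrs = []
    for part in s.split(','):
        name, _, rest = part.strip().partition('<')
        addrs.append({'display_name': name.strip(),
                      'address': rest.rstrip('>')})
    return addrs

addrs = parse_list('Ev K <ev@example.com>, Alex K <alex@example.com>')
assert_equal(2, len(addrs))                      # always check the count first
assert_equal(addrs[0]['display_name'], 'Ev K')   # then field-by-field values
assert_equal(addrs[1]['address'], 'alex@example.com')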

Example 129

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_validation.py
@ignore_warnings
def test_check_array():
    # accept_sparse == None
    # raise error on sparse inputs
    X = [[1, 2], [3, 4]]
    X_csr = sp.csr_matrix(X)
    assert_raises(TypeError, check_array, X_csr)
    # ensure_2d
    assert_warns(DeprecationWarning, check_array, [0, 1, 2])
    X_array = check_array([0, 1, 2])
    assert_equal(X_array.ndim, 2)
    X_array = check_array([0, 1, 2], ensure_2d=False)
    assert_equal(X_array.ndim, 1)
    # don't allow ndim > 3
    X_ndim = np.arange(8).reshape(2, 2, 2)
    assert_raises(ValueError, check_array, X_ndim)
    check_array(X_ndim, allow_nd=True)  # doesn't raise
    # force_all_finite
    X_inf = np.arange(4).reshape(2, 2).astype(np.float)
    X_inf[0, 0] = np.inf
    assert_raises(ValueError, check_array, X_inf)
    check_array(X_inf, force_all_finite=False)  # no raise
    # nan check
    X_nan = np.arange(4).reshape(2, 2).astype(np.float)
    X_nan[0, 0] = np.nan
    assert_raises(ValueError, check_array, X_nan)
    check_array(X_nan, force_all_finite=False)  # no raise

    # dtype and order enforcement.
    X_C = np.arange(4).reshape(2, 2).copy("C")
    X_F = X_C.copy("F")
    X_int = X_C.astype(np.int)
    X_float = X_C.astype(np.float)
    Xs = [X_C, X_F, X_int, X_float]
    dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
    orders = ['C', 'F', None]
    copys = [True, False]

    for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
        X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
        if dtype is not None:
            assert_equal(X_checked.dtype, dtype)
        else:
            assert_equal(X_checked.dtype, X.dtype)
        if order == 'C':
            assert_true(X_checked.flags['C_CONTIGUOUS'])
            assert_false(X_checked.flags['F_CONTIGUOUS'])
        elif order == 'F':
            assert_true(X_checked.flags['F_CONTIGUOUS'])
            assert_false(X_checked.flags['C_CONTIGUOUS'])
        if copy:
            assert_false(X is X_checked)
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and
                    X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
                    and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
                assert_true(X is X_checked)

    # allowed sparse != None
    X_csc = sp.csc_matrix(X_C)
    X_coo = X_csc.tocoo()
    X_dok = X_csc.todok()
    X_int = X_csc.astype(np.int)
    X_float = X_csc.astype(np.float)

    Xs = [X_csc, X_coo, X_dok, X_int, X_float]
    accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
    for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
                                                 copys):
        with warnings.catch_warnings(record=True) as w:
            X_checked = check_array(X, dtype=dtype,
                                    accept_sparse=accept_sparse, copy=copy)
        if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
            message = str(w[0].message)
            messages = ["object dtype is not supported by sparse matrices",
                        "Can't check dok sparse matrix for nan or inf."]
            assert_true(message in messages)
        else:
            assert_equal(len(w), 0)
        if dtype is not None:
            assert_equal(X_checked.dtype, dtype)
        else:
            assert_equal(X_checked.dtype, X.dtype)
        if X.format in accept_sparse:
            # no change if allowed
            assert_equal(X.format, X_checked.format)
        else:
            # got converted
            assert_equal(X_checked.format, accept_sparse[0])
        if copy:
            assert_false(X is X_checked)
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and X.format == X_checked.format):
                assert_true(X is X_checked)

    # other input formats
    # convert lists to arrays
    X_dense = check_array([[1, 2], [3, 4]])
    assert_true(isinstance(X_dense, np.ndarray))
    # raise on too deep lists
    assert_raises(ValueError, check_array, X_ndim.tolist())
    check_array(X_ndim.tolist(), allow_nd=True)  # doesn't raise
    # convert weird stuff to arrays
    X_no_array = NotAnArray(X_dense)
    result = check_array(X_no_array)
    assert_true(isinstance(result, np.ndarray))
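
The distinctive idiom above is sweeping a parameter grid with itertools.product and asserting inside the loop, so every combination is exercised by a single test. The same skeleton, reduced to dtype handling with plain numpy:

from itertools import product

import numpy as np
from nose.tools import assert_equal

X = np.arange(4).reshape(2, 2)
dtypes = [np.int32, np.float64, None]
orders = ['C', 'F']

for dtype, order in product(dtypes, orders):
    X_out = np.array(X, dtype=dtype, order=order)
    # dtype=None means "keep the input dtype", mirroring check_array
    expected = X.dtype if dtype is None else dtype
    assert_equal(X_out.dtype, expected)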

Example 130

Project: angr Source File: test_cdg.py
def test_dominance_frontiers():

    # This graph comes from Fig. 1 of the paper "An Efficient Method of
    # Computing Static Single Assignment Form" by Ron Cytron et al.

    # Create a project with a random binary - it will not be used anyways
    p = angr.Project(test_location + "/x86_64/datadep_test",
                     load_options={'auto_load_libs': False},
                     use_sim_procedures=True)

    # Create the CDG analysis
    cdg = p.analyses.CDG(None, no_construct=True)

    # Create our mock control flow graph
    g = networkx.DiGraph()
    g.add_edge('Entry', 1)
    g.add_edge(1, 2)
    g.add_edge(2, 3)
    g.add_edge(2, 7)
    g.add_edge(3, 4)
    g.add_edge(3, 5)
    g.add_edge(4, 6)
    g.add_edge(5, 6)
    g.add_edge(6, 8)
    g.add_edge(7, 8)
    g.add_edge(8, 9)
    g.add_edge(9, 10)
    g.add_edge(9, 11)
    g.add_edge(11, 9)
    g.add_edge(10, 11)
    g.add_edge(11, 12)
    g.add_edge(12, 2)
    g.add_edge(12, 'Exit')
    g.add_edge('Entry', 'Exit')

    # Create the mock post-dom graph
    postdom = networkx.DiGraph()
    postdom.add_edge('Entry', 1)
    postdom.add_edge(1, 2)
    postdom.add_edge(2, 3)
    postdom.add_edge(3, 4)
    postdom.add_edge(3, 5)
    postdom.add_edge(3, 6)
    postdom.add_edge(2, 7)
    postdom.add_edge(2, 8)
    postdom.add_edge(8, 9)
    postdom.add_edge(9, 10)
    postdom.add_edge(9, 11)
    postdom.add_edge(11, 12)
    postdom.add_edge('Entry', 'Exit')

    # Manually set the normalized_cfg
    cdg._normalized_cfg = g

    # Call df_construct()
    df = cdg._df_construct(postdom)

    standard_df = {
        1: { 'Exit' },
        2: { 'Exit', 2 },
        3: { 8 },
        4: { 6 },
        5: { 6 },
        6: { 8 },
        7: { 8 },
        8: { 'Exit', 2 },
        9: { 'Exit', 2, 9 },
        10: { 11 },
        11: { 'Exit', 2, 9 },
        12: { 'Exit', 2 },
        'Entry': set(),
        'Exit': set()
    }
    nose.tools.assert_equal(df, standard_df)
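
A single assert_equal over the whole {node: frontier} dict checks every node at once and prints a full diff on failure. A smaller instance of the same check on a diamond-shaped CFG, using networkx's own dominance_frontiers (available since networkx 1.10) rather than angr's:

import networkx
from nose.tools import assert_equal

# diamond CFG: both branches re-join at 'exit'
g = networkx.DiGraph()
g.add_edges_from([('entry', 'a'), ('entry', 'b'),
                  ('a', 'exit'), ('b', 'exit')])

df = networkx.dominance_frontiers(g, 'entry')
# 'exit' is where a's and b's dominance ends, so it is in their frontiers
assert_equal(df, {'entry': set(), 'a': {'exit'},
                  'b': {'exit'}, 'exit': set()})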

Example 131

Project: workload-automation Source File: test_execution.py
    def test_CTRL_C(self):
        workloads = [
            WorkloadRunSpec(id='1', number_of_iterations=2, instrumentation=['Signal Catcher']),
            WorkloadRunSpec(id='2', number_of_iterations=2, instrumentation=['Signal Catcher']),
            WorkloadRunSpec(id='3', number_of_iterations=2, instrumentation=['Signal Catcher']),
            WorkloadRunSpec(id='4', number_of_iterations=2, instrumentation=['Signal Catcher']),
        ]

        workloads[0]._workload = BadWorkload(KeyboardInterrupt, ["setup"])
        workloads[1]._workload = BadWorkload(KeyboardInterrupt, ["run"])
        workloads[2]._workload = BadWorkload(KeyboardInterrupt, ["update_result"])
        workloads[3]._workload = BadWorkload(KeyboardInterrupt, ["teardown"])

        expected_status = [IterationResult.ABORTED, IterationResult.ABORTED]

        expected_signals = [
            [
                signal.RUN_START.name,
                signal.RUN_INIT.name,
                signal.WORKLOAD_SPEC_START.name,
                    signal.ITERATION_START.name,
                        signal.BEFORE_WORKLOAD_SETUP.name,
                        signal.AFTER_WORKLOAD_SETUP.name,
                    signal.ITERATION_END.name,
                signal.WORKLOAD_SPEC_END.name,
                signal.RUN_FIN.name,
                signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
                signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
                signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
                signal.RUN_END.name
            ],
            [
                signal.RUN_START.name,
                signal.RUN_INIT.name,
                signal.WORKLOAD_SPEC_START.name,
                    signal.ITERATION_START.name,
                        signal.BEFORE_WORKLOAD_SETUP.name,
                        signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                        signal.AFTER_WORKLOAD_SETUP.name,
                        signal.BEFORE_WORKLOAD_EXECUTION.name,
                        signal.AFTER_WORKLOAD_EXECUTION.name,
                        signal.BEFORE_WORKLOAD_TEARDOWN.name,
                        signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
                        signal.AFTER_WORKLOAD_TEARDOWN.name,
                    signal.ITERATION_END.name,
                signal.WORKLOAD_SPEC_END.name,
                signal.RUN_FIN.name,
                signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
                signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
                signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
                signal.RUN_END.name
            ],
            [
                signal.RUN_START.name,
                signal.RUN_INIT.name,
                signal.WORKLOAD_SPEC_START.name,
                    signal.ITERATION_START.name,
                        signal.BEFORE_WORKLOAD_SETUP.name,
                        signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                        signal.AFTER_WORKLOAD_SETUP.name,
                        signal.BEFORE_WORKLOAD_EXECUTION.name,
                        signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
                        signal.AFTER_WORKLOAD_EXECUTION.name,
                        signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
                        signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
                        signal.BEFORE_WORKLOAD_TEARDOWN.name,
                        signal.SUCCESSFUL_WORKLOAD_TEARDOWN.name,
                        signal.AFTER_WORKLOAD_TEARDOWN.name,
                    signal.ITERATION_END.name,
                signal.WORKLOAD_SPEC_END.name,
                signal.RUN_FIN.name,
                signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
                signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
                signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
                signal.RUN_END.name
            ],
            [
                signal.RUN_START.name,
                signal.RUN_INIT.name,
                signal.WORKLOAD_SPEC_START.name,
                    signal.ITERATION_START.name,
                        signal.BEFORE_WORKLOAD_SETUP.name,
                        signal.SUCCESSFUL_WORKLOAD_SETUP.name,
                        signal.AFTER_WORKLOAD_SETUP.name,
                        signal.BEFORE_WORKLOAD_EXECUTION.name,
                        signal.SUCCESSFUL_WORKLOAD_EXECUTION.name,
                        signal.AFTER_WORKLOAD_EXECUTION.name,
                        signal.BEFORE_WORKLOAD_RESULT_UPDATE.name,
                        signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE.name,
                        signal.AFTER_WORKLOAD_RESULT_UPDATE.name,
                        signal.BEFORE_WORKLOAD_TEARDOWN.name,
                        signal.AFTER_WORKLOAD_TEARDOWN.name,
                    signal.ITERATION_END.name,
                signal.WORKLOAD_SPEC_END.name,
                signal.RUN_FIN.name,
                signal.BEFORE_OVERALL_RESULTS_PROCESSING.name,
                signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING.name,
                signal.AFTER_OVERALL_RESULTS_PROCESSING.name,
                signal.RUN_END.name
            ],
        ]

        for i in xrange(0, len(workloads)):
            context = Mock()
            context.reboot_policy = RebootPolicy("never")
            context.config.workload_specs = [workloads[i]]

            runner = BySpecRunner(Mock(), context, Mock())
            runner.init_queue(context.config.workload_specs)

            instrument = _instantiate(SignalCatcher)
            instrumentation.install(instrument)

            try:
                runner.run()
            finally:
                instrumentation.uninstall(instrument)

            # Check queue was handled correctly
            assert_equal(len(runner.completed_jobs), 2)
            assert_equal(len(runner.job_queue), 0)

            # Check correct signals were sent
            assert_equal(expected_signals[i], instrument.signals_received)

            # Check job statuses
            for j in range(0, len(runner.completed_jobs)):
                assert_equal(runner.completed_jobs[j].result.status, expected_status[j])
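
Comparing the full expected signal list with one assert_equal verifies membership and order together. The recorder pattern in miniature; Recorder is a hypothetical stand-in for the SignalCatcher instrument:

from nose.tools import assert_equal

class Recorder(object):
    """Hypothetical signal catcher: records event names in arrival order."""
    def __init__(self):
        self.signals_received = []

    def dispatch(self, name):
        self.signals_received.append(name)

rec = Recorder()
for name in ('RUN_START', 'ITERATION_START', 'ITERATION_END', 'RUN_END'):
    rec.dispatch(name)

expected = ['RUN_START', 'ITERATION_START', 'ITERATION_END', 'RUN_END']
assert_equal(expected, rec.signals_received)  # list equality checks order too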

Example 132

Project: allura Source File: test_repository.py
Function: test_log
    def test_log(self):
        entries = list(self.repo.log(id_only=False, limit=25))
        assert_equal(entries, [
            {'parents': [5],
             'refs': ['HEAD'],
             'committed': {
                 'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
                 'name': u'coldmind', 'email': ''},
             'message': u'',
             'rename_details': {},
             'id': 6,
             'authored': {
                 'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
                 'name': u'coldmind',
                 'email': ''
             }, 'size': None},
            {'parents': [4],
             'refs': [],
             'committed': {
                 'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
                 'name': u'rick446',
                 'email': ''},
             'message': u'Copied a => b',
             'rename_details': {},
             'id': 5,
             'authored': {
                 'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
                 'name': u'rick446',
                 'email': ''},
             'size': None},
            {'parents': [3],
             'refs': [],
             'committed': {
                 'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
                 'name': u'rick446',
                 'email': ''},
             'message': u'Remove hello.txt',
             'rename_details': {},
             'id': 4,
             'authored': {
                 'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
                 'name': u'rick446',
                 'email': ''},
             'size': None},
            {'parents': [2],
             'refs': [],
             'committed': {
                 'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
                 'name': u'rick446',
                 'email': ''},
             'message': u'Modify readme',
             'rename_details': {},
             'id': 3,
             'authored':
             {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
              'name': u'rick446',
              'email': ''},
             'size': None},
            {'parents': [1],
             'refs': [],
             'committed': {
                 'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
                 'name': u'rick446',
                 'email': ''},
             'message': u'Add path',
             'rename_details': {},
             'id': 2,
             'authored': {
                 'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
                 'name': u'rick446',
                 'email': ''},
             'size': None},
            {'parents': [],
             'refs': [],
             'committed': {
                 'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
                 'name': u'rick446',
                 'email': ''},
             'message': u'Create readme',
             'rename_details': {},
             'id': 1,
             'authored': {
                 'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
                 'name': u'rick446',
                 'email': ''},
             'size': None}])
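
One assert_equal over a list of nested dicts checks every commit field in a single shot, and nose prints a structured diff when it fails. The shape of that comparison, reduced to two hypothetical entries:

from datetime import datetime
from nose.tools import assert_equal

def log():
    """Hypothetical log(): return commit entries, newest first."""
    return [{'id': 2, 'parents': [1], 'message': u'Modify readme',
             'committed': {'date': datetime(2010, 10, 8, 15, 32, 48),
                           'name': u'rick446', 'email': ''}},
            {'id': 1, 'parents': [], 'message': u'Create readme',
             'committed': {'date': datetime(2010, 10, 8, 15, 32, 7),
                           'name': u'rick446', 'email': ''}}]

assert_equal(log(), [
    {'id': 2, 'parents': [1], 'message': u'Modify readme',
     'committed': {'date': datetime(2010, 10, 8, 15, 32, 48),
                   'name': u'rick446', 'email': ''}},
    {'id': 1, 'parents': [], 'message': u'Create readme',
     'committed': {'date': datetime(2010, 10, 8, 15, 32, 7),
                   'name': u'rick446', 'email': ''}},
])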

Example 133

Project: CoilSnake Source File: test_graphics.py
def test_write_4bpp_graphic_to_block():
    source = [[8, 1, 12, 9, 6, 5, 3, 2],
              [11, 5, 8, 14, 1, 7, 15, 0],
              [8, 13, 3, 7, 2, 0, 2, 3],
              [10, 0, 4, 14, 7, 10, 11, 9],
              [8, 8, 12, 9, 13, 12, 2, 6],
              [11, 14, 14, 4, 14, 4, 10, 7],
              [12, 2, 12, 8, 4, 15, 12, 14],
              [10, 13, 12, 1, 10, 11, 11, 2]]
    target = Block()
    target.from_list([0] * 32)
    assert_equal(32, write_4bpp_graphic_to_block(source=source, target=target, offset=0, x=0, y=0, bit_offset=0))
    assert_list_equal(target.to_list(),
                      [
                          0b01010110,
                          0b00001011,

                          0b11001110,
                          0b10010110,

                          0b01110001,
                          0b00111011,

                          0b00001011,
                          0b10011110,

                          0b00011000,
                          0b00000011,

                          0b10000001,
                          0b11101011,

                          0b00000100,
                          0b01000101,

                          0b01010110,
                          0b10001111,

                          0b00101100,
                          0b10110000,

                          0b01010110,
                          0b10110010,

                          0b01010000,
                          0b11000000,

                          0b00111000,
                          0b10010111,

                          0b00101101,
                          0b11111100,

                          0b01111101,
                          0b11101010,

                          0b10101111,
                          0b10110111,

                          0b01100000,
                          0b11101110
                      ])
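
For reference, the 32 expected bytes follow a planar 4bpp tile layout: for each of the 8 rows, bitplanes 0 and 1 are interleaved into the first 16 bytes and bitplanes 2 and 3 into the next 16, with the leftmost pixel in the most significant bit. A minimal sketch that reproduces the expected list from source (encode_4bpp is a hypothetical helper, not CoilSnake's implementation):

def encode_4bpp(tile):
    # tile: 8 rows of 8 pixel values in 0..15
    out = []
    for plane_pair in ((0, 1), (2, 3)):
        for row in tile:
            for plane in plane_pair:
                byte = 0
                for pixel in row:
                    # leftmost pixel lands in the most significant bit
                    byte = (byte << 1) | ((pixel >> plane) & 1)
                out.append(byte)
    return out

# encode_4bpp(source) == target.to_list() for the data above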

Example 134

Project: nbviewer Source File: test_utils.py
def test_transform_ipynb_uri():
    test_data = (
        # GIST_RGX
        ('1234',
        u'/1234'),
        ('1234/',
        u'/1234'),
        # GIST_URL_RGX
        ('https://gist.github.com/user-name/1234',
        u'/1234'),
        ('https://gist.github.com/user-name/1234/',
        u'/1234'),
        # GITHUB_URL_RGX
        ('https://github.com/user-name_/repo-name_/blob/master/path/file.ipynb',
        u'/github/user-name_/repo-name_/blob/master/path/file.ipynb'),
        ('http://github.com/user-name_/repo-name_/blob/master/path/file.ipynb',
        u'/github/user-name_/repo-name_/blob/master/path/file.ipynb'),
        ('https://github.com/user-name_/repo-name_/tree/master/path/',
        u'/github/user-name_/repo-name_/tree/master/path/'),
        # GITHUB_USER_RGX
        ('ipy-thon',
        u'/github/ipy-thon/'),
        # GITHUB_USER_REPO_RGX
        ('ipy-thon/ipy-thon',
        u'/github/ipy-thon/ipy-thon/tree/master/'),
        #DropBox Urls
        ( u'http://www.dropbox.com/s/bar/baz.qux',
          u'/url/dl.dropbox.com/s/bar/baz.qux'),
        ( u'https://www.dropbox.com/s/zip/baz.qux',
          u'/urls/dl.dropbox.com/s/zip/baz.qux'),
        ( u'https://www.dropbox.com/sh/mhviow274da2wly/CZKwRRcA0k/nested/furthernested/User%2520Interface.ipynb?dl=1',
          u'/urls/dl.dropbox.com/sh/mhviow274da2wly/CZKwRRcA0k/nested/furthernested/User%2520Interface.ipynb'),
        # URL
        ('https://example.org/ipynb',
        u'/urls/example.org/ipynb'),
        ('http://example.org/ipynb',
        u'/url/example.org/ipynb'),
        ('example.org/ipynb',
        u'/url/example.org/ipynb'),
        (u'example.org/ipynb',
        u'/url/example.org/ipynb'),
        ('https://gist.github.com/user/1234/raw/a1b2c3/file.ipynb',
        u'/urls/gist.github.com/user/1234/raw/a1b2c3/file.ipynb'),
    )
    uri_rewrite_list = provider_uri_rewrites(default_rewrites)
    for ipynb_uri, expected_output in test_data:
        output = utils.transform_ipynb_uri(ipynb_uri, uri_rewrite_list)
        nt.assert_equal(output, expected_output, "%s => %s != %s" % (
            ipynb_uri, output, expected_output
        ))
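
The rewrite list built by provider_uri_rewrites is pattern-driven; a minimal first-match-wins sketch of the idea (the regexes below are illustrative stand-ins, not nbviewer's actual GIST_RGX and friends):

import re

rewrites = [
    (r'^(\d+)/?$', r'/\1'),                                   # bare gist id
    (r'^https?://gist\.github\.com/[^/]+/(\d+)/?$', r'/\1'),  # gist URL
    (r'^https://(.*)$', r'/urls/\1'),                         # generic https
    (r'^(?:http://)?(.*)$', r'/url/\1'),                      # generic http
]

def transform(uri):
    for pattern, template in rewrites:
        if re.match(pattern, uri):
            return re.sub(pattern, template, uri)
    return uri

print(transform('1234'))                      # /1234
print(transform('http://example.org/ipynb'))  # /url/example.org/ipynb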

Example 135

Project: riko Source File: test_examples.py
    def test_kazeeki(self):
        """Tests the kazeeki pipeline
        """
        pipe_name = 'kazeeki'
        pipeline = self._get_pipeline(pipe_name)
        raw = (
            '<p>We are looking for freelancers ( individuals and companies )'
            ' who offer their services related to Architecture Walkthrough and'
            ' 3D animations. Please consider this job as a potential to '
            'several more and a long term relationship.   We are a Media...\n'
            '    <br> <br>\n    <b>Category:</b> Design &amp; Multimedia >'
            ' Animation <br>\n    <b>Type and Budget:</b> Hourly ($10 - $15 / '
            'hr)<br>\n    <b>Time Left:</b> Ends: 29d, 23h (Ends Thu, 05 Feb '
            '2015 11:46:40 EST) <br>\n    <b>Start Date:</b> 06 Jan 2015 <br>'
            '\n    <b>Proposals:</b> 0 (<a href=\"https://www.elance.com/php/'
            'landing/main/login.php?assumePreviousLogin=1&amp;redirect=https'
            '%3A%2F%2Fwww.elance.com%2Fr%2Fjobs%2Fcat-design-multimedia%3F'
            'showUpgradeModelIfFreeMember%3D1\">login</a>) <br>\n    '
            '<b>Client:</b> Client (0 jobs posted, 0% awarded, $0 total '
            'purchased, Payment Method Verified) <br>\n    <b>Client Location:'
            '</b> , , Cambodia <br>\n        <b>Desired Skills:</b> Animation'
            '  3D Modeling  Computer Graphics  3d Animation  3D Rendering <br>'
            '\n    <b>Job ID:</b> 66963214 <br> <br>\n    <a href=\"https://'
            'www.elance.com/j/3d-architecture-walkthrough-3d-animation-artists'
            '/66963214/\">View job »</a></p>')

        _hash = ctypes.c_uint(hash(raw)).value

        example = {
            'author': {'name': None, 'uri': None},
            'id': _hash,
            'k:author': 'unknown',
            'k:budget': Decimal('0'),
            'k:budget_converted': Decimal('0.000000'),
            'k:budget_converted_w_sym': '$0.00',
            'k:budget_full': '$0.00',
            'k:budget_w_sym': '$0.00',
            'k:client_location': 'Cambodia',
            'k:content': (
                'We are looking for freelancers ( individuals and companies ) '
                'who offer their services related to Architecture Walkthrough '
                'and 3D animations. Please consider this job as a potential '
                'to several more and a long term relationship.   We are a '
                'Media...'),
            'k:cur_code': 'USD',
            'k:due': ' Thu, 05 Feb 2015 11:46:40 EST',
            'k:job_type': '2',
            'k:marketplace': 'elance.com',
            'k:parsed_type': 'fixed',
            'k:posted': 'Tue, 06 Jan 2015 11:46:40 EST',
            'k:rate': Decimal('1.000000'),
            'k:submissions': '0',
            'k:tags': [
                {'content': 'animation'},
                {'content': 'design'},
                {'content': 'multimedia'}],
            'k:work_location': 'unknown',
            'link': (
                'https://www.elance.com/j/3d-architecture-walkthrough-3d-'
                'animation-artists/66963214/'),
            'links': [{}],
            'title': (
                '3D Architecture Walkthrough &amp; 3D / Animation Artists ')}

        length = len(pipeline)
        msg = 'Pipeline %s has length %i, not 180'
        nt.assert_equal(length, 180, msg % (pipe_name, length))
        nt.assert_equal(example, pipeline[-1])
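
Note the ctypes.c_uint(hash(raw)).value idiom above: it reinterprets Python's signed hash as an unsigned 32-bit integer (truncating to the low 32 bits on 64-bit builds), so the id is non-negative. On Python 3, str hashes are also randomized per process unless PYTHONHASHSEED is fixed. For instance:

import ctypes
print(ctypes.c_uint(-1).value)  # 4294967295: signed -1 reinterpreted as unsigned 32-bit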

Example 136

Project: hwrt Source File: geometry_test.py
def point_segment_distance_test():
    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(1, 0)
    line = geometry.LineSegment(ap1, ap2)

    # Test 1
    point = geometry.Point(0, 0)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 0.0)

    # Test 2
    point = geometry.Point(0, 1)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    # Test 3: Line segment is just a point
    line = geometry.LineSegment(ap1, ap1)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    # Test 3: Line segment is a straight vertical line
    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(0, 1)
    line = geometry.LineSegment(ap1, ap2)
    point = geometry.Point(1, 0)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    point = geometry.Point(0, 2)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    point = geometry.Point(0, -1)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    ap2 = geometry.Point(0, 0)
    ap1 = geometry.Point(0, 1)
    point = geometry.Point(0, -1)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(0, 1)
    line = geometry.LineSegment(ap2, ap1)
    point = geometry.Point(0, -1)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(0, 1)
    line = geometry.LineSegment(ap2, ap1)
    point = geometry.Point(0, 2)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(1, 0)
    line = geometry.LineSegment(ap2, ap1)
    point = geometry.Point(2, 0)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(1, 0)
    line = geometry.LineSegment(ap1, ap2)
    point = geometry.Point(2, 0)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(1, 0)
    line = geometry.LineSegment(ap1, ap2)
    point = geometry.Point(-1, 0)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(1, 0)
    line = geometry.LineSegment(ap2, ap1)
    point = geometry.Point(-1, 0)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 1.0)

    # Test 3: Line segment is a straight vertical line and point is on it
    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(0, 1)
    line = geometry.LineSegment(ap1, ap2)
    point = geometry.Point(0, 0.1)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 0)

    # Test 4: Vertical line tests
    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(0, 1)
    line = geometry.LineSegment(ap1, ap2)
    point = geometry.Point(0.5, 0.5)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 0.5)

    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(0, 1)
    line = geometry.LineSegment(ap1, ap2)
    point = geometry.Point(-0.5, 0.5)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line), 0.5)

    # Test 5: Other
    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(1, 1)
    line = geometry.LineSegment(ap1, ap2)
    point = geometry.Point(1, 0)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line),
                            (2**0.5)/2.0)

    # Test 6: Continued line
    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(1, 1)
    line = geometry.LineSegment(ap1, ap2)
    point = geometry.Point(2, 2)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line),
                            2**0.5)

    ap1 = geometry.Point(0, 0)
    ap2 = geometry.Point(1, 1)
    line = geometry.LineSegment(ap1, ap2)
    point = geometry.Point(-1, -1)
    nose.tools.assert_equal(geometry.point_segment_distance(point, line),
                            2**0.5)
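
All of the cases above reduce to one formula: project the point onto the segment, clamp the projection parameter to [0, 1], and measure the distance to the clamped foot point. A minimal reference implementation (not hwrt's):

import math

def point_segment_distance(px, py, x1, y1, x2, y2):
    dx, dy = x2 - x1, y2 - y1
    if dx == 0 and dy == 0:      # degenerate segment: a single point
        return math.hypot(px - x1, py - y1)
    t = ((px - x1) * dx + (py - y1) * dy) / (dx * dx + dy * dy)
    t = max(0.0, min(1.0, t))    # clamp the projection onto the segment
    return math.hypot(px - (x1 + t * dx), py - (y1 + t * dy))

print(point_segment_distance(1, 0, 0, 0, 1, 1))  # 0.7071... == sqrt(2)/2, as in Test 5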

Example 137

Project: nideep Source File: test_net_merge.py
    def test_duplicate(self):

        fpath = os.path.join(os.path.dirname(ROOT_PKG_PATH),
                             TEST_DATA_DIRNAME, TEST_NET_FILENAME)

        n1 = Parser().from_net_params_file(fpath)
        n2 = Parser().from_net_params_file(fpath)

        n1_tmp = NetParameter(); n1_tmp.CopyFrom(n1)
        n2_tmp = NetParameter(); n2_tmp.CopyFrom(n2)
        s = mrg.merge_indep_net_spec([n1_tmp, n2_tmp])

        assert_is_not_none(s)
        assert_is_instance(s, str)
        assert_greater(len(s), 0)

        n = NetParameter()
        text_format.Merge(s, n)
        assert_is_not_none(n)

        # Data Layer from first network
        for l in n.layer:
            if l.type.lower() == 'data':
                for l1 in n1.layer:
                    if l1.type.lower() == 'data':

                        dat_phase = [x.phase for x in l.include]
                        # compare test with test and train with train
                        if dat_phase == [x.phase for x in l1.include]:

                            assert_is_not(l.top, l1.top)
                            assert_list_equal(list(l.top), list(l1.top))
                            assert_equal(l.data_param.source, l1.data_param.source)
                            assert_equal(l.data_param.backend, l1.data_param.backend)
                            assert_equal(l.data_param.batch_size, l1.data_param.batch_size)
                            assert_equal(l.transform_param.scale, l1.transform_param.scale)
        # For non-data layers

        # back up merged net
        for ni in [n1, n2]:
            for l1 in ni.layer:
                found = False
                if l1.type.lower() != 'data':

                    for l in n.layer:
                        if l.type.lower() == l1.type.lower() and \
                           [t.split('_nidx')[0] for t in l.top] == list(l1.top) and \
                           [b.split('_nidx')[0] for b in l.bottom] == list(l1.bottom):

                            assert_true(l.name.startswith(l1.name))

                            fnames1 = [f.name for f in l1.DESCRIPTOR.fields]
                            fnames = [f.name for f in l.DESCRIPTOR.fields]
                            assert_list_equal(fnames, fnames1)

                            l.ClearField('name')
                            l.ClearField('top')
                            l.ClearField('bottom')
                            l1.ClearField('name')
                            l1.ClearField('top')
                            l1.ClearField('bottom')

                            assert_equal(text_format.MessageToString(l), text_format.MessageToString(l1))

                            found = True
                else:
                    continue  # skip for data layers
                assert_true(found, "Failed to find %s in merged network!" % (l1.name,))

Example 138

Project: claripy Source File: test_solver.py
def raw_solver(solver_type):
    #bc = claripy.backends.BackendConcrete(clrp)
    #bz = claripy.backends.BackendZ3(clrp)
    #claripy.expression_backends = [ bc, bz, ba ]

    print "YOYO"
    s = solver_type()

    s.simplify()

    x = claripy.BVS('x', 32)
    y = claripy.BVS('y', 32)
    z = claripy.BVS('z', 32)

    l.debug("adding constraints")

    s.add(x == 10)
    s.add(y == 15)

    # Batch evaluation
    results = s.batch_eval([x + 5, x + 6, 3], 2)
    nose.tools.assert_equal(len(results), 1)
    nose.tools.assert_equal(results[0][0], 15) # x + 5
    nose.tools.assert_equal(results[0][1], 16) # x + 6
    nose.tools.assert_equal(results[0][2], 3)  # constant

    l.debug("checking")
    nose.tools.assert_true(s.satisfiable())
    nose.tools.assert_false(s.satisfiable(extra_constraints=[x == 5]))
    nose.tools.assert_equal(s.eval(x + 5, 1)[0], 15)
    nose.tools.assert_true(s.solution(x + 5, 15))
    nose.tools.assert_true(s.solution(x, 10))
    nose.tools.assert_true(s.solution(y, 15))
    nose.tools.assert_false(s.solution(y, 13))

    shards = s.split()
    nose.tools.assert_equal(len(shards), 2)
    nose.tools.assert_equal(len(shards[0].variables), 1)
    nose.tools.assert_equal(len(shards[1].variables), 1)
    if isinstance(s, claripy.frontend_mixins.ConstraintExpansionMixin) or (
        isinstance(s, claripy.frontends.HybridFrontend) and
        isinstance(s._exact_frontend, claripy.frontend_mixins.ConstraintExpansionMixin)
    ): #the hybrid frontend actually uses the exact frontend for the split
        nose.tools.assert_equal({ len(shards[0].constraints), len(shards[1].constraints) }, { 2, 1 }) # adds the != from the solution() check
    if isinstance(s, claripy.frontends.ReplacementFrontend):
        nose.tools.assert_equal({ len(shards[0].constraints), len(shards[1].constraints) }, { 1, 1 }) # not a caching frontend

    # test result caching
    s = solver_type()
    s.add(x == 10)
    s.add(y == 15)
    nose.tools.assert_false(s.satisfiable(extra_constraints=(x==5,)))
    nose.tools.assert_true(s.satisfiable())

    s = solver_type()
    #claripy.expression_backends = [ bc, ba, bz ]
    s.add(claripy.UGT(x, 10))
    s.add(claripy.UGT(x, 20))
    s.simplify()
    nose.tools.assert_equal(len(s.constraints), 1)
    #nose.tools.assert_equal(str(s.constraints[0]._obj), "Not(ULE(x <= 20))")

    s.add(claripy.UGT(y, x))
    s.add(claripy.ULT(z, 5))

    # test that duplicate constraints are ignored
    old_count = len(s.constraints)
    s.add(claripy.ULT(z, 5))
    nose.tools.assert_equal(len(s.constraints), old_count)

    #print "========================================================================================"
    #print "========================================================================================"
    #print "========================================================================================"
    #print "========================================================================================"
    #a = s.eval(z, 100)
    #print "ANY:", a
    #print "========================================================================================"
    #mx = s.max(z)
    #print "MAX",mx
    #print "========================================================================================"
    #mn = s.min(z)
    #print "MIN",mn
    #print "========================================================================================"
    #print "========================================================================================"
    #print "========================================================================================"
    #print "========================================================================================"

    print "CONSTRATINT COUNTS:", [ len(_.constraints) for _ in s.split() ]

    nose.tools.assert_equal(s.max(z), 4)
    nose.tools.assert_equal(s.min(z), 0)
    nose.tools.assert_equal(s.min(y), 22)
    nose.tools.assert_equal(s.max(y), 2**y.size()-1)

    print "CONSTRATINT COUNTS:", [ len(_.constraints) for _ in s.split() ]

    ss = s.split()
    nose.tools.assert_equal(len(ss), 2)
    #if isinstance(s, claripy.frontend_mixins.ConstraintExpansionMixin):
    #   nose.tools.assert_equal({ len(_.constraints) for _ in ss }, { 3, 2 }) # constraints from min or max

    # Batch evaluation
    s.add(y < 24)
    s.add(z < x) # Just to make sure x, y, and z belong to the same solver, since batch evaluation does not support the
                 # situation where expressions belong to more than one solver
    results = s.batch_eval([x, y, z], 20)
    nose.tools.assert_set_equal(
        set(results),
        {(21L, 23L, 1L), (22L, 23L, 3L), (22L, 23L, 2L), (22L, 23L, 4L), (21L, 22L, 4L), (21L, 23L, 4L), (22L, 23L, 0L),
         (22L, 23L, 1L), (21L, 22L, 1L), (21L, 22L, 3L), (21L, 22L, 2L), (21L, 22L, 0L), (21L, 23L, 0L), (21L, 23L, 2L),
         (21L, 23L, 3L)
        }
    )

    # test that False makes it unsat
    s = solver_type()
    s.add(claripy.BVV(1,1) == claripy.BVV(1,1))
    nose.tools.assert_true(s.satisfiable())
    s.add(claripy.BVV(1,1) == claripy.BVV(0,1))
    nose.tools.assert_false(s.satisfiable())

    # test extra constraints
    s = solver_type()
    x = claripy.BVS('x', 32)
    nose.tools.assert_items_equal(s.eval(x, 2, extra_constraints=[x==10]), ( 10, ))
    s.add(x == 10)
    nose.tools.assert_false(s.solution(x, 2))
    nose.tools.assert_true(s.solution(x, 10))

    # test result caching

    if isinstance(s, claripy.frontend_mixins.ModelCacheMixin):
        count = claripy._backends_module.backend_z3.solve_count

        s = solver_type()
        x = claripy.BVS('x', 32)
        s.add(x == 10)
        nose.tools.assert_true(s.satisfiable())
        assert claripy._backends_module.backend_z3.solve_count == count + 1
        nose.tools.assert_equals(s.eval(x, 1)[0], 10)
        assert claripy._backends_module.backend_z3.solve_count == count + 1
        s.add(x == 10)
        s.add(x > 9)
        nose.tools.assert_equals(s.eval(x, 1)[0], 10)
        assert claripy._backends_module.backend_z3.solve_count == count + 1

        y = claripy.BVS('y', 32)
        s.add(y < 999)
        assert s.satisfiable()
        assert claripy._backends_module.backend_z3.solve_count == count + 1
        nose.tools.assert_equals(s.eval(y, 1)[0], 0)
        assert claripy._backends_module.backend_z3.solve_count == count + 1

Example 139

Project: dipy Source File: test_dti.py
def test_tensor_model():
    fdata, fbval, fbvec = get_data('small_25')
    data1 = nib.load(fdata).get_data()
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)

        # Check that it works on signal that has already been normalized to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0]/np.mean(data[0, 0, 0,
                                                        gtab.b0s_mask]))

            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0], dtifit_to_relative.fa,
                                    decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = (3 * np.sqrt(6) * np.linalg.det(A_squiggle /
            np.linalg.norm(A_squiggle)))
    evals_eigh, evecs_eigh = np.linalg.eigh(tensor)
    # Sort according to eigen-value from large to small:
    evecs = evecs_eigh[:, np.argsort(evals_eigh)[::-1]]
    # Check that eigenvalues and eigenvectors are properly sorted through
    # that previous operation:
    for i in range(3):
        assert_array_almost_equal(np.dot(tensor, evecs[:, i]),
                                  evals[i] * evecs[:, i])
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        # Test that the eigenvectors are correct, one-by-one:
        for i in range(3):
            # Eigenvectors have intrinsic sign ambiguity
            # (see
            # http://prod.sandia.gov/techlib/access-control.cgi/2007/076422.pdf)
            # so we need to allow for sign flips. One of the following should
            # always be true:
            assert_(
                    np.all(np.abs(tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6) or
                    np.all(np.abs(-tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6))
            # We set a fixed tolerance of 10e-6, similar to array_almost_equal

        err_msg = "Calculation of tensor from Y does not compare to "
        err_msg += "analytical solution"
        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=err_msg)

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')

    # Test custom fit tensor method
    try:
        model = dti.TensorModel(gtab, fit_method=lambda *args, **kwargs: 42)
        fit = model.fit_method()
    except Exception as exc:
        assert False, "TensorModel should accept custom fit methods: %s" % exc
    assert fit == 42, "Custom fit method for TensorModel returned %s." % fit

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)

Example 140

Project: delocate Source File: test_delocating.py
def test_delocate_tree_libs():
    # Test routine to copy library dependencies into a local directory
    with InTemporaryDirectory() as tmpdir:
        # Copy libs into a temporary directory
        subtree = pjoin(tmpdir, 'subtree')
        all_local_libs = _make_libtree(subtree)
        liba, libb, libc, test_lib, slibc, stest_lib = all_local_libs
        lib_dict = tree_libs(subtree)
        copy_dir = 'dynlibs'
        os.makedirs(copy_dir)
        # First check that missing library causes error
        set_install_name(liba,
                         '/usr/lib/libstdc++.6.dylib',
                         '/unlikely/libname.dylib')
        lib_dict = tree_libs(subtree)
        assert_raises(DelocationError,
                      delocate_tree_libs, lib_dict, copy_dir, subtree)
        # fix it
        set_install_name(liba,
                         '/unlikely/libname.dylib',
                         '/usr/lib/libstdc++.6.dylib')
        lib_dict = tree_libs(subtree)
        copied = delocate_tree_libs(lib_dict, copy_dir, subtree)
        # Only the out-of-tree libraries get copied
        exp_dict = get_ext_dict(all_local_libs)
        assert_equal(copied, exp_dict)
        assert_equal(set(os.listdir(copy_dir)),
                     set([basename(realpath(lib)) for lib in EXT_LIBS]))
        # Libraries using the copied libraries now have an install name starting
        # with @loader_path, then pointing to the copied library directory
        for lib in all_local_libs:
            pathto_copies = relpath(realpath(copy_dir), dirname(realpath(lib)))
            lib_inames = get_install_names(lib)
            new_links = ['@loader_path/{0}/{1}'.format(pathto_copies,
                                                       basename(elib))
                         for elib in copied]
            assert_true(set(new_links) <= set(lib_inames))
        # Libraries now have a relative loader_path to their corresponding
        # in-tree libraries
        for requiring, using, rel_path in (
            (libb, 'liba.dylib', ''),
            (libc, 'liba.dylib', ''),
            (libc, 'libb.dylib', ''),
            (test_lib, 'libc.dylib', ''),
            (slibc, 'liba.dylib', '../'),
            (slibc, 'libb.dylib', '../'),
            (stest_lib, 'libc.dylib', '')):
            loader_path = '@loader_path/' + rel_path + using
            assert_true(loader_path in get_install_names(requiring))
        # Check test libs still work
        back_tick([test_lib])
        back_tick([stest_lib])
        # Check case where all local libraries are out of tree
        subtree2 = pjoin(tmpdir, 'subtree2')
        liba, libb, libc, test_lib, slibc, stest_lib = _make_libtree(subtree2)
        copy_dir2 = 'dynlibs2'
        os.makedirs(copy_dir2)
        # Trying to delocate where all local libraries appear to be
        # out-of-tree will raise an error because of duplicate library names
        # (libc and slibc both named <something>/libc.dylib)
        lib_dict2 = tree_libs(subtree2)
        assert_raises(DelocationError,
                      delocate_tree_libs, lib_dict2, copy_dir2, '/fictional')
        # Rename a library to make this work
        new_slibc = pjoin(dirname(slibc), 'libc2.dylib')
        os.rename(slibc, new_slibc)
        # Tell test-lib about this
        set_install_name(stest_lib, slibc, new_slibc)
        slibc = new_slibc
        # Confirm new test-lib still works
        back_tick([test_lib])
        back_tick([stest_lib])
        # Delocation now works
        lib_dict2 = tree_libs(subtree2)
        copied2 = delocate_tree_libs(lib_dict2, copy_dir2, '/fictional')
        local_libs = [liba, libb, libc, slibc, test_lib, stest_lib]
        rp_liba, rp_libb, rp_libc, rp_slibc, rp_test_lib, rp_stest_lib = \
                [realpath(L) for L in local_libs]
        exp_dict = get_ext_dict(local_libs)
        exp_dict.update({
            rp_libc: {rp_test_lib: libc},
            rp_slibc: {rp_stest_lib: slibc},
            rp_libb: {rp_slibc: libb,
                      rp_libc: libb},
            rp_liba: {rp_slibc: liba,
                      rp_libc: liba,
                      rp_libb: liba}})
        assert_equal(copied2, exp_dict)
        ext_local_libs = (set(realpath(L) for L in EXT_LIBS) |
                          set([liba, libb, libc, slibc]))
        assert_equal(set(os.listdir(copy_dir2)),
                     set([basename(lib) for lib in ext_local_libs]))
        # Libraries using the copied libraries now have an install name starting
        # with @loader_path, then pointing to the copied library directory
        all_local_libs = liba, libb, libc, test_lib, slibc, stest_lib
        for lib in all_local_libs:
            pathto_copies = relpath(realpath(copy_dir2),
                                    dirname(realpath(lib)))
            lib_inames = get_install_names(lib)
            new_links = ['@loader_path/{0}/{1}'.format(pathto_copies,
                                                       basename(elib))
                         for elib in copied]
            assert_true(set(new_links) <= set(lib_inames))
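
The expected @loader_path entries are plain path arithmetic; a small sketch with hypothetical paths:

from os.path import basename, dirname, relpath

lib = '/tmp/subtree/subsub/slibc.dylib'    # hypothetical in-tree library
copy_dir = '/tmp/dynlibs'                  # hypothetical copy directory
copied = '/usr/lib/libstdc++.6.dylib'      # an out-of-tree dependency
print('@loader_path/{0}/{1}'.format(relpath(copy_dir, dirname(lib)),
                                    basename(copied)))
# -> @loader_path/../../dynlibs/libstdc++.6.dylib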

Example 141

Project: dipy Source File: test_streamline.py
def test_set_number_of_points():
    # Test resampling of only one streamline
    nb_points = 12
    modified_streamline_cython = set_number_of_points(
        streamline, nb_points)
    modified_streamline_python = set_number_of_points_python(
        streamline, nb_points)
    assert_equal(len(modified_streamline_cython), nb_points)
    # Use a 5-digit precision because the streamline is in float32.
    assert_array_almost_equal(modified_streamline_cython,
                              modified_streamline_python, 5)

    modified_streamline_cython = set_number_of_points(
        streamline_64bit, nb_points)
    modified_streamline_python = set_number_of_points_python(
        streamline_64bit, nb_points)
    assert_equal(len(modified_streamline_cython), nb_points)
    assert_array_almost_equal(modified_streamline_cython,
                              modified_streamline_python)

    res = []
    simple_streamline = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]], 'f4')
    for nb_points in range(2, 200):
        modified_streamline_cython = set_number_of_points(
            simple_streamline, nb_points)
        res.append(nb_points - len(modified_streamline_cython))
    assert_equal(np.sum(res), 0)

    # Test resampling of multiple streamlines of different nb_points
    nb_points = 12
    modified_streamlines_cython = set_number_of_points(
        streamlines, nb_points)

    for i, s in enumerate(streamlines):
        modified_streamline_python = set_number_of_points_python(s, nb_points)
        # Use a 5-digit precision because the streamline is in float32.
        assert_array_almost_equal(modified_streamlines_cython[i],
                                  modified_streamline_python, 5)

    modified_streamlines_cython = set_number_of_points(
        streamlines_64bit, nb_points)

    for i, s in enumerate(streamlines_64bit):
        modified_streamline_python = set_number_of_points_python(s, nb_points)
        assert_array_almost_equal(modified_streamlines_cython[i],
                                  modified_streamline_python)

    # Test streamlines with mixed dtype
    streamlines_mixed_dtype = [streamline,
                               streamline.astype(np.float64),
                               streamline.astype(np.int32),
                               streamline.astype(np.int64)]
    nb_points_mixed_dtype = [len(s) for s in set_number_of_points(
        streamlines_mixed_dtype, nb_points)]
    assert_array_equal(nb_points_mixed_dtype,
                       [nb_points] * len(streamlines_mixed_dtype))

    # Test streamlines with different shape
    modified_streamlines_cython = set_number_of_points(
        heterogeneous_streamlines, nb_points)

    for i, s in enumerate(heterogeneous_streamlines):
        modified_streamline_python = set_number_of_points_python(s, nb_points)
        assert_array_almost_equal(modified_streamlines_cython[i],
                                  modified_streamline_python)

    # Test streamline with integer dtype
    modified_streamline = set_number_of_points(streamline.astype(np.int32))
    assert_true(modified_streamline.dtype == np.float32)
    modified_streamline = set_number_of_points(streamline.astype(np.int64))
    assert_true(modified_streamline.dtype == np.float64)

    # Test empty list
    assert_equal(set_number_of_points([]), [])

    # Test streamline having only one point
    assert_raises(ValueError, set_number_of_points, np.array([[1, 2, 3]]))

    # We do not support lists of lists; the input should be a numpy ndarray.
    streamline_unsupported = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
    assert_raises(AttributeError, set_number_of_points, streamline_unsupported)

    # Test setting the number of points of a numpy array flagged WRITABLE=False
    streamline_readonly = streamline.copy()
    streamline_readonly.setflags(write=False)
    assert_equal(len(set_number_of_points(streamline_readonly, nb_points=42)),
                 42)

    # Test resampling a list of numpy arrays flagged WRITABLE=False
    streamlines_readonly = []
    for s in streamlines:
        streamlines_readonly.append(s.copy())
        streamlines_readonly[-1].setflags(write=False)

    assert_equal(len(set_number_of_points(streamlines_readonly, nb_points=42)),
                 len(streamlines_readonly))

    streamlines_readonly = []
    for s in streamlines_64bit:
        streamlines_readonly.append(s.copy())
        streamlines_readonly[-1].setflags(write=False)

    assert_equal(len(set_number_of_points(streamlines_readonly, nb_points=42)),
                 len(streamlines_readonly))

    # Test if nb_points is less than 2
    assert_raises(ValueError, set_number_of_points, [np.ones((10, 3)),
                  np.ones((10, 3))], nb_points=1)

Example 142

Project: ANALYSE Source File: tests.py
    def un_flag_thread(self, mock_request, is_closed):
        mock_request.return_value.status_code = 200
        self._set_mock_request_data(mock_request, {
            "title": "Hello",
            "body": "this is a post",
            "course_id": "MITx/999/Robot_Super_Course",
            "anonymous": False,
            "anonymous_to_peers": False,
            "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
            "created_at": "2013-05-10T18:53:43Z",
            "updated_at": "2013-05-10T18:53:43Z",
            "at_position_list": [],
            "closed": is_closed,
            "id": "518d4237b023791dca00000d",
            "user_id": "1",
            "username": "robot",
            "votes": {
                "count": 0,
                "up_count": 0,
                "down_count": 0,
                "point": 0
            },
            "abuse_flaggers": [],
            "type": "thread",
            "group_id": None,
            "pinned": False,
            "endorsed": False,
            "unread_comments_count": 0,
            "read": False,
            "comments_count": 0
        })
        url = reverse('un_flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
        response = self.client.post(url)
        assert_true(mock_request.called)

        call_list = [
            (
                ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'mark_as_read': True, 'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
                {
                    'data': {'user_id': '1'},
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'mark_as_read': True, 'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            )
        ]

        assert_equal(call_list, mock_request.call_args_list)

        assert_equal(response.status_code, 200)
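
The assert_equal against mock_request.call_args_list works because each recorded call compares equal to an (args, kwargs) tuple; a minimal sketch with unittest.mock (the standalone mock package behaves the same):

from unittest import mock

m = mock.Mock()
m('get', timeout=5)
print(m.call_args_list == [(('get',), {'timeout': 5})])  # True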

Example 143

Project: nipy Source File: test_image.py
def test_rollimg():
    AT = AffineTransform
    data = np.random.standard_normal((3,4,7,5))
    aff = np.diag([1,2,3,4,1])
    im = Image(data, AT('ijkl', 'xyzt', aff))
    # No attempt to preserve the diagonal
    im1 = rollimg(im, 1)
    assert_equal(im1.coordmap, rollimg(im, 'j').coordmap)
    assert_equal(im1.coordmap, rollimg(im, 'y').coordmap)
    assert_equal(im1.coordmap, rollimg(im, -3).coordmap)
    assert_equal(im1.coordmap,
                 AT('jikl', 'xyzt', aff[:, (1, 0, 2, 3, 4)]))
    assert_array_equal(im1.get_data(), np.rollaxis(data, 1))
    im2 = rollimg(im, 2)
    assert_equal(im2.coordmap, rollimg(im, 'k').coordmap)
    assert_equal(im2.coordmap, rollimg(im, 'z').coordmap)
    assert_equal(im2.coordmap, rollimg(im, -2).coordmap)
    assert_equal(im2.coordmap,
                 AT('kijl', 'xyzt', aff[:, (2, 0, 1, 3, 4)]))
    assert_array_equal(im2.get_data(), np.rollaxis(data, 2))
    im3 = rollimg(im, 3)
    assert_equal(im3.coordmap, rollimg(im, 'l').coordmap)
    assert_equal(im3.coordmap, rollimg(im, 't').coordmap)
    assert_equal(im3.coordmap, rollimg(im, -1).coordmap)
    assert_equal(im3.coordmap,
                        AT('lijk', 'xyzt', aff[:, (3, 0, 1, 2, 4)]))
    assert_array_equal(im3.get_data(), np.rollaxis(data, 3))
    # We can roll to before a specified axis
    im31 = rollimg(im, 3, 1)
    assert_equal(im31.coordmap, rollimg(im, 'l', 'j').coordmap)
    assert_equal(im31.coordmap, rollimg(im, 't', 'y').coordmap)
    assert_equal(im31.coordmap, rollimg(im, 't', 'j').coordmap)
    assert_equal(im31.coordmap, rollimg(im, 'l', 'y').coordmap)
    assert_equal(im31.coordmap, rollimg(im, -1, 'y').coordmap)
    assert_equal(im31.coordmap, rollimg(im, -1, -3).coordmap)
    assert_equal(im31.coordmap,
                 AT('iljk', 'xyzt', aff[:, (0, 3, 1, 2, 4)]))
    assert_array_equal(im31.get_data(), np.rollaxis(data, 3, 1))
    # Check that ambiguous axes raise an exception; 'l' appears both as an axis
    # and a reference coord name and in different places
    im_amb = Image(data, AT('ijkl', 'xylt', np.diag([1,2,3,4,1])))
    assert_raises(AxisError, rollimg, im_amb, 'l')
    # But if it's unambiguous, then 'l' can appear both as an axis and a
    # reference coord name
    im_unamb = Image(data, AT('ijkl', 'xyzl', np.diag([1,2,3,4,1])))
    im_rolled = rollimg(im_unamb, 'l')
    assert_array_equal(im_rolled.get_data(),
                       im_unamb.get_data().transpose([3,0,1,2]))
    # Zero row / col means we can't find an axis mapping, when fix0 is false
    aff_z = np.diag([1, 2, 3, 0, 1])
    im_z = Image(data, AT('ijkl', 'xyzt', aff_z))
    assert_raises(AxisError, rollimg, im_z, 't', fix0=False)
    # But we can work it out if we turn on our zero detector
    assert_equal(rollimg(im_z, 't', fix0=True).coordmap,
                 AT('lijk', 'xyzt', aff_z[:, (3, 0, 1, 2, 4)]))
    # That's the default
    assert_equal(rollimg(im_z, 't').coordmap,
                 AT('lijk', 'xyzt', aff_z[:, (3, 0, 1, 2, 4)]))
    # Non square is OK
    aff_r = np.array([[1, 0, 0, 10],
                      [0, 2, 0, 11],
                      [0, 0, 2, 12],
                      [0, 0, 0, 13],
                      [0, 0, 0, 1]])
    im_r = Image(data[:,:,:,0], AT('ijk', 'xyzt', aff_r))
    assert_equal(rollimg(im_r, 'k').coordmap,
                 AT('kij', 'xyzt', aff_r[:, (2, 0, 1, 3)]))
    # Unless you're trying to get at the dropped input dimension of course
    assert_raises(AxisError, rollimg, im_r, 't')
    # Another check for integers, input names, output names, reversing
    for i, o, n in zip('ijkl', 'xyzt', range(4)):
        im_i = rollimg(im, i)
        im_o = rollimg(im, o)
        im_n = rollimg(im, n)
        assert_array_equal(im_i.get_data(), im_o.get_data())
        assert_array_equal(im_i.affine, im_o.affine)
        assert_array_equal(im_n.get_data(), im_o.get_data())
        for _im in [im_n, im_o, im_i]:
            # We're rolling back.  We want to roll the new axis 0 back to where
            # it started, which was position n
            im_n_inv = rollimg(_im, 0, n + 1)
            assert_array_equal(im_n_inv.affine, im.affine)
            assert_array_equal(im_n_inv.get_data(), im.get_data())
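
rollimg mirrors np.rollaxis on the data while permuting the matching affine columns; the axis bookkeeping alone:

import numpy as np

data = np.zeros((3, 4, 7, 5))
print(np.rollaxis(data, 1).shape)     # (4, 3, 7, 5): axis 1 moved to the front
print(np.rollaxis(data, 3, 1).shape)  # (3, 5, 4, 7): axis 3 rolled to before axis 1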

Example 144

Project: delocate Source File: test_scripts.py
def test_add_platforms():
    # Check adding platform to wheel name and tag section
    exp_items = [('Generator', 'bdist_wheel (0.23.0)'),
                 ('Root-Is-Purelib', 'false'),
                 ('Tag', 'cp27-none-macosx_10_6_intel'),
                 ('Wheel-Version', '1.0')]
    assert_equal(get_winfo(PLAT_WHEEL, drop_version=False), exp_items)
    with InTemporaryDirectory() as tmpdir:
        # First wheel needs proper wheel filename for later unpack test
        out_fname = basename(PURE_WHEEL)
        # Need to specify at least one platform
        assert_raises(RuntimeError, run_command,
            ['delocate-addplat', PURE_WHEEL, '-w', tmpdir])
        plat_args = ['-p', 'macosx_10_9_intel',
                    '--plat-tag', 'macosx_10_9_x86_64']
        # Can't add platforms to a pure wheel
        assert_raises(RuntimeError, run_command,
            ['delocate-addplat', PURE_WHEEL, '-w', tmpdir] + plat_args)
        assert_false(exists(out_fname))
        # Error raised (as above) unless ``--skip-error`` flag set
        code, stdout, stderr = run_command(
            ['delocate-addplat', PURE_WHEEL, '-w', tmpdir, '-k'] + plat_args)
        # Still doesn't do anything though
        assert_false(exists(out_fname))
        # Works for plat_wheel
        out_fname = ('fakepkg1-1.0-cp27-none-macosx_10_6_intel.'
                     'macosx_10_9_intel.macosx_10_9_x86_64.whl')
        code, stdout, stderr = run_command(
            ['delocate-addplat', PLAT_WHEEL, '-w', tmpdir] + plat_args)
        assert_true(isfile(out_fname))
        # Expected output minus wheel-version (that might change)
        extra_exp = [('Generator', 'bdist_wheel (0.23.0)'),
                      ('Root-Is-Purelib', 'false'),
                      ('Tag', 'cp27-none-macosx_10_6_intel'),
                      ('Tag', 'cp27-none-macosx_10_9_intel'),
                      ('Tag', 'cp27-none-macosx_10_9_x86_64')]
        assert_equal(get_winfo(out_fname), extra_exp)
        # If wheel exists (as it does) then raise error
        assert_raises(RuntimeError, run_command,
            ['delocate-addplat', PLAT_WHEEL, '-w', tmpdir] + plat_args)
        # Unless clobber is set
        code, stdout, stderr = run_command(
            ['delocate-addplat', PLAT_WHEEL, '-c', '-w', tmpdir] + plat_args)
        # Can also specify platform tags via --osx-ver flags
        code, stdout, stderr = run_command(
            ['delocate-addplat', PLAT_WHEEL, '-c', '-w', tmpdir, '-x', '10_9'])
        assert_equal(get_winfo(out_fname), extra_exp)
        # Can mix plat_tag and osx_ver
        out_big_fname = ('fakepkg1-1.0-cp27-none-macosx_10_6_intel.'
                         'macosx_10_9_intel.macosx_10_9_x86_64.'
                         'macosx_10_10_intel.macosx_10_10_x86_64.whl')
        extra_big_exp = [('Generator', 'bdist_wheel (0.23.0)'),
                         ('Root-Is-Purelib', 'false'),
                         ('Tag', 'cp27-none-macosx_10_10_intel'),
                         ('Tag', 'cp27-none-macosx_10_10_x86_64'),
                         ('Tag', 'cp27-none-macosx_10_6_intel'),
                         ('Tag', 'cp27-none-macosx_10_9_intel'),
                         ('Tag', 'cp27-none-macosx_10_9_x86_64')]
        code, stdout, stderr = run_command(
            ['delocate-addplat', PLAT_WHEEL, '-w', tmpdir, '-x', '10_10']
            + plat_args)
        assert_equal(get_winfo(out_big_fname), extra_big_exp)
        # Default is to write into directory of wheel
        os.mkdir('wheels')
        shutil.copy2(PLAT_WHEEL, 'wheels')
        local_plat = pjoin('wheels', basename(PLAT_WHEEL))
        local_out = pjoin('wheels', out_fname)
        code, stdout, stderr = run_command(
            ['delocate-addplat', local_plat]  + plat_args)
        assert_true(exists(local_out))
        # With rm_orig flag, delete original unmodified wheel
        os.unlink(local_out)
        code, stdout, stderr = run_command(
            ['delocate-addplat', '-r', local_plat]  + plat_args)
        assert_false(exists(local_plat))
        assert_true(exists(local_out))
        # Copy original back again
        shutil.copy2(PLAT_WHEEL, 'wheels')
        # If platforms already present, don't write more
        res = sorted(os.listdir('wheels'))
        assert_equal(get_winfo(local_out), extra_exp)
        code, stdout, stderr = run_command(
            ['delocate-addplat', local_out, '--clobber']  + plat_args)
        assert_equal(sorted(os.listdir('wheels')), res)
        assert_equal(get_winfo(local_out), extra_exp)
        # The wheel doesn't get deleted if the output name is the same as the input, as here
        code, stdout, stderr = run_command(
            ['delocate-addplat', local_out, '-r', '--clobber']  + plat_args)
        assert_equal(sorted(os.listdir('wheels')), res)
        # But adds WHEEL tags if missing, even if file name is OK
        shutil.copy2(local_plat, local_out)
        assert_not_equal(get_winfo(local_out), extra_exp)
        code, stdout, stderr = run_command(
            ['delocate-addplat', local_out, '--clobber']  + plat_args)
        assert_equal(sorted(os.listdir('wheels')), res)
        assert_equal(get_winfo(local_out), extra_exp)

Example 145

Project: claripy Source File: test_vsa.py
def test_vsa_constraint_to_si():
    # Set backend
    b = claripy.backends.vsa
    s = claripy.SolverVSA() #pylint:disable=unused-variable

    SI = claripy.SI
    BVV = claripy.BVV

    claripy.vsa.strided_interval.allow_dsis = False

    #
    # If(SI == 0, 1, 0) == 1
    #

    s1 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.If(s1 == BVV(0, 32), BVV(1, 1), BVV(0, 1)) == BVV(1, 1))
    ast_false = (claripy.If(s1 == BVV(0, 32), BVV(1, 1), BVV(0, 1)) != BVV(1, 1))

    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s1)
    # True side: claripy.SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.is_true(trueside_replacement[0][1] == claripy.SI(bits=32, stride=0, lower_bound=0, upper_bound=0)))

    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s1)
    # False side; claripy.SI<32>1[1, 2]

    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=1, upper_bound=2))
    )

    #
    # If(SI == 0, 1, 0) <= 1
    #

    s1 = SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.If(s1 == BVV(0, 32), BVV(1, 1), BVV(0, 1)) <= BVV(1, 1))
    ast_false = (claripy.If(s1 == BVV(0, 32), BVV(1, 1), BVV(0, 1)) > BVV(1, 1))

    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True) # Always satisfiable

    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, False) # Not sat

    #
    # If(SI == 0, 20, 10) > 15
    #

    s1 = SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.If(s1 == BVV(0, 32), BVV(20, 32), BVV(10, 32)) > BVV(15, 32))
    ast_false = (claripy.If(s1 == BVV(0, 32), BVV(20, 32), BVV(10, 32)) <= BVV(15, 32))

    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s1)
    # True side: SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(trueside_replacement[0][1], SI(bits=32, stride=0, lower_bound=0, upper_bound=0))
    )

    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s1)
    # False side; SI<32>1[1, 2]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=1, upper_bound=2))
    )

    #
    # If(SI == 0, 20, 10) >= 15
    #

    s1 = SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.If(s1 == BVV(0, 32), BVV(15, 32), BVV(10, 32)) >= BVV(15, 32))
    ast_false = (claripy.If(s1 == BVV(0, 32), BVV(15, 32), BVV(10, 32)) < BVV(15, 32))

    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s1)
    # True side: SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(trueside_replacement[0][1], SI(bits=32, stride=0, lower_bound=0, upper_bound=0))
    )

    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s1)
    # False side; SI<32>1[1, 2]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=1, upper_bound=2))
    )

    #
    # Extract(0, 0, Concat(BVV(0, 63), If(SI == 0, 1, 0))) == 1
    #

    s2 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.Extract(0, 0, claripy.Concat(BVV(0, 63), claripy.If(s2 == 0, BVV(1, 1), BVV(0, 1)))) == 1)
    ast_false = (claripy.Extract(0, 0, claripy.Concat(BVV(0, 63), claripy.If(s2 == 0, BVV(1, 1), BVV(0, 1)))) != 1)

    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s2)
    # True side: claripy.SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(trueside_replacement[0][1], SI(bits=32, stride=0, lower_bound=0, upper_bound=0))
    )

    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s2)
    # False side; claripy.SI<32>1[1, 2]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=1, upper_bound=2))
    )

    #
    # Extract(0, 0, ZeroExt(32, If(SI == 0, BVV(1, 32), BVV(0, 32)))) == 1
    #

    s3 = claripy.SI(bits=32, stride=1, lower_bound=0, upper_bound=2)
    ast_true = (claripy.Extract(0, 0, claripy.ZeroExt(32, claripy.If(s3 == 0, BVV(1, 32), BVV(0, 32)))) == 1)
    ast_false = (claripy.Extract(0, 0, claripy.ZeroExt(32, claripy.If(s3 == 0, BVV(1, 32), BVV(0, 32)))) != 1)

    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s3)
    # True side: claripy.SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(trueside_replacement[0][1], SI(bits=32, stride=0, lower_bound=0, upper_bound=0))
    )

    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s3)
    # False side; claripy.SI<32>1[1, 2]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=1, upper_bound=2))
    )

    #
    # Extract(0, 0, ZeroExt(32, If(Extract(32, 0, (SI & claripy.SI)) < 0, BVV(1, 1), BVV(0, 1))))
    #

    s4 = claripy.SI(bits=64, stride=1, lower_bound=0, upper_bound=0xffffffffffffffff)
    ast_true = (
        claripy.Extract(0, 0, claripy.ZeroExt(32, claripy.If(claripy.Extract(31, 0, (s4 & s4)).SLT(0), BVV(1, 32), BVV(0, 32)))) == 1)
    ast_false = (
        claripy.Extract(0, 0, claripy.ZeroExt(32, claripy.If(claripy.Extract(31, 0, (s4 & s4)).SLT(0), BVV(1, 32), BVV(0, 32)))) != 1)

    trueside_sat, trueside_replacement = b.constraint_to_si(ast_true)
    nose.tools.assert_equal(trueside_sat, True)
    nose.tools.assert_equal(len(trueside_replacement), 1)
    nose.tools.assert_true(trueside_replacement[0][0] is s4[31:0])
    # True side: claripy.SI<32>0[0, 0]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(trueside_replacement[0][1], SI(bits=32, stride=1, lower_bound=-0x80000000, upper_bound=-1))
    )

    falseside_sat, falseside_replacement = b.constraint_to_si(ast_false)
    nose.tools.assert_equal(falseside_sat, True)
    nose.tools.assert_equal(len(falseside_replacement), 1)
    nose.tools.assert_true(falseside_replacement[0][0] is s4[31:0])
    # False side; claripy.SI<32>1[1, 2]
    nose.tools.assert_true(
        claripy.backends.vsa.identical(falseside_replacement[0][1], SI(bits=32, stride=1, lower_bound=0, upper_bound=0x7fffffff))
    )
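
In the SI notation used in the comments above, SI<bits>stride[lb, ub] denotes the set {lb, lb+stride, ..., ub} of bits-wide values; a small hypothetical helper to concretize one:

def concretize(stride, lower_bound, upper_bound):
    # a stride of 0 denotes the singleton {lower_bound}
    if stride == 0:
        return [lower_bound]
    return list(range(lower_bound, upper_bound + 1, stride))

print(concretize(0, 0, 0))  # [0]     - a "true side" replacement above
print(concretize(1, 1, 2))  # [1, 2]  - a "false side" replacement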

Example 146

Project: ANALYSE Source File: tests.py
    def flag_thread(self, mock_request, is_closed):
        mock_request.return_value.status_code = 200
        self._set_mock_request_data(mock_request, {
            "title": "Hello",
            "body": "this is a post",
            "course_id": "MITx/999/Robot_Super_Course",
            "anonymous": False,
            "anonymous_to_peers": False,
            "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
            "created_at": "2013-05-10T18:53:43Z",
            "updated_at": "2013-05-10T18:53:43Z",
            "at_position_list": [],
            "closed": is_closed,
            "id": "518d4237b023791dca00000d",
            "user_id": "1", "username": "robot",
            "votes": {
                "count": 0,
                "up_count": 0,
                "down_count": 0,
                "point": 0
            },
            "abuse_flaggers": [1],
            "type": "thread",
            "group_id": None,
            "pinned": False,
            "endorsed": False,
            "unread_comments_count": 0,
            "read": False,
            "comments_count": 0,
        })
        url = reverse('flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
        response = self.client.post(url)
        assert_true(mock_request.called)

        call_list = [
            (
                ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'mark_as_read': True, 'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
                {
                    'data': {'user_id': '1'},
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'mark_as_read': True, 'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            )
        ]

        assert_equal(call_list, mock_request.call_args_list)

        assert_equal(response.status_code, 200)
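
The call_args_list comparison above reduces to a short, runnable sketch; the bare Mock and URL below are illustrative stand-ins, not the project's patched request function:

from unittest.mock import Mock, ANY  # `from mock import Mock, ANY` on Python 2
from nose.tools import assert_equal, assert_true

request = Mock(return_value=None)
request('get', '/threads/1', params={'request_id': 'abc'})

assert_true(request.called)
# Each recorded call compares equal to an (args, kwargs) tuple, and ANY
# matches any value, the same trick used for the headers and request ids above.
assert_equal(request.call_args_list,
             [(('get', '/threads/1'), {'params': {'request_id': ANY}})])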

Example 147

Project: ANALYSE Source File: tests.py
Function: flag_comment
    def flag_comment(self, mock_request, is_closed):
        mock_request.return_value.status_code = 200
        self._set_mock_request_data(mock_request, {
            "body": "this is a comment",
            "course_id": "MITx/999/Robot_Super_Course",
            "anonymous": False,
            "anonymous_to_peers": False,
            "commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
            "created_at": "2013-05-10T18:53:43Z",
            "updated_at": "2013-05-10T18:53:43Z",
            "at_position_list": [],
            "closed": is_closed,
            "id": "518d4237b023791dca00000d",
            "user_id": "1",
            "username": "robot",
            "votes": {
                "count": 0,
                "up_count": 0,
                "down_count": 0,
                "point": 0
            },
            "abuse_flaggers": [1],
            "type": "comment",
            "endorsed": False
        })
        url = reverse('flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
        response = self.client.post(url)
        assert_true(mock_request.called)

        call_list = [
            (
                ('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
                {
                    'data': {'user_id': '1'},
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            ),
            (
                ('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
                {
                    'data': None,
                    'params': {'request_id': ANY},
                    'headers': ANY,
                    'timeout': 5
                }
            )
        ]

        assert_equal(call_list, mock_request.call_args_list)

        assert_equal(response.status_code, 200)
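
Stripped of the mocking, the Django test-client pattern in these two examples looks like the following sketch; the route name resolution assumes the URL is registered, and the ids are placeholders:

from django.core.urlresolvers import reverse  # `django.urls` in newer Django
from nose.tools import assert_equal

def check_flag_endpoint(client):
    # Resolve the named route, POST to it, and assert on the status code,
    # mirroring the structure of flag_comment above.
    url = reverse('flag_abuse_for_comment',
                  kwargs={'comment_id': '1', 'course_id': 'org/num/run'})
    response = client.post(url)
    assert_equal(response.status_code, 200)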

Example 148

Project: regreg Source File: test_doctemplates.py
Function: test_more_fully
def test_more_fully():
    @doc_templater()
    class C(object):
        @doc_template_provider
        def fa(self, arg):
            """A docstring %(a)s"""

        @doc_template_provider
        def fb(self, arg):
            """B docstring %(b)s"""

    # Providing a template sets the docstring to None
    assert_equal(C().fa.__doc__, None)

    @doc_templater(dict(a = 2, b='two'))
    class D(C):
        @doc_template_user
        def fa(self, arg):
            pass

        @doc_template_user
        def fb(self, arg):
            pass

    # Using a template
    assert_equal(D().fa.__doc__, "A docstring 2")
    assert_equal(D().fb.__doc__, "B docstring two")

    @doc_templater(dict(a = 3))
    class E(D):
        @doc_template_user
        def fa(self, arg):
            pass

        @doc_template_user
        def fb(self, arg):
            pass

    # Overriding a parameter in the template
    assert_equal(E().fa.__doc__, "A docstring 3")
    assert_equal(E().fb.__doc__, "B docstring two")

    def missing_param_error():
        @doc_templater(dict(a = 4))
        class Q(object):
            @doc_template_user
            def fb(self, arg):
                pass

    # A template parameter ('b' is never provided) is missing - raises KeyError
    assert_raises(KeyError, missing_param_error)

    @doc_templater(dict(a = 4))
    class E(D):
        @doc_template_user
        @doc_template_provider
        def fa(self, arg):
            """Another docstring %(a)s"""

    # Can be both a user and a provider.
    assert_equal(E().fa.__doc__, "Another docstring 4")
    assert_equal(E().fb.__doc__, "B docstring two")

    @doc_templater(dict(a = 4))
    class F(D):
        @doc_template_provider
        @doc_template_user
        def fa(self, arg):
            """Another docstring %(a)s"""

    # The decorators can be in either order
    assert_equal(F().fa.__doc__, "Another docstring 4")
    assert_equal(F().fb.__doc__, "B docstring two")

    def null_doc_error(err=True):
        @doc_templater(dict(a = 99), doc_error=err)
        class Q(F):
            @doc_template_user
            def fa(self, arg):
                "A docstring too far"
        return Q

    # If docstring to be replaced is not empty, raise error by default
    assert_raises(ValueError, null_doc_error, True)
    assert_equal(null_doc_error(False)().fa.__doc__, "Another docstring 99")

    @doc_templater(dict(a = 5))
    class G(F):
        @doc_template_user
        def fa(self, arg):
            pass

    # The template provided earlier by F is still in effect
    assert_equal(G().fa.__doc__, "Another docstring 5")
    assert_equal(G().fb.__doc__, "B docstring two")

    @doc_templater(dict(a=6, b='three'))
    class H(F):
        @doc_template_provider
        def fa(self, arg):
            """Yet another docstring %(a)s"""

    # Providing without using, again
    assert_equal(H().fa.__doc__, None)

    @doc_templater()
    class I(H):
        @doc_template_user
        def fa(self, arg):
            pass

        @doc_template_user
        def fb(self, arg):
            pass

    # Now using the templates provided above
    assert_equal(I().fa.__doc__, "Yet another docstring 6")
    assert_equal(I().fb.__doc__, "B docstring three")
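
Independent of regreg's decorators, the docstring assertions boil down to a small self-contained sketch; fill_doc below is hand-rolled for illustration, not part of regreg:

from nose.tools import assert_equal

def fill_doc(template, **params):
    # Emulate the %-style template filling checked above: a template with
    # no parameters supplied yields None, like an unused provider.
    return template % params if params else None

assert_equal(fill_doc("A docstring %(a)s"), None)
assert_equal(fill_doc("A docstring %(a)s", a=2), "A docstring 2")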

Example 149

Project: terryfy Source File: test_travisparse.py
Function: test_get_envs
def test_get_envs():
    # Get fetch of environment from .travis.yml
    assert_equal(get_envs({}), '')
    assert_equal(get_envs(dict(install = ['something'])), '')
    yaml = dict(env = {'global': ['LATEST_TAG=1'],
                       'matrix': ['VERSION=2.7.8 NUMPY_VERSION=1.6.1',
                                  'VERSION=3.3.5 NUMPY_VERSION=1.7.1',
                                  'VERSION=3.4.1 NUMPY_VERSION=1.7.1']})
    assert_equal(get_envs(yaml),
"""LATEST_TAG=1
VERSION=2.7.8 NUMPY_VERSION=1.6.1
""")
    yaml = dict(env = {'matrix': ['VERSION=2.7.8 NUMPY_VERSION=1.6.1',
                                  'VERSION=3.3.5 NUMPY_VERSION=1.7.1',
                                  'VERSION=3.4.1 NUMPY_VERSION=1.7.1']})
    assert_equal(get_envs(yaml),
"""VERSION=2.7.8 NUMPY_VERSION=1.6.1
""")
    yaml = dict(env = ['ISOLATED=true', 'ISOLATED=false'])
    assert_equal(get_envs(yaml),
"""ISOLATED=true
""")
    # excludes too complicated
    yaml = dict(env = {'matrix':
                       {'exclude':
                        [{'gemfile': 'Gemfile', 'rvm': '2.0.0'}]}})
    assert_raises(TravisError, get_envs, yaml)
    # includes too complicated
    yaml = dict(env = {'matrix':
                       {'include':
                        [{'gemfile': 'gemfiles/Gemfile.rails-3.2.x',
                          'rvm': 'ruby-head',
                          'env': 'ISOLATED=false'}]}})
    assert_raises(TravisError, get_envs, yaml)
    # global implies matrix
    yaml = dict(env = {'global': ['LATEST_TAG=1']})
    assert_raises(TravisError, get_envs, yaml)
    # one line is OK too
    yaml = dict(env = {'global': 'LATEST_TAG=1',
                       'matrix': 'VERSION=3.3.1'})
    assert_equal(get_envs(yaml),
"""LATEST_TAG=1
VERSION=3.3.1
""")
    yaml = dict(env = 'MY_VAR=1')
    assert_equal(get_envs(yaml),
"""MY_VAR=1
""")

Example 150

Project: mne-python Source File: test_tfr.py
Function: test_time_frequency
def test_time_frequency():
    """Test the to-be-deprecated time-frequency transform (PSD and ITC)."""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing

    # Setup for reading the raw data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=include, exclude=exclude)

    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax)

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    # Now compute evoked
    evoked = epochs.average()
    power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
                              return_itc=False)
    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                              use_fft=True, return_itc=True, decim=slice(0, 2))
    # Test picks argument and average parameter
    assert_raises(ValueError, tfr_morlet, epochs, freqs=freqs,
                  n_cycles=n_cycles, return_itc=True, average=False)

    power_picks, itc_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=True, picks=picks, average=True)

    epochs_power_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=False, picks=picks, average=False)
    power_picks_avg = epochs_power_picks.average()
    # the actual data arrays here are equivalent, too...
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(power.data, power_picks_avg.data)
    assert_array_almost_equal(itc.data, itc_picks.data)
    assert_array_almost_equal(power.data, power_evoked.data)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test sub

    power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)

    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(power_.data.shape == (len(picks), len(freqs), 2))
    assert_true(power_.data.shape == itc_.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    # grand average
    itc2 = itc.copy()
    itc2.info['bads'] = [itc2.ch_names[0]]  # test channel drop
    gave = grand_average([itc2, itc])
    assert_equal(gave.data.shape, (itc2.data.shape[0] - 1,
                                   itc2.data.shape[1],
                                   itc2.data.shape[2]))
    assert_equal(itc2.ch_names[1:], gave.ch_names)
    assert_equal(gave.nave, 2)
    itc2.drop_channels(itc2.info["bads"])
    assert_array_almost_equal(gave.data, itc2.data)
    itc2.data = np.ones(itc2.data.shape)
    itc.data = np.zeros(itc.data.shape)
    itc2.nave = 2
    itc.nave = 1
    itc.drop_channels([itc.ch_names[0]])
    combined_itc = combine_tfr([itc2, itc])
    assert_array_almost_equal(combined_itc.data,
                              np.ones(combined_itc.data.shape) * 2 / 3)

    # more tests
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
                            return_itc=True)

    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False,
                     return_itc=False).data[0]
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))
    tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2,
                      decim=slice(0, 2), average=False,
                      return_itc=False).data[0]
    assert_true(tfr2.shape == (len(picks), len(freqs), 2))

    single_power = tfr_morlet(epochs, freqs, 2, average=False,
                              return_itc=False).data
    single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2),
                               average=False, return_itc=False).data
    single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3),
                               average=False, return_itc=False).data
    single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4),
                               average=False, return_itc=False).data

    assert_array_almost_equal(np.mean(single_power, axis=0), power.data)
    assert_array_almost_equal(np.mean(single_power2, axis=0),
                              power.data[:, :, :2])
    assert_array_almost_equal(np.mean(single_power3, axis=0),
                              power.data[:, :, 1:3])
    assert_array_almost_equal(np.mean(single_power4, axis=0),
                              power.data[:, :, 2:4])

    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)

    # Test decimation:
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in [2, 3, 8, 9]:
        for use_fft in [True, False]:
            power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,
                                    use_fft=use_fft, return_itc=True,
                                    decim=decim)
            assert_equal(power.data.shape[2],
                         np.ceil(float(len(times)) / decim))
    freqs = list(range(50, 55))
    decim = 2
    _, n_chan, n_time = data.shape
    tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False,
                     return_itc=False).data[0]
    assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))

    # Test cwt modes
    Ws = morlet(512, [10, 20], n_cycles=2)
    assert_raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
    for use_fft in [True, False]:
        for mode in ['same', 'valid', 'full']:
            # XXX JRK: full wavelet decomposition needs to be implemented
            if (not use_fft) and mode == 'full':
                assert_raises(ValueError, cwt, data[0, :, :], Ws,
                              use_fft=use_fft, mode=mode)
                continue
            cwt(data[0, :, :], Ws, use_fft=use_fft, mode=mode)

    # Test decim parameter checks
    assert_raises(TypeError, tfr_morlet, epochs, freqs=freqs,
                  n_cycles=n_cycles, use_fft=True, return_itc=True,
                  decim='decim')
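
The decimation shape check near the end of this test can be sketched with a plain NumPy array standing in for the TFR data; the shapes below are arbitrary:

import numpy as np
from nose.tools import assert_equal

n_times, decim = 100, 3
data = np.zeros((2, 4, n_times))  # (channels, freqs, times)
decimated = data[..., ::decim]    # keep every decim-th time sample
assert_equal(decimated.shape[2], int(np.ceil(float(n_times) / decim)))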