pytest.raises

Here are the examples of the python api pytest.raises taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

199 Examples

Example 101

View license
    def test_add_node(self):
        """add nodes to the graph, with and without layers"""
        # a node added without layers lands in the default namespace layer
        self.docgraph.add_node(1)
        assert len(self.docgraph.node) == 2
        assert self.docgraph.node[1] == {'layers': {'discoursegraph'}}

        # nodes added with explicit layer sets store exactly those layers
        # (plus the default namespace when it is passed in explicitly)
        additions = [
            (2, {'foo'}, {'foo'}),
            (3, {'foo', 'bar'}, {'foo', 'bar'}),
            (4, {self.docgraph.ns, 'foo', 'bar'},
             {'discoursegraph', 'foo', 'bar'}),
        ]
        for expected_len, (node_id, layers, stored) in enumerate(
                additions, start=3):
            self.docgraph.add_node(node_id, layers=layers)
            assert len(self.docgraph.node) == expected_len
            assert self.docgraph.node[node_id] == {'layers': stored}

        # re-adding an existing node with different layer(s) does not create
        # a new node; the new layers are merged into the existing set
        self.docgraph.add_node(4, layers={'bla'})
        assert len(self.docgraph.node) == 5
        assert self.docgraph.node[4] == \
            {'layers': {'discoursegraph', 'foo', 'bar', 'bla'}}

        self.docgraph.add_node(4, layers={'xyz', 'abc'})
        assert len(self.docgraph.node) == 5
        assert self.docgraph.node[4] == \
            {'layers': {'discoursegraph', 'foo', 'bar', 'bla', 'xyz', 'abc'}}

        # 'layers' must be a set of str; any other shape (plain string,
        # list, or sets containing non-strings) triggers an AssertionError
        for bad_layers in ('foo', ['foo'], {23}, {'foo', 23}):
            with pytest.raises(AssertionError):
                self.docgraph.add_node(666, layers=bad_layers)

Example 102

Project: barman
Source File: test_wal_archiver.py
View license
    def test_archive_wal(self, tmpdir, capsys):
        """
        Test WalArchiver.archive_wal behaviour when the WAL file already
        exists in the archive.

        Covers: a plain archival pass, duplicates with identical content,
        duplicates with differing content, duplicates where either side is
        compressed, and a final archival pass that applies compression.
        """
        # NOTE(review): capsys is requested but never used in this test.

        # Setup the test environment
        backup_manager = build_backup_manager(
            name='TestServer',
            global_conf={
                'barman_home': tmpdir.strpath
            })
        # start with no compression configured and no existing backups
        backup_manager.compression_manager.get_compressor.return_value = None
        backup_manager.server.get_backup.return_value = None

        # build the incoming/wals directory layout under the tmpdir
        basedir = tmpdir.join('main')
        incoming_dir = basedir.join('incoming')
        archive_dir = basedir.join('wals')
        xlog_db = archive_dir.join('xlog.db')
        wal_name = '000000010000000000000001'
        wal_file = incoming_dir.join(wal_name)
        wal_file.ensure()
        archive_dir.ensure(dir=True)
        xlog_db.ensure()
        # xlogdb() is used as a context manager by the archiver;
        # make it yield a real file handle opened in append mode
        backup_manager.server.xlogdb.return_value.__enter__.return_value = (
            xlog_db.open(mode='a'))
        archiver = FileWalArchiver(backup_manager)
        backup_manager.server.archivers = [archiver]

        # Tests a basic archival process: the incoming file is moved
        # into the archive directory
        wal_info = WalFileInfo.from_file(wal_file.strpath)
        archiver.archive_wal(None, wal_info)

        assert not os.path.exists(wal_file.strpath)
        assert os.path.exists(wal_info.fullpath(backup_manager.server))

        # Tests the archiver behaviour for duplicate WAL files, as the
        # wal file named '000000010000000000000001' was already archived
        # in the previous test (identical content -> "matching" duplicate)
        wal_file.ensure()
        wal_info = WalFileInfo.from_file(wal_file.strpath)

        with pytest.raises(MatchingDuplicateWalFile):
            archiver.archive_wal(None, wal_info)

        # Tests the archiver behaviour for duplicated WAL files with
        # different contents (a genuine conflict, not just a retry)
        wal_file.write('test')
        wal_info = WalFileInfo.from_file(wal_file.strpath)

        with pytest.raises(DuplicateWalFile):
            archiver.archive_wal(None, wal_info)

        # Tests the archiver behaviour for duplicate WAL files, as the
        # wal file named '000000010000000000000001' was already archived
        # in the previous test and the input file uses compression;
        # contents still match once decompressed
        compressor = PyGZipCompressor(backup_manager.config, 'pygzip')
        compressor.compress(wal_file.strpath, wal_file.strpath)
        wal_info = WalFileInfo.from_file(wal_file.strpath)
        assert os.path.exists(wal_file.strpath)
        backup_manager.compression_manager.get_compressor.return_value = (
            compressor)

        with pytest.raises(MatchingDuplicateWalFile):
            archiver.archive_wal(None, wal_info)

        # Test the archiver behaviour when the incoming file is compressed
        # and it has been already archived and compressed.
        compressor.compress(wal_info.fullpath(backup_manager.server),
                            wal_info.fullpath(backup_manager.server))

        wal_info = WalFileInfo.from_file(wal_file.strpath)
        with pytest.raises(MatchingDuplicateWalFile):
            archiver.archive_wal(None, wal_info)

        # Reset the status of the incoming and WALs directory
        # removing the files archived during the preceding tests.
        os.unlink(wal_info.fullpath(backup_manager.server))
        os.unlink(wal_file.strpath)

        # Test the archival of a WAL file using compression: the archived
        # copy must exist, the incoming file must be gone, and the archived
        # file must actually be gzip-compressed
        wal_file.write('test')
        wal_info = WalFileInfo.from_file(wal_file.strpath)
        archiver.archive_wal(compressor, wal_info)
        assert os.path.exists(wal_info.fullpath(backup_manager.server))
        assert not os.path.exists(wal_file.strpath)
        assert 'gzip' == identify_compression(
            wal_info.fullpath(backup_manager.server)
        )

Example 103

Project: gnsq
Source File: test_reader.py
View license
def test_basic():
    """Constructor validation and attribute wiring for Reader."""
    # topic/channel alone is not enough — an address source is required
    with pytest.raises(ValueError):
        Reader('test', 'test')

    # address arguments must not be None when the other one is provided
    invalid_addresses = [
        dict(nsqd_tcp_addresses=None,
             lookupd_http_addresses='http://localhost:4161/'),
        dict(nsqd_tcp_addresses='localhost:4150',
             lookupd_http_addresses=None),
    ]
    for kwargs in invalid_addresses:
        with pytest.raises(TypeError):
            Reader(topic='test', channel='test', **kwargs)

    def handle(reader, message):
        pass

    reader = Reader(
        topic='test',
        channel='test',
        name='test',
        max_concurrency=-1,
        nsqd_tcp_addresses='localhost:4150',
        lookupd_http_addresses='http://localhost:4161/',
        message_handler=handle
    )

    assert reader.name == 'test'
    # max_concurrency=-1 expands to the machine's CPU count
    assert reader.max_concurrency == multiprocessing.cpu_count()
    assert len(reader.on_message.receivers) == 1

    # scalar address strings are normalized into collections
    assert isinstance(reader.nsqd_tcp_addresses, set)
    assert len(reader.nsqd_tcp_addresses) == 1

    assert isinstance(reader.lookupds, list)
    assert len(reader.lookupds) == 1

Example 104

Project: eppy
Source File: test_bunch_subclass.py
View license
def test_EpBunch():
    """py.test for EpBunch

    Exercises attribute and item access, field aliases, plugged-in
    functions, BadEPFieldError on unknown fields, and auto-extension of
    extendible fields (Construction layers).
    """

    iddfile = StringIO(iddtxt)
    fname = StringIO(idftxt)
    block, data, commdct, idd_index = readidf.readdatacommdct1(fname, 
                iddfile=iddfile)

    # setup code walls - can be generic for any object
    ddtt = data.dt
    dtls = data.dtls
    wall_i = dtls.index('BuildingSurface:Detailed'.upper())
    wallkey = 'BuildingSurface:Detailed'.upper()
    wallidd = commdct[wall_i]

    dwalls = ddtt[wallkey]
    dwall = dwalls[0]


    # build python-identifier field names from the IDD field comments;
    # the first slot has no 'field' entry, so it is patched to 'key'
    wallfields = [comm.get('field') for comm in commdct[wall_i]]
    wallfields[0] = ['key']
    wallfields = [field[0] for field in wallfields]
    wall_fields = [bunchhelpers.makefieldname(field) for field in wallfields]
    assert wall_fields[:20] == [
        'key', 'Name', 'Surface_Type',
        'Construction_Name', 'Zone_Name', 'Outside_Boundary_Condition',
        'Outside_Boundary_Condition_Object', 'Sun_Exposure', 'Wind_Exposure',
        'View_Factor_to_Ground', 'Number_of_Vertices', 'Vertex_1_Xcoordinate',
        'Vertex_1_Ycoordinate', 'Vertex_1_Zcoordinate', 'Vertex_2_Xcoordinate',
        'Vertex_2_Ycoordinate', 'Vertex_2_Zcoordinate', 'Vertex_3_Xcoordinate',
        'Vertex_3_Ycoordinate', 'Vertex_3_Zcoordinate']


    bwall = EpBunch(dwall, wall_fields, wallidd)

    # attribute reads and writes go straight through to the underlying
    # data list, so both views stay in sync
    assert bwall.Name == data.dt[wallkey][0][1]
    bwall.Name = 'Gumby'
    assert bwall.Name == data.dt[wallkey][0][1]

    # set aliases
    # (we are inside a function, not a class body, so '__aliases' is NOT
    # name-mangled — the attribute really is named '__aliases')
    bwall.__aliases = {'Constr':'Construction_Name'}

    # an alias reads and writes the same field it points to
    assert bwall.Construction_Name == bwall.Constr
    bwall.Constr = 'AnewConstr'
    assert bwall.Constr == data.dt[wallkey][0][3]

    # add functions: attributes backed by callables that compute a value
    bwall.__functions = {'svalues':bunch_subclass.somevalues}
    assert 'svalues' in bwall.__functions

    # somevalues returns (Name, Construction_Name, full object list)
    assert bwall.svalues == (
        'Gumby', 'AnewConstr',
        [
            'BuildingSurface:Detailed', 'Gumby', 'Wall', 'AnewConstr',
            'West Zone', 'Outdoors', '', 'SunExposed', 'WindExposed',
            '0.5000000', '4', '0', '0', '3.048000', '0', '0', '0', '6.096000',
            '0', '0', '6.096000', '0', '3.048000'])

    # test __getitem__
    assert bwall["Name"] == data.dt[wallkey][0][1]
    # test __setitem__
    newname = "loofah"
    bwall["Name"] = newname
    assert bwall.Name == newname
    assert bwall["Name"] == newname
    assert data.dt[wallkey][0][1] == newname
    # test functions and alias again
    assert bwall.Constr == data.dt[wallkey][0][3]
    assert bwall.svalues == (
        newname, 'AnewConstr',
        [
            'BuildingSurface:Detailed', newname, 'Wall', 'AnewConstr',
            'West Zone', 'Outdoors', '', 'SunExposed', 'WindExposed',
            '0.5000000', '4', '0', '0', '3.048000', '0', '0', '0', '6.096000',
            '0', '0', '6.096000', '0', '3.048000'])
    # test bunch_subclass.BadEPFieldError: unknown fields raise via both
    # attribute and item access, for reads and writes alike
    with pytest.raises(bunch_subclass.BadEPFieldError):
        bwall.Name_atypo = "newname"
    with pytest.raises(bunch_subclass.BadEPFieldError):
        thename = bwall.Name_atypo
    with pytest.raises(bunch_subclass.BadEPFieldError):
        bwall["Name_atypo"] = "newname"
    with pytest.raises(bunch_subclass.BadEPFieldError):
        thename = bwall["Name_atypo"]

    # test where constr["obj"] has to be extended
    # more items are added to an extendible field
    constr_i = dtls.index('Construction'.upper())
    constrkey = 'Construction'.upper()
    constridd = commdct[constr_i]
    dconstrs = ddtt[constrkey]
    dconstr = dconstrs[0]
    constrfields = [comm.get('field') for comm in commdct[constr_i]]
    constrfields[0] = ['key']
    constrfields = [field[0] for field in constrfields]
    constr_fields = [bunchhelpers.makefieldname(field) for field in constrfields]
    bconstr = EpBunch(dconstr, constr_fields, constridd)
    assert bconstr.Name == "Dbl Clr 3mm/13mm Air"
    # writing to the next layer slot simply appends
    bconstr.Layer_4 = "butter"
    assert bconstr.obj == [
        'Construction', 'Dbl Clr 3mm/13mm Air', 'CLEAR 3MM', 'AIR 13MM',
        'CLEAR 3MM', 'butter']
    # writing past the end pads the skipped slots with empty strings
    bconstr.Layer_7 = "cheese"
    assert bconstr.obj == [
        'Construction', 'Dbl Clr 3mm/13mm Air', 'CLEAR 3MM', 'AIR 13MM',
        'CLEAR 3MM', 'butter', '', '', 'cheese']
    bconstr["Layer_8"] = "jam"
    assert bconstr.obj == [
        'Construction', 'Dbl Clr 3mm/13mm Air', 'CLEAR 3MM', 'AIR 13MM',
        'CLEAR 3MM', 'butter', '', '', 'cheese', 'jam']

    # retrieve a valid field that has no value
    assert bconstr.Layer_10 == ''
    assert bconstr["Layer_10"] == ''

Example 105

Project: odl
Source File: grid_test.py
View license
def test_regulargrid_getitem():
    """Indexing and slicing of RegularGrid.

    Single index tuples yield points; slices yield sub-grids that must
    agree with the TensorGrid reference implementation; malformed indices
    raise IndexError/ValueError.
    """
    minpt = (0.75, 0, -5, 4)
    maxpt = (1.25, 0, 1, 13)
    shape = (2, 1, 5, 4)

    grid = RegularGrid(minpt, maxpt, shape)

    # Single indices yield points as an array
    indices = [1, 0, 1, 1]
    values = [vec[i] for i, vec in zip(indices, grid.coord_vectors)]
    assert all_equal(grid[1, 0, 1, 1], values)

    indices = [0, 0, 4, 3]
    values = [vec[i] for i, vec in zip(indices, grid.coord_vectors)]
    assert all_equal(grid[0, 0, 4, 3], values)

    # too many indices
    with pytest.raises(IndexError):
        grid[1, 0, 1, 2, 0]

    # out-of-bounds indices (per axis)
    with pytest.raises(IndexError):
        grid[1, 1, 6, 2]

    with pytest.raises(IndexError):
        grid[1, 0, 4, 6]

    # Slices return new RegularGrid's
    assert grid == grid[...]

    # Use TensorGrid implementation as reference here
    tensor_grid = TensorGrid(*grid.coord_vectors)

    test_slice = np.s_[1, :, ::2, ::3]
    assert all_equal(grid[test_slice].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)
    assert all_equal(grid[1:2, :, ::2, ::3].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)
    assert all_equal(grid[1:2, :, ::2, ..., ::3].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)

    test_slice = np.s_[0:1, :, :, 2:4]
    assert all_equal(grid[test_slice].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)
    assert all_equal(grid[:1, :, :, 2:].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)
    assert all_equal(grid[:-1, ..., 2:].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)

    test_slice = np.s_[:, 0, :, :]
    assert all_equal(grid[test_slice].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)
    assert all_equal(grid[:, 0, ...].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)
    assert all_equal(grid[0:2, :, ...].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)
    assert all_equal(grid[...].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)

    test_slice = np.s_[:, :, 0::2, :]
    assert all_equal(grid[test_slice].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)
    assert all_equal(grid[..., 0::2, :].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)

    test_slice = np.s_[..., 1, :]
    assert all_equal(grid[test_slice].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)
    assert all_equal(grid[:, :, 1, :].coord_vectors,
                     tensor_grid[test_slice].coord_vectors)

    # Fewer indices: missing trailing axes default to full slices
    assert grid[1:] == grid[1:, :, :, :]
    assert grid[1:, 0] == grid[1:, 0, :, :]
    assert grid[1:, 0, :-1] == grid[1:, 0, :-1, :]

    # Two ellipses not allowed
    with pytest.raises(ValueError):
        grid[1, ..., ..., 0]

    # Too many axes
    with pytest.raises(IndexError):
        grid[1, 0, 1:2, 0, :]

    # New axes not supported
    with pytest.raises(ValueError):
        grid[1, 0, None, 1, 0]

    # Empty axes not allowed
    with pytest.raises(ValueError):
        grid[1, 0, 0:0, 1]
    with pytest.raises(ValueError):
        grid[1, 1:, 0, 1]

    # One-dimensional grid
    grid = RegularGrid(1, 5, 5)
    assert grid == grid[...]

    sub_grid = RegularGrid(1, 5, 3)
    # BUG FIX: was `assert grid[::2], sub_grid` — that asserts a 2-tuple,
    # which is always truthy, so the comparison was never checked.
    assert grid[::2] == sub_grid

Example 106

Project: instaseis
Source File: test_receiver.py
View license
def test_error_handling_when_parsing_station_files(tmpdir):
    """
    Tests error handling when parsing station files.

    Each malformed input (inconsistent channel coordinates, missing sac
    attribute, unparseable string) must raise ReceiverParseError or
    ValueError with a descriptive message.
    """
    # Differing coordinates for channels of the same station.
    # FIX: perform the mutation *before* entering pytest.raises, so the
    # context asserts that Receiver.parse itself raises (previously the
    # mutation sat inside the block, which would mask a failure to call
    # parse if the mutation line ever raised).
    inv = obspy.read_inventory()
    inv[0][0][0].latitude -= 10
    with pytest.raises(ReceiverParseError) as err:
        Receiver.parse(inv)
    assert err.value.args[0] == ("The coordinates of the channels of station "
                                 "'GR.FUR' are not identical.")

    # Once again, with a file.
    with io.BytesIO() as buf:
        inv.write(buf, format="stationxml")
        buf.seek(0)
        with pytest.raises(ReceiverParseError) as err:
            Receiver.parse(buf)
    assert err.value.args[0] == ("The coordinates of the channels of station "
                                 "'GR.FUR' are not identical.")

    # ObsPy Trace without a sac attribute.
    with pytest.raises(ReceiverParseError) as err:
        Receiver.parse(obspy.read())
    assert err.value.args[0] == ("ObsPy Trace must have an sac attribute.")

    # Trigger error when a SEED files has differing origins.
    filename = os.path.join(DATA, "dataless.seed.BW_FURT")
    p = obspy.io.xseed.parser.Parser(filename)
    p.blockettes[52][1].latitude += 1
    with pytest.raises(ReceiverParseError) as err:
        Receiver.parse(p)
    assert err.value.args[0] == ("The coordinates of the channels of station "
                                 "'BW.FURT' are not identical")

    # Same thing but this time with a file.
    tmpfile = os.path.join(tmpdir.strpath, "temp.seed")
    p.write_seed(tmpfile)
    with pytest.raises(ReceiverParseError) as err:
        Receiver.parse(tmpfile)
    assert err.value.args[0] == ("The coordinates of the channels of station "
                                 "'BW.FURT' are not identical")

    # Parsing random string.
    with pytest.raises(ValueError) as err:
        Receiver.parse("random_string")
    assert err.value.args[0] == "'random_string' could not be parsed."
Example 107

Project: QNET
Source File: test_qsd_codegen.py
View license
def test_qsd_codegen_observables(caplog, slh_Sec6, slh_Sec6_vals):
    """Registration of observables in QSDCodeGen: name/filename validation,
    overwrite warnings, and generation of the C++ observables section."""
    A2 = Destroy(hs1)
    Sp = LocalSigma(hs2, 1, 0)
    Sm = Sp.dag()
    codegen = QSDCodeGen(circuit=slh_Sec6, num_vals=slh_Sec6_vals)

    # generating the observables section without any registered
    # observable is an error
    with pytest.raises(QSDCodeGenError) as excinfo:
        scode = codegen._observables_lines(indent=0)
    assert "Must register at least one observable" in str(excinfo.value)

    # the observable name is sanitized into an output filename
    name = 'a_1 sigma_10^[2]'
    codegen.add_observable(Sp*A2*Sm*Sp, name=name)
    filename = codegen._observables[name][1]
    assert filename == 'a_1_sigma_10_2.out'
    # registering the same name again logs a warning
    codegen.add_observable(Sp*A2*Sm*Sp, name=name)
    # NOTE(review): caplog.text is a property in modern pytest — this
    # call form targets an older pytest/capturelog API; confirm the
    # pinned pytest version before porting.
    assert 'Overwriting existing operator' in caplog.text()

    # name/filename validation failure modes
    with pytest.raises(ValueError) as exc_info:
        codegen.add_observable(Sp*A2*A2*Sm*Sp, name="xxxx"*20)
    assert "longer than limit" in str(exc_info.value)
    name = 'A2^2'
    codegen.add_observable(Sp*A2*A2*Sm*Sp, name=name)
    assert name in codegen._observables
    filename = codegen._observables[name][1]
    assert filename == 'A2_2.out'

    # 'A2_2' sanitizes to the same filename as 'A2^2' above
    with pytest.raises(ValueError) as exc_info:
        codegen.add_observable(A2, name='A2_2')
    assert "Cannot generate unique filename" in str(exc_info.value)

    with pytest.raises(ValueError) as exc_info:
        codegen.add_observable(A2, name="A2\t2")
    assert "invalid characters" in str(exc_info.value)

    with pytest.raises(ValueError) as exc_info:
        codegen.add_observable(A2, name="A"*100)
    assert "longer than limit" in str(exc_info.value)

    # a name that sanitizes to nothing cannot produce a filename
    with pytest.raises(ValueError) as exc_info:
        codegen.add_observable(A2, name="()")
    assert "Cannot generate filename" in str(exc_info.value)

    # fresh codegen: check the generated C++ observables section
    codegen = QSDCodeGen(circuit=slh_Sec6, num_vals=slh_Sec6_vals)
    codegen.add_observable(Sp*A2*Sm*Sp, name="X1")
    codegen.add_observable(Sm*Sp*A2*Sm, name="X2")
    assert codegen._observables["X2"] == (Sm*Sp*A2*Sm, 'X2.out')
    codegen.add_observable(A2, name="A2")
    assert codegen._observables["A2"] == (A2, 'A2.out')
    scode = codegen._observables_lines(indent=0)
    assert dedent(scode).strip() == dedent(r'''
    const int nOfOut = 3;
    Operator outlist[nOfOut] = {
      (A1 * S2_1_0),
      (A1 * S2_0_1),
      A1
    };
    char *flist[nOfOut] = {"X1.out", "X2.out", "A2.out"};
    int pipe[4] = {1,2,3,4};
    ''').strip()
    # Note how the observables have been simplified
    assert Sp*A2*Sm*Sp == Sp*A2
    assert codegen._operator_str(Sp*A2) == '(A1 * S2_1_0)'
    assert Sm*Sp*A2*Sm == Sm*A2
    assert codegen._operator_str(Sm*A2) == '(A1 * S2_0_1)'
    # If the oberservables introduce new operators or symbols, these should
    # extend the existing ones
    P1 = LocalSigma(hs2, 1, 1)
    zeta = symbols("zeta", real=True)
    codegen.add_observable(zeta*P1, name="P1")
    assert P1 in codegen._local_ops
    assert str(codegen._qsd_ops[P1]) == 'S2_1_1'
    assert zeta in codegen.syms
    codegen.num_vals.update({zeta: 1.0})
    assert 'zeta' in codegen._parameters_lines(indent=0)
    assert str(codegen._qsd_ops[P1]) in codegen._operator_basis_lines(indent=0)
    assert Sp*A2 in set(codegen.observables)
    assert Sm*A2 in set(codegen.observables)
    assert zeta*P1 in set(codegen.observables)
    assert list(codegen.observable_names) == ['X1', 'X2', 'A2', 'P1']
    assert codegen.get_observable('X1') == Sp*A2*Sm*Sp

Example 108

Project: flask-resty
Source File: test_testing.py
View license
def test_objects():
    """assert_value: recursive subset matching on nested dicts/lists."""
    complex_object = {
        'a': 1,
        'b': [1, 2, 3],
        'c': [{}, {'a': 1}],
        'd': {
            'a': 1,
            'b': [],
        },
    }

    # the full object, the empty dict and any matching subset all pass
    matching = (
        complex_object,
        {},
        {'a': 1},
        {
            'b': [1, 2, 3],
            'c': [{}, {}],
        },
        {
            'd': {
                'a': 1,
            },
        },
    )
    for expected in matching:
        assert_value(complex_object, expected)

    # wrong container type, None, list-length mismatches, unknown keys
    # and nested value mismatches all fail
    mismatching = (
        [],
        None,
        {
            'b': [1, 2],
        },
        {
            'a': 1,
            'foo': 1,
        },
        {
            'c': [{}, {'b': 2}],
        },
        {
            'b': [1, 2, 3, 4],
        },
    )
    for expected in mismatching:
        with pytest.raises(AssertionError):
            assert_value(complex_object, expected)

Example 109

Project: QNET
Source File: test_qsd_codegen.py
View license
def test_qsd_codegen_traj(slh_Sec6):
    """Generation of the trajectory-integration section of the QSD code,
    first without and then with a moving basis."""
    A2 = Destroy(hs1)
    Sp = LocalSigma(hs2, 1, 0)
    Sm = Sp.dag()
    codegen = QSDCodeGen(circuit=slh_Sec6)
    codegen.add_observable(Sp*A2*Sm*Sp, name="X1")
    codegen.add_observable(Sm*Sp*A2*Sm, name="X2")
    codegen.add_observable(A2, name="A2")

    # trajectory section cannot be generated before set_trajectories()
    with pytest.raises(QSDCodeGenError) as excinfo:
        scode = codegen._trajectory_lines(indent=0)
    assert "No trajectories set up"  in str(excinfo.value)

    codegen.set_trajectories(psi_initial=None, stepper='AdaptiveStep', dt=0.01,
            nt_plot_step=100, n_plot_steps=5, n_trajectories=1,
            traj_save=10)
    # generated C++ must reflect the trajectory settings verbatim
    scode = codegen._trajectory_lines(indent=0)
    assert dedent(scode).strip() == dedent(r'''
    ACG gen(rndSeed); // random number generator
    ComplexNormal rndm(&gen); // Complex Gaussian random numbers

    double dt = 0.01;
    int dtsperStep = 100;
    int nOfSteps = 5;
    int nTrajSave = 10;
    int nTrajectory = 1;
    int ReadFile = 0;

    AdaptiveStep stepper(psiIni, H, nL, L);
    Trajectory traj(psiIni, dt, stepper, &rndm);

    traj.sumExp(nOfOut, outlist, flist , dtsperStep, nOfSteps,
                nTrajectory, nTrajSave, ReadFile);
    ''').strip()

    # moving-basis argument validation
    with pytest.raises(ValueError) as excinfo:
        codegen.set_moving_basis(move_dofs=0, delta=0.01, width=2,
                                 move_eps=0.01)
    assert "move_dofs must be an integer >0" in str(excinfo.value)
    with pytest.raises(ValueError) as excinfo:
        codegen.set_moving_basis(move_dofs=4, delta=0.01, width=2,
                                 move_eps=0.01)
    assert "move_dofs must not be larger" in str(excinfo.value)
    with pytest.raises(QSDCodeGenError) as excinfo:
        codegen.set_moving_basis(move_dofs=3, delta=0.01, width=2,
                                 move_eps=0.01)
    assert "A moving basis cannot be used" in str(excinfo.value)
    # valid moving basis: extra move/delta/width/moveEps parameters are
    # emitted and forwarded to traj.sumExp
    codegen.set_moving_basis(move_dofs=2, delta=0.01, width=2, move_eps=0.01)
    scode = codegen._trajectory_lines(indent=0)
    assert dedent(scode).strip() == dedent(r'''
    ACG gen(rndSeed); // random number generator
    ComplexNormal rndm(&gen); // Complex Gaussian random numbers

    double dt = 0.01;
    int dtsperStep = 100;
    int nOfSteps = 5;
    int nTrajSave = 10;
    int nTrajectory = 1;
    int ReadFile = 0;

    AdaptiveStep stepper(psiIni, H, nL, L);
    Trajectory traj(psiIni, dt, stepper, &rndm);

    int move = 2;
    double delta = 0.01;
    int width = 2;
    double moveEps = 0.01;

    traj.sumExp(nOfOut, outlist, flist , dtsperStep, nOfSteps,
                nTrajectory, nTrajSave, ReadFile, move,
                delta, width, moveEps);
    ''').strip()
Example 110

Project: reikna
Source File: test_transformation.py
View license
def test_wrong_data_path():
    """
    Check that the error is thrown if the connector is a part of the signature,
    but this particular data path (input or output) is already hidden
    by a previously connected transformation.
    """

    side = 200
    coeff_dtype = numpy.float32
    arr_type = Type(numpy.complex64, (side, side))

    d = DummyAdvanced(arr_type, coeff_dtype)
    identity = tr_identity(d.parameter.C)

    d.parameter.C.connect(identity, identity.o1, C_in=identity.i1)
    d.parameter.D.connect(identity, identity.i1, D_out=identity.o1)

    # everything after the leading C/C_out parameter is shared between
    # the two signature checks below
    common_tail = [
        Parameter('C_in', Annotation(arr_type, 'i')),
        Parameter('D_out', Annotation(arr_type, 'o')),
        Parameter('D', Annotation(arr_type, 'i')),
        Parameter('coeff1', Annotation(coeff_dtype)),
        Parameter('coeff2', Annotation(coeff_dtype)),
    ]
    assert list(d.signature.parameters.values()) == (
        [Parameter('C', Annotation(arr_type, 'o'))] + common_tail)

    # Now input to C is hidden by the previously connected transformation
    with pytest.raises(ValueError):
        d.parameter.C.connect(identity, identity.o1, C_in_prime=identity.i1)

    # Same goes for D
    with pytest.raises(ValueError):
        d.parameter.D.connect(identity, identity.i1, D_out_prime=identity.o1)

    # Also we cannot make one of the transformation outputs an existing output parameter
    with pytest.raises(ValueError):
        d.parameter.C.connect(identity, identity.i1, D_out=identity.o1)

    # Output of C is still available though
    d.parameter.C.connect(identity, identity.i1, C_out=identity.o1)
    assert list(d.signature.parameters.values()) == (
        [Parameter('C_out', Annotation(arr_type, 'o'))] + common_tail)

Example 111

Project: overloading.py
Source File: test_overloading.py
View license
@requires_typing
def test_typing_tuple():
    """Overload resolution on typing.Tuple annotations: fixed-length,
    homogeneous variadic (``Tuple[T, ...]``) and catch-all ``Tuple``."""
    # NOTE(review): a, b, c, x and rounds come from module scope —
    # presumably sample values of known/unknown types; confirm in the
    # enclosing test module.

    @overloaded
    def f(arg: Tuple[int, str]):
        return int, str

    # a single fixed-length Tuple overload needs no complex dispatch
    assert f.__complex_positions == {}
    assert f.__complex_parameters == {}

    @overloads(f)
    def f(arg: Tuple[str, int]):
        return str, int

    # a second Tuple overload forces complex dispatch on position 0 / 'arg'
    assert f.__complex_positions == {0: 8}
    assert f.__complex_parameters == {'arg': 8}

    for _ in range(rounds):
        assert f((1, b)) == (int, str)
        assert f((a, 2)) == (str, int)
        # tuples matching neither signature are rejected
        with pytest.raises(TypeError):
            f((1, 2))
        with pytest.raises(TypeError):
            f(())

    @overloads(f)
    def f(arg: Tuple):
        return ()

    # the bare-Tuple overload now catches everything the others reject
    for _ in range(rounds):
        assert f((1, b)) == (int, str)
        assert f((a, 2)) == (str, int)
        assert f((1, 2)) == ()
        assert f(())     == ()

    @overloaded
    def f(arg: Tuple[int, ...]):
        return int

    @overloads(f)
    def f(arg: Tuple[str, ...]):
        return str

    # homogeneous variadic tuples dispatch on the element type
    for _ in range(rounds):
        assert f((1, 2, 3)) == int
        assert f((a, b, c)) == str
        with pytest.raises(TypeError):
            f((x, 2, 3))
        if overloading.DEBUG:
            with pytest.raises(AssertionError):
                f(())

    @overloads(f)
    def f(arg: Tuple):
        return ()

    for _ in range(rounds):
        assert f((1, 2, 3)) == int
        assert f((a, b, c)) == str
        assert f((x, 2, 3)) == ()
        if overloading.DEBUG:
            with pytest.raises(AssertionError):
                f(())

Example 112

Project: crosscat
Source File: test_pred_prob.py
View license
def test_predictive_probability_unobserved(seed=0):
    # This function tests the predictive probability for the joint distribution.
    # Throughout, we will check that the result is the same for the joint and
    # simple calls.  Queries Q and constraints Y are lists of
    # (row, column, value) triples; row index N_ROWS refers to an
    # unobserved (hypothetical) row.
    T, M_r, M_c, X_L, X_D, engine = quick_le(seed)

    # Hypothetical column number should throw an error.
    Q = [(N_ROWS, 1, 1.5), (N_ROWS, 10, 2)]
    Y = []
    with pytest.raises(ValueError):
        vals = engine.predictive_probability(M_c, X_L, X_D, Y, Q)

    # Inconsistent row numbers should throw an error.
    Q = [(N_ROWS, 1, 1.5), (N_ROWS-1, 10, 2)]
    Y = []
    with pytest.raises(ValueError):
        vals = engine.predictive_probability(M_c, X_L, X_D, Y, Q)

    # Duplicate column numbers should throw an error.
    Q = [(N_ROWS, 1, 1.5), (N_ROWS, 1, 2)]
    Y = []
    with pytest.raises(ValueError):
        val = engine.predictive_probability(M_c, X_L, X_D, Y, Q)

    # Different row numbers should throw an error.
    Q = [(N_ROWS, 0, 1.5), (N_ROWS+1, 1, 2)]
    Y = [(N_ROWS, 1, 1.5), (N_ROWS, 2, 3)]
    with pytest.raises(Exception):
        val = engine.predictive_probability(M_c, X_L, X_D, Y, Q[0])

    # Inconsistent with constraints should be negative infinity.
    Q = [(N_ROWS, 1, 1.5), (N_ROWS, 0, 1.3)]
    Y = [(N_ROWS, 1, 1.6)]
    val = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    assert val == -float('inf')
    assert isinstance(val, float)

    # Consistent with constraints should be log(1) == 0.
    Q = [(N_ROWS, 0, 1.3)]
    Y = [(N_ROWS, 0, 1.3)]
    val = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    assert val == 0

    # Consistent with constraints should not impact other queries:
    # dropping the already-constrained query term leaves the value unchanged.
    Q = [(N_ROWS, 1, 1.5), (N_ROWS, 0, 1.3)]
    Y = [(N_ROWS, 1, 1.5), (N_ROWS, 2, 3)]
    val_0 = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    val_1 = engine.predictive_probability(M_c, X_L, X_D, Y, Q[1:])
    assert val_0 == val_1

    # Predictive and simple should be the same in univariate case (cont).
    Q = [(N_ROWS, 0, 0.5)]
    Y = [(0, 0, 1), (N_ROWS//2, 4, 5), (N_ROWS, 1, 0.5), (N_ROWS+1, 0, 1.2)]
    val_0 = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    val_1 = engine.simple_predictive_probability(M_c, X_L, X_D, Y, Q)
    assert val_0 == val_1

    # Predictive and simple should be the same in univariate case (disc).
    Q = [(N_ROWS, 2, 1)]
    Y = [(0, 0, 1), (N_ROWS//2, 4, 5), (N_ROWS, 1, 0.5), (N_ROWS+1, 0, 1.2)]
    val_0 = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    val_1 = engine.simple_predictive_probability(M_c, X_L, X_D, Y, Q)
    assert val_0 == val_1

    # Do some full joint queries, all on the same row.
    Q = [(N_ROWS, 3, 4), (N_ROWS, 4, 1.3)]
    Y = [(N_ROWS, 0, 1), (N_ROWS, 1, -0.7), (N_ROWS, 2, 3)]
    val = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    assert isinstance(val, float)

    Q = [(N_ROWS, 0, 1), (N_ROWS, 1, -0.7), (N_ROWS, 2, 3)]
    Y = [(N_ROWS, 3, 4), (N_ROWS, 4, 1.3)]
    val = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    assert isinstance(val, float)

Example 113

Project: arctic
Source File: test_version_store.py
View license
def test_prunes_previous_version_append_interaction(library):
    """Pruning on write removes only versions outside the retention window.

    Six versions of ``symbol`` are written (ts4/ts5/ts6 alias ts/ts2/ts3, so
    the data cycles), each under a mocked ``bson.ObjectId`` whose embedded
    timestamp advances one minute at a time starting ~130 minutes in the
    past.  The first five writes use ``prune_previous_version=False`` and
    every prior version must stay readable via ``as_of``.  The final write
    prunes: versions 1-3 (old enough to fall outside the pruning horizon)
    must raise NoDataFoundException, while version 5 and the new latest
    remain readable.
    """
    ts = ts1
    # ts2 extends ts1 by two daily rows; ts3 extends ts (not ts2), so the
    # version history is not a simple linear chain of appends.
    ts2 = ts1.append(pd.DataFrame(index=[ts.index[-1] + dtd(days=1),
                                         ts.index[-1] + dtd(days=2), ],
                                  data=[3.7, 3.8],
                                  columns=['near']))
    ts2.index.name = ts1.index.name
    ts3 = ts.append(pd.DataFrame(index=[ts2.index[-1] + dtd(days=1),
                                        ts2.index[-1] + dtd(days=2)],
                                 data=[4.8, 4.9],
                                 columns=['near']))
    ts3.index.name = ts1.index.name
    # Versions 4-6 re-use the same frames as versions 1-3.
    ts4 = ts
    ts5 = ts2
    ts6 = ts3
    now = dt.utcnow()
    # Patch bson.ObjectId so each version id carries a controlled creation
    # time; from_datetime is passed through so the store can still build
    # real ids internally.
    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=130)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=129)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts2, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=128)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts3, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol, as_of=2).data)
    assert_frame_equal(ts3, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=127)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts4, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol, as_of=2).data)
    assert_frame_equal(ts3, library.read(symbol, as_of=3).data)
    assert_frame_equal(ts4, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=126)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts5, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol, as_of=2).data)
    assert_frame_equal(ts3, library.read(symbol, as_of=3).data)
    assert_frame_equal(ts4, library.read(symbol, as_of=4).data)
    assert_frame_equal(ts5, library.read(symbol).data)

    # The final write (timestamped "now") prunes previous versions.
    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts6, prune_previous_version=True)

    # Versions 1-3 were old enough to be pruned...
    with pytest.raises(NoDataFoundException):
        library.read(symbol, as_of=1)
    with pytest.raises(NoDataFoundException):
        library.read(symbol, as_of=2)
    with pytest.raises(NoDataFoundException):
        library.read(symbol, as_of=3)
    # ...while the recent version 5 and the new latest survive.
    assert_frame_equal(ts5, library.read(symbol, as_of=5).data)
    assert_frame_equal(ts6, library.read(symbol).data)

Example 114

Project: setuptools
Source File: test_resources.py
View license
    def testResolve(self):
        """End-to-end behaviour of ``WorkingSet.resolve``:

        - resolving an empty requirement list yields nothing;
        - unknown projects raise ``DistributionNotFound``;
        - available projects resolve to the matching distribution;
        - conflicts with already-activated versions raise ``VersionConflict``;
        - extras pull in their own dependencies, which must also resolve.
        """
        ad = pkg_resources.Environment([])
        ws = WorkingSet([])
        # Resolving no requirements -> nothing to install
        assert list(ws.resolve([], ad)) == []
        # Request something not in the collection -> DistributionNotFound
        with pytest.raises(pkg_resources.DistributionNotFound):
            ws.resolve(parse_requirements("Foo"), ad)

        # Foo 1.2 declares an extra "bar" that requires Baz>=2.0.
        Foo = Distribution.from_filename(
            "/foo_dir/Foo-1.2.egg",
            metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
        )
        ad.add(Foo)
        ad.add(Distribution.from_filename("Foo-0.9.egg"))

        # Request thing(s) that are available -> list to activate
        for i in range(3):
            targets = list(ws.resolve(parse_requirements("Foo"), ad))
            assert targets == [Foo]
            list(map(ws.add, targets))
        # Foo 1.2 was activated above, so asking for ==0.9 now conflicts.
        with pytest.raises(VersionConflict):
            ws.resolve(parse_requirements("Foo==0.9"), ad)
        ws = WorkingSet([])  # reset

        # Request an extra that causes an unresolved dependency for "Baz"
        with pytest.raises(pkg_resources.DistributionNotFound):
            ws.resolve(parse_requirements("Foo[bar]"), ad)
        Baz = Distribution.from_filename(
            "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
        )
        ad.add(Baz)

        # Activation list now includes resolved dependency
        assert list(ws.resolve(parse_requirements("Foo[bar]"), ad)) == [Foo, Baz]
        # Requests for conflicting versions produce VersionConflict
        with pytest.raises(VersionConflict) as vc:
            ws.resolve(parse_requirements("Foo==1.2\nFoo!=1.2"), ad)

        # report() renders a human-readable summary of the conflict.
        msg = 'Foo 0.9 is installed but Foo==1.2 is required'
        assert vc.value.report() == msg

Example 115

Project: mongokat
Source File: test_api.py
View license
def test_document_common_methods(Sample):
  """Exercise the common CRUD surface of a mongokat document class.

  Covers: instantiation without persistence, save()/insert(), the
  find/find_one/find_by_id/exists lookup styles, column iteration,
  partial fetch & save semantics, reload(), and the local-only update().
  """

  from bson import ObjectId
  # collections.Iterable was deprecated in Python 3.3 and removed in 3.10;
  # import the ABC from its canonical location with a Python 2 fallback.
  try:
    from collections.abc import Iterable
  except ImportError:  # pragma: no cover - Python 2
    from collections import Iterable

  assert Sample.collection.find().count() == 0

  # Instanciate
  new_object = Sample({"name": "XXX", "url": "http://example.com"})

  # Should not save to DB yet.
  assert Sample.collection.find().count() == 0

  # Now save()
  new_object.save()

  # Once the object is in DB, we can't do it anymore.
  with pytest.raises(Exception):
    new_object.save()

  assert type(new_object["_id"]) == ObjectId

  assert Sample.collection.find().count() == 1
  db_object = Sample.collection.find_one()
  assert type(db_object) == dict
  assert db_object["name"] == "XXX"

  # test insert()
  inserted_object = Sample.insert({"name": "ZZZ", "url": "http://example2.com", "stats": {"nb_of_products": 2}})
  assert type(inserted_object) == ObjectId

  assert Sample.collection.find().count() == 2

  # Find back with different methods
  orm_object = Sample.find_by_id(db_object["_id"])
  assert orm_object["name"] == "XXX"
  orm_object = Sample.find_by_id(str(db_object["_id"]))
  assert orm_object["name"] == "XXX"
  orm_object = Sample.find_by_id({"_id": db_object["_id"]})
  assert orm_object["name"] == "XXX"
  orm_object = Sample.find_by_id({"_id": str(db_object["_id"])})
  assert orm_object["name"] == "XXX"
  assert isinstance(orm_object, sample_models.SampleDocument)

  # exists()
  assert Sample.exists({"name": "XXX"})

  # Other find styles
  cursor = Sample.find({"name": "XXX"})
  assert "cursor" in str(type(cursor)).lower()
  orm_objects = list(cursor)
  assert len(orm_objects) == 1
  assert isinstance(orm_objects[0], sample_models.SampleDocument)
  assert orm_objects[0]["name"] == "XXX"

  orm_object = Sample.find_one({"_id": db_object["_id"]})
  assert orm_object["name"] == "XXX"
  assert isinstance(orm_object, sample_models.SampleDocument)

  # TODO - should that not work?
  orm_object = Sample.find_one({"_id": str(db_object["_id"])})
  assert orm_object is None

  col_cursor = Sample.iter_column({"name": "XXX"})
  assert isinstance(col_cursor, Iterable)
  assert list(col_cursor) == [new_object["_id"]]

  col = Sample.list_column({"name": "XXX"}, field="name")
  assert col == ["XXX"]
  # dotted paths descend into sub-documents
  col = Sample.list_column({"name": "ZZZ"}, field="stats.nb_of_products")
  assert col == [2]

  with pytest.raises(KeyError):
    Sample.list_column({"name": "ZZZ"}, field="inexistent_field")

  # We should be able to fetch & save partial objects.
  orm_object = Sample.find_by_id(db_object["_id"], fields=["url"])
  assert list(dict(orm_object).keys()) == ["url"]
  assert dict(orm_object)["url"] == "http://example.com"

  # If we save() that, it will create a new object because we lack an _id :(
  with pytest.raises(Exception):
    orm_object.save()

  assert Sample.collection.find().count() == 2

  # FIXME not anymore as we are requesting _id for each query
  # orm_object.save(force=True)

  # assert Sample.collection.find().count() == 3

  orm_object = Sample.find_by_id(db_object["_id"], fields=["url", "_id"])
  assert dict(orm_object) == {"url": "http://example.com", "_id": db_object["_id"]}

  # Change the data a bit and save.
  # This would remove "name" from the doc.
  orm_object["url"] = "http://other.example.com"

  # Not authorized!
  with pytest.raises(Exception):
    orm_object.save()

  assert Sample.collection.find().count() == 2
  db_object = Sample.collection.find_one({"_id": db_object["_id"]})
  assert "name" in db_object

  # force=True allows the destructive full overwrite.
  orm_object.save(force=True)

  # Should not add anything new
  assert Sample.collection.find().count() == 2

  db_object = Sample.collection.find_one({"_id": db_object["_id"]})
  assert "name" not in db_object
  assert db_object["url"] == "http://other.example.com"

  orm_object = Sample.find_by_id(db_object["_id"], fields=["_id"])
  orm_object["name"] = "YYY"

  # This one should not overwrite unset fields.
  orm_object.save_partial()

  db_object = Sample.collection.find_one({"_id": db_object["_id"]})
  assert db_object["name"] == "YYY"
  assert db_object["url"] == "http://other.example.com"

  # Test the reload() method by changing the data from somewhere else
  Sample.collection.update({"_id": db_object["_id"]}, {"$set": {"name": "AAA"}})

  assert orm_object["name"] == "YYY"

  orm_object.reload()

  assert orm_object["name"] == "AAA"

  # Test .update() - local dict update()
  orm_object.update({"name": "BBB"})

  assert orm_object["name"] == "BBB"

  # Should not have changed the DB
  db_object = Sample.collection.find_one({"_id": db_object["_id"]})
  assert db_object["name"] == "AAA"

Example 116

Project: datashape
Source File: test_testing.py
View license
def test_record():
    """assert_dshape_equal on record dshapes.

    Equal records compare clean; mismatched field types or names raise
    AssertionError whose message pinpoints the offending field path, and
    ``check_record_order=False`` matches fields by name instead of
    position (including in nested records).
    """
    # identical records compare equal
    assert_dshape_equal(
        R['a': int32, 'b': float32],
        R['a': int32, 'b': float32],
    )

    # mismatched field type: message names the types and the field path
    with pytest.raises(AssertionError) as e:
        assert_dshape_equal(
            R['a': int32, 'b': float32],
            R['a': int32, 'b': int32],
        )
    # NOTE: use str(e.value) consistently (str(e) renders the ExceptionInfo,
    # not the exception itself).
    assert "'float32' != 'int32'" in str(e.value)
    assert "_['b'].name" in str(e.value)

    # mismatched field name
    with pytest.raises(AssertionError) as e:
        assert_dshape_equal(
            R['a': int32, 'b': float32],
            R['a': int32, 'c': float32],
        )
    assert "'b' != 'c'" in str(e.value)

    # order-insensitive comparison still checks the per-name types
    with pytest.raises(AssertionError) as e:
        assert_dshape_equal(
            R['b': float32, 'a': float32],
            R['a': int32, 'b': float32],
            check_record_order=False,
        )
    assert "'float32' != 'int32'" in str(e.value)
    assert "_['a']" in str(e.value)

    assert_dshape_equal(
        R['b': float32, 'a': int32],
        R['a': int32, 'b': float32],
        check_record_order=False,
    )

    # check a nested record with and without ordering
    assert_dshape_equal(
        R['a': R['b': float32, 'a': int32]],
        R['a': R['a': int32, 'b': float32]],
        check_record_order=False,
    )

    with pytest.raises(AssertionError) as e:
        assert_dshape_equal(
            R['a': R['a': int32, 'b': float32]],
            R['a': R['b': float32, 'a': int32]],
        )

    assert "'a' != 'b'" in str(e.value)
    assert "_['a']" in str(e.value)

Example 117

Project: flask-restaction
Source File: test_api.py
View license
def test_authorize(tmpdir):
    """Role-based authorization against the resource/method table in the
    metafile: allowed (role, method) pairs pass, unknown routes raise
    ValueError, and known-but-forbidden methods raise Forbidden.
    """
    metafile = tmpdir.join("meta.json")
    roles = {
        "$roles": {
            "admin": {
                "hello": ["get", "post"]
            },
            "guest": {
                "hello": ["post"]
            }
        }
    }
    json.dump(roles, metafile.open("w"))

    app = Flask(__name__)
    api = Api(app, metafile=metafile.strpath)

    class Hello:

        def get(self):
            pass

        def post(self):
            pass
    api.add_resource(Hello)

    # (path, http method, role, expected exception or None for success)
    cases = [
        ("/hello", "GET", "admin", None),
        ("/hello", "POST", "admin", None),
        ("/hello", "PUT", "admin", ValueError),        # method not routed
        ("/hello/world", "GET", "admin", ValueError),  # unknown sub-path
        ("/helloworld", "POST", "admin", ValueError),  # unknown resource
        ("/helo", "PUT", "admin", ValueError),         # unknown resource
        ("/hello", "GET", "guest", Forbidden),         # routed, not granted
        ("/hello", "POST", "guest", None),
        ("/hello", "PUT", "guest", ValueError),        # method not routed
    ]
    for path, method, role, expected in cases:
        with app.test_request_context(path, method=method):
            if expected is None:
                api.authorize(role)
            else:
                with pytest.raises(expected):
                    api.authorize(role)

Example 118

Project: bolt
Source File: generic.py
View license
def map_suite(arr, b):
    """
    A set of tests for the map operator.

    Parameters
    ----------
    arr: `ndarray`
        The dense array used in the construction of `b`
        (used to check results)
    b: `BoltArray`
        The BoltArray to be used for testing
    """

    from numpy import ones
    import random
    random.seed(42)

    double = lambda x: x * 2

    # without an axis argument, map is an element-wise operation
    assert allclose(b.map(double).toarray(), arr * 2)

    # mapping over a given axis (or axes) permutes the result so the
    # mapped axes come first; each case pairs an axis spec with the
    # permutation the output should carry.
    axis_cases = [
        (0, (0, 1, 2)),
        (1, (1, 0, 2)),
        ((1, 2), (1, 2, 0)),
        ((0, 2), (0, 2, 1)),
        (2, (2, 0, 1)),
    ]
    for axis, perm in axis_cases:
        out = b.map(double, axis=axis).toarray()
        assert allclose(out, (arr * 2).transpose(*perm))

    # maps may reshape each element, as long as they do so consistently
    to_tens = lambda x: ones(10)
    assert b.map(to_tens, axis=0).toarray().shape == (arr.shape[0], 10)

    # the result shape depends on which axes were mapped over
    shaped = b.map(to_tens, axis=(0, 1)).toarray()
    assert shaped.shape == (arr.shape[0], arr.shape[1], 10)

    # a map that is not applied uniformly must produce an error
    with pytest.raises(Exception):
        def nonuniform_map(x):
            random.seed(x.tostring())
            return random.random()
        unstable = lambda x: ones(10) if nonuniform_map(x) < 0.5 else ones(5)
        b.map(unstable).toarray()

    # dtype changes are tracked correctly (spark mode only)
    if b.mode == 'spark':
        to_f32 = lambda x: x.astype('float32')
        assert b.map(to_f32, axis=0).dtype == dtype('float32')
        assert b.map(to_f32, axis=0, dtype=dtype('float32')).dtype == dtype('float32')
Example 119

Project: attrs
Source File: test_slots.py
View license
def test_bare_inheritance_from_slots():
    """
    Inheriting from a bare attr slot class works.
    """
    @attr.s(init=False, cmp=False, hash=False, repr=False)
    class C1Bare(object):
        x = attr.ib(validator=attr.validators.instance_of(int))
        y = attr.ib()

        def method(self):
            return self.x

        @classmethod
        def classmethod(cls):
            return "clsmethod"

        @staticmethod
        def staticmethod():
            return "staticmethod"

    @attr.s(init=False, cmp=False, hash=False, repr=False, slots=True)
    class C1BareSlots(object):
        x = attr.ib(validator=attr.validators.instance_of(int))
        y = attr.ib()

        def method(self):
            return self.x

        @classmethod
        def classmethod(cls):
            return "clsmethod"

        @staticmethod
        def staticmethod():
            return "staticmethod"

    @attr.s(slots=True)
    class C2Slots(C1BareSlots):
        z = attr.ib()

    @attr.s(slots=True)
    class C2(C1Bare):
        z = attr.ib()

    inst = C2Slots(x=1, y=2, z="test")

    # attribute values and all inherited method flavours resolve
    assert inst.x == 1
    assert inst.y == 2
    assert inst.z == "test"
    assert inst.method() == 1
    assert inst.classmethod() == "clsmethod"
    assert inst.staticmethod() == "staticmethod"

    # the derived class is genuinely slotted: no ad-hoc attributes allowed
    with pytest.raises(AttributeError):
        inst.t = "test"

    plain = C2(x=1, y=2, z="test")
    if has_pympler:
        assert asizeof(inst) < asizeof(plain)

    # ordering, equality, repr, hashing and asdict all still behave
    bigger = C2Slots(x=1, y=3, z="test")
    assert bigger > inst
    twin = C2Slots(x=1, y=2, z="test")
    assert inst == twin

    assert repr(inst) == "C2Slots(x=1, y=2, z='test')"

    hash(inst)  # Just to assert it doesn't raise.

    assert attr.asdict(inst) == {"x": 1, "y": 2, "z": "test"}

Example 120

Project: cryptography
Source File: test_dsa.py
View license
    def test_invalid_dsa_private_key_arguments(self, backend):
        """private_key() must reject DSA numbers with any out-of-range or
        mis-typed component.

        Each check below overrides exactly one component (p/q/g/x/y) of an
        otherwise valid generated key and asserts the backend raises.  The
        original repeated 12-line constructor stanza is factored into one
        helper.
        """
        _UNSET = object()  # sentinel: None itself is a test value for x/y

        def assert_rejected(base, exc=ValueError,
                            p=_UNSET, q=_UNSET, g=_UNSET, y=_UNSET, x=_UNSET):
            # Rebuild DSAPrivateNumbers from `base`, substituting only the
            # components explicitly supplied, and assert private_key()
            # raises `exc`.
            params = base.public_numbers.parameter_numbers
            with pytest.raises(exc):
                dsa.DSAPrivateNumbers(
                    public_numbers=dsa.DSAPublicNumbers(
                        parameter_numbers=dsa.DSAParameterNumbers(
                            p=params.p if p is _UNSET else p,
                            q=params.q if q is _UNSET else q,
                            g=params.g if g is _UNSET else g,
                        ),
                        y=base.public_numbers.y if y is _UNSET else y,
                    ),
                    x=base.x if x is _UNSET else x,
                ).private_key(backend)

        # p shorter (or longer) than the advertised key sizes
        assert_rejected(DSA_KEY_1024, p=2 ** 1000)   # p < 1024 bits
        assert_rejected(DSA_KEY_2048, p=2 ** 2000)   # p < 2048 bits
        assert_rejected(DSA_KEY_3072, p=2 ** 3000)   # p < 3072 bits
        assert_rejected(DSA_KEY_3072, p=2 ** 3100)   # p > 3072 bits

        # q outside the allowed subgroup sizes
        assert_rejected(DSA_KEY_1024, q=2 ** 150)    # q < 160 bits
        assert_rejected(DSA_KEY_2048, q=2 ** 250)    # q < 256 bits
        assert_rejected(DSA_KEY_3072, q=2 ** 260)    # q > 256 bits

        # generator out of range
        assert_rejected(DSA_KEY_1024, g=0)           # g < 1
        assert_rejected(DSA_KEY_1024, g=1)           # g == 1
        assert_rejected(DSA_KEY_1024, g=2 ** 1200)   # g > p

        # private value x outside (0, q)
        assert_rejected(DSA_KEY_1024, x=0)
        assert_rejected(DSA_KEY_1024, x=-2)
        assert_rejected(DSA_KEY_1024, x=2 ** 159)    # x == q
        assert_rejected(DSA_KEY_1024, x=2 ** 200)    # x > q

        # public value inconsistent with the key: y != (g ** x) % p
        assert_rejected(DSA_KEY_1024, y=2 ** 100)

        # non-integer y and x values are a TypeError, not a ValueError
        assert_rejected(DSA_KEY_1024, y=None, exc=TypeError)
        assert_rejected(DSA_KEY_1024, x=None, exc=TypeError)

Example 121

Project: imageio
Source File: test_core.py
View license
def test_functions():
    """ Test the user-facing API functions: help/read/save, the im*/mim*
    single- and multi-image helpers, and the vol*/mvol* volume helpers,
    including the expected failure modes for bad files and bad arrays.
    """
    
    # Test help(), it prints stuff, so we just check whether that goes ok
    imageio.help()  # should print overview
    imageio.help('PNG')  # should print about PNG
    
    fname1 = get_remote_file('images/chelsea.png', test_dir)
    fname2 = fname1[:-3] + 'jpg'
    # fname3 is first an empty file with an unrecognized extension
    # (re-bound to a gif further below).
    fname3 = fname1[:-3] + 'notavalidext'
    open(fname3, 'wb')
    
    # Test read()
    R1 = imageio.read(fname1)
    R2 = imageio.read(fname1, 'png')
    assert R1.format is R2.format
    # Fail
    raises(ValueError, imageio.read, fname3)  # existing but not readable
    raises(IOError, imageio.read, 'notexisting.barf')
    raises(IndexError, imageio.read, fname1, 'notexistingformat')
    
    # Test save()
    W1 = imageio.save(fname2)
    W2 = imageio.save(fname2, 'JPG')
    assert W1.format is W2.format
    # Fail
    raises(IOError, imageio.save, '~/dirdoesnotexist/wtf.notexistingfile')
    
    # Test imread()
    im1 = imageio.imread(fname1)
    im2 = imageio.imread(fname1, 'png')
    assert im1.shape[2] == 3
    assert np.all(im1 == im2)
    
    # Test imsave()
    if os.path.isfile(fname2):
        os.remove(fname2)
    assert not os.path.isfile(fname2)
    imageio.imsave(fname2, im1[:, :, 0])  # grayscale write
    imageio.imsave(fname2, im1)  # color write
    assert os.path.isfile(fname2)
    
    # Test mimread()
    fname3 = get_remote_file('images/newtonscradle.gif', test_dir)
    ims = imageio.mimread(fname3)
    assert isinstance(ims, list)
    assert len(ims) > 1
    assert ims[0].ndim == 3
    assert ims[0].shape[2] in (1, 3, 4)
    # Test protection (refuse to read unbounded streams into memory)
    with raises(RuntimeError):
        imageio.mimread('chelsea.png', 'dummy', length=np.inf)
    
    if IS_PYPY:
        return  # no support for npz format :(
    
    # Test mimsave()
    fname5 = fname3[:-4] + '2.npz'
    if os.path.isfile(fname5):
        os.remove(fname5)
    assert not os.path.isfile(fname5)
    imageio.mimsave(fname5, [im[:, :, 0] for im in ims])  # grayscale frames
    imageio.mimsave(fname5, ims)  # color frames
    assert os.path.isfile(fname5)
    
    # Test volread()
    fname4 = get_remote_file('images/stent.npz', test_dir)
    vol = imageio.volread(fname4)
    assert vol.ndim == 3
    assert vol.shape[0] == 256
    assert vol.shape[1] == 128
    assert vol.shape[2] == 128
    
    # Test volsave()
    volc = np.zeros((10, 10, 10, 3), np.uint8)  # color volume
    fname6 = fname4[:-4] + '2.npz'
    if os.path.isfile(fname6):
        os.remove(fname6)
    assert not os.path.isfile(fname6)
    imageio.volsave(fname6, volc)
    imageio.volsave(fname6, vol)
    assert os.path.isfile(fname6)
    
    # Test mvolread()
    vols = imageio.mvolread(fname4)
    assert isinstance(vols, list)
    assert len(vols) == 1
    assert vols[0].shape == vol.shape
    
    # Test mvolsave()
    if os.path.isfile(fname6):
        os.remove(fname6)
    assert not os.path.isfile(fname6)
    imageio.mvolsave(fname6, [volc, volc])
    imageio.mvolsave(fname6, vols)
    assert os.path.isfile(fname6)
    
    # Fail for save functions (wrong dimensionality / non-array input)
    raises(ValueError, imageio.imsave, fname2, np.zeros((100, 100, 5)))
    raises(ValueError, imageio.imsave, fname2, 42)
    raises(ValueError, imageio.mimsave, fname5, [np.zeros((100, 100, 5))])
    raises(ValueError, imageio.mimsave, fname5, [42])
    raises(ValueError, imageio.volsave, fname4, np.zeros((100, 100, 100, 40)))
    raises(ValueError, imageio.volsave, fname4, 42)
    raises(ValueError, imageio.mvolsave, fname4, [np.zeros((90, 90, 90, 40))])
    raises(ValueError, imageio.mvolsave, fname4, [42])
Example 122

Project: imageio
Source File: test_swf.py
View license
def test_reading_saving():
    """Round-trip SWF files: read, seek, write (plain and compressed), re-read."""

    need_internet()

    swf_orig = get_remote_file('images/stent.swf', test_dir)
    swf_out = swf_orig[:-4] + '.out.swf'
    swf_compressed = swf_orig[:-4] + '.compressed.swf'
    swf_out2 = swf_orig[:-4] + '.out2.swf'

    # Read all frames and sanity-check shape and content
    reader = imageio.read(swf_orig)
    assert len(reader) == 10
    assert reader.get_meta_data() == {}  # always empty dict for SWF
    frames = []
    for frame in reader:
        assert frame.shape == (657, 451, 4)
        assert mean(frame) > 0
        frames.append(frame)
    # Random access (seek)
    assert (reader.get_data(3) == frames[3]).all()
    # Out-of-range indices must fail
    raises(IndexError, reader.get_data, -1)  # No negative index
    raises(IndexError, reader.get_data, 10)  # Out of bounds
    reader.close()

    # With loop=True an out-of-range index wraps around to the first frame
    reader = imageio.read(swf_orig, loop=True)
    assert (reader.get_data(10) == frames[0]).all()

    # Setting meta data is silently ignored by the SWF writer
    writer = imageio.save(swf_out)
    writer.set_meta_data({'foo': 3})
    writer.close()

    # Write and re-read, now without loop, and with an html page
    imageio.mimsave(swf_out, frames, loop=False, html=True)
    frames_reread = imageio.mimread(swf_out)

    # SWF is lossless, so an exact match is expected
    assert len(frames) == len(frames_reread)
    for a, b in zip(frames, frames_reread):
        assert (a == b).all()

    # Same round trip, but with compression enabled
    imageio.mimsave(swf_compressed, frames_reread, compress=True)
    frames_compressed = imageio.mimread(swf_compressed)
    assert len(frames) == len(frames_compressed)
    for a, b in zip(frames, frames_compressed):
        assert (a == b).all()

    # Bonus: exercise the conventional helpers (not officially supported)
    _swf = imageio.plugins.swf.load_lib()
    _swf.write_swf(swf_out2, frames)
    frames_conventional = _swf.read_swf(swf_out2)
    assert len(frames) == len(frames_conventional)
    for a, b in zip(frames, frames_conventional):
        assert (a == b).all()

    # We want to manually validate that this file plays in 3d party tools
    # So we write a small HTML5 doc that we can load
    html = """<!DOCTYPE html>
            <html>
            <body>
            
            Original:
            <embed src="%s">
            <br ><br >
            Written:
            <embed src="%s">
            <br ><br >
            Compressed:
            <embed src="%s">
            <br ><br >
            Written 2:
            <embed src="%s">
            </body>
            </html>
            """ % (swf_orig, swf_out, swf_compressed, swf_out2)

    with open(os.path.join(test_dir, 'test_swf.html'), 'wb') as f:
        for line in html.splitlines():
            f.write(line.strip().encode('utf-8') + b'\n')

Example 123

View license
    def test_create_party(self):
        """Create parties from submission data: single, repeated, and invalid."""
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # test without repeats
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        payload = {
            'party_name': 'Party One',
            'party_type': 'IN',
            'party_attributes_individual': {
                'fname': False,
                'fname_two': 'socks',
            },
            'party_photo': 'sad_birthday.png',
            'party_resource_invite': 'invitation.pdf',
        }

        created, resources = mh.create_party(mh(), payload, self.project)
        assert len(created) == 1
        party_one = Party.objects.get(name='Party One')
        assert party_one.type == 'IN'
        assert party_one.attributes == {'fname': False, 'fname_two': 'socks'}
        assert len(resources) == 1
        assert resources[0]['id'] == party_one.id
        assert len(resources[0]['resources']) == 2
        assert 'sad_birthday.png' in resources[0]['resources']
        assert 'invitation.pdf' in resources[0]['resources']
        assert party_one.project == self.project

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # test with repeats
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        payload = {
            'party_repeat': [{
                'party_name': 'Party Two',
                'party_type': 'IN',
                'party_attributes_individual': {
                    'fname': False,
                    'fname_two': 'socks',
                },
                'party_photo': 'sad_birthday.png',
                'party_resource_invite': 'invitation.pdf',
            }, {
                'party_name': 'Party Three',
                'party_type': 'GR',
                'party_attributes_group': {
                    'fname': True,
                    'fname_two': 'video games',
                },
                'party_photo': 'awesome_birthday.png',
                'party_resource_invite': 'invitation_two.pdf',
            }]
        }
        created, resources = mh.create_party(mh(), payload, self.project)
        assert len(created) == 2

        party_two = Party.objects.get(name='Party Two')
        assert party_two.type == 'IN'
        assert party_two.attributes == {'fname': False, 'fname_two': 'socks'}
        party_three = Party.objects.get(name='Party Three')
        assert party_three.type == 'GR'
        assert party_three.attributes == {
            'fname': True, 'fname_two': 'video games'}

        assert len(resources) == 2
        assert resources[0]['id'] == party_two.id
        assert len(resources[0]['resources']) == 2
        assert 'sad_birthday.png' in resources[0]['resources']
        assert 'invitation.pdf' in resources[0]['resources']
        assert party_two.project == self.project

        assert resources[1]['id'] == party_three.id
        assert len(resources[1]['resources']) == 2
        assert 'awesome_birthday.png' in resources[1]['resources']
        assert 'invitation_two.pdf' in resources[1]['resources']
        assert party_three.project == self.project

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # test failing (no 'party_name' key in the submission)
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        payload = {
            'party_nonsense': 'Blah blah blah',
            'party_type': 'IN',
            'party_attributes_individual': {
                'fname': False,
                'fname_two': 'socks',
            },
            'party_photo': 'sad_birthday.png',
            'party_resource_invite': 'invitation.pdf',
        }

        with pytest.raises(InvalidXMLSubmission):
            mh.create_party(mh(), payload, self.project)
        # the failing call must not have created any additional party
        assert Party.objects.count() == 3

Example 124

Project: brainiak
Source File: test_brsa.py
View license
def test_fit():
    """End-to-end test of BRSA.fit on simulated fMRI data.

    Simulates AR(1) noise plus task signal with a known shared covariance
    and an SNR field drawn from a Gaussian process, then checks that:

    * a design matrix that includes a baseline regressor is rejected,
    * fitted covariance / SNR / noise level / AR(1) estimates correlate
      with the simulated ground truth (with GP prior, with reduced rank
      and no GP prior, and with a spatial-only GP prior),
    * GP-related attributes are only set when a GP prior is requested.

    NOTE(review): all numeric checks depend on the exact np.random call
    sequence after seed(10); do not reorder the simulation statements.
    """
    from brainiak.reprsimil.brsa import BRSA
    import brainiak.utils.utils as utils
    import scipy.stats
    import numpy as np
    import os.path
    np.random.seed(10)  # fixed seed: every assertion below relies on this stream
    file_path = os.path.join(os.path.dirname(__file__), "example_design.1D")
    # Load an example design matrix
    design = utils.ReadDesign(fname=file_path)


    # concatenate it by 4 times, mimicking 4 runs of identical timing
    design.design_task = np.tile(design.design_task[:,:-1],[4,1])
    design.n_TR = design.n_TR * 4

    # start simulating some data
    n_V = 200  # number of voxels
    n_C = np.size(design.design_task,axis=1)  # number of task conditions
    n_T = design.n_TR  # number of time points

    noise_bot = 0.5
    noise_top = 1.5
    noise_level = np.random.rand(n_V)*(noise_top-noise_bot)+noise_bot
    # noise level is random, uniform in [noise_bot, noise_top] per voxel

    # AR(1) coefficient, uniform in [rho1_bot, rho1_top] per voxel
    rho1_top = 0.8
    rho1_bot = -0.2
    rho1 = np.random.rand(n_V)*(rho1_top-rho1_bot)+rho1_bot

    # generating noise
    noise = np.zeros([n_T,n_V])
    # first sample is drawn from the stationary distribution of the AR(1) process
    noise[0,:] = np.random.randn(n_V) * noise_level / np.sqrt(1-rho1**2)
    for i_t in range(1,n_T):
        noise[i_t,:] = noise[i_t-1,:] * rho1 +  np.random.randn(n_V) * noise_level

    noise = noise + np.random.rand(n_V)
    # Random baseline added per voxel

    # ideal covariance matrix: two correlated condition clusters (0-3 and 5-8)
    ideal_cov = np.zeros([n_C,n_C])
    ideal_cov = np.eye(n_C)*0.6
    ideal_cov[0:4,0:4] = 0.2
    for cond in range(0,4):
        ideal_cov[cond,cond] = 2
    ideal_cov[5:9,5:9] = 0.9
    for cond in range(5,9):
        ideal_cov[cond,cond] = 1
    idx = np.where(np.sum(np.abs(ideal_cov),axis=0)>0)[0]
    L_full = np.linalg.cholesky(ideal_cov)

    # generating signal
    snr_level = 5.0  # test with high SNR
    # snr = np.random.rand(n_V)*(snr_top-snr_bot)+snr_bot
    # Notice that accurately speaking this is not snr. the magnitude of signal depends
    # not only on beta but also on x.
    inten = np.random.randn(n_V) * 20.0  # pseudo image intensity per voxel

    # parameters of Gaussian process used to generate pseudo SNR
    tau = 0.8
    smooth_width = 5.0
    inten_kernel = 1.0

    coords = np.arange(0,n_V)[:,None]  # 1-D voxel coordinates, shape (n_V, 1)

    dist2 = np.square(coords-coords.T)  # pairwise squared spatial distances

    inten_tile = np.tile(inten,[n_V,1])
    inten_diff2 = (inten_tile-inten_tile.T)**2  # pairwise squared intensity differences

    # squared-exponential kernel over space and intensity; the small diagonal
    # jitter keeps the Cholesky factorization numerically stable
    K = np.exp(-dist2/smooth_width**2/2.0 -inten_diff2/inten_kernel**2/2.0) * tau**2 + np.eye(n_V)*tau**2*0.001

    L = np.linalg.cholesky(K)
    snr = np.exp(np.dot(L,np.random.randn(n_V))) * snr_level  # log-Gaussian SNR field
    sqrt_v = noise_level*snr
    betas_simulated = np.dot(L_full,np.random.randn(n_C,n_V)) * sqrt_v
    signal = np.dot(design.design_task,betas_simulated)

    # Adding noise to signal as data
    Y = signal + noise


    scan_onsets = np.linspace(0,design.n_TR,num=5)  # 4 runs -> 5 boundaries


    # Test fitting with GP prior.
    brsa = BRSA(GP_space=True,GP_inten=True,verbose=False,n_iter = 200,auto_nuisance=False)

    # We also test that it can detect baseline regressor included in the design matrix for task conditions
    wrong_design = np.insert(design.design_task, 0, 1, axis=1)
    with pytest.raises(ValueError) as excinfo:
        brsa.fit(X=Y, design=wrong_design, scan_onsets=scan_onsets,
             coords=coords, inten=inten)
    assert 'Your design matrix appears to have included baseline time series.' in str(excinfo.value)
    # Now we fit with the correct design matrix.
    brsa.fit(X=Y, design=design.design_task, scan_onsets=scan_onsets,
             coords=coords, inten=inten)

    # Check that result is significantly correlated with the ideal covariance matrix
    u_b = brsa.U_
    u_i = ideal_cov
    p = scipy.stats.spearmanr(u_b[np.tril_indices_from(u_b)],
                              u_i[np.tril_indices_from(u_i)])[1]
    assert p < 0.01, "Fitted covariance matrix does not correlate with ideal covariance matrix!"
    # check that the recovered SNRs make sense
    p = scipy.stats.pearsonr(brsa.nSNR_,snr)[1]
    assert p < 0.01, "Fitted SNR does not correlate with simulated SNR!"
    assert np.isclose(np.mean(np.log(brsa.nSNR_)),0), "nSNR_ not normalized!"
    p = scipy.stats.pearsonr(brsa.sigma_,noise_level)[1]
    assert p < 0.01, "Fitted noise level does not correlate with simulated noise level!"
    p = scipy.stats.pearsonr(brsa.rho_,rho1)[1]
    assert p < 0.01, "Fitted AR(1) coefficient does not correlate with simulated values!"


    # Test fitting with lower rank and without GP prior
    rank = n_C - 1
    n_nureg = 1
    brsa = BRSA(rank=rank,n_nureg=n_nureg)
    brsa.fit(X=Y, design=design.design_task, scan_onsets=scan_onsets)
    u_b = brsa.U_
    u_i = ideal_cov
    p = scipy.stats.spearmanr(u_b[np.tril_indices_from(u_b)],u_i[np.tril_indices_from(u_i)])[1]
    assert p < 0.01, "Fitted covariance matrix does not correlate with ideal covariance matrix!"
    # check that the recovered SNRs make sense
    p = scipy.stats.pearsonr(brsa.nSNR_,snr)[1]
    assert p < 0.01, "Fitted SNR does not correlate with simulated SNR!"
    assert np.isclose(np.mean(np.log(brsa.nSNR_)),0), "nSNR_ not normalized!"
    p = scipy.stats.pearsonr(brsa.sigma_,noise_level)[1]
    assert p < 0.01, "Fitted noise level does not correlate with simulated noise level!"
    p = scipy.stats.pearsonr(brsa.rho_,rho1)[1]
    assert p < 0.01, "Fitted AR(1) coefficient does not correlate with simulated values!"

    assert not hasattr(brsa,'bGP_') and not hasattr(brsa,'lGPspace_') and not hasattr(brsa,'lGPinten_'),\
        'the BRSA object should not have parameters of GP if GP is not requested.'
    # GP parameters are not set if not requested
    assert brsa.beta0_.shape[0] == n_nureg, 'Shape of beta0 incorrect'
    p = scipy.stats.pearsonr(brsa.beta0_[0,:],np.mean(noise,axis=0))[1]
    assert p < 0.05, 'recovered beta0 does not correlate with the baseline of voxels.'

    # Test fitting with GP over just spatial coordinates.
    brsa = BRSA(GP_space=True)
    brsa.fit(X=Y, design=design.design_task, scan_onsets=scan_onsets, coords=coords)
    # Check that result is significantly correlated with the ideal covariance matrix
    u_b = brsa.U_
    u_i = ideal_cov
    p = scipy.stats.spearmanr(u_b[np.tril_indices_from(u_b)],u_i[np.tril_indices_from(u_i)])[1]
    assert p < 0.01, "Fitted covariance matrix does not correlate with ideal covariance matrix!"
    # check that the recovered SNRs make sense
    p = scipy.stats.pearsonr(brsa.nSNR_,snr)[1]
    assert p < 0.01, "Fitted SNR does not correlate with simulated SNR!"
    assert np.isclose(np.mean(np.log(brsa.nSNR_)),0), "nSNR_ not normalized!"
    p = scipy.stats.pearsonr(brsa.sigma_,noise_level)[1]
    assert p < 0.01, "Fitted noise level does not correlate with simulated noise level!"
    p = scipy.stats.pearsonr(brsa.rho_,rho1)[1]
    assert p < 0.01, "Fitted AR(1) coefficient does not correlate with simulated values!"
    assert not hasattr(brsa,'lGPinten_'),\
        'the BRSA object should not have parameters of lGPinten_ if only smoothness in space is requested.'

Example 125

Project: cryptography
Source File: test_dsa.py
View license
    def test_invalid_dsa_public_key_arguments(self, backend):
        """Building a DSA public key from out-of-spec numbers must fail.

        Every (p, q, g, y, expected-exception) combination below violates
        one constraint of DSA parameter/key validation; constructing the
        public key has to raise the listed exception.
        """
        params_1024 = DSA_KEY_1024.public_numbers.parameter_numbers
        params_2048 = DSA_KEY_2048.public_numbers.parameter_numbers
        params_3072 = DSA_KEY_3072.public_numbers.parameter_numbers
        y_1024 = DSA_KEY_1024.public_numbers.y
        y_2048 = DSA_KEY_2048.public_numbers.y
        y_3072 = DSA_KEY_3072.public_numbers.y

        invalid_cases = [
            # p < 1024 bits in length
            (2 ** 1000, params_1024.q, params_1024.g, y_1024, ValueError),
            # p < 2048 bits in length
            (2 ** 2000, params_2048.q, params_2048.g, y_2048, ValueError),
            # p < 3072 bits in length
            (2 ** 3000, params_3072.q, params_3072.g, y_3072, ValueError),
            # p > 3072 bits in length
            (2 ** 3100, params_3072.q, params_3072.g, y_3072, ValueError),
            # q < 160 bits in length
            (params_1024.p, 2 ** 150, params_1024.g, y_1024, ValueError),
            # q < 256 bits in length
            (params_2048.p, 2 ** 250, params_2048.g, y_2048, ValueError),
            # q > 256 bits in length
            (params_3072.p, 2 ** 260, params_3072.g, y_3072, ValueError),
            # g < 1
            (params_1024.p, params_1024.q, 0, y_1024, ValueError),
            # g = 1
            (params_1024.p, params_1024.q, 1, y_1024, ValueError),
            # g > p
            (params_1024.p, params_1024.q, 2 ** 1200, y_1024, ValueError),
            # non-integer y value
            (params_1024.p, params_1024.q, params_1024.g, None, TypeError),
        ]

        for p, q, g, y, expected_error in invalid_cases:
            with pytest.raises(expected_error):
                dsa.DSAPublicNumbers(
                    parameter_numbers=dsa.DSAParameterNumbers(p=p, q=q, g=g),
                    y=y,
                ).public_key(backend)

Example 126

Project: selinon
Source File: test_nodeFailures.py
View license
    def test_two_failures_no_fallback(self):
        """Two root tasks fail one after the other with no fallback defined.

        flow1:

           Task1 X     Task2 X
              |           |
              |           |
           Task3        Task4

        Task1 fails first, then Task2; once both have failed and no
        fallback exists, the flow must end with a FlowError.
        """
        edge_table = {
            'flow1': [{'from': ['Task1'], 'to': ['Task3'], 'condition': self.cond_true},
                      {'from': ['Task2'], 'to': ['Task4'], 'condition': self.cond_true},
                      {'from': [], 'to': ['Task1', 'Task2'], 'condition': self.cond_true}],
        }
        failures = {
            'flow1': {'Task1': {'next': {'Task2': {'next': {}, 'fallback': []}}, 'fallback': []},
                      'Task2': {'next': {'Task1': {'next': {}, 'fallback': []}}, 'fallback': []}
                     }
        }
        self.init(edge_table, failures=failures)

        def assert_flow_waiting(retry, state, info):
            # Invariants that must hold after every update step below:
            # the flow keeps retrying, only the two root tasks have been
            # started, and both outgoing edges are still waiting.
            assert retry is not None
            assert state.node_args is None
            for started in ('Task1', 'Task2'):
                assert started in self.instantiated_tasks
            for not_started in ('Task3', 'Task4', 'Task5'):
                assert not_started not in self.instantiated_tasks
            assert len(info.get('waiting_edges')) == 2
            assert 0 in info['waiting_edges']
            assert 1 in info['waiting_edges']

        system_state = SystemState(id(self), 'flow1')
        retry = system_state.update()
        state_dict = system_state.to_dict()
        assert_flow_waiting(retry, system_state, state_dict)

        # Task1 fails; the flow keeps waiting on Task2
        self.set_failed(self.get_task('Task1'))

        system_state = SystemState(id(self), 'flow1', state=state_dict,
                                   node_args=system_state.node_args)
        retry = system_state.update()
        state_dict = system_state.to_dict()
        assert_flow_waiting(retry, system_state, state_dict)
        assert 'Task1' in state_dict['failed_nodes']

        # No change so far
        system_state = SystemState(id(self), 'flow1', state=state_dict,
                                   node_args=system_state.node_args)
        retry = system_state.update()
        state_dict = system_state.to_dict()
        assert_flow_waiting(retry, system_state, state_dict)
        assert 'Task1' in state_dict['failed_nodes']

        # Task2 has failed as well -- no fallback exists, so the flow errors out
        self.set_failed(self.get_task('Task2'))

        with pytest.raises(FlowError):
            system_state = SystemState(id(self), 'flow1', state=state_dict,
                                       node_args=system_state.node_args)
            system_state.update()

Example 127

View license
    def test_create_tenure_relationship(self):
        """Create tenure relationships linking parties to spatial units.

        Covers five submission layouts: plain (no repeat groups), tenure
        data inside ``party_repeat``, inside ``location_repeat``, outside
        an empty ``party_repeat``, and outside an empty ``location_repeat``
        -- then a payload missing ``tenure_type``, which must raise
        InvalidXMLSubmission without creating another relationship.

        NOTE(review): the final count assertion (== 9) depends on every
        preceding section creating exactly the relationships it checks.
        """
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # test without repeats
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        party = PartyFactory.create(project=self.project)
        location = SpatialUnitFactory.create(project=self.project)

        data = {
            'tenure_type': 'CO',
            'tenure_relationship_attributes': {
                'fname': False,
                'fname_two': 'Tenure One'
            },
            'tenure_resource_photo': 'resource.png'
        }

        tenure_relationships, tenure_resources = mh.create_tenure_relationship(
            mh(), data, [party], [location], self.project)
        tenure = TenureRelationship.objects.get(tenure_type='CO')
        assert tenure_relationships == [tenure]
        assert tenure.party == party
        assert tenure.spatial_unit == location
        assert tenure.attributes == {'fname': False, 'fname_two': 'Tenure One'}
        assert len(tenure_resources) == 1
        assert tenure_resources[0]['id'] == tenure.id
        assert 'resource.png' in tenure_resources[0]['resources']

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # inside party_repeat
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # one tenure entry per repeated party, all on the same location
        party2 = PartyFactory.create(project=self.project)
        party3 = PartyFactory.create(project=self.project)

        data = {
            'party_repeat': [{
                'tenure_type': 'WR',
                'tenure_relationship_attributes': {
                    'fname': False,
                    'fname_two': 'Tenure Two'
                },
                'tenure_resource_photo': 'resource_two.png'
            }, {
                'tenure_type': 'CO',
                'tenure_relationship_attributes': {
                    'fname': True,
                    'fname_two': 'Tenure Three'
                },
                'tenure_resource_photo': 'resource_three.png'
            }]
        }

        tenure_relationships, tenure_resources = mh.create_tenure_relationship(
            mh(), data, [party2, party3], [location], self.project)
        tenure2 = TenureRelationship.objects.get(party=party2)
        tenure3 = TenureRelationship.objects.get(party=party3)
        assert tenure_relationships == [tenure2, tenure3]

        assert tenure2.spatial_unit == location
        assert tenure2.tenure_type.id == 'WR'
        assert tenure2.attributes == {
            'fname': False, 'fname_two': 'Tenure Two'}

        assert tenure3.spatial_unit == location
        assert tenure3.tenure_type.id == 'CO'
        assert tenure3.attributes == {
            'fname': True, 'fname_two': 'Tenure Three'}

        assert len(tenure_resources) == 2
        assert tenure_resources[0]['id'] == tenure2.id
        assert 'resource_two.png' in tenure_resources[0]['resources']

        assert tenure_resources[1]['id'] == tenure3.id
        assert 'resource_three.png' in tenure_resources[1]['resources']

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # inside location_repeat
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # one tenure entry per repeated location, all for the same party
        location2 = SpatialUnitFactory.create(project=self.project)
        location3 = SpatialUnitFactory.create(project=self.project)

        data = {
            'location_repeat': [{
                'tenure_type': 'WR',
                'tenure_relationship_attributes': {
                    'fname': False,
                    'fname_two': 'Tenure Four'
                },
                'tenure_resource_photo': 'resource_four.png'
            }, {
                'tenure_type': 'CO',
                'tenure_relationship_attributes': {
                    'fname': True,
                    'fname_two': 'Tenure Five'
                },
                'tenure_resource_photo': 'resource_five.png'
            }]
        }

        tenure_relationships, tenure_resources = mh.create_tenure_relationship(
            mh(), data, [party], [location2, location3], self.project)

        tenure4 = TenureRelationship.objects.get(spatial_unit=location2)
        tenure5 = TenureRelationship.objects.get(spatial_unit=location3)
        assert tenure_relationships == [tenure4, tenure5]

        assert tenure4.party == party
        assert tenure4.tenure_type.id == 'WR'
        assert tenure4.attributes == {
            'fname': False, 'fname_two': 'Tenure Four'}

        assert tenure5.party == party
        assert tenure5.tenure_type.id == 'CO'
        assert tenure5.attributes == {
            'fname': True, 'fname_two': 'Tenure Five'}

        assert len(tenure_resources) == 2
        assert tenure_resources[0]['id'] == tenure4.id
        assert 'resource_four.png' in tenure_resources[0]['resources']

        assert tenure_resources[1]['id'] == tenure5.id
        assert 'resource_five.png' in tenure_resources[1]['resources']

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # outside party_repeat
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # empty repeat group: the single tenure payload applies to each party
        party4 = PartyFactory.create(project=self.project)
        party5 = PartyFactory.create(project=self.project)

        data = {
            'party_repeat': [],
            'tenure_type': 'CO',
            'tenure_relationship_attributes': {
                'fname': True,
                'fname_two': 'Tenure 6, 7'
            },
            'tenure_resource_photo': 'resource_six.png'
            }

        tenure_relationships, tenure_resources = mh.create_tenure_relationship(
            mh(), data, [party4, party5], [location], self.project)
        tenure6 = TenureRelationship.objects.get(party=party4)
        tenure7 = TenureRelationship.objects.get(party=party5)

        assert tenure_relationships == [tenure6, tenure7]
        assert tenure6.spatial_unit == location
        assert tenure6.tenure_type.id == 'CO'
        assert tenure6.attributes == {
            'fname': True, 'fname_two': 'Tenure 6, 7'}

        assert tenure7.spatial_unit == location
        assert tenure7.tenure_type.id == 'CO'
        assert tenure7.attributes == {
            'fname': True, 'fname_two': 'Tenure 6, 7'}

        assert len(tenure_resources) == 2
        assert tenure_resources[0]['id'] == tenure6.id
        assert 'resource_six.png' in tenure_resources[0]['resources']

        assert tenure_resources[1]['id'] == tenure7.id
        assert 'resource_six.png' in tenure_resources[1]['resources']

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # outside location_repeat
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # empty repeat group: the single tenure payload applies to each location
        location4 = SpatialUnitFactory.create(project=self.project)
        location5 = SpatialUnitFactory.create(project=self.project)

        data = {
            'location_repeat': [],
            'tenure_type': 'WR',
            'tenure_relationship_attributes': {
                'fname': False,
                'fname_two': 'Tenure 8, 9'
            },
            'tenure_resource_photo': 'resource_seven.png'
        }

        tenure_relationships, tenure_resources = mh.create_tenure_relationship(
            mh(), data, [party], [location4, location5], self.project)

        tenure8 = TenureRelationship.objects.get(spatial_unit=location4)
        tenure9 = TenureRelationship.objects.get(spatial_unit=location5)
        assert tenure_relationships == [tenure8, tenure9]

        assert tenure8.party == party
        assert tenure8.tenure_type.id == 'WR'
        assert tenure8.attributes == {
            'fname': False, 'fname_two': 'Tenure 8, 9'}

        assert tenure9.party == party
        assert tenure9.tenure_type.id == 'WR'
        assert tenure9.attributes == {
            'fname': False, 'fname_two': 'Tenure 8, 9'}

        assert len(tenure_resources) == 2
        assert tenure_resources[0]['id'] == tenure8.id
        assert 'resource_seven.png' in tenure_resources[0]['resources']

        assert tenure_resources[1]['id'] == tenure9.id
        assert 'resource_seven.png' in tenure_resources[1]['resources']

        # payload lacking 'tenure_type' (has 'tenure_nonsense' instead)
        data = {
            'location_repeat': [],
            'tenure_nonsense': 'Blah blah blah',
            'tenure_relationship_attributes': {
                'fname': False,
                'fname_two': 'Tenure 8, 9'
            },
            'tenure_resource_photo': 'resource_seven.png'
        }

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # test failing
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        with pytest.raises(InvalidXMLSubmission):
            mh.create_tenure_relationship(
                mh(), data, [party], [location4, location5], self.project)
        assert TenureRelationship.objects.count() == 9

Example 128

Project: zerotest
Source File: test_response_matcher.py
View license
def test_fuzzy_compare():
    """Fuzzy matching compares JSON structure and value *types*, not values.

    Walks one response pair through a long series of body/header mutations,
    checking which states the plain matcher accepts, which it rejects, and
    how the ``allow_none`` / ``allow_blank`` options downgrade a rejection
    to a ``FuzzyMatchWarning``.
    """
    matcher = ResponseMatcher(fuzzy_match=True)
    allow_none_matcher = ResponseMatcher(fuzzy_match=True, fuzzy_match_options={"allow_none": True})
    allow_blank_matcher = ResponseMatcher(fuzzy_match=True, fuzzy_match_options={"allow_blank": True})

    r1 = Response(status=200, headers={}, body=json.dumps({"id": 1, "name": "test"}))
    r2 = Response(status=200, headers={}, body=json.dumps({"id": 2, "name": "test", "some_field": 0}))

    def accepts(m=matcher):
        # The pair, in its current state, satisfies the matcher.
        m.match_responses(r1, r2)

    def rejects(m=matcher):
        # The pair, in its current state, violates the matcher.
        with pytest.raises(AssertionError):
            m.match_responses(r1, r2)

    def warns(m):
        # The option-configured matcher downgrades the mismatch to a warning.
        with pytest.warns(FuzzyMatchWarning):
            m.match_responses(r1, r2)

    # Without a content-type header the bodies cannot be compared at all.
    rejects()

    r2.body = json.dumps({"id": 1, "name": "test"})
    r1.headers['content-type'] = 'application/json'
    r2.headers['CONTENT-TYPE'] = 'application/json'  # header name lookup is case-insensitive
    accepts()

    # "id" switched type int -> str.
    r2.body = json.dumps({"id": "42", "name": "test42"})
    rejects()

    # "name" switched type str -> int.
    r2.body = json.dumps({"id": 42, "name": 42})
    rejects()

    # Both fields carry the wrong type.
    r2.body = json.dumps({"id": "42", "name": 42})
    rejects()

    # Same types, different values: accepted.
    r2.body = json.dumps({"id": 42, "name": "test42"})
    accepts()

    # An extra header on one side is a mismatch.
    r2.headers['use-less-header'] = True
    rejects()

    r2.headers.pop('use-less-header')

    # An empty list matches a populated one...
    r1.body = json.dumps({"id": 1, "name": "test", "followers": []})
    r2.body = json.dumps({"id": 42, "name": "test42", "followers": [1, 2, 3]})
    accepts()

    # ...but element types must agree when both sides are non-empty.
    r1.body = json.dumps({"id": 1, "name": "test", "followers": ["1", "2"]})
    r2.body = json.dumps({"id": 42, "name": "test42", "followers": [1, 2, 3]})
    rejects()

    r1.body = json.dumps({"id": 1, "name": "test", "followers": [1, 2]})
    accepts()

    # Nested objects and lists of objects compare structurally.
    r1.body = json.dumps(
        {
            "id": 1, "name": "test", "children":
            [{"id": 2, "name": "test2"}, {"id": 3, "name": "test3"}],
            "parent": {"id": 0, "name": "test0"}
        })
    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}],
            "parent": {"id": 5, "name": "test5"}
        })
    accepts()

    # A child whose "id" is a string breaks the match.
    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}, {"id": "what?"}],
            "parent": {"id": 5, "name": "test5"}
        })
    rejects()

    # Empty child object plus string parent id: rejected (asserted twice).
    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}, {}],
            "parent": {"id": "5", "name": "test5"}
        })
    rejects()

    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}, {}],
            "parent": {"id": "5", "name": "test5"}
        })
    rejects()

    # A blank parent object is rejected unless allow_blank is set.
    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}],
            "parent": {}
        })
    rejects()
    warns(allow_blank_matcher)

    # A None parent is rejected even with allow_blank; allow_none permits it.
    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}],
            "parent": None
        })
    rejects()
    rejects(allow_blank_matcher)
    warns(allow_none_matcher)

    # An empty child list still matches a populated one.
    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [],
            "parent": {"id": 5, "name": "test5"}
        })
    accepts()

Example 129

Project: selinon
Source File: test_nodeFailures.py
View license
    def test_single_failure_finish_wait(self):
        """Task2 fails with no fallback: the dispatcher must keep waiting for
        Task1 (and then Task3) to finish before raising ``FlowError``.
        """
        #
        # flow1:
        #
        #    Task1       Task2 X
        #       |           |
        #       |           |
        #    Task3        Task4
        #
        # No fallback defined for Task2
        #
        # Note:
        #   Task2 will fail, no fallback defined, Dispatcher should wait for Task1 to finish and
        #   raise FlowError exception
        edge_table = {
            'flow1': [{'from': ['Task1'], 'to': ['Task3'], 'condition': self.cond_true},
                      {'from': ['Task2'], 'to': ['Task4'], 'condition': self.cond_true},
                      {'from': [], 'to': ['Task1', 'Task2'], 'condition': self.cond_true}],
        }
        failures = {
            'flow1': {'Task1': {'next': {'Task2': {'next': {}, 'fallback': []}}, 'fallback': []},
                      'Task2': {'next': {'Task1': {'next': {}, 'fallback': []}}, 'fallback': []}
                      }
        }
        self.init(edge_table, failures=failures)

        # First dispatcher pass: both entry tasks get scheduled.
        system_state = SystemState(id(self), 'flow1')
        retry = system_state.update()
        state_dict = system_state.to_dict()

        assert retry is not None
        assert system_state.node_args is None
        assert 'Task1' in self.instantiated_tasks
        assert 'Task2' in self.instantiated_tasks
        assert 'Task3' not in self.instantiated_tasks
        assert 'Task4' not in self.instantiated_tasks
        assert 'Task5' not in self.instantiated_tasks
        assert len(state_dict.get('waiting_edges')) == 2
        assert 0 in state_dict['waiting_edges']
        assert 1 in state_dict['waiting_edges']

        # Task2 has failed
        task2 = self.get_task('Task2')
        self.set_failed(task2)

        system_state = SystemState(id(self), 'flow1', state=state_dict,
                                   node_args=system_state.node_args)
        # NOTE(review): the return value of update() is discarded here and
        # below; 'retry' still holds the value from the first pass, so the
        # following 'retry is not None' asserts re-check stale data -- confirm
        # this is intended.
        system_state.update()
        state_dict = system_state.to_dict()

        assert retry is not None
        assert system_state.node_args is None
        assert 'Task1' in self.instantiated_tasks
        assert 'Task2' in self.instantiated_tasks
        assert 'Task3' not in self.instantiated_tasks
        assert 'Task4' not in self.instantiated_tasks
        assert 'Task5' not in self.instantiated_tasks
        assert len(state_dict.get('waiting_edges')) == 2
        assert 0 in state_dict['waiting_edges']
        assert 1 in state_dict['waiting_edges']
        assert 'Task2' in state_dict['failed_nodes']

        # No change so far, still wait
        system_state = SystemState(id(self), 'flow1', state=state_dict,
                                   node_args=system_state.node_args)
        system_state.update()
        state_dict = system_state.to_dict()

        assert retry is not None
        assert system_state.node_args is None
        assert 'Task1' in self.instantiated_tasks
        assert 'Task2' in self.instantiated_tasks
        assert 'Task3' not in self.instantiated_tasks
        assert 'Task4' not in self.instantiated_tasks
        assert 'Task5' not in self.instantiated_tasks
        assert len(state_dict.get('waiting_edges')) == 2
        assert 0 in state_dict['waiting_edges']
        assert 1 in state_dict['waiting_edges']
        assert 'Task2' in state_dict['failed_nodes']

        # Task1 has finished successfully
        task1 = self.get_task('Task1')
        self.set_finished(task1, 0)

        # Wait for Task3
        system_state = SystemState(id(self), 'flow1', state=state_dict,
                                   node_args=system_state.node_args)
        system_state.update()
        state_dict = system_state.to_dict()

        assert retry is not None
        assert 'Task3' in self.instantiated_tasks
        assert 'Task4' not in self.instantiated_tasks

        # Task3 has finished successfully
        task3 = self.get_task('Task3')
        self.set_finished(task3, 0)

        # All non-failed work is done now; the dispatcher must surface the
        # unrecovered Task2 failure as a FlowError.
        with pytest.raises(FlowError):
            system_state = SystemState(id(self), 'flow1', state=state_dict,
                                       node_args=system_state.node_args)
            system_state.update()

Example 130

Project: cti-toolkit
Source File: test_client.py
View license
@httpretty.activate
def test_send_poll_request():
    """Send a configured TAXII poll request against a mocked endpoint.

    The mock answers with a plain 'OK' body, which is not a valid poll
    response, so the client is expected to raise; afterwards the captured
    HTTP request is verified header by header and element by element.
    """
    # Fail fast on any request that escapes the mock.
    httpretty.HTTPretty.allow_net_connect = False

    # Register a minimal TAXII endpoint mock.
    httpretty.register_uri(
        httpretty.POST, 'http://example.com:80/taxii_endpoint',
        body=lambda request, uri, headers: (200, {}, 'OK'),
    )

    # Build a fully-configured client and fire the poll request.
    client = certau.source.SimpleTaxiiClient(
        username='user',
        password='pass',
        begin_ts='2015-12-30T10:13:05.00000+10:00',
        end_ts='2015-12-30T18:09:43.00000+10:00',
        hostname='example.com',
        path='/taxii_endpoint',
        collection='my_collection',
    )

    # The mocked 'OK' body cannot be parsed as a poll response, so the
    # client must raise with a specific message.
    with pytest.raises(Exception) as excinfo:
        client.send_poll_request()
    assert str(excinfo.value) == 'TAXII response not a poll response as expected.'

    # Inspect what the client actually sent.
    captured = httpretty.last_request()

    # content-length varies between runs; drop it before comparing.
    headers = captured.headers.dict
    del headers['content-length']

    expected_headers = {
        'x-taxii-accept': 'urn:taxii.mitre.org:message:xml:1.1',
        'x-taxii-protocol': 'urn:taxii.mitre.org:protocol:http:1.0',
        'accept-encoding': 'identity',
        'user-agent': 'libtaxii.httpclient',
        'connection': 'close',
        'accept': 'application/xml',
        'x-taxii-content-type': 'urn:taxii.mitre.org:message:xml:1.1',
        'host': 'example.com:80',
        'x-taxii-services': 'urn:taxii.mitre.org:services:1.1',
        'content-type': 'application/xml',
        'authorization': 'Basic dXNlcjpwYXNz'
    }
    assert captured.headers.dict == expected_headers

    # Parse the XML body into plain dicts for comparison.
    parsed_body = xmltodict.parse(captured.body, dict_constructor=dict)

    # The message id is generated per request; remove it first.
    del parsed_body['taxii_11:Poll_Request']['@message_id']

    expected_body = {
        u'taxii_11:Poll_Request': {
            u'@xmlns:taxii': u'http://taxii.mitre.org/messages/taxii_xml_binding-1',
            u'taxii_11:Inclusive_End_Timestamp': u'2015-12-30T18:09:43+10:00',
            u'@xmlns:taxii_11': u'http://taxii.mitre.org/messages/taxii_xml_binding-1.1',
            u'@collection_name': u'my_collection',
            u'@xmlns:tdq': u'http://taxii.mitre.org/query/taxii_default_query-1',
            u'taxii_11:Poll_Parameters': {
                u'taxii_11:Response_Type': u'FULL',
                u'@allow_asynch': u'false'
            },
            u'taxii_11:Exclusive_Begin_Timestamp': u'2015-12-30T10:13:05+10:00'
        }
    }
    assert parsed_body == expected_body

Example 131

Project: shuup
Source File: test_attributes.py
View license
def _populate_applied_attribute(aa):
    """Assign type-appropriate values to *aa* and verify round-tripping.

    One branch per ``AttributeType``: each sets ``aa.value``, saves, and
    asserts both the coerced Python value and the string form stored in
    ``untranslated_string_value``.
    """
    attr_type = aa.attribute.type

    if attr_type == AttributeType.BOOLEAN:
        aa.value = True
        aa.save()
        assert aa.value is True, "Truth works"
        assert aa.untranslated_string_value == "1", "Integer attributes save string representations"
        aa.value = not 42  # any falsy source coerces to False
        aa.save()
        assert aa.value is False, "Lies work"
        assert aa.untranslated_string_value == "0", "Integer attributes save string representations"

    elif attr_type == AttributeType.INTEGER:
        aa.value = 320.51
        aa.save()
        assert aa.value == 320, "Integer attributes get rounded down"
        assert aa.untranslated_string_value == "320", "Integer attributes save string representations"

    elif attr_type == AttributeType.DECIMAL:
        aa.value = Decimal("0.636")  # Surface pressure of Mars
        aa.save()
        assert aa.value * 1000 == 636, "Decimals work like they should"
        assert aa.untranslated_string_value == "0.636", "Decimal attributes save string representations"

    elif attr_type == AttributeType.TIMEDELTA:
        # Plain integers are interpreted as seconds.
        aa.value = 86400
        aa.save()
        assert aa.value.days == 1, "86,400 seconds is one day"
        assert aa.untranslated_string_value == "86400", "Timedeltas are seconds as strings"

        # Real timedelta objects are accepted too.
        aa.value = datetime.timedelta(days=4)
        aa.save()
        assert aa.value.days == 4, "4 days remain as 4 days"
        assert aa.untranslated_string_value == "345600", "Timedeltas are still seconds as strings"

    elif attr_type == AttributeType.UNTRANSLATED_STRING:
        aa.value = "Dog Hello"
        aa.save()
        assert aa.value == "Dog Hello", "Untranslated strings work"
        assert aa.untranslated_string_value == "Dog Hello", "Untranslated strings work"

    elif attr_type == AttributeType.TRANSLATED_STRING:
        assert aa.attribute.is_translated
        with override_settings(LANGUAGES=[(code, code) for code in ("en", "fi", "ga", "ja")]):
            versions = {
                "en": u"science fiction",
                "fi": u"tieteiskirjallisuus",
                "ga": u"ficsean eolaíochta",
                "ja": u"空想科学小説",
            }
            # Store one translation per language...
            for language_code, text in versions.items():
                aa.set_current_language(language_code)
                aa.value = text
                aa.save()
                assert aa.value == text, "Translated strings work"
            # ...then read each one back via the safe getter.
            for language_code, text in versions.items():
                assert aa.safe_translation_getter("translated_string_value", language_code=language_code) == text, "%s translation is safe" % language_code

            # An unknown language falls back to the empty string.
            aa.set_current_language("xx")
            assert aa.value == "", "untranslated version yields an empty string"

    elif attr_type == AttributeType.DATE:
        aa.value = "2014-01-01"
        assert aa.value == datetime.date(2014, 1, 1), "Date parsing works"
        assert aa.untranslated_string_value == "2014-01-01", "Dates are saved as strings"

    elif attr_type == AttributeType.DATETIME:
        # Free-form text is not accepted for datetimes.
        with pytest.raises(TypeError):
            aa.value = "yesterday"
        dt = datetime.datetime(1997, 8, 12, 14)
        aa.value = dt
        assert aa.value.toordinal() == 729248, "Date assignment works"
        assert aa.value.time().hour == 14, "The clock still works"
        assert aa.untranslated_string_value == dt.isoformat(), "Datetimes are saved as strings too"

    else:
        raise NotImplementedError("Not implemented: populating %s" % aa.attribute.type)  # pragma: no cover

Example 132

Project: rhea
Source File: test_fifo_fast.py
View license
def test_overflow_ffifo(args=None):
    """Verify overflow behaviour of the fast synchronous FIFO.

    The bench writes more entries than the FIFO can hold, checks the
    full/empty/count flags around the overflow point, drains the FIFO and
    verifies read ordering.  The bench is run 100 times, each run expected
    to raise ``ValueError`` (see the NOTE at the bottom).

    NOTE(review): ``async=True`` is only valid syntax on Python < 3.7,
    where ``async`` was not yet a reserved keyword -- confirm the supported
    Python versions for this test.

    :param args: optional namespace with ``width``, ``size`` and ``name``
        attributes; defaults to an 8-bit wide, 16-deep FIFO.
    """
    reset = ResetSignal(0, active=1, async=True)
    clock = Signal(bool(0))

    if args is None:
        args = Namespace(width=8, size=16, name='test')
    else:
        # @todo: verify args has the attributes needed for the FIFOBus
        pass 

    fbus = FIFOBus(width=args.width)
    glbl = Global(clock, reset)

    @myhdl.block
    def bench_fifo_overflow():
        # Device under test: the fast synchronous FIFO implementation.
        # @todo: use args.fast, args.use_srl_prim
        tbdut = cores.fifo.fifo_fast(glbl, fbus,
                                     size=args.size, use_srl_prim=False)

        @always(delay(10))
        def tbclk():
            # Free-running clock, 20 time-unit period.
            clock.next = not clock

        @instance
        def tbstim():
            # Apply reset, then idle a few cycles before stimulus.
            fbus.write_data.next = 0xFE
            reset.next = reset.active
            yield delay(33)
            reset.next = not reset.active
            for ii in range(5):
                yield clock.posedge

            # Write between size and 2*size bytes, i.e. deliberately past
            # the FIFO's capacity.
            rand = randrange(args.size+2,2*args.size+1)
            for num_bytes in range(args.size, rand):
                            
                for ii in range(num_bytes):
                    try:
                        fbus.write_data.next = ii
                        fbus.write.next = True
                        yield clock.posedge
                    except ValueError:
                        # Presumably the overflowing write raises ValueError
                        # -- confirm against the FIFO implementation.  At
                        # that point the FIFO must report exactly full.
                        assert fbus.count == args.size
                        assert fbus.full, "FIFO should be full!" 
                        assert not fbus.empty, "FIFO should not be empty" 
                    else:
                        # While not overflowing, count stays within bounds.
                        assert fbus.count <= args.size
                        if fbus.count < args.size:
                            assert not fbus.full
                        
                fbus.write.next = False
                fbus.write_data.next = 0xFE
                for cc in range(5):                
                    yield clock.posedge

                # Drain the FIFO and verify FIFO (first-in first-out) order.
                for ii in range(args.size):
                    fbus.read.next = True
                    yield clock.posedge
                    assert fbus.read_valid
                    assert fbus.read_data == ii, "rdata %x ii %x " % (fbus.read_data, ii)

                fbus.read.next = False
                yield clock.posedge
                assert fbus.empty

            # Clear the FIFO and let the simulation settle before stopping.
            fbus.clear.next = True
            yield clock.posedge
            fbus.clear.next = not fbus.clear
            for ii in range(5):
                yield clock.posedge
            raise StopSimulation

        return myhdl.instances()

    # NOTE(review): each run is expected to end in ValueError even though
    # tbstim catches ValueError internally -- confirm which write actually
    # propagates the exception out of run_testbench.
    for kk in range(100):
        with pytest.raises(ValueError):
            run_testbench(bench_fifo_overflow)

Example 133

Project: python-arango
Source File: test_document.py
View license
def test_update_many():
    """Exercise ``col.update_many`` across its option combinations.

    The sections build on one another: ``current_revs`` carries the latest
    revision of every document forward, and each section's expected ``val``
    depends on the previous section's writes -- statement order matters.
    """
    current_revs = {}
    docs = [doc.copy() for doc in test_docs]
    doc_keys = [doc['_key'] for doc in docs]
    col.insert_many(docs)

    # Test update_many with default options
    for doc in docs:
        doc['val'] = {'foo': 1}
    results = col.update_many(docs)
    for result, key in zip(results, doc_keys):
        assert result['_id'] == '{}/{}'.format(col.name, key)
        assert result['_key'] == key
        assert isinstance(result['_rev'], string_types)
        assert col[key]['val'] == {'foo': 1}
        current_revs[key] = result['_rev']

    # Test update_many with merge (new dict keys merge into the old value)
    for doc in docs:
        doc['val'] = {'bar': 2}
    results = col.update_many(docs, merge=True)
    for result, doc in zip(results, docs):
        key = doc['_key']
        assert result['_id'] == '{}/{}'.format(col.name, key)
        assert result['_key'] == key
        assert isinstance(result['_rev'], string_types)
        assert result['_old_rev'] == current_revs[key]
        assert col[key]['val'] == {'foo': 1, 'bar': 2}
        current_revs[key] = result['_rev']

    # Test update_many without merge (new dict value replaces the old one)
    for doc in docs:
        doc['val'] = {'baz': 3}
    results = col.update_many(docs, merge=False)
    for result, doc in zip(results, docs):
        key = doc['_key']
        assert result['_id'] == '{}/{}'.format(col.name, key)
        assert result['_key'] == key
        assert isinstance(result['_rev'], string_types)
        assert result['_old_rev'] == current_revs[key]
        assert col[key]['val'] == {'baz': 3}
        current_revs[key] = result['_rev']

    # Test update_many with keep_none (None values are stored)
    for doc in docs:
        doc['val'] = None
    results = col.update_many(docs, keep_none=True)
    for result, doc in zip(results, docs):
        key = doc['_key']
        assert result['_id'] == '{}/{}'.format(col.name, key)
        assert result['_key'] == key
        assert isinstance(result['_rev'], string_types)
        assert result['_old_rev'] == current_revs[key]
        assert col[key]['val'] is None
        current_revs[key] = result['_rev']

    # Test update_many without keep_none (None values delete the field)
    for doc in docs:
        doc['val'] = None
    results = col.update_many(docs, keep_none=False)
    for result, doc in zip(results, docs):
        key = doc['_key']
        assert result['_id'] == '{}/{}'.format(col.name, key)
        assert result['_key'] == key
        assert isinstance(result['_rev'], string_types)
        assert result['_old_rev'] == current_revs[key]
        assert 'val' not in col[key]
        current_revs[key] = result['_rev']

    # Test update_many with return_new and return_old
    for doc in docs:
        doc['val'] = 300
    results = col.update_many(docs, return_new=True, return_old=True)
    for result, doc in zip(results, docs):
        key = doc['_key']
        assert result['_id'] == '{}/{}'.format(col.name, key)
        assert result['_key'] == key
        assert isinstance(result['_rev'], string_types)
        assert result['_old_rev'] == current_revs[key]
        assert result['new']['_key'] == key
        assert result['new']['val'] == 300
        assert result['old']['_key'] == key
        # 'val' was removed by the preceding keep_none=False section.
        assert 'val' not in result['old']
        assert col[key]['val'] == 300
        current_revs[key] = result['_rev']

    # Test update without return_new and return_old
    for doc in docs:
        doc['val'] = 400
    results = col.update_many(docs, return_new=False, return_old=False)
    for result, doc in zip(results, docs):
        key = doc['_key']
        assert result['_id'] == '{}/{}'.format(col.name, key)
        assert result['_key'] == key
        assert isinstance(result['_rev'], string_types)
        assert result['_old_rev'] == current_revs[key]
        assert 'new' not in result
        assert 'old' not in result
        assert col[key]['val'] == 400
        current_revs[key] = result['_rev']

    # Test update_many with check_rev: revisions are deliberately corrupted
    # (suffix '000'), so every per-document result is a revision error and
    # no document changes.
    for doc in docs:
        doc['val'] = 500
        doc['_rev'] = current_revs[doc['_key']] + '000'
    results = col.update_many(docs, check_rev=True)
    for result, key in zip(results, doc_keys):
        assert isinstance(result, DocumentRevisionError)
    for doc in col:
        assert doc['val'] == 400

    # Test update_many with sync
    for doc in docs:
        doc['val'] = 600
    results = col.update_many(docs, sync=True)
    for result, doc in zip(results, docs):
        key = doc['_key']
        assert result['_id'] == '{}/{}'.format(col.name, key)
        assert result['_key'] == key
        assert isinstance(result['_rev'], string_types)
        # NOTE(review): '_rev' in docs still carries the corrupted value from
        # the check_rev section; presumably check_rev defaults to off here --
        # confirm against the driver's default.
        assert result['_old_rev'] == current_revs[key]
        assert result['sync'] is True
        assert col[key]['val'] == 600
        current_revs[key] = result['_rev']

    # Test update_many without sync
    for doc in docs:
        doc['val'] = 700
    results = col.update_many(docs, sync=False)
    for result, doc in zip(results, docs):
        key = doc['_key']
        assert result['_id'] == '{}/{}'.format(col.name, key)
        assert result['_key'] == key
        assert isinstance(result['_rev'], string_types)
        assert result['_old_rev'] == current_revs[key]
        assert result['sync'] is False
        assert col[key]['val'] == 700
        current_revs[key] = result['_rev']

    # Test update_many with missing documents
    results = col.update_many([{'_key': '6'}, {'_key': '7'}])
    for result, key in zip(results, doc_keys):
        assert isinstance(result, DocumentUpdateError)
    assert '6' not in col
    assert '7' not in col
    for doc in col:
        assert doc['val'] == 700

    # Test update_many in missing collection
    with pytest.raises(DocumentUpdateError):
        bad_col.update_many(docs)

Example 134

View license
    def test_create_spatial_unit(self):
        """Create spatial units from submission data.

        Covers three cases: a single location with a geoshape, the
        ``location_repeat`` group form with two locations (geotrace and
        geoshape), and a malformed payload that must raise
        ``InvalidXMLSubmission`` without creating extra records.
        """
        # Closed ring of 'lat lon alt acc' points -> becomes a Polygon.
        geoshape = ('45.56342779158167 -122.67650283873081 0.0 0.0;'
                    '45.56176327330353 -122.67669159919024 0.0 0.0;'
                    '45.56151562182025 -122.67490658909082 0.0 0.0;'
                    '45.563479432877415 -122.67494414001703 0.0 0.0;'
                    '45.56176327330353 -122.67669159919024 0.0 0.0')

        # Open sequence of points -> becomes a LineString.
        line = ('45.56342779158167 -122.67650283873081 0.0 0.0;'
                '45.56176327330353 -122.67669159919024 0.0 0.0;'
                '45.56151562182025 -122.67490658909082 0.0 0.0;')

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # test without repeats
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        data = {
            'location_type': 'BU',
            'location_geometry': geoshape,
            'location_attributes': {
                'fname': False,
                'fname_two': 'Location One',
            },
            'location_photo': 'resource.png',
            'location_resource_invite': 'resource_two.pdf',
        }

        location_objects, location_resources = mh.create_spatial_unit(
            mh(), data, self.project)
        assert len(location_objects) == 1
        location = SpatialUnit.objects.get(type='BU')
        assert location.attributes == {
            'fname': False, 'fname_two': 'Location One'}
        assert location.geometry.geom_type == 'Polygon'
        # Both attached files are reported against the new location.
        assert len(location_resources) == 1
        assert location_resources[0]['id'] == location.id
        assert len(location_resources[0]['resources']) == 2
        assert 'resource.png' in location_resources[0]['resources']
        assert 'resource_two.pdf' in location_resources[0]['resources']
        assert location.project == self.project

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # test with repeats
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        data = {
            'location_repeat': [{
                'location_type': 'PA',
                'location_geotrace': line,
                'location_attributes': {
                    'fname': False,
                    'fname_two': 'Location One',
                },
                'location_photo': 'resource.png',
                'location_resource_invite': 'resource_two.pdf',
            }, {
                'location_type': 'CB',
                'location_geoshape': geoshape,
                'location_attributes': {
                    'fname': True,
                    'fname_two': 'Location Two',
                },
                'location_photo': 'resource_three.png',
                'location_resource_invite': 'resource_four.pdf',
            }]
        }

        location_objects, location_resources = mh.create_spatial_unit(
            mh(), data, self.project)

        # One spatial unit per repeat entry, with the matching geometry type.
        assert len(location_objects) == 2
        location = SpatialUnit.objects.get(type='PA')
        assert location.geometry.geom_type == 'LineString'
        assert location.attributes == {
            'fname': False, 'fname_two': 'Location One'}
        location2 = SpatialUnit.objects.get(type='CB')
        assert location2.geometry.geom_type == 'Polygon'
        assert location2.attributes == {
            'fname': True, 'fname_two': 'Location Two'}

        # Each repeat entry keeps its own pair of resources.
        assert len(location_resources) == 2
        assert location_resources[0]['id'] == location.id
        assert len(location_resources[0]['resources']) == 2
        assert 'resource.png' in location_resources[0]['resources']
        assert 'resource_two.pdf' in location_resources[0]['resources']
        assert location.project == self.project

        assert location_resources[1]['id'] == location2.id
        assert len(location_resources[1]['resources']) == 2
        assert 'resource_three.png' in location_resources[1]['resources']
        assert 'resource_four.pdf' in location_resources[1]['resources']
        assert location2.project == self.project

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        # test fails
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        data = {
            'location_nonsense': 'BLAH BLAH',
            'location_geometry': line,
            'location_attributes': {
                'fname': False,
                'fname_two': 'Location One',
            },
            'location_photo': 'resource.png',
            'location_resource_invite': 'resource_two.pdf',
        }

        # The malformed payload must raise and leave only the 3 units
        # created by the earlier sections.
        with pytest.raises(InvalidXMLSubmission):
            mh.create_spatial_unit(
                mh(), data, self.project)
        assert SpatialUnit.objects.count() == 3

Example 135

Project: flex
Source File: test_schema_validation.py
View license
def test_response_body_schema_validation_with_items_as_reference():
    """
    Ensure that when the expected response type is an object, and some other
    type is provided, that schema validation does not break since internally it
    will try to pull keys off of the value.

    The array items are declared via a ``$ref`` to the ``User`` definition;
    the response's ``name`` value ('billy') is outside the enum, so the
    nested reference must surface an enum validation error.
    """
    # Item schema referenced from the list definition below.
    user_definition = {
        'required': [
            'id',
            'name',
        ],
        'properties': {
            'id': {
                'type': INTEGER,
            },
            'name': {
                'enum': ('bob', 'joe'),
            },
        },
    }

    # List wrapper whose 'results' items point at User via $ref.
    user_list_definition = {
        'type': OBJECT,
        'required': [
            'results',
        ],
        'properties': {
            'results': {
                'type': ARRAY,
                'items': {
                    '$ref': '#/definitions/User',
                },
            },
        },
    }

    schema = SchemaFactory(
        definitions={
            'User': user_definition,
            'UserList': user_list_definition,
        },
        paths={
            '/get': {
                'get': {
                    'responses': {
                        '200': {
                            'description': 'Success',
                            'schema': {
                                '$ref': '#/definitions/UserList',
                            },
                        }
                    },
                },
            },
        },
    )

    # 'billy' is not in the ('bob', 'joe') enum, so validation must fail.
    response = ResponseFactory(
        url='http://www.example.com/get',
        status_code=200,
        content_type='application/json',
        content=json.dumps({'results': [{'id': 3, 'name': 'billy'}]}),
    )

    with pytest.raises(ValidationError) as excinfo:
        validate_response(
            response=response,
            request_method='get',
            schema=schema,
        )

    assert_message_in_errors(
        MESSAGES['enum']['invalid'],
        excinfo.value.detail,
        'body.schema.enum',
    )

Example 136

Project: pyqtgraph
Source File: test_ROI.py
View license
def check_getArrayRegion(roi, name, testResize=True, transpose=False):
    """Exercise ``roi.getArrayRegion`` under a series of view/image transforms
    and compare each extracted region against approved reference images.

    Parameters
    ----------
    roi : pg.ROI
        The ROI under test; its initial state is captured and restored so the
        instance can be re-used by the caller.
    name : str
        Prefix for the approved-image keys passed to ``assertImageApproved``.
    testResize : bool
        When True, additionally verify the region after resizing the ROI.
    transpose : bool
        When True, swap the two spatial axes of the test data before testing.
    """
    # Remember the ROI's state so it can be restored at the end.
    initState = roi.getState()
    
    #win = pg.GraphicsLayoutWidget()
    win = pg.GraphicsView()
    win.show()
    win.resize(200, 400)
    
    # Don't use Qt's layouts for testing--these generate unpredictable results.
    #vb1 = win.addViewBox()
    #win.nextRow()
    #vb2 = win.addViewBox()
    
    # Instead, place the viewboxes manually 
    vb1 = pg.ViewBox()
    win.scene().addItem(vb1)
    vb1.setPos(6, 6)
    vb1.resize(188, 191)

    vb2 = pg.ViewBox()
    win.scene().addItem(vb2)
    vb2.setPos(6, 203)
    vb2.resize(188, 191)
    
    # img1 shows the source data; img2 shows the region extracted by the ROI.
    img1 = pg.ImageItem(border='w')
    img2 = pg.ImageItem(border='w')

    vb1.addItem(img1)
    vb2.addItem(img2)
    
    # Fixed seed keeps the reference images reproducible across runs.
    np.random.seed(0)
    data = np.random.normal(size=(7, 30, 31, 5))
    # Mark one slice along each axis so axis mix-ups are visually obvious.
    data[0, :, :, :] += 10
    data[:, 1, :, :] += 10
    data[:, :, 2, :] += 10
    data[:, :, :, 3] += 10
    
    if transpose:
        data = data.transpose(0, 2, 1, 3)
    
    img1.setImage(data[0, ..., 0])
    vb1.setAspectLocked()
    vb1.enableAutoRange(True, True)
    
    roi.setZValue(10)
    vb1.addItem(roi)

    # For RectROI the affine slice parameters are known exactly; the spatial
    # extents swap when the data axes are transposed.
    if isinstance(roi, pg.RectROI):
        if transpose:
            assert roi.getAffineSliceParams(data, img1, axes=(1, 2)) == ([28.0, 27.0], ((1.0, 0.0), (0.0, 1.0)), (1.0, 1.0))
        else:
            assert roi.getAffineSliceParams(data, img1, axes=(1, 2)) == ([27.0, 28.0], ((1.0, 0.0), (0.0, 1.0)), (1.0, 1.0))

    rgn = roi.getArrayRegion(data, img1, axes=(1, 2))
    #assert np.all((rgn == data[:, 1:-2, 1:-2, :]) | (rgn == 0))
    img2.setImage(rgn[0, ..., 0])
    vb2.setAspectLocked()
    vb2.enableAutoRange(True, True)
    
    app.processEvents()
    
    assertImageApproved(win, name+'/roi_getarrayregion', 'Simple ROI region selection.')

    # setPos must reject a non-numeric second argument.
    with pytest.raises(TypeError):
        roi.setPos(0, False)

    # Sub-pixel translation of the ROI.
    roi.setPos([0.5, 1.5])
    rgn = roi.getArrayRegion(data, img1, axes=(1, 2))
    img2.setImage(rgn[0, ..., 0])
    app.processEvents()
    assertImageApproved(win, name+'/roi_getarrayregion_halfpx', 'Simple ROI region selection, 0.5 pixel shift.')

    # Rotated ROI.
    roi.setAngle(45)
    roi.setPos([3, 0])
    rgn = roi.getArrayRegion(data, img1, axes=(1, 2))
    img2.setImage(rgn[0, ..., 0])
    app.processEvents()
    assertImageApproved(win, name+'/roi_getarrayregion_rotate', 'Simple ROI region selection, rotation.')

    if testResize:
        roi.setSize([60, 60])
        rgn = roi.getArrayRegion(data, img1, axes=(1, 2))
        img2.setImage(rgn[0, ..., 0])
        app.processEvents()
        assertImageApproved(win, name+'/roi_getarrayregion_resize', 'Simple ROI region selection, resized.')

    # Transform the image item itself (flip + rotate) rather than the ROI.
    img1.scale(1, -1)
    img1.setPos(0, img1.height())
    img1.rotate(20)
    rgn = roi.getArrayRegion(data, img1, axes=(1, 2))
    img2.setImage(rgn[0, ..., 0])
    app.processEvents()
    assertImageApproved(win, name+'/roi_getarrayregion_img_trans', 'Simple ROI region selection, image transformed.')

    # Inverted view y-axis.
    vb1.invertY()
    rgn = roi.getArrayRegion(data, img1, axes=(1, 2))
    img2.setImage(rgn[0, ..., 0])
    app.processEvents()
    assertImageApproved(win, name+'/roi_getarrayregion_inverty', 'Simple ROI region selection, view inverted.')

    # Anisotropic image scaling after resetting everything else.
    roi.setState(initState)
    img1.resetTransform()
    img1.setPos(0, 0)
    img1.scale(1, 0.5)
    rgn = roi.getArrayRegion(data, img1, axes=(1, 2))
    img2.setImage(rgn[0, ..., 0])
    app.processEvents()
    assertImageApproved(win, name+'/roi_getarrayregion_anisotropic', 'Simple ROI region selection, image scaled anisotropically.')
    
    # allow the roi to be re-used
    roi.scene().removeItem(roi)

Example 137

Project: blitzdb
Source File: test_basics.py
View license
def test_basics(backend):
    """Basic query-language smoke test: build a small movie/actor/director
    graph, persist it, then verify dotted-path queries and the $in / $all /
    $elemMatch / $or / $nin operators against it.
    """

    francis_coppola = Director({'name' : 'Francis Coppola'})
    backend.save(francis_coppola)

    the_godfather = Movie({'title' : 'The Godfather','director' : francis_coppola})
    apocalypse_now = Movie({'title' : 'Apocalypse Now'})
    # NOTE(review): star_wars_v is created but never saved or queried —
    # presumably leftover fixture data.
    star_wars_v = Movie({'title' : 'Star Wars V: The Empire Strikes Back'})

    backend.save(the_godfather)
    backend.save(apocalypse_now)

    marlon_brando = Actor({'name': 'Marlon Brando', 'movies' : [the_godfather,apocalypse_now]})
    al_pacino = Actor({'name': 'Al Pacino', 'movies' : [the_godfather]})
    francis_coppola.favorite_actor = al_pacino

    backend.save(marlon_brando)
    backend.save(al_pacino)
    backend.save(francis_coppola)
    backend.commit()

    # Dotted-path query into a related document.
    result = backend.filter(Movie,{'director.name' : francis_coppola.name})
    assert len(result) == 1
    assert the_godfather in result

    result = backend.filter(Movie,{'director.name' : {'$in' : [francis_coppola.name,'Clint Eastwood']}})
    assert len(result) == 1
    assert the_godfather in result

    # $all: actor must appear in every listed movie.
    result = backend.filter(Actor,{'movies' : {'$all' : [the_godfather,apocalypse_now]}})

    assert len(result) == 1
    assert marlon_brando in result

    # $in: actor must appear in at least one listed movie.
    result = backend.filter(Actor,{'movies' : {'$in' : [the_godfather,apocalypse_now]}})

    assert marlon_brando in result
    assert al_pacino in result
    assert len(result) == 2

    result = backend.filter(Actor,{'movies.title' : 'The Godfather'})

    assert len(result) == 2
    assert marlon_brando in result

    result = backend.filter(Actor,{'movies' : {'$elemMatch' : {'title' : 'The Godfather'}}})

    assert len(result) == 2
    assert marlon_brando in result
    assert al_pacino in result

    # $all combined with $elemMatch sub-queries.
    result = backend.filter(Actor,{'movies' : {'$all' : [{'$elemMatch' : {'title' : 'The Godfather'}},{'$elemMatch' : {'title' : 'Apocalypse Now'}}]}})

    assert len(result) == 1
    assert marlon_brando in result

    result = backend.filter(Actor,{'movies' : {'$all' : [the_godfather,apocalypse_now]}})

    assert len(result) == 1
    assert marlon_brando in result
    assert al_pacino not in result

    with pytest.raises(AttributeError):
        #this query is ambiguous and hence not supported
        result = backend.filter(Actor,{'movies' : [the_godfather,apocalypse_now]})

    result = backend.filter(Actor,{'movies.title' : 'The Godfather'})

    assert len(result) == 2
    assert marlon_brando in result
    assert al_pacino in result


    result = backend.filter(Actor,{'movies' : {'$in' : [the_godfather,apocalypse_now]}})

    assert len(result) == 2
    assert marlon_brando in result
    assert al_pacino in result

    result = backend.filter(Actor,{'movies.title' : 'The Godfather'})

    assert len(result) == 2
    assert marlon_brando in result
    assert al_pacino in result

    # Multi-hop dotted paths through two and three relations.
    result = backend.filter(Actor,{'movies.director.name' : {'$in' : ['Francis Coppola']}})

    assert len(result) == 2
    assert marlon_brando in result
    assert al_pacino in result

    result = backend.filter(Actor,{'movies.director.favorite_actor.name' : {'$in' : ['Al Pacino']}})

    assert len(result) == 2
    assert marlon_brando in result
    assert al_pacino in result

    # $nin: every actor appears in one of the excluded titles, so no matches.
    result = backend.filter(Actor,{'movies.title' : {'$nin' : ['The Godfather','Apocalypse Now']}})

    assert len(result) == 0

    result = backend.filter(Actor,{'$or' : [{'movies.title' : 'The Godfather'},{'movies.title' : 'Apocalypse Now'}]})

    assert marlon_brando in result
    assert al_pacino in result
    assert len(result) == 2


    # Filtering by a whole related document.
    result = backend.filter(Movie,{'director' : francis_coppola})

    assert len(result) == 1
    assert the_godfather in result

Example 138

Project: blitzdb
Source File: test_one_to_many.py
View license
def test_basics(backend):
    """One-to-many relation test: verify that reverse relations come back as
    QuerySets, that related objects start out lazy, and that accessing their
    attributes transparently loads them from the backend.

    Note: the order of the assertions matters — e.g. reading ``.pk`` forces a
    lazy object to load, which the subsequent ``not ... .lazy`` assert checks.
    """

    backend.init_schema()
    backend.create_schema()

    francis_coppola = Director({'name' : 'Francis Coppola'})
    stanley_kubrick = Director({'name' : 'Stanley Kubrick'})
    robert_de_niro = Actor({'name' : 'Robert de Niro','movies' : []})
    harrison_ford = Actor({'name' : 'Harrison Ford'})
    brian_de_palma = Director({'name' : 'Brian de Palma'})

    al_pacino = Actor({'name' : 'Al Pacino','movies' : []})

    scarface = Movie({'title' : 'Scarface','director' : brian_de_palma})

    the_godfather = Movie({'title' : 'The Godfather',
                           'director' : francis_coppola})

    space_odyssey = Movie({'title' : '2001 - A space odyssey',
                           'director' : stanley_kubrick})

    clockwork_orange = Movie({'title' : 'A Clockwork Orange',
                              'director' : stanley_kubrick})

    robert_de_niro.movies.append(the_godfather)
    al_pacino.movies.append(the_godfather)
    al_pacino.movies.append(scarface)

    # NOTE(review): apocalypse_now and star_wars_v are never saved; only
    # harrison_ford references star_wars_v and he is saved without it being
    # persisted explicitly.
    apocalypse_now = Movie({'title' : 'Apocalypse Now'})
    star_wars_v = Movie({'title' : 'Star Wars V: The Empire Strikes Back'})
    harrison_ford.movies = [star_wars_v]

    backend.save(robert_de_niro)
    backend.save(al_pacino)
    backend.save(francis_coppola)
    backend.save(stanley_kubrick)
    backend.save(space_odyssey)
    backend.save(brian_de_palma)
    backend.save(harrison_ford)

    # update() sets relations on already-persisted documents.
    backend.update(stanley_kubrick,{'favorite_actor' : al_pacino})
    backend.update(francis_coppola,{'favorite_actor' : robert_de_niro})
    backend.update(stanley_kubrick,{'best_movie' : space_odyssey})

    backend.save(the_godfather)
    backend.save(clockwork_orange)
    backend.save(scarface)

    backend.commit()

    director = backend.get(Director,{'name' : 'Stanley Kubrick'})

    # Reverse one-to-many relation is exposed as a QuerySet.
    assert isinstance(director.movies,QuerySet)
    assert len(director.movies) == 2
    assert isinstance(director.best_movie,Movie)
    assert director.best_movie.lazy
    assert isinstance(director.best_movie.best_of_director,Director)
    #this object is lazy
    assert director.best_movie.best_of_director.lazy
    #pk value is not defined since this object is populated through a relation
    assert not 'pk' in director.best_movie.best_of_director.lazy_attributes
    assert director.best_movie.best_of_director
    assert director.best_movie.best_of_director.pk
    #when asking for the pk we had to load the object from the DB using the relational data
    assert not director.best_movie.best_of_director.lazy
    assert director.best_movie.best_of_director == director

    #we test with a movie without a best_of_director relation...
    the_godfather.revert(backend)
    #this should raise an exception
    with pytest.raises(Director.DoesNotExist):
        assert the_godfather.best_of_director.eager

Example 139

Project: zerotest
Source File: test_response_matcher.py
View license
def test_fuzzy_compare():
    matcher = ResponseMatcher(fuzzy_match=True)
    allow_none_matcher = ResponseMatcher(fuzzy_match=True, fuzzy_match_options={"allow_none": True})
    allow_blank_matcher = ResponseMatcher(fuzzy_match=True, fuzzy_match_options={"allow_blank": True})
    r1 = Response(status=200, headers={}, body=json.dumps({"id": 1, "name": "test"}))
    r2 = Response(status=200, headers={}, body=json.dumps({"id": 2, "name": "test", "some_field": 0}))
    # not work if not content-type
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)

    r2.body = json.dumps({"id": 1, "name": "test"})
    r1.headers['content-type'] = 'application/json'
    r2.headers['CONTENT-TYPE'] = 'application/json'
    matcher.match_responses(r1, r2)

    r2.body = json.dumps({"id": "42", "name": "test42"})
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)

    r2.body = json.dumps({"id": 42, "name": 42})
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)

    r2.body = json.dumps({"id": "42", "name": 42})
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)

    r2.body = json.dumps({"id": 42, "name": "test42"})
    matcher.match_responses(r1, r2)

    r2.headers['use-less-header'] = True
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)

    r2.headers.pop('use-less-header')
    r1.body = json.dumps({"id": 1, "name": "test", "followers": []})
    r2.body = json.dumps({"id": 42, "name": "test42", "followers": [1, 2, 3]})
    matcher.match_responses(r1, r2)

    r1.body = json.dumps({"id": 1, "name": "test", "followers": ["1", "2"]})
    r2.body = json.dumps({"id": 42, "name": "test42", "followers": [1, 2, 3]})
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)

    r1.body = json.dumps({"id": 1, "name": "test", "followers": [1, 2]})
    matcher.match_responses(r1, r2)

    r1.body = json.dumps(
        {
            "id": 1, "name": "test", "children":
            [{"id": 2, "name": "test2"}, {"id": 3, "name": "test3"}],
            "parent": {"id": 0, "name": "test0"}
        })
    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}],
            "parent": {"id": 5, "name": "test5"}
        })
    matcher.match_responses(r1, r2)

    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}, {"id": "what?"}],
            "parent": {"id": 5, "name": "test5"}
        })
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)

    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}, {}],
            "parent": {"id": "5", "name": "test5"}
        })
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)

    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}, {}],
            "parent": {"id": "5", "name": "test5"}
        })
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)

    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}],
            "parent": {}
        })
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)
    with pytest.warns(FuzzyMatchWarning):
        allow_blank_matcher.match_responses(r1, r2)

    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [{"id": 4, "name": "test4"}],
            "parent": None
        })
    with pytest.raises(AssertionError):
        matcher.match_responses(r1, r2)
    with pytest.raises(AssertionError):
        allow_blank_matcher.match_responses(r1, r2)
    with pytest.warns(FuzzyMatchWarning):
        allow_none_matcher.match_responses(r1, r2)

    r2.body = json.dumps(
        {
            "id": 42, "name": "test", "children":
            [],
            "parent": {"id": 5, "name": "test5"}
        })
    matcher.match_responses(r1, r2)

Example 140

Project: tmuxp
Source File: test_cli.py
View license
def test_resolve_dot(tmpdir, homedir, configdir, projectdir, monkeypatch):
    """Verify resolve_config() from four different working directories.

    Layout created by the fixtures (paths relative to *tmpdir*):
      - home/work/project/.tmuxp.yaml   — project-local config
      - home/.tmuxp/myconfig.yaml       — user config, addressable by name
    resolve_config must resolve relative paths, bare config names, and '~'
    expansion, and raise for wrong extensions or nonexistent names.
    """
    monkeypatch.setenv('HOME', homedir)
    projectdir.join('.tmuxp.yaml').ensure()
    user_config_name = 'myconfig'
    user_config = configdir.join('%s.yaml' % user_config_name).ensure()

    project_config = str(projectdir.join('.tmuxp.yaml'))

    # From inside the project directory itself.
    with projectdir.as_cwd():
        expect = project_config
        assert resolve_config('.') == expect
        assert resolve_config('./') == expect
        assert resolve_config('') == expect
        assert resolve_config('../project') == expect
        assert resolve_config('../project/') == expect
        assert resolve_config('.tmuxp.yaml') == expect
        assert resolve_config(
            '../../.tmuxp/%s.yaml' % user_config_name) == str(user_config)
        assert resolve_config('myconfig') == str(user_config)
        assert resolve_config(
            '~/.tmuxp/myconfig.yaml') == str(user_config)

        # Wrong extensions and nonexistent targets must raise.
        with pytest.raises(Exception):
            resolve_config('.tmuxp.json')
        with pytest.raises(Exception):
            resolve_config('.tmuxp.ini')
        with pytest.raises(Exception):
            resolve_config('../')
        with pytest.raises(Exception):
            resolve_config('mooooooo')

    # From the home directory.
    with homedir.as_cwd():
        expect = project_config
        assert resolve_config('work/project') == expect
        assert resolve_config('work/project/') == expect
        assert resolve_config('./work/project') == expect
        assert resolve_config('./work/project/') == expect
        assert resolve_config(
            '.tmuxp/%s.yaml' % user_config_name) == str(user_config)
        assert resolve_config(
            './.tmuxp/%s.yaml' % user_config_name) == str(user_config)
        assert resolve_config('myconfig') == str(user_config)
        assert resolve_config(
            '~/.tmuxp/myconfig.yaml') == str(user_config)

        # There is no .tmuxp.yaml in $HOME, so these must all raise.
        with pytest.raises(Exception):
            resolve_config('')
        with pytest.raises(Exception):
            resolve_config('.')
        with pytest.raises(Exception):
            resolve_config('.tmuxp.yaml')
        with pytest.raises(Exception):
            resolve_config('../')
        with pytest.raises(Exception):
            resolve_config('mooooooo')

    # From the user config directory (~/.tmuxp).
    with configdir.as_cwd():
        expect = project_config
        assert resolve_config('../work/project') == expect
        assert resolve_config('../../home/work/project') == expect
        assert resolve_config('../work/project/') == expect
        assert resolve_config(
            '%s.yaml' % user_config_name) == str(user_config)
        assert resolve_config(
            './%s.yaml' % user_config_name) == str(user_config)
        assert resolve_config('myconfig') == str(user_config)
        assert resolve_config(
            '~/.tmuxp/myconfig.yaml') == str(user_config)

        with pytest.raises(Exception):
            resolve_config('')
        with pytest.raises(Exception):
            resolve_config('.')
        with pytest.raises(Exception):
            resolve_config('.tmuxp.yaml')
        with pytest.raises(Exception):
            resolve_config('../')
        with pytest.raises(Exception):
            resolve_config('mooooooo')

    # From the temp root above the home directory.
    with tmpdir.as_cwd():
        expect = project_config
        assert resolve_config('home/work/project') == expect
        assert resolve_config('./home/work/project/') == expect
        assert resolve_config(
            'home/.tmuxp/%s.yaml' % user_config_name) == str(user_config)
        assert resolve_config(
            './home/.tmuxp/%s.yaml' % user_config_name) == str(user_config)
        assert resolve_config('myconfig') == str(user_config)
        assert resolve_config(
            '~/.tmuxp/myconfig.yaml') == str(user_config)

        with pytest.raises(Exception):
            resolve_config('')
        with pytest.raises(Exception):
            resolve_config('.')
        with pytest.raises(Exception):
            resolve_config('.tmuxp.yaml')
        with pytest.raises(Exception):
            resolve_config('../')
        with pytest.raises(Exception):
            resolve_config('mooooooo')

Example 141

Project: autonomie
Source File: test_treasury_files.py
View license
def test_mail_treasury_files(dbsession, config, get_csrf_request, company_125):
    """MailTreasuryFilesView: verify that _prepare_mails builds one mail per
    attachment that actually exists, skips attachments already recorded in the
    sent-mail history unless 'force' is set, and rejects invalid company ids.

    NOTE(review): this is Python-2-only code — ``datas.values()[0]`` indexes a
    list, which dict views in Python 3 do not support. The literal
    '[email protected]' strings look like CloudFlare email-obfuscation residue
    from the scrape; the original source presumably held real addresses.
    """
    request = get_csrf_request()
    request.matchdict = {'filetype': 'salaire',
                                 'year': '2010', 'month': '1'}
    view = MailTreasuryFilesView(None, request)
    result_dict = view()
    datas = result_dict['datas']
    assert len(datas.keys()) == 1
    # Each listed file must belong to the company whose accounting code it carries.
    for file_ in datas.values()[0]:
        assert file_['file'].code == file_['company'].code_compta


    # Two attachments present on disk -> two mails prepared.
    form_datas = {
        'force': False,
        'mails': [
            {'company_id': company_125.id, 'attachment': '125_1_test.pdf'},
            {'company_id': company_125.id, 'attachment': '125_2_test.pdf'},
        ],
        'mail_subject': u"Sujet",
        "mail_message": u"Message {company.email} {year} {month}",
    }

    mails = view._prepare_mails(datas, form_datas, get_root_directory(request), '2010', '1')
    assert len(mails) == 2
    assert mails[0]['message'] == u"Message [email protected] 2010 1"
    assert mails[0]['email'] == '[email protected]'

    # Record the first file as already sent.
    sent_file = datas[company_125.id][0]['file']
    from autonomie.models.files import store_sent_mail
    history = store_sent_mail(sent_file.path, sent_file.datas, company_125.id)
    dbsession.add(history)

    # Not force and already in history
    form_datas = {
        'force': False,
        'mails': [
            {'company_id': company_125.id, 'attachment': '125_0_test.pdf'},
            {'company_id': company_125.id, 'attachment': '125_1_test.pdf'},
            {'company_id': company_125.id, 'attachment': '125_2_test.pdf'},
            {'company_id': company_125.id, 'attachment': '125_3_test.pdf'},
        ],
        'mail_subject': u"Sujet",
        "mail_message": u"Message {company.email} {year} {month}",
    }

    # One of the four is in history, so only three mails are prepared.
    mails = view._prepare_mails(
        datas, form_datas, get_root_directory(request), '2010', '1')
    assert len(mails) == 3

    # Force and already in history
    form_datas = {
        'force': True,
        'mails': [
            {'company_id': company_125.id, 'attachment': '125_0_test.pdf'},
            {'company_id': company_125.id, 'attachment': '125_1_test.pdf'},
            {'company_id': company_125.id, 'attachment': '125_2_test.pdf'},
            {'company_id': company_125.id, 'attachment': '125_3_test.pdf'},
        ],
        'mail_subject': u"Sujet",
        "mail_message": u"Message {company.email} {year} {month}",
    }

    # With force=True the history entry is ignored: all four go out.
    mails = view._prepare_mails(
        datas, form_datas, get_root_directory(request), '2010', '1')
    assert len(mails) == 4

    # Invalid submitted datas
    form_datas = {
        'force': True,
        'mails': [
            {'company_id': -15, 'attachment': '125_3_test.pdf'},
        ],
        'mail_subject': u"Sujet",
        "mail_message": u"Message {company.email} {year} {month}",
    }

    # Unknown company id must raise.
    with pytest.raises(Exception):
        mails = view._prepare_mails(
            datas, form_datas, get_root_directory(request), '2010', '1')

Example 142

Project: sider
Source File: test_set.py
View license
def test_intersection_update(session):
    """Sider Set.intersection_update / &= against plain iterables, Python
    sets, and other persisted Sets — for both string (Bulk) and integer
    (NInt) value types.

    Each assertion mutates set_/setx in place, so reset()/resetx() restores
    the fixture between cases. ``&=`` with a plain str/list must raise
    TypeError (only set-likes are accepted); intersecting a string set with
    an IntSet yields the empty set because value types never compare equal.
    """
    def reset():
        # Re-create the string-valued fixture set {'a','b','c'}.
        return session.set(key('test_set_intersection_update'), S('abc'), Set)
    set_ = reset()
    set2 = session.set(key('test_set_intersection_update2'), S('bcd'), Set)
    set3 = session.set(key('test_set_intersection_update3'), S('bef'), Set)
    set_.intersection_update('bcde')
    assert set_ == S('bc')
    reset()
    set_.intersection_update('bcde', 'cdef')
    assert set_ == S('c')
    reset()
    set_.intersection_update(S('bcde'))
    assert set_ == S('bc')
    reset()
    set_.intersection_update(S('bcde'), 'cdef')
    assert set_ == S('c')
    reset()
    set_.intersection_update(S('bcde'), S('cdef'))
    assert set_ == S('c')
    reset()
    set_.intersection_update(set2)
    assert set_ == S('bc')
    reset()
    set_.intersection_update(set2, set3)
    assert set_ == S('b')
    reset()
    set_.intersection_update(set2, set3, 'bcfg')
    assert set_ == S('b')
    reset()
    set_.intersection_update(set2, set3, 'acfg')
    assert set_ == S()
    reset()
    set_ &= S('bcd')
    assert set_ == S('bc')
    reset()
    set_ &= set2
    assert set_ == S('bc')
    reset()
    # In-place & only accepts set types, not an arbitrary iterable.
    with raises(TypeError):
        set_ &= 'cde'
    def resetx():
        # Re-create the integer-valued fixture set {1, 2, 3}.
        return session.set(key('test_setx_intersection_update'),
                           S([1, 2, 3]), IntSet)
    setx = resetx()
    sety = session.set(key('test_setx_intersection_update2'),
                       S([2, 3, 4]), IntSet)
    setz = session.set(key('test_setx_intersection_update3'),
                       S([1, 2, 5]), IntSet)
    setx.intersection_update([2, 3, 4])
    assert setx == S([2, 3])
    resetx()
    setx.intersection_update([2, 3, 4], [1, 2, 5])
    assert setx == S([2])
    resetx()
    setx.intersection_update(S([2, 3, 4]))
    assert setx == S([2, 3])
    resetx()
    setx.intersection_update(S([2, 3, 4]), [1, 2, 5])
    assert setx == S([2])
    resetx()
    setx.intersection_update(S([2, 3, 4]), S([1, 2, 5]))
    assert setx == S([2])
    resetx()
    setx.intersection_update(sety)
    assert setx == S([2, 3])
    resetx()
    setx.intersection_update(sety, setz)
    assert setx == S([2])
    resetx()
    setx.intersection_update(sety, setz, [1, 2, 5])
    assert setx == S([2])
    resetx()
    setx &= S([2, 3, 4])
    assert setx == S([2, 3])
    resetx()
    setx &= sety
    assert setx == S([2, 3])
    resetx()
    with raises(TypeError):
        setx &= [3, 4, 5]
    resetx()
    # Mismatched value types (Bulk vs NInt) intersect to the empty set.
    set_.intersection_update(setx)
    assert set_ == S([])
    resetx()
    set_.intersection_update(setx, sety)
    assert set_ == S([])
    resetx()
    set_.intersection_update(set2, setx, sety)
    assert set_ == S([])

Example 143

Project: gaffer
Source File: test_lookup.py
View license
def test_registry_add_job():
    """Register four nodes with a Registry and verify add_job bookkeeping.

    Checks that jobs are grouped into sessions by their 'session.name'
    prefix, that the same job name may be registered on different nodes but
    not twice on the same node (AlreadyRegistered), and that jobs()/
    find_job()/find_session() stay consistent as jobs are added.

    Returns the registry, the four connections, and the four nodes so other
    tests can build on this fixture.
    """
    r = Registry()
    # Four opaque connection handles, one per node.
    c1 = object()
    c2 = object()
    c3 = object()
    c4 = object()
    r.add_node(c1)
    r.identify(c1, "c1", "broadcast", 1.0)
    r.add_node(c2)
    r.identify(c2, "c2", "broadcast2", 1.0)
    r.add_node(c3)
    r.identify(c3, "c3", "broadcast3", 1.0)
    r.add_node(c4)
    r.identify(c4, "c4", "broadcast4", 1.0)

    n1 = r.get_node(c1)
    n2 = r.get_node(c2)
    n3 = r.get_node(c3)
    n4 = r.get_node(c4)

    # No jobs registered yet.
    assert r.sessions() == {}
    assert len(r.jobs()) == 0

    # First job: session 'a', job 'a.job1' on node n1.
    r.add_job(c1, 'a.job1')
    sessions = r.sessions()
    assert 'a' in sessions
    assert 'a.job1' in sessions['a']
    assert len(sessions['a']['a.job1']) == 1
    job = sessions['a']['a.job1'][0]
    assert isinstance(job, RemoteJob)
    assert job.name == 'a.job1'
    assert job.node is n1
    assert len(r.jobs()) == 1
    assert job == r.find_job('a.job1')[0]

    jobs = r.jobs()
    assert list(jobs) == ['a.job1']
    assert jobs['a.job1'][0] == job
    assert len(n1.sessions) == 1
    session = r.find_session('a')
    assert session[0].name == "a.job1"

    # Re-registering the same job on the same connection must fail.
    with pytest.raises(AlreadyRegistered):
        r.add_job(c1, 'a.job1')

    # The same job name on a *different* node is allowed.
    r.add_job(c2, 'a.job1')
    sessions = r.sessions()
    assert len(sessions['a']['a.job1']) == 2
    job = sessions['a']['a.job1'][1]
    assert isinstance(job, RemoteJob)
    assert job.name == 'a.job1'
    assert job.node is n2
    assert len(r.jobs()) == 1
    jobs = r.jobs()
    assert list(jobs) == ['a.job1']
    assert jobs['a.job1'][1] == job

    # Second job in the same session.
    r.add_job(c3, 'a.job2')
    sessions = r.sessions()
    assert len(sessions['a']['a.job1']) == 2
    assert len(sessions['a']['a.job2']) == 1
    job = sessions['a']['a.job2'][0]
    assert isinstance(job, RemoteJob)
    assert job.name == 'a.job2'
    assert job.node is n3
    assert len(r.jobs()) == 2
    jobs = r.jobs()
    assert list(jobs) == ['a.job1', 'a.job2']
    assert jobs['a.job2'][0] == job

    # A job in a new session 'b'.
    r.add_job(c4, 'b.job1')
    sessions = r.sessions()
    assert len(sessions) == 2
    assert len(sessions['b']['b.job1']) == 1
    job = sessions['b']['b.job1'][0]
    assert isinstance(job, RemoteJob)
    assert job.name == 'b.job1'
    assert job.node is n4
    assert len(r.jobs()) == 3
    jobs = r.jobs()
    assert list(jobs) == ['a.job1', 'a.job2', 'b.job1']

    assert jobs['a.job1'][0].node == n1
    assert jobs['a.job1'][1].node == n2
    assert jobs['a.job2'][0].node == n3
    assert jobs['b.job1'][0].node == n4
    # BUGFIX: the original returned c3 twice instead of c4; the parallel
    # n1..n4 list makes the intended tuple unambiguous.
    return r, c1, c2, c3, c4, n1, n2, n3, n4

Example 144

Project: sider
Source File: test_set.py
View license
def test_difference_update(session):
    """Sider Set.difference_update / -= against plain iterables, Python
    sets, and other persisted Sets — for both string (Bulk) and integer
    (NInt) value types.

    Each assertion mutates set_/setx in place, so reset()/resetx() restores
    the fixture between cases. ``-=`` with a plain str/list must raise
    TypeError; differencing across mismatched value types (Bulk vs NInt)
    removes nothing.
    """
    def reset():
        # Re-create the string-valued fixture set {'a','b','c','d'}.
        return session.set(key('test_set_difference_update'), S('abcd'), Set)
    set_ = reset()
    set2 = session.set(key('test_set_difference_update2'), S('bde1'), Set)
    set3 = session.set(key('test_set_difference_update3'), S('az'), Set)
    # No arguments: a no-op.
    set_.difference_update()
    assert set_ == S('abcd')
    reset()
    set_.difference_update(set2)
    assert set_ == S('ac')
    reset()
    set_.difference_update(set2, set3)
    assert set_ == S('c')
    reset()
    set_.difference_update(set2, 'az')
    assert set_ == S('c')
    reset()
    set_.difference_update(set2, S('az'))
    assert set_ == S('c')
    reset()
    set_.difference_update('bdef')
    assert set_ == S('ac')
    reset()
    set_.difference_update('bdef', set3)
    assert set_ == S('c')
    reset()
    set_.difference_update('bdef', 'az')
    assert set_ == S('c')
    reset()
    set_.difference_update('bdef', S('az'))
    assert set_ == S('c')
    reset()
    set_.difference_update(S('bdef'))
    assert set_ == S('ac')
    reset()
    set_.difference_update(S('bdef'), set3)
    assert set_ == S('c')
    reset()
    set_.difference_update(S('bdef'), 'az')
    assert set_ == S('c')
    reset()
    set_.difference_update(S('bdef'), S('az'))
    assert set_ == S('c')
    reset()
    set_ -= set2
    assert set_ == S('ac')
    reset()
    set_ -= S('bdef')
    assert set_ == S('ac')
    reset()
    # In-place - only accepts set types, not an arbitrary iterable.
    with raises(TypeError):
        set_ -= 'bdef'
    def resetx():
        # Re-create the integer-valued fixture set {1, 2, 3, 4}.
        return session.set(key('test_setx_difference_update'),
                           S([1, 2, 3, 4]), IntSet)
    setx = resetx()
    sety = session.set(key('test_setx_difference_update2'),
                       S([2, 4, 5, 6]), IntSet)
    setz = session.set(key('test_setx_difference_update3'),
                       S([1, 7]), IntSet)
    setx.difference_update()
    assert setx == S([1, 2, 3, 4])
    resetx()
    setx.difference_update(sety)
    assert setx == S([1, 3])
    resetx()
    setx.difference_update(sety, setz)
    assert setx == S([3])
    resetx()
    setx.difference_update(sety, [1, 7])
    assert setx == S([3])
    resetx()
    setx.difference_update(sety, S([1, 7]))
    assert setx == S([3])
    resetx()
    setx.difference_update([2, 4, 5, 6])
    assert setx == S([1, 3])
    resetx()
    setx.difference_update([2, 4, 5, 6], setz)
    assert setx == S([3])
    resetx()
    setx.difference_update([2, 4, 5, 6], [1, 7])
    assert setx == S([3])
    resetx()
    setx.difference_update([2, 4, 5, 6], S([1, 7]))
    assert setx == S([3])
    resetx()
    setx.difference_update(S([2, 4, 5, 6]))
    assert setx == S([1, 3])
    resetx()
    setx.difference_update(S([2, 4, 5, 6]), [1, 7])
    assert setx == S([3])
    resetx()
    setx.difference_update(S([2, 4, 5, 6]), S([1, 7]))
    assert setx == S([3])
    resetx()
    # Only the genuine int 3 matches; string '1'/'2' are different values.
    setx.difference_update(['1', '2', 3])
    assert setx == S([1, 2, 4])
    resetx()
    setx -= sety
    assert setx == S([1, 3])
    resetx()
    setx -= S([2, 4, 5, 6])
    assert setx == S([1, 3])
    resetx()
    with raises(TypeError):
        setx - [2, 4, 5, 6]
    # mismatched value_type NInt vs. Bulk:
    reset()
    resetx()
    set2.difference_update(setx)
    assert set2 == S('bde1')
    reset()
    set2.difference_update(setx, setz)
    assert set2 == S('bde1')
    reset()
    resetx()
    setx.difference_update(set2)
    assert setx == S([1, 2, 3, 4])
    resetx()
    setx.difference_update(set2, set3)
    assert setx == S([1, 2, 3, 4])
Example 145

Project: bayeslite
Source File: test_nig_normal.py
View license
def test_nig_normal_latent_smoke():
    """Smoke-test BQL queries that touch a latent variable.

    Population ``p`` has a single manifest variable ``x``.  Generator
    ``g1`` additionally models a latent variable ``xe`` (declared as
    ``deviation(x)``), which generator ``g0`` does not.  Any query that
    mentions ``xe`` must therefore fail unless it is explicitly
    ``MODELLED BY g1``.
    """
    with bayesdb_open(':memory:') as bdb:
        bayesdb_register_metamodel(bdb, NIGNormalMetamodel())
        bdb.sql_execute('create table t(x)')
        # xrange: this test targets Python 2.
        for x in xrange(100):
            bdb.sql_execute('insert into t(x) values(?)', (x,))
        bdb.execute('create population p for t(x numerical)')
        bdb.execute('create generator g0 for p using nig_normal')
        bdb.execute('''
            create generator g1 for p using nig_normal(xe deviation(x))
        ''')
        # One model and one iteration each -- just enough for a smoke test.
        bdb.execute('initialize 1 model for g0')
        bdb.execute('analyze g0 for 1 iteration wait')
        bdb.execute('initialize 1 model for g1')
        bdb.execute('analyze g1 for 1 iteration wait')

        # PROBABILITY OF x = v
        bdb.execute('estimate probability of x = 50 within p').fetchall()
        # xe is invisible to the population at large and to g0.
        with pytest.raises(BQLError):
            bdb.execute('estimate probability of xe = 1 within p').fetchall()
        with pytest.raises(BQLError):
            bdb.execute('''
                estimate probability of xe = 1 within p modelled by g0
            ''').fetchall()
        bdb.execute('''
            estimate probability of xe = 1 within p modelled by g1
        ''').fetchall()

        # PREDICTIVE PROBABILITY OF x
        bdb.execute('estimate predictive probability of x from p').fetchall()
        with pytest.raises(ValueError):
            bdb.execute(
                'estimate predictive probability of xe from p').fetchall()
        with pytest.raises(ValueError):
            bdb.execute('''
                estimate predictive probability of xe from p modelled by g0
            ''').fetchall()
        # Even under g1 a latent variable has no predictive probability:
        # the query runs, but yields None for every row.
        for r, p_xe in bdb.execute('''
            estimate rowid, predictive probability of xe from p modelled by g1
        '''):
            assert p_xe is None, 'rowid %r p(xe) %r' % (r, p_xe)

        # INFER/PREDICT
        bdb.execute(
            'INFER EXPLICIT PREDICT x CONFIDENCE x_c FROM p').fetchall()
        with pytest.raises(ValueError):
            bdb.execute(
                'INFER EXPLICIT PREDICT xe CONFIDENCE xe_c FROM p').fetchall()
        with pytest.raises(ValueError):
            bdb.execute('''
                INFER EXPLICIT PREDICT xe CONFIDENCE xe_c FROM p
                    MODELLED BY g0
            ''').fetchall()
        bdb.execute('''
            INFER EXPLICIT PREDICT xe CONFIDENCE xe_c FROM p
                MODELLED BY g1
        ''').fetchall()

        # SIMULATE x
        bdb.execute('simulate x from p limit 1').fetchall()
        with pytest.raises(BQLError):
            bdb.execute('simulate x, xe from p limit 1').fetchall()
        with pytest.raises(BQLError):
            bdb.execute(
                'simulate x, xe from p modelled by g0 limit 1').fetchall()
        bdb.execute('simulate x, xe from p modelled by g1 limit 1').fetchall()

        # SIMILARITY: allowed generally, and with respect to manifest
        # variables, but not with respect to xe under g0.
        assert 100 == len(bdb.execute('''
            estimate similarity from pairwise p limit 100
        ''').fetchall())
        assert 1 == len(bdb.execute('''
            estimate similarity from pairwise p modelled by g0 limit 1
        ''').fetchall())
        assert 1 == len(bdb.execute('''
            estimate similarity with respect to (x)
                from pairwise p modelled by g0 limit 1
        ''').fetchall())
        with pytest.raises(BQLError):
            assert 1 == len(bdb.execute('''
                estimate similarity with respect to (xe)
                    from pairwise p modelled by g0 limit 1
            ''').fetchall())
        with pytest.raises(BQLError):
            assert 1 == len(bdb.execute('''
                estimate similarity with respect to (x, xe)
                    from pairwise p modelled by g0 limit 1
            ''').fetchall())
        assert 1 == len(bdb.execute('''
            estimate similarity from pairwise p modelled by g1 limit 1
        ''').fetchall())
        assert 1 == len(bdb.execute('''
            estimate similarity with respect to (xe)
                from pairwise p modelled by g1 limit 1
        ''').fetchall())
        assert 1 == len(bdb.execute('''
            estimate similarity with respect to (x, xe)
                from pairwise p modelled by g1 limit 1
        ''').fetchall())

        # Tear down everything the test created.
        bdb.execute('drop models from g0')
        bdb.execute('drop generator g0')
        bdb.execute('drop models from g1')
        bdb.execute('drop generator g1')
        bdb.execute('drop population p')
        bdb.execute('drop table t')

Example 146

View license
def test_tonality():
    """Run the Tonality analysis workflow on a pure 1 kHz sine tone."""
    sample_rate = 10025.0
    n_samples = int(sample_rate * 60.0)
    t = np.arange(n_samples) / sample_rate

    sig = Signal(np.sin(2.0 * np.pi * 1000.0 * t), sample_rate)
    tonality = Tonality(sig, sig.fs)

    # These must already work before any analysis has been run.
    tonality.spectrum
    tonality.plot_spectrum()
    tonality.frequency_resolution
    tonality.effective_analysis_bandwidth

    # Without analysis results the overview cannot be rendered.
    with pytest.raises(ValueError):
        print(tonality.overview())
    tonality.results_as_dataframe()

    # Nothing detected yet.
    for detected in (tonality.noise_pauses, tonality.tones,
                     tonality.critical_bands):
        assert len(list(detected)) == 0

    # Run the actual analysis.
    tonality.determine_noise_pauses().analyse()

    # The single sine tone yields exactly one of each.
    for detected in (tonality.noise_pauses, tonality.tones,
                     tonality.critical_bands):
        assert len(list(detected)) == 1

    tonality.critical_band_at(900.0)
    tonality.dominant_tone
    print(tonality.overview())
    tonality.results_as_dataframe()
    tonality.plot_results()


#Target = collections.namedtuple('Target', ['tonal_level', 
                                           #'masking_noise_level', 
                                           #'tonal_audibility', 
                                           #'adjustment'])
#TARGET_DATA = {
    #'example1': Target(46.7, 37.3, 13.7, 6.0),
    #'example2': Target(54.1, 45.2, 11.1, 6.0),
    #'example3': Target(54.6, 45.5, 10.6, 6.0),
    ##'example4': Target(53.6, 45.5, 10.7, 6.0), # Time-varying test.
    #}

#@pytest.fixture(params=TARGET_DATA.keys())
#def example(request):
    #return request.param

#def test_verify_standard_examples(example):
    #"""Verify against the examples shown in the standard.
    
       
    #.. note:: The FTP server is slow. Be patient.
    
    #For the examples in the standard:
    #- Number of spectra: 350
    #- Signal duration: 2 minutes
    #- Window: Hanning
    #- Averaging: Linear
    #- Effective analysis bandwidth: 4.39 Hz
    
    #The sample frequency is 44.1 kHz. Dividing the two-minute signal into 
    #blocks accordingly results in 360 spectra and an effective analysis 
    #bandwidth of 4.5 Hz, which is very close to the example.

    #"""
    #from ftplib import FTP
    #import tempfile
    #import os
    
    #FOLDER = 'THP wave files/'
    #FILES = {'calibration' : 'cal. 93,8 dB.wav',
             #'example1': 'sam 03 eks 1.wav',
             #'example2': 'sam 03 eks 2.wav',
             #'example3': 'sam 03 eks 3.wav',
             #'example4': 'sam 03 eks 4.wav',
             #}
    #CHANNEL = 0
    
    #fs = 44100.0
    #duration = 120.0
    #samples = duration*fs
    #spectra = 350
    #bins = samples // spectra
    
    ## Obtain data from Delta
    ## https://noiselabdk.wordpress.com/demo-download/
    ##hostname = 'ftp.delta.dk'
    ##remote_data = 'Demo Files/Tone wave files.zip'
    ##username = 'nlpublic'
    ##password = 'noiselab'
    ##local_data = 'data.zip'

    ##with tempfile.TemporaryDirectory() as directory:
        ##ftp = FTP(hostname, username, password)
        ##with open(local_data, 'wb') as target_file:
            ##ftp.retrbinary('RETR %s' % remote_data, target_file.write)

    

    #calibration_at = 93.8 # decibel
    #signal = Signal.from_wav(os.path.join(FOLDER, FILES['calibration']))[CHANNEL]
    #calibration_factor = 10.0**((calibration_at - signal.leq())/20.0)
    
    #target = TARGET_DATA[example]
    #filename = os.path.join(FOLDER, FILES[example])
    
    #signal = Signal.from_wav(filename)[CHANNEL].pick(0.0, 60.0).weigh('A') * calibration_factor
    #tonality = Tonality(signal, signal.fs, bins=bins)
    #tonality.determine_noise_pauses().analyse()        

    #cb = tonality.dominant_tone.critical_band
    #print(tonality.results())
    #assert abs(tonality.dominant_tone.tone_level - target.tonal_level) < 1.0
    #assert abs(cb.masking_noise_level - target.masking_noise_level) < 1.0
    #assert abs(cb.tonal_audibility - target.tonal_audibility) < 1.0
    #assert abs(cb.adjustment - target.adjustment) < 1.0
    

Example 147

Project: sqlalchemy-imageattach
Source File: s3_test.py
View license
@mark.parametrize(('underlying_prefix', 'overriding_prefix'), [
    ('under', 'over'),
    ('', '')
])
@mark.slow
def test_s3_sandbox_store(underlying_prefix, overriding_prefix,
                          s3_sandbox_store_getter):
    """Exercise the sandbox store layered on two S3 stores.

    The sandbox reads through to the *underlying* store but writes only
    to the *overriding* store: images already present underneath stay
    visible through the sandbox, new stores go to the overriding store
    only, and deletes must never touch the underlying data.
    """
    s3 = s3_sandbox_store_getter(underlying_prefix=underlying_prefix,
                                 overriding_prefix=overriding_prefix)
    under = s3.underlying
    over = s3.overriding
    # Derive a fresh id range per run so runs against the shared bucket
    # do not collide with each other.
    id_offset = uuid.uuid1().int
    if id_offset % 2:  # force id_offset to be even
        id_offset -= 1
    if not underlying_prefix:
        # NOTE(review): the unprefixed parametrization negates the ids,
        # presumably to keep its range disjoint from the prefixed run.
        id_offset *= -1
    # Store a fixture image for underlying store
    under_id = id_offset + 1
    under_image = TestingImage(thing_id=under_id, width=405, height=640,
                               mimetype='image/jpeg', original=True,
                               created_at=utcnow())
    image_path = os.path.join(sample_images_dir, 'iu.jpg')
    with open(image_path, 'rb') as image_file:
        expected_data = image_file.read()
        image_file.seek(0)
        under.store(under_image, image_file)
    # Underlying images have to be logically shown through sandbox
    with s3.open(under_image) as actual:
        actual_data = actual.read()
    assert expected_data == actual_data
    expected_url = under.get_url('testing', under_id, 405, 640, 'image/jpeg')
    actual_url = s3.locate(under_image)
    assert remove_query(expected_url) == remove_query(actual_url)
    # Store an image to sandbox store
    over_id = id_offset + 2
    image = TestingImage(thing_id=over_id, width=405, height=640,
                         mimetype='image/jpeg', original=True,
                         created_at=utcnow())
    image_path = os.path.join(sample_images_dir, 'iu.jpg')
    with open(image_path, 'rb') as image_file:
        s3.store(image, image_file)
    # Image has to be logically stored
    with s3.open(image) as actual:
        actual_data = actual.read()
    assert expected_data == actual_data
    expected_url = over.get_url('testing', over_id, 405, 640, 'image/jpeg')
    actual_url = s3.locate(image)
    assert remove_query(expected_url) == remove_query(actual_url)
    # Image has to be physically stored into the overriding store
    with over.open(image) as actual:
        actual_data = actual.read()
    assert expected_data == actual_data
    expected_url = over.get_url('testing', over_id, 405, 640, 'image/jpeg')
    actual_url = s3.locate(image)
    assert remove_query(expected_url) == remove_query(actual_url)
    # Images must not be physically stored into the underlying store
    with raises(IOError):
        under.open(image)
    # Deletion must not touch underlying
    s3.delete(under_image)
    with raises(IOError):
        s3.open(under_image)
    with under.open(under_image) as actual:
        actual_data = actual.read()
    assert expected_data == actual_data
    expected_url = over.get_url('testing', under_id, 405, 640, 'image/jpeg')
    actual_url = s3.locate(under_image)
    assert remove_query(expected_url) == remove_query(actual_url)
    # Clean up fixtures
    if underlying_prefix and overriding_prefix:
        no_prefix = s3_sandbox_store_getter()
        with raises(IOError):
            no_prefix.open(image)
    under.delete(under_image)

Example 148

View license
def test_agenttools_IterationAgent_depth_first_05():
    r'''Uncapped depth-first search with duplicates allowed.
    '''

    # LEFT-TO-RIGHT #

    iterator = iterate(staff[2]).depth_first(
        capped=False,
        unique=False,
        )

    assert next(iterator) is staff[2]
    assert next(iterator) is staff[2][0]
    assert next(iterator) is staff[2][0][0]
    assert next(iterator) is staff[2][0]
    assert next(iterator) is staff[2][0][1]
    assert next(iterator) is staff[2][0]
    assert next(iterator) is staff[2]
    assert next(iterator) is staff[2][1]
    assert next(iterator) is staff[2][1][0]
    assert next(iterator) is staff[2][1]
    assert next(iterator) is staff[2][1][1]
    assert next(iterator) is staff[2][1]
    assert next(iterator) is staff[2]
    assert next(iterator) is staff
    assert next(iterator) is staff[3]
    assert next(iterator) is staff
    assert next(iterator) is staff[4]
    assert next(iterator) is staff
    assert pytest.raises(StopIteration, 'next(iterator)')

    r'''
    Container(Voice(d'8, ef'8), Voice(e'8, f'8))
    Voice(d'8, ef'8)
    d'8
    Voice(d'8, ef'8)
    ef'8
    Voice(d'8, ef'8)
    Container(Voice(d'8, ef'8), Voice(e'8, f'8))
    Voice(e'8, f'8)
    e'8
    Voice(e'8, f'8)
    f'8
    Voice(e'8, f'8)
    Container(Voice(d'8, ef'8), Voice(e'8, f'8))
    Staff{5}
    fs'8
    Staff{5}
    iterator'8
    Staff{5}
    '''

    # RIGHT-TO-LEFT #

    iterator = iterate(staff[2]).depth_first(
        capped=False,
        direction=Right,
        unique=False,
        )

    assert next(iterator) is staff[2]
    assert next(iterator) is staff[2][1]
    assert next(iterator) is staff[2][1][1]
    assert next(iterator) is staff[2][1]
    assert next(iterator) is staff[2][1][0]
    assert next(iterator) is staff[2][1]
    assert next(iterator) is staff[2]
    assert next(iterator) is staff[2][0]
    assert next(iterator) is staff[2][0][1]
    assert next(iterator) is staff[2][0]
    assert next(iterator) is staff[2][0][0]
    assert next(iterator) is staff[2][0]
    assert next(iterator) is staff[2]
    assert next(iterator) is staff
    assert next(iterator) is staff[1]
    assert next(iterator) is staff
    assert next(iterator) is staff[0]
    assert next(iterator) is staff
    assert pytest.raises(StopIteration, 'next(iterator)')

    r'''

Example 149

Project: dask
Source File: test_arithmetics_reduction.py
View license
def test_frame_series_arithmetic_methods():
    """Check dask's flexible arithmetic methods against pandas.

    Every method (``add``/``sub``/.../``pow`` and their reflected
    ``r*`` variants) is applied to dask/pandas operand pairs and the
    results compared with ``assert_eq``.
    """
    pdf1 = pd.DataFrame({'A': np.arange(10),
                         'B': [np.nan, 1, 2, 3, 4] * 2,
                         'C': [np.nan] * 10,
                         'D': np.arange(10)},
                        index=list('abcdefghij'), columns=list('ABCD'))
    pdf2 = pd.DataFrame(np.random.randn(10, 4),
                        index=list('abcdefghjk'), columns=list('ABCX'))
    ps1 = pdf1.A
    ps2 = pdf2.A
    ps3 = pd.Series(np.random.randn(10), index=list('ABCDXabcde'))

    ddf1 = dd.from_pandas(pdf1, 2)
    ddf2 = dd.from_pandas(pdf2, 2)
    ds1 = ddf1.A
    ds2 = ddf2.A

    s = dd.core.Scalar({('s', 0): 4}, 's', 'i8')

    # All flexible methods, forward then reflected, in a fixed order.
    forward = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv',
               'mod', 'pow']
    methods = forward + ['r' + name for name in forward]

    def fill_for(name):
        # floordiv is exercised with fill_value=1, everything else with 0.
        return 1 if name.endswith('floordiv') else 0

    pairs = [(ddf1, ddf2, pdf1, pdf2), (ds1, ds2, ps1, ps2),
             (ddf1.repartition(['a', 'f', 'j']), ddf2, pdf1, pdf2),
             (ds1.repartition(['a', 'b', 'f', 'j']), ds2, ps1, ps2),
             (ddf1, ddf2.repartition(['a', 'k']), pdf1, pdf2),
             (ds1, ds2.repartition(['a', 'b', 'd', 'h', 'k']), ps1, ps2),
             (ddf1, 3, pdf1, 3), (ds1, 3, ps1, 3),
             (ddf1, s, pdf1, 4), (ds1, s, ps1, 4)]
    for l, r, el, er in pairs:
        # Repartitioned operands must still hold the original data.
        assert_eq(l, el)
        assert_eq(r, er)
        for name in methods:
            fv = fill_for(name)
            assert_eq(getattr(l, name)(r, fill_value=fv),
                      getattr(el, name)(er, fill_value=fv))

    for l, r, el, er in [(ddf1, ds2, pdf1, ps2), (ddf1, ddf2.X, pdf1, pdf2.X)]:
        assert_eq(l, el)
        assert_eq(r, er)
        # Adding a Series to each column requires axis=0;
        # axis=1 (add to each row) is not supported.
        for name in methods:
            assert_eq(getattr(l, name)(r, axis=0),
                      getattr(el, name)(er, axis=0))
        pytest.raises(ValueError, lambda: l.add(r, axis=1))

    for l, r, el, er in [(ddf1, pdf2, pdf1, pdf2), (ddf1, ps3, pdf1, ps3)]:
        assert_eq(l, el)
        assert_eq(r, er)
        # Against plain pandas operands every axis spelling is valid.
        for axis in [0, 1, 'index', 'columns']:
            for name in methods:
                assert_eq(getattr(l, name)(r, axis=axis),
                          getattr(el, name)(er, axis=axis))
Example 150

Project: datreant.core
Source File: test_collections.py
View license
        def test_tags_set_behavior(self, collection, tmpdir):
            with tmpdir.as_cwd():

                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('pine')
                t3 = dtr.Treant('juniper')
                t1.tags.add({'tree', 'new jersey', 'deciduous'})
                t2.tags.add({'tree', 'new york', 'evergreen'})
                t3.tags.add({'shrub', 'new york', 'evergreen'})
                collection.add(t1, t2, t3)
                trees = dtr.Bundle('maple', 'pine')
                evergreens = dtr.Bundle('pine', 'juniper')
                tags = collection.tags

                assert len(tags.any) == 6

                # test equality: __eq__ (==)
                assert t1.tags == {'tree', 'new jersey', 'deciduous'}
                assert t2.tags == {'tree', 'new york', 'evergreen'}
                assert t3.tags == {'shrub', 'new york', 'evergreen'}

                # test subset: __lt__ (<)
                assert not t1.tags < {'tree', 'new jersey', 'deciduous'}
                assert tags < {'tree', 'new jersey', 'deciduous'}
                assert t1.tags < tags.any
                assert t2.tags < tags.any
                assert t3.tags < tags.any

                # test difference: __sub__ (-)
                assert t1.tags - {'tree'} == {'new jersey', 'deciduous'}
                assert trees.tags - {'tree'} == set()

                # test right difference: __rsub__ (-)
                evergreen_ny_shrub = {'evergreen', 'new york', 'shrub'}
                dec_nj_sh = {'deciduous', 'new jersey', 'shrub'}
                assert tags.any - t1.tags == evergreen_ny_shrub
                assert tags.any - evergreens.tags - trees.tags == dec_nj_sh
                assert {'tree'} - trees.tags == set()

                # test union: __or__ (|)
                evergreen_ny_shrub = {'evergreen', 'new york', 'shrub'}
                assert evergreens.tags | t3.tags == evergreen_ny_shrub
                assert t1.tags | t2.tags | t3.tags == tags.any

                # test right union: __ror__ (|)
                assert {'shrub'} | evergreens.tags == evergreen_ny_shrub
                assert t3.tags | {'tree'} == {'tree'} | t3.tags

                # test intersection: __and__ (&)
                evergreen_ny = {'evergreen', 'new york'}
                assert evergreens.tags & t3.tags == evergreen_ny
                assert t1.tags & t2.tags & t3.tags == tags.all

                # test right intersection: __rand__ (&)
                assert evergreen_ny_shrub & evergreens.tags == evergreen_ny
                assert t3.tags & {'shrub'} == {'shrub'} & t3.tags

                # test symmetric difference: __xor__ (^)
                evergreen_ny_tree = {'evergreen', 'new york', 'tree'}
                assert trees.tags ^ evergreens.tags == evergreen_ny_tree
                assert evergreens.tags ^ t3.tags == {'shrub'}
                assert t1.tags ^ t2.tags ^ t3.tags == dec_nj_sh

                # test right symmetric difference: __rxor__ (^)
                assert {'new york'} ^ evergreens.tags == {'evergreen'}
                assert {'shrub'} ^ trees.tags == t2.tags ^ t3.tags

                # type_error_msg = "Operands must be AggTags, Tags, or a set."
                with pytest.raises(TypeError) as e:
                    ['tree'] == trees.tags
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    trees.tags < ['tree']
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    ['tree'] - trees.tags
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    trees.tags - ['tree']
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    ['tree'] | trees.tags
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    trees.tags | ['tree']
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    ['tree'] & trees.tags
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    trees.tags & ['tree']
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    ['tree'] ^ trees.tags
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    trees.tags ^ ['tree']