pytest.raises

Here are examples of the Python API pytest.raises, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

199 Examples
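
Before the project examples, here is a minimal standalone sketch of the common pytest.raises calling patterns that appear below (context manager, ExceptionInfo capture, match=, and the callable form). The divide helper and the expected messages are illustrative assumptions, not taken from any of the projects; match= requires a reasonably recent pytest (3.1+).

import pytest


def divide(a, b):
    # Hypothetical helper, used only to trigger an exception.
    return a / b


def test_pytest_raises_patterns():
    # Context-manager form: the block must raise the given exception type.
    with pytest.raises(ZeroDivisionError):
        divide(1, 0)

    # Capture the exception as an ExceptionInfo for further assertions.
    with pytest.raises(ZeroDivisionError) as excinfo:
        divide(1, 0)
    assert 'division' in str(excinfo.value)

    # match= checks str(exception) against a regular expression.
    with pytest.raises(ValueError, match='invalid literal'):
        int('not a number')

    # Callable form, as used in several examples below:
    # pytest.raises(ExpectedException, func, *args, **kwargs)
    pytest.raises(ZeroDivisionError, divide, 1, 0)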

Example 51

Project: bayeslite
Source File: test_read_csv.py
def test_read_csv():
    with bayeslite.bayesdb_open(builtin_metamodels=False) as bdb:

        f = StringIO.StringIO(csv_data)
        with pytest.raises(ValueError):
            # Table must already exist for create=False.
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=False, create=False,
                ifnotexists=False)

        f = StringIO.StringIO(csv_data)
        with pytest.raises(ValueError):
            # Must pass create=True for ifnotexists=True.
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=False, create=False,
                ifnotexists=True)

        f = StringIO.StringIO(csv_data)
        with pytest.raises(ValueError):
            # Must pass create=False for header=False.
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=False, create=True,
                ifnotexists=False)

        f = StringIO.StringIO(csv_data)
        with pytest.raises(ValueError):
            # Must pass create=False for header=False.
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=False, create=True,
                ifnotexists=True)

        f = StringIO.StringIO(csv_hdrdata)
        with pytest.raises(ValueError):
            # Table must already exist for create=False.
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=False,
                ifnotexists=False)

        f = StringIO.StringIO(csv_hdrdata)
        with pytest.raises(ValueError):
            # Must pass create=True for ifnotexists=True.
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=False,
                ifnotexists=True)

        f = StringIO.StringIO(csv_hdrdata)
        with pytest.raises(ValueError):
            with bdb.savepoint():
                # Table must not exist if ifnotexists=False.
                bdb.sql_execute('CREATE TABLE t(x)')
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True, ifnotexists=False)
        with pytest.raises(IOError):
            # Table must have no empty values in header.
            csv_hdrdata_prime = csv_hdrdata[1:]
            f = StringIO.StringIO(csv_hdrdata_prime)
            with bdb.savepoint():
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True, ifnotexists=False)

        f = StringIO.StringIO(csv_hdrdata)
        bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True,
            ifnotexists=False)
        data = bdb.sql_execute('SELECT * FROM t').fetchall()
        assert data == [
            # XXX Would be nice if the NaN could actually be that, or
            # at least None/NULL.
            (1,2,3,'foo','bar',u'nan',u'',u'quagga'),
            (4,5,6,'baz','quux',42.0,u'',u'eland'),
            (7,8,6,'zot','mumble',87.0,u'zoot',u'caribou'),
        ]

        f = StringIO.StringIO(csv_hdr)
        bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True,
            ifnotexists=True)
        assert bdb.sql_execute('SELECT * FROM t').fetchall() == data
        assert cursor_value(bdb.sql_execute('SELECT sql FROM sqlite_master'
                    ' WHERE name = ?', ('t',))) == \
            'CREATE TABLE "t"' \
            '("a" NUMERIC,"b" NUMERIC,"c" NUMERIC,"name" NUMERIC,' \
            '"nick" NUMERIC,"age" NUMERIC,"muppet" NUMERIC,"animal" NUMERIC)'

        f = StringIO.StringIO(csv_data)
        bayeslite.bayesdb_read_csv(bdb, 't', f, header=False, create=False,
            ifnotexists=False)
        assert bdb.sql_execute('SELECT * FROM t').fetchall() == data + data

        f = StringIO.StringIO(csv_hdrdata)
        bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=False,
            ifnotexists=False)
        assert bdb.sql_execute('SELECT * FROM t').fetchall() == \
            data + data + data
        with tempfile.NamedTemporaryFile(prefix='bayeslite') as temp:
            with open(temp.name, 'w') as f:
                f.write(csv_hdrdata)
            bayeslite.bayesdb_read_csv_file(bdb, 't', temp.name, header=True,
                create=False, ifnotexists=False)
        assert bdb.sql_execute('SELECT * FROM t').fetchall() == \
            data + data + data + data

        # Test the BQL CREATE TABLE FROM <csv-file> syntax.
        f = StringIO.StringIO(csv_hdrdata)
        with tempfile.NamedTemporaryFile(prefix='bayeslite') as temp:
            with open(temp.name, 'w') as f:
                f.write(csv_hdrdata)
            bdb.execute('CREATE TABLE t2 FROM \'%s\'' % (temp.name,))
            assert bdb.sql_execute('SELECT * FROM t2').fetchall() == data

        # Trying to read a csv with an empty column name should fail.
        csv_header_corrupt = csv_hdr.replace('a,b',',')
        csv_hdrdata_corrupt = csv_header_corrupt + csv_data
        with tempfile.NamedTemporaryFile(prefix='bayeslite') as temp:
            with open(temp.name, 'w') as f:
                f.write(csv_hdrdata_corrupt)
            with pytest.raises(IOError):
                bayeslite.bayesdb_read_csv_file(
                    bdb, 't3', temp.name, header=True, create=True)

Example 52

Project: crosscat
Source File: test_pred_prob.py
def test_predictive_probability_unobserved(seed=0):
    # This function tests the predictive probability for the joint distribution.
    # Throughout, we will check that the result is the same for the joint and
    # simple calls.
    T, M_r, M_c, X_L, X_D, engine = quick_le(seed)

    # Hypothetical column number should throw an error.
    Q = [(N_ROWS, 1, 1.5), (N_ROWS, 10, 2)]
    Y = []
    with pytest.raises(ValueError):
        vals = engine.predictive_probability(M_c, X_L, X_D, Y, Q)

    # Inconsistent row numbers should throw an error.
    Q = [(N_ROWS, 1, 1.5), (N_ROWS-1, 10, 2)]
    Y = []
    with pytest.raises(ValueError):
        vals = engine.predictive_probability(M_c, X_L, X_D, Y, Q)

    # Duplicate column numbers should throw an error.
    Q = [(N_ROWS, 1, 1.5), (N_ROWS, 1, 2)]
    Y = []
    with pytest.raises(ValueError):
        val = engine.predictive_probability(M_c, X_L, X_D, Y, Q)

    # Different row numbers should throw an error.
    Q = [(N_ROWS, 0, 1.5), (N_ROWS+1, 1, 2)]
    Y = [(N_ROWS, 1, 1.5), (N_ROWS, 2, 3)]
    with pytest.raises(Exception):
        val = engine.predictive_probability(M_c, X_L, X_D, Y, Q[0])

    # Inconsistent with constraints should be negative infinity.
    Q = [(N_ROWS, 1, 1.5), (N_ROWS, 0, 1.3)]
    Y = [(N_ROWS, 1, 1.6)]
    val = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    assert val == -float('inf')
    assert isinstance(val, float)

    # Consistent with constraints should be log(1) == 0.
    Q = [(N_ROWS, 0, 1.3)]
    Y = [(N_ROWS, 0, 1.3)]
    val = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    assert val == 0

    # Consistent with constraints should not impact other queries.
    Q = [(N_ROWS, 1, 1.5), (N_ROWS, 0, 1.3)]
    Y = [(N_ROWS, 1, 1.5), (N_ROWS, 2, 3)]
    val_0 = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    val_1 = engine.predictive_probability(M_c, X_L, X_D, Y, Q[1:])
    assert val_0 == val_1

    # Predictive and simple should be the same in univariate case (cont).
    Q = [(N_ROWS, 0, 0.5)]
    Y = [(0, 0, 1), (N_ROWS//2, 4, 5), (N_ROWS, 1, 0.5), (N_ROWS+1, 0, 1.2)]
    val_0 = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    val_1 = engine.simple_predictive_probability(M_c, X_L, X_D, Y, Q)
    assert val_0 == val_1

    # Predictive and simple should be the same in univariate case (disc).
    Q = [(N_ROWS, 2, 1)]
    Y = [(0, 0, 1), (N_ROWS//2, 4, 5), (N_ROWS, 1, 0.5), (N_ROWS+1, 0, 1.2)]
    val_0 = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    val_1 = engine.simple_predictive_probability(M_c, X_L, X_D, Y, Q)
    assert val_0 == val_1

    # Do some full joint queries, all on the same row.
    Q = [(N_ROWS, 3, 4), (N_ROWS, 4, 1.3)]
    Y = [(N_ROWS, 0, 1), (N_ROWS, 1, -0.7), (N_ROWS, 2, 3)]
    val = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    assert isinstance(val, float)

    Q = [(N_ROWS, 0, 1), (N_ROWS, 1, -0.7), (N_ROWS, 2, 3)]
    Y = [(N_ROWS, 3, 4), (N_ROWS, 4, 1.3)]
    val = engine.predictive_probability(M_c, X_L, X_D, Y, Q)
    assert isinstance(val, float)

Example 53

Project: schematics
Source File: test_datastructures.py
def test_context():

    class FooContext(Context):
        _fields = ('x', 'y', 'z')

    assert bool(FooContext()) is True

    c = FooContext(x=1, y=2)
    assert c.__dict__ == dict(x=1, y=2)

    with pytest.raises(ValueError):
        FooContext(a=1)

    with pytest.raises(Exception):
        c.x = 0

    c.z = 3
    assert c.__dict__ == dict(x=1, y=2, z=3)

    c = FooContext._new(1, 2, 3)
    assert c.__dict__ == dict(x=1, y=2, z=3)

    with pytest.raises(TypeError):
        FooContext._new(1, 2, 3, 4)

    d = c._branch()
    assert d is c

    d = c._branch(x=None)
    assert d is c

    d = c._branch(x=0)
    assert d is not c
    assert d.__dict__ == dict(x=0, y=2, z=3)

    e = d._branch(x=0)
    assert e is d

    c = FooContext(x=1, y=2)
    c._setdefaults(dict(x=9, z=9))
    assert c.__dict__ == dict(x=1, y=2, z=9)

    c = FooContext(x=1, y=2)
    c._setdefaults(FooContext(x=9, z=9))
    assert c.__dict__ == dict(x=1, y=2, z=9)

Example 54

Project: overloading.py
Source File: test_overloading.py
@requires_typing
def test_typing_tuple():

    @overloaded
    def f(arg: Tuple[int, str]):
        return int, str

    assert f.__complex_positions == {}
    assert f.__complex_parameters == {}

    @overloads(f)
    def f(arg: Tuple[str, int]):
        return str, int

    assert f.__complex_positions == {0: 8}
    assert f.__complex_parameters == {'arg': 8}

    for _ in range(rounds):
        assert f((1, b)) == (int, str)
        assert f((a, 2)) == (str, int)
        with pytest.raises(TypeError):
            f((1, 2))
        with pytest.raises(TypeError):
            f(())

    @overloads(f)
    def f(arg: Tuple):
        return ()

    for _ in range(rounds):
        assert f((1, b)) == (int, str)
        assert f((a, 2)) == (str, int)
        assert f((1, 2)) == ()
        assert f(())     == ()

    @overloaded
    def f(arg: Tuple[int, ...]):
        return int

    @overloads(f)
    def f(arg: Tuple[str, ...]):
        return str

    for _ in range(rounds):
        assert f((1, 2, 3)) == int
        assert f((a, b, c)) == str
        with pytest.raises(TypeError):
            f((x, 2, 3))
        if overloading.DEBUG:
            with pytest.raises(AssertionError):
                f(())

    @overloads(f)
    def f(arg: Tuple):
        return ()

    for _ in range(rounds):
        assert f((1, 2, 3)) == int
        assert f((a, b, c)) == str
        assert f((x, 2, 3)) == ()
        if overloading.DEBUG:
            with pytest.raises(AssertionError):
                f(())

Example 55

Project: pydelphin
Source File: derivation_test.py
    def test_fromstring(self):
        with pytest.raises(ValueError): D.from_string('')
        # root with no children
        with pytest.raises(ValueError): D.from_string('(some-root)')
        # does not start with `(` or end with `)`
        with pytest.raises(ValueError):
            D.from_string(' (1 some-thing -1 -1 -1 ("token"))')
        with pytest.raises(ValueError):
            D.from_string(' (1 some-thing -1 -1 -1 ("token")) ')
        # uneven parens
        with pytest.raises(ValueError):
            D.from_string('(1 some-thing -1 -1 -1 ("token")')
        # ok
        t = D.from_string('(1 some-thing -1 -1 -1 ("token"))')
        assert t.id == 1
        assert t.entity == 'some-thing'
        assert t.score == -1.0
        assert t.start == -1
        assert t.end == -1
        assert t.daughters == [T('token')]
        # newlines in tree
        t = D.from_string('''(1 some-thing -1 -1 -1
                                ("token"))''')
        assert t.id == 1
        assert t.entity == 'some-thing'
        assert t.score == -1.0
        assert t.start == -1
        assert t.end == -1
        assert t.daughters == [T('token')]
        # LKB-style terminals
        t = D.from_string('''(1 some-thing -1 -1 -1
                                ("to ken" 1 2))''')
        assert t.id == 1
        assert t.entity == 'some-thing'
        assert t.score == -1.0
        assert t.start == -1
        assert t.end == -1
        assert t.daughters == [T('to ken')]  # start/end ignored
        # TFS-style terminals
        t = D.from_string(r'''(1 some-thing -1 -1 -1
                                ("to ken" 2 "token [ +FORM \"to\" ]"
                                          3 "token [ +FORM \"ken\" ]"))''')
        assert t.id == 1
        assert t.entity == 'some-thing'
        assert t.score == -1.0
        assert t.start == -1
        assert t.end == -1
        assert t.daughters == [
            T('to ken', [Tk(2, r'token [ +FORM \"to\" ]'),
                         Tk(3, r'token [ +FORM \"ken\" ]')])
        ]
        # longer example
        t = D.from_string(r'''(root
            (1 some-thing 0.4 0 5
                (2 a-lex 0.8 0 1
                    ("a" 1 "token [ +FORM \"a\" ]"))
                (3 bcd-lex 0.5 2 5
                    ("bcd" 2 "token [ +FORM \"bcd\" ]")))
        )''')
        assert t.entity == 'root'
        assert len(t.daughters) == 1
        top = t.daughters[0]
        assert top.id == 1
        assert top.entity == 'some-thing'
        assert top.score == 0.4
        assert top.start == 0
        assert top.end == 5
        assert len(top.daughters) == 2
        lex = top.daughters[0]
        assert lex.id == 2
        assert lex.entity == 'a-lex'
        assert lex.score == 0.8
        assert lex.start == 0
        assert lex.end == 1
        assert lex.daughters == [T('a', [Tk(1, r'token [ +FORM \"a\" ]')])]
        lex = top.daughters[1]
        assert lex.id == 3
        assert lex.entity == 'bcd-lex'
        assert lex.score == 0.5
        assert lex.start == 2
        assert lex.end == 5
        assert lex.daughters == [T('bcd',
                                   [Tk(2, r'token [ +FORM \"bcd\" ]')])]

Example 56

Project: overloading.py
Source File: test_overloading.py
def test_errors():

    # Invalid signature
    with pytest.raises(OverloadingError):
        @overloaded
        def f(foo: 1):
            pass

    # Recurring signature
    with pytest.raises(OverloadingError):
        @overloaded
        def f(foo):
            pass
        @overloads(f)
        def f(foox):
            pass

    # Recurring signature
    with pytest.raises(OverloadingError):
        @overloaded
        def f(foo:int, bar, baz=None):
            pass
        @overloads(f)
        def f(foo:int, bar):
            pass

    # Recurring signature based on names
    with pytest.raises(OverloadingError):
        @overloaded
        def f(foo:int, bar:str):
            pass
        @overloads(f)
        def f(bar:str, foo:int):
            pass

    # Recurring signature: ambiguous for f(1, foo='a', bar=2)
    with pytest.raises(OverloadingError):
        @overloaded
        def f(x:int, foo:str, bar:int):
            pass
        @overloads(f)
        def f(y:int, bar:int, foo:str):
            pass

    # Recurring signature with `*args`
    with pytest.raises(OverloadingError):
        @overloaded
        def f(foo, *args):
            pass
        @overloads(f)
        def f(foo, bar=None, *args):
            pass

    # `overloads` without `overloaded`
    with pytest.raises(OverloadingError):
        def f(*args):
            pass
        @overloads(f)
        def f(foo):
            pass

    # Invalid object
    with pytest.raises(OverloadingError):
        @overloaded
        class Foo:
            pass
    with pytest.raises(OverloadingError):
        @overloaded
        def f(*args):
            pass
        @overloads(f)
        class Foo:
            pass

Example 57

Project: umongo
Source File: test_data_proxy.py
    def test_partial(self):

        class MySchema(EmbeddedSchema):
            with_default = fields.StrField(default='default_value')
            with_missing = fields.StrField(missing='missing_value')
            normal = fields.StrField()
            loaded = fields.StrField()
            loaded_but_empty = fields.StrField()
            normal_with_attribute = fields.StrField(attribute='in_mongo_field')

        MyDataProxy = data_proxy_factory('My', MySchema())
        d = MyDataProxy()
        d.from_mongo({'loaded': "foo", 'loaded_but_empty': missing}, partial=True)
        assert d.partial is True
        for field in ('with_default', 'with_missing', 'normal'):
            with pytest.raises(exceptions.FieldNotLoadedError):
                d.get(field)
            with pytest.raises(exceptions.FieldNotLoadedError):
                d.set(field, "test")
            with pytest.raises(exceptions.FieldNotLoadedError):
                d.delete(field)
        assert d.get('loaded') == "foo"
        assert d.get('loaded_but_empty') is missing
        d.set('loaded_but_empty', "bar")
        assert d.get('loaded_but_empty') == "bar"
        d.delete('loaded')
        # Can still access the deleted field
        assert d.get('loaded') is missing

        # Same test, but using `load`
        d = MyDataProxy()
        d.load({'loaded': "foo", 'loaded_but_empty': missing}, partial=True)
        assert d.partial is True
        for field in ('with_default', 'with_missing', 'normal'):
            with pytest.raises(exceptions.FieldNotLoadedError):
                d.get(field)
            with pytest.raises(exceptions.FieldNotLoadedError):
                d.set(field, "test")
            with pytest.raises(exceptions.FieldNotLoadedError):
                d.delete(field)
        assert d.get('loaded') == "foo"
        assert d.get('loaded_but_empty') is missing
        d.set('loaded_but_empty', "bar")
        assert d.get('loaded_but_empty') == "bar"
        d.delete('loaded')
        # Can still access the deleted field
        assert d.get('loaded') is missing

        # Not partial
        d = MyDataProxy()
        d.from_mongo({'loaded': "foo", 'loaded_but_empty': missing})
        assert d.partial is False
        assert d.get('with_default') == 'default_value'
        assert d.get('with_missing') == 'missing_value'
        assert d.get('normal') is missing
        assert d.get('loaded') == "foo"
        assert d.get('loaded_but_empty') == missing
        # Same test with load
        d = MyDataProxy()
        d.load({'loaded': "foo", 'loaded_but_empty': missing})
        assert d.partial is False
        assert d.get('with_default') == 'default_value'
        assert d.get('with_missing') == 'missing_value'
        assert d.get('normal') is missing
        assert d.get('loaded') == "foo"
        assert d.get('loaded_but_empty') == missing

        # Partial, then not partial
        d = MyDataProxy()
        d.from_mongo({'loaded': "foo", 'loaded_but_empty': missing}, partial=True)
        assert d.partial is True
        d.from_mongo({'loaded': "foo", 'loaded_but_empty': missing})
        assert d.partial is False
        # Same test with load
        d = MyDataProxy()
        d.load({'loaded': "foo", 'loaded_but_empty': missing}, partial=True)
        assert d.partial is True
        d.load({'loaded': "foo", 'loaded_but_empty': missing})
        assert d.partial is False

        # Partial, then update turns it into not partial
        d = MyDataProxy()
        d.from_mongo({'loaded': "foo", 'loaded_but_empty': missing}, partial=True)
        assert len(d.not_loaded_fields) == 4
        d.update({'with_default': 'test', 'with_missing': 'test', 'normal_with_attribute': 'foo'})
        assert len(d.not_loaded_fields) == 1
        assert d.partial is True
        d.update({'normal': 'test'})
        assert d.partial is False
        assert not d.not_loaded_fields

Example 58

Project: cocopot
Source File: test_response.py
def test_basic_response():
    r = make_response('text')
    assert r.body == 'text'
    assert r.status_line == '200 OK'
    assert r.status_code == 200
    assert r.charset.lower() == 'utf-8'

    r = make_response('redirect', 302)
    assert r.status_line == '302 Found'
    assert r.status_code == 302

    r = make_response('', 999)
    assert r.status_line == '999 Unknown'
    assert r.status_code == 999

    with pytest.raises(ValueError):
        r = make_response('', 1099)

    with pytest.raises(ValueError):
        r = make_response('', 99)

    r = make_response('', '999 Who knows?') # Illegal, but acceptable three digit code
    assert r.status_line == '999 Who knows?'
    assert r.status_code == 999

    with pytest.raises(ValueError):
        r = make_response(None)

    with pytest.raises(ValueError):
        r = make_response('', '555')

    assert r.status_line == '999 Who knows?'
    assert r.status_code == 999

    r = make_response('', [('Custom-Header', 'custom-value')])
    assert r.status_code == 200
    assert 'Custom-Header' in r

    with pytest.raises(ValueError):
        r = make_response(object())

    r0 = make_response('text')
    r = make_response(r0, 200, [('Custom-Header', 'custom-value')])
    assert r.status_code == 200
    assert 'Custom-Header' in r

    r0 = make_response('text')
    r = make_response(r0, '200 OK', {'Custom-Header':'custom-value'})
    assert r.status_code == 200
    assert 'Custom-Header' in r
    assert r.get_header('Custom-Header') == 'custom-value'
    assert 'Custom-Header' in dict(r.iter_headers())
    assert r.status_line == '200 OK'

    r.set_cookie('name1', 'value')
    r1 = r.copy()
    assert r1.status_line == r.status_line
    assert r1.headers == r.headers
    assert r1.body == r.body
    assert repr(r1) == repr(r)

    r = make_response('', 304)
    assert r.status_code == 304
    assert 'Content-Type' not in dict(r.iter_headers())

    r = make_response(BadRequest(''))
    assert r.status_code == 400

Example 59

def test_spread():
    p = 0x7d00007d
    g = 0x7d00FF00
    b = 0x7dFF0000
    data = np.array([[p, p, 0, 0, 0],
                     [p, g, 0, 0, 0],
                     [0, 0, 0, 0, 0],
                     [0, 0, 0, b, 0],
                     [0, 0, 0, 0, 0]], dtype='uint32')
    coords = [np.arange(5), np.arange(5)]
    img = tf.Image(data, coords=coords, dims=dims)

    s = tf.spread(img)
    o = np.array([[0xed00863b, 0xed00863b, 0xbc00a82a, 0x00000000, 0x00000000],
                  [0xed00863b, 0xed00863b, 0xbc00a82a, 0x00000000, 0x00000000],
                  [0xbc00a82a, 0xbc00a82a, 0xbca85600, 0x7dff0000, 0x7dff0000],
                  [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000],
                  [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000]])
    np.testing.assert_equal(s.data, o)
    assert (s.x_axis == img.x_axis).all()
    assert (s.y_axis == img.y_axis).all()
    assert s.dims == img.dims

    s = tf.spread(img, px=2)
    o = np.array([[0xed00863b, 0xed00863b, 0xed00863b, 0xbc00a82a, 0x00000000],
                  [0xed00863b, 0xed00863b, 0xf581411c, 0xdc904812, 0x7dff0000],
                  [0xed00863b, 0xf581411c, 0xed864419, 0xbca85600, 0x7dff0000],
                  [0xbc00a82a, 0xdc904812, 0xbca85600, 0x7dff0000, 0x7dff0000],
                  [0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000, 0x7dff0000]])
    np.testing.assert_equal(s.data, o)

    s = tf.spread(img, shape='square')
    o = np.array([[0xed00863b, 0xed00863b, 0xbc00a82a, 0x00000000, 0x00000000],
                  [0xed00863b, 0xed00863b, 0xbc00a82a, 0x00000000, 0x00000000],
                  [0xbc00a82a, 0xbc00a82a, 0xbca85600, 0x7dff0000, 0x7dff0000],
                  [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000],
                  [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000]])
    np.testing.assert_equal(s.data, o)

    s = tf.spread(img, how='add')
    o = np.array([[0xff007db7, 0xff007db7, 0xfa007f3e, 0x00000000, 0x00000000],
                  [0xff007db7, 0xff007db7, 0xfa007f3e, 0x00000000, 0x00000000],
                  [0xfa007f3e, 0xfa007f3e, 0xfa7f7f00, 0x7dff0000, 0x7dff0000],
                  [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000],
                  [0x00000000, 0x00000000, 0x7dff0000, 0x7dff0000, 0x7dff0000]])
    np.testing.assert_equal(s.data, o)

    mask = np.array([[1, 0, 1],
                     [0, 1, 0],
                     [1, 0, 1]])
    s = tf.spread(img, mask=mask)
    o = np.array([[0xbc00a82a, 0xbc00007d, 0x7d00ff00, 0x00000000, 0x00000000],
                  [0xbc00007d, 0xbc00a82a, 0x7d00007d, 0x00000000, 0x00000000],
                  [0x7d00ff00, 0x7d00007d, 0xbca85600, 0x00000000, 0x7dff0000],
                  [0x00000000, 0x00000000, 0x00000000, 0x7dff0000, 0x00000000],
                  [0x00000000, 0x00000000, 0x7dff0000, 0x00000000, 0x7dff0000]])
    np.testing.assert_equal(s.data, o)

    s = tf.spread(img, px=0)
    np.testing.assert_equal(s.data, img.data)

    pytest.raises(ValueError, lambda: tf.spread(img, px=-1))
    pytest.raises(ValueError, lambda: tf.spread(img, mask=np.ones(2)))
    pytest.raises(ValueError, lambda: tf.spread(img, mask=np.ones((2, 2))))

Example 60

Project: QNET
Source File: test_qsd_codegen.py
def test_qsd_codegen_observables(caplog, slh_Sec6, slh_Sec6_vals):
    A2 = Destroy(hs1)
    Sp = LocalSigma(hs2, 1, 0)
    Sm = Sp.dag()
    codegen = QSDCodeGen(circuit=slh_Sec6, num_vals=slh_Sec6_vals)

    with pytest.raises(QSDCodeGenError) as excinfo:
        scode = codegen._observables_lines(indent=0)
    assert "Must register at least one observable" in str(excinfo.value)

    name = 'a_1 sigma_10^[2]'
    codegen.add_observable(Sp*A2*Sm*Sp, name=name)
    filename = codegen._observables[name][1]
    assert filename == 'a_1_sigma_10_2.out'
    codegen.add_observable(Sp*A2*Sm*Sp, name=name)
    assert 'Overwriting existing operator' in caplog.text()

    with pytest.raises(ValueError) as exc_info:
        codegen.add_observable(Sp*A2*A2*Sm*Sp, name="xxxx"*20)
    assert "longer than limit" in str(exc_info.value)
    name = 'A2^2'
    codegen.add_observable(Sp*A2*A2*Sm*Sp, name=name)
    assert name in codegen._observables
    filename = codegen._observables[name][1]
    assert filename == 'A2_2.out'

    with pytest.raises(ValueError) as exc_info:
        codegen.add_observable(A2, name='A2_2')
    assert "Cannot generate unique filename" in str(exc_info.value)

    with pytest.raises(ValueError) as exc_info:
        codegen.add_observable(A2, name="A2\t2")
    assert "invalid characters" in str(exc_info.value)

    with pytest.raises(ValueError) as exc_info:
        codegen.add_observable(A2, name="A"*100)
    assert "longer than limit" in str(exc_info.value)

    with pytest.raises(ValueError) as exc_info:
        codegen.add_observable(A2, name="()")
    assert "Cannot generate filename" in str(exc_info.value)

    codegen = QSDCodeGen(circuit=slh_Sec6, num_vals=slh_Sec6_vals)
    codegen.add_observable(Sp*A2*Sm*Sp, name="X1")
    codegen.add_observable(Sm*Sp*A2*Sm, name="X2")
    assert codegen._observables["X2"] == (Sm*Sp*A2*Sm, 'X2.out')
    codegen.add_observable(A2, name="A2")
    assert codegen._observables["A2"] == (A2, 'A2.out')
    scode = codegen._observables_lines(indent=0)
    assert dedent(scode).strip() == dedent(r'''
    const int nOfOut = 3;
    Operator outlist[nOfOut] = {
      (A1 * S2_1_0),
      (A1 * S2_0_1),
      A1
    };
    char *flist[nOfOut] = {"X1.out", "X2.out", "A2.out"};
    int pipe[4] = {1,2,3,4};
    ''').strip()
    # Note how the observables have been simplified
    assert Sp*A2*Sm*Sp == Sp*A2
    assert codegen._operator_str(Sp*A2) == '(A1 * S2_1_0)'
    assert Sm*Sp*A2*Sm == Sm*A2
    assert codegen._operator_str(Sm*A2) == '(A1 * S2_0_1)'
    # If the observables introduce new operators or symbols, these should
    # extend the existing ones
    P1 = LocalSigma(hs2, 1, 1)
    zeta = symbols("zeta", real=True)
    codegen.add_observable(zeta*P1, name="P1")
    assert P1 in codegen._local_ops
    assert str(codegen._qsd_ops[P1]) == 'S2_1_1'
    assert zeta in codegen.syms
    codegen.num_vals.update({zeta: 1.0})
    assert 'zeta' in codegen._parameters_lines(indent=0)
    assert str(codegen._qsd_ops[P1]) in codegen._operator_basis_lines(indent=0)
    assert Sp*A2 in set(codegen.observables)
    assert Sm*A2 in set(codegen.observables)
    assert zeta*P1 in set(codegen.observables)
    assert list(codegen.observable_names) == ['X1', 'X2', 'A2', 'P1']
    assert codegen.get_observable('X1') == Sp*A2*Sm*Sp

Example 61

Project: python-consul
Source File: test_std.py
    def test_catalog(self, consul_port):
        c = consul.Consul(port=consul_port)

        # grab the node our server created, so we can ignore it
        _, nodes = c.catalog.nodes()
        assert len(nodes) == 1
        current = nodes[0]

        # test catalog.datacenters
        assert c.catalog.datacenters() == ['dc1']

        # test catalog.register
        pytest.raises(
            consul.ConsulException,
            c.catalog.register, 'foo', '10.1.10.11', dc='dc2')

        assert c.catalog.register(
            'n1',
            '10.1.10.11',
            service={'service': 's1'},
            check={'name': 'c1'}) is True
        assert c.catalog.register(
            'n1', '10.1.10.11', service={'service': 's2'}) is True
        assert c.catalog.register(
            'n2', '10.1.10.12',
            service={'service': 's1', 'tags': ['master']}) is True

        # test catalog.nodes
        pytest.raises(consul.ConsulException, c.catalog.nodes, dc='dc2')
        _, nodes = c.catalog.nodes()
        nodes.remove(current)
        assert [x['Node'] for x in nodes] == ['n1', 'n2']

        # test catalog.services
        pytest.raises(consul.ConsulException, c.catalog.services, dc='dc2')
        _, services = c.catalog.services()
        assert services == {'s1': [u'master'], 's2': [], 'consul': []}

        # test catalog.node
        pytest.raises(consul.ConsulException, c.catalog.node, 'n1', dc='dc2')
        _, node = c.catalog.node('n1')
        assert set(node['Services'].keys()) == set(['s1', 's2'])
        _, node = c.catalog.node('n3')
        assert node is None

        # test catalog.service
        pytest.raises(
            consul.ConsulException, c.catalog.service, 's1', dc='dc2')
        _, nodes = c.catalog.service('s1')
        assert set([x['Node'] for x in nodes]) == set(['n1', 'n2'])
        _, nodes = c.catalog.service('s1', tag='master')
        assert set([x['Node'] for x in nodes]) == set(['n2'])

        # test catalog.deregister
        pytest.raises(
            consul.ConsulException, c.catalog.deregister, 'n2', dc='dc2')
        assert c.catalog.deregister('n1', check_id='c1') is True
        assert c.catalog.deregister('n2', service_id='s1') is True
        # check the nodes weren't removed
        _, nodes = c.catalog.nodes()
        nodes.remove(current)
        assert [x['Node'] for x in nodes] == ['n1', 'n2']
        # check n2's s1 service was removed though
        _, nodes = c.catalog.service('s1')
        assert set([x['Node'] for x in nodes]) == set(['n1'])

        # cleanup
        assert c.catalog.deregister('n1') is True
        assert c.catalog.deregister('n2') is True
        _, nodes = c.catalog.nodes()
        nodes.remove(current)
        assert [x['Node'] for x in nodes] == []

Example 62

Project: QNET
Source File: test_qsd_codegen.py
def test_qsd_codegen_traj(slh_Sec6):
    A2 = Destroy(hs1)
    Sp = LocalSigma(hs2, 1, 0)
    Sm = Sp.dag()
    codegen = QSDCodeGen(circuit=slh_Sec6)
    codegen.add_observable(Sp*A2*Sm*Sp, name="X1")
    codegen.add_observable(Sm*Sp*A2*Sm, name="X2")
    codegen.add_observable(A2, name="A2")

    with pytest.raises(QSDCodeGenError) as excinfo:
        scode = codegen._trajectory_lines(indent=0)
    assert "No trajectories set up"  in str(excinfo.value)

    codegen.set_trajectories(psi_initial=None, stepper='AdaptiveStep', dt=0.01,
            nt_plot_step=100, n_plot_steps=5, n_trajectories=1,
            traj_save=10)
    scode = codegen._trajectory_lines(indent=0)
    assert dedent(scode).strip() == dedent(r'''
    ACG gen(rndSeed); // random number generator
    ComplexNormal rndm(&gen); // Complex Gaussian random numbers

    double dt = 0.01;
    int dtsperStep = 100;
    int nOfSteps = 5;
    int nTrajSave = 10;
    int nTrajectory = 1;
    int ReadFile = 0;

    AdaptiveStep stepper(psiIni, H, nL, L);
    Trajectory traj(psiIni, dt, stepper, &rndm);

    traj.sumExp(nOfOut, outlist, flist , dtsperStep, nOfSteps,
                nTrajectory, nTrajSave, ReadFile);
    ''').strip()

    with pytest.raises(ValueError) as excinfo:
        codegen.set_moving_basis(move_dofs=0, delta=0.01, width=2,
                                 move_eps=0.01)
    assert "move_dofs must be an integer >0" in str(excinfo.value)
    with pytest.raises(ValueError) as excinfo:
        codegen.set_moving_basis(move_dofs=4, delta=0.01, width=2,
                                 move_eps=0.01)
    assert "move_dofs must not be larger" in str(excinfo.value)
    with pytest.raises(QSDCodeGenError) as excinfo:
        codegen.set_moving_basis(move_dofs=3, delta=0.01, width=2,
                                 move_eps=0.01)
    assert "A moving basis cannot be used" in str(excinfo.value)
    codegen.set_moving_basis(move_dofs=2, delta=0.01, width=2, move_eps=0.01)
    scode = codegen._trajectory_lines(indent=0)
    assert dedent(scode).strip() == dedent(r'''
    ACG gen(rndSeed); // random number generator
    ComplexNormal rndm(&gen); // Complex Gaussian random numbers

    double dt = 0.01;
    int dtsperStep = 100;
    int nOfSteps = 5;
    int nTrajSave = 10;
    int nTrajectory = 1;
    int ReadFile = 0;

    AdaptiveStep stepper(psiIni, H, nL, L);
    Trajectory traj(psiIni, dt, stepper, &rndm);

    int move = 2;
    double delta = 0.01;
    int width = 2;
    double moveEps = 0.01;

    traj.sumExp(nOfOut, outlist, flist , dtsperStep, nOfSteps,
                nTrajectory, nTrajSave, ReadFile, move,
                delta, width, moveEps);
    ''').strip()

Example 63

Project: python-consul
Source File: test_std.py
    def test_acl_implicit_token_use(self, acl_consul):
        # configure client to use the master token by default
        c = consul.Consul(port=acl_consul.port, token=acl_consul.token)
        master_token = acl_consul.token

        acls = c.acl.list()
        assert set([x['ID'] for x in acls]) == \
            set(['anonymous', master_token])

        assert c.acl.info('foo') is None
        compare = [c.acl.info(master_token), c.acl.info('anonymous')]
        compare.sort(key=operator.itemgetter('ID'))
        assert acls == compare

        rules = """
            key "" {
                policy = "read"
            }
            key "private/" {
                policy = "deny"
            }
        """
        token = c.acl.create(rules=rules)
        assert c.acl.info(token)['Rules'] == rules

        token2 = c.acl.clone(token)
        assert c.acl.info(token2)['Rules'] == rules

        assert c.acl.update(token2, name='Foo') == token2
        assert c.acl.info(token2)['Name'] == 'Foo'

        assert c.acl.destroy(token2) is True
        assert c.acl.info(token2) is None

        c.kv.put('foo', 'bar')
        c.kv.put('private/foo', 'bar')

        c_limited = consul.Consul(port=acl_consul.port, token=token)
        assert c_limited.kv.get('foo')[1]['Value'] == six.b('bar')
        pytest.raises(
            consul.ACLPermissionDenied, c_limited.kv.put, 'foo', 'bar2')
        pytest.raises(
            consul.ACLPermissionDenied, c_limited.kv.delete, 'foo')

        assert c.kv.get('private/foo')[1]['Value'] == six.b('bar')
        assert c_limited.kv.get('private/foo')[1] is None
        pytest.raises(
            consul.ACLPermissionDenied,
            c_limited.kv.put, 'private/foo', 'bar2')
        pytest.raises(
            consul.ACLPermissionDenied,
            c_limited.kv.delete, 'private/foo')

        # check we can override the client's default token
        assert c.kv.get('private/foo', token=token)[1] is None
        pytest.raises(
            consul.ACLPermissionDenied,
            c.kv.put, 'private/foo', 'bar2', token=token)
        pytest.raises(
            consul.ACLPermissionDenied,
            c.kv.delete, 'private/foo', token=token)

        # clean up
        c.acl.destroy(token)
        acls = c.acl.list()
        assert set([x['ID'] for x in acls]) == \
            set(['anonymous', master_token])

Example 64

Project: pycapnp
Source File: test_capability.py
def test_simple_client():
    client = capability.TestInterface._new_client(Server())

    remote = client._send('foo', i=5)
    response = remote.wait()

    assert response.x == '26'


    remote = client.foo(i=5)
    response = remote.wait()

    assert response.x == '26'

    remote = client.foo(i=5, j=True)
    response = remote.wait()

    assert response.x == '27'

    remote = client.foo(5)
    response = remote.wait()

    assert response.x == '26'

    remote = client.foo(5, True)
    response = remote.wait()

    assert response.x == '27'

    remote = client.foo(5, j=True)
    response = remote.wait()

    assert response.x == '27'

    remote = client.buz(capability.TestSturdyRefHostId.new_message(host='localhost'))
    response = remote.wait()

    assert response.x == 'localhost_test'

    remote = client.bam(i=5)
    response = remote.wait()

    assert response.x == '5_test'
    assert response.i == 5

    with pytest.raises(Exception):
        remote = client.foo(5, 10)

    with pytest.raises(Exception):
        remote = client.foo(5, True, 100)

    with pytest.raises(Exception):
        remote = client.foo(i='foo')

    with pytest.raises(AttributeError):
        remote = client.foo2(i=5)

    with pytest.raises(Exception):
        remote = client.foo(baz=5)

Example 65

Project: python-arango
Source File: test_document.py
def test_update():
    doc = doc1.copy()
    col.insert(doc)

    # Test update with default options
    doc['val'] = {'foo': 1}
    doc = col.update(doc)
    assert doc['_id'] == '{}/1'.format(col.name)
    assert doc['_key'] == '1'
    assert isinstance(doc['_rev'], string_types)
    assert col['1']['val'] == {'foo': 1}
    current_rev = doc['_rev']

    # Test update with merge
    doc['val'] = {'bar': 2}
    doc = col.update(doc, merge=True)
    assert doc['_id'] == '{}/1'.format(col.name)
    assert doc['_key'] == '1'
    assert isinstance(doc['_rev'], string_types)
    assert doc['_old_rev'] == current_rev
    assert col['1']['val'] == {'foo': 1, 'bar': 2}
    current_rev = doc['_rev']

    # Test update without merge
    doc['val'] = {'baz': 3}
    doc = col.update(doc, merge=False)
    assert doc['_id'] == '{}/1'.format(col.name)
    assert doc['_key'] == '1'
    assert isinstance(doc['_rev'], string_types)
    assert doc['_old_rev'] == current_rev
    assert col['1']['val'] == {'baz': 3}
    current_rev = doc['_rev']

    # Test update with keep_none
    doc['val'] = None
    doc = col.update(doc, keep_none=True)
    assert doc['_id'] == '{}/1'.format(col.name)
    assert doc['_key'] == '1'
    assert isinstance(doc['_rev'], string_types)
    assert doc['_old_rev'] == current_rev
    assert col['1']['val'] is None
    current_rev = doc['_rev']

    # Test update without keep_none
    doc['val'] = None
    doc = col.update(doc, keep_none=False)
    assert doc['_id'] == '{}/1'.format(col.name)
    assert doc['_key'] == '1'
    assert isinstance(doc['_rev'], string_types)
    assert doc['_old_rev'] == current_rev
    assert 'val' not in col['1']
    current_rev = doc['_rev']

    # Test update with return_new and return_old
    doc['val'] = 300
    doc = col.update(doc, return_new=True, return_old=True)
    assert doc['_id'] == '{}/1'.format(col.name)
    assert doc['_key'] == '1'
    assert isinstance(doc['_rev'], string_types)
    assert doc['_old_rev'] == current_rev
    assert doc['new']['_key'] == '1'
    assert doc['new']['val'] == 300
    assert doc['old']['_key'] == '1'
    assert 'val' not in doc['old']
    assert col['1']['val'] == 300
    current_rev = doc['_rev']

    # Test update without return_new and return_old
    doc['val'] = 400
    doc = col.update(doc, return_new=False, return_old=False)
    assert doc['_id'] == '{}/1'.format(col.name)
    assert doc['_key'] == '1'
    assert isinstance(doc['_rev'], string_types)
    assert doc['_old_rev'] == current_rev
    assert 'new' not in doc
    assert 'old' not in doc
    assert col['1']['val'] == 400
    current_rev = doc['_rev']

    # Test update with check_rev
    doc['val'] = 500
    doc['_rev'] = current_rev + '000'
    with pytest.raises(DocumentRevisionError):
        col.update(doc, check_rev=True)
    assert col['1']['val'] == 400

    # Test update with sync
    doc['val'] = 600
    doc = col.update(doc, sync=True)
    assert doc['_id'] == '{}/1'.format(col.name)
    assert doc['_key'] == '1'
    assert isinstance(doc['_rev'], string_types)
    assert doc['_old_rev'] == current_rev
    assert doc['sync'] is True
    assert col['1']['val'] == 600
    current_rev = doc['_rev']

    # Test update without sync
    doc['val'] = 700
    doc = col.update(doc, sync=False)
    assert doc['_id'] == '{}/1'.format(col.name)
    assert doc['_key'] == '1'
    assert isinstance(doc['_rev'], string_types)
    assert doc['_old_rev'] == current_rev
    assert doc['sync'] is False
    assert col['1']['val'] == 700
    current_rev = doc['_rev']

    # Test update missing document
    with pytest.raises(DocumentUpdateError):
        col.update(doc2)
    assert '2' not in col
    assert col['1']['val'] == 700
    assert col['1']['_rev'] == current_rev

    # Test update in missing collection
    with pytest.raises(DocumentUpdateError):
        bad_col.update(doc)

Example 66

Project: arctic
Source File: test_version_store.py
def test_prunes_previous_version_append_interaction(library):
    ts = ts1
    ts2 = ts1.append(pd.DataFrame(index=[ts.index[-1] + dtd(days=1),
                                         ts.index[-1] + dtd(days=2), ],
                                  data=[3.7, 3.8],
                                  columns=['near']))
    ts2.index.name = ts1.index.name
    ts3 = ts.append(pd.DataFrame(index=[ts2.index[-1] + dtd(days=1),
                                        ts2.index[-1] + dtd(days=2)],
                                 data=[4.8, 4.9],
                                 columns=['near']))
    ts3.index.name = ts1.index.name
    ts4 = ts
    ts5 = ts2
    ts6 = ts3
    now = dt.utcnow()
    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=130)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=129)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts2, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=128)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts3, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol, as_of=2).data)
    assert_frame_equal(ts3, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=127)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts4, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol, as_of=2).data)
    assert_frame_equal(ts3, library.read(symbol, as_of=3).data)
    assert_frame_equal(ts4, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=126)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts5, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol, as_of=2).data)
    assert_frame_equal(ts3, library.read(symbol, as_of=3).data)
    assert_frame_equal(ts4, library.read(symbol, as_of=4).data)
    assert_frame_equal(ts5, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts6, prune_previous_version=True)

    with pytest.raises(NoDataFoundException):
        library.read(symbol, as_of=1)
    with pytest.raises(NoDataFoundException):
        library.read(symbol, as_of=2)
    with pytest.raises(NoDataFoundException):
        library.read(symbol, as_of=3)
    assert_frame_equal(ts5, library.read(symbol, as_of=5).data)
    assert_frame_equal(ts6, library.read(symbol).data)

Example 67

Project: giraffez
Source File: test_load.py
    def test_load_from_file_invalid_header(self, mocker, tmpfiles):
        mock_connect = mocker.patch("giraffez.Cmd._connect")
        mock_execute = mocker.patch("giraffez.Cmd._execute")

        columns = Columns([
            ("col1", VARCHAR_NN, 50, 0, 0),
            ("col2", VARCHAR_N, 50, 0, 0),
            ("col3", VARCHAR_N, 50, 0, 0),
        ])

        mock_columns = mocker.patch("giraffez.Cmd.get_columns")

        mock_columns.return_value = columns

        # Invalid column (blank string)
        with open(tmpfiles.load_file, 'w') as f:
            f.write("|".join(["col1", "col2", "col3", "", ""]))
            f.write("\n")
            f.write("|".join(["value1", "value2", "value3"]))
            f.write("\n")

        with giraffez.Load() as load:
            load.panic = True
            with pytest.raises(GiraffeError):
                result = load.from_file("db1.test", tmpfiles.load_file, delimiter="|")
                print(result)

        # Invalid column (wrong name)
        with open(tmpfiles.load_file, 'w') as f:
            f.write("|".join(["col1", "col2", "col4"]))
            f.write("\n")
            f.write("|".join(["value1", "value2", "value3"]))
            f.write("\n")

        with giraffez.Load() as load:
            load.panic = True
            with pytest.raises(GiraffeError):
                result = load.from_file("db1.test", tmpfiles.load_file, delimiter="|")
                print(result)

        # Too many columns (duplicate name)
        with open(tmpfiles.load_file, 'w') as f:
            f.write("|".join(["col1", "col2", "col3", "col3"]))
            f.write("\n")
            f.write("|".join(["value1", "value2", "value3"]))
            f.write("\n")

        with giraffez.Load() as load:
            load.panic = True
            with pytest.raises(GiraffeEncodeError):
                result = load.from_file("db1.test", tmpfiles.load_file, delimiter="|")
                print(result)

Example 68

Project: binarytree
Source File: tests.py
def test_inspect():
    for invalid_argument in [None, 1, 'foo']:
        with pytest.raises(ValueError) as err:
            inspect(invalid_argument)
        assert str(err.value) == 'Expecting a list or a node'

    def convert_inspect(target):
        return inspect(convert(target))

    def self_inspect(target):
        return target.inspect()

    for inspect_func in [inspect, convert_inspect, self_inspect]:

        root = Node(1)
        assert inspect_func(root) == {
            'is_height_balanced': True,
            'is_weight_balanced': True,
            'is_max_heap': True,
            'is_min_heap': True,
            'is_bst': True,
            'is_full': True,
            'height': 0,
            'max_value': 1,
            'min_value': 1,
            'leaf_count': 1,
            'node_count': 1,
            'max_leaf_depth': 0,
            'min_leaf_depth': 0,
        }
        root.left = Node(2)
        assert inspect_func(root) == {
            'is_height_balanced': True,
            'is_weight_balanced': True,
            'is_max_heap': False,
            'is_min_heap': True,
            'is_bst': False,
            'is_full' : False,
            'height': 1,
            'max_value': 2,
            'min_value': 1,
            'node_count': 2,
            'leaf_count': 1,
            'max_leaf_depth': 1,
            'min_leaf_depth': 1,
        }
        root.right = Node(3)
        assert inspect_func(root) == {
            'is_height_balanced': True,
            'is_weight_balanced': True,
            'is_max_heap': False,
            'is_min_heap': True,
            'is_bst': False,
            'is_full' : True,
            'height': 1,
            'max_value': 3,
            'min_value': 1,
            'leaf_count': 2,
            'node_count': 3,
            'max_leaf_depth': 1,
            'min_leaf_depth': 1,
        }
        root.value = 2
        root.left.value = 1
        root.right.value = 3
        assert inspect_func(root) == {
            'is_height_balanced': True,
            'is_weight_balanced': True,
            'is_max_heap': False,
            'is_min_heap': False,
            'is_bst': True,
            'is_full' : True,
            'height': 1,
            'max_value': 3,
            'min_value': 1,
            'leaf_count': 2,
            'node_count': 3,
            'max_leaf_depth': 1,
            'min_leaf_depth': 1,
        }
        root.value = 1
        root.left.value = 2
        root.right.value = 3
        root.left.right = Node(4)
        assert inspect_func(root) == {
            'is_height_balanced': True,
            'is_weight_balanced': True,
            'is_max_heap': False,
            'is_min_heap': False,
            'is_bst': False,
            'is_full' : False,
            'height': 2,
            'max_value': 4,
            'min_value': 1,
            'leaf_count': 2,
            'node_count': 4,
            'max_leaf_depth': 2,
            'min_leaf_depth': 1,
        }
        root.left.left = Node(5)
        assert inspect_func(root) == {
            'is_height_balanced': True,
            'is_weight_balanced': True,
            'is_max_heap': False,
            'is_min_heap': True,
            'is_bst': False,
            'is_full' : True,
            'height': 2,
            'max_value': 5,
            'min_value': 1,
            'leaf_count': 3,
            'node_count': 5,
            'max_leaf_depth': 2,
            'min_leaf_depth': 1,
        }
        root.right.right = Node(6)
        assert inspect_func(root) == {
            'is_height_balanced': True,
            'is_weight_balanced': True,
            'is_max_heap': False,
            'is_min_heap': False,
            'is_bst': False,
            'is_full' : False,
            'height': 2,
            'max_value': 6,
            'min_value': 1,
            'leaf_count': 3,
            'node_count': 6,
            'max_leaf_depth': 2,
            'min_leaf_depth': 2,
        }

        root.right.right = Node(None)
        with pytest.raises(ValueError) as err:
            assert inspect_func(root)
        assert str(err.value) == 'A node cannot have a null value'

        root.right.right = {}
        with pytest.raises(ValueError) as err:
            assert inspect_func(root)
        assert str(err.value) == 'Found an invalid node in the tree'

Example 70

Project: binarytree
Source File: tests.py
def test_setup():
    null = -1

    class GoodNode(Node):

        def __init__(self, val, bar=-1, baz=-1):
            self.foo = val
            self.bar = bar
            self.baz = baz

    class BadNode1(object):

        def __init__(self, val, bar=-1, baz=-1):
            self.foo = val
            self.bar = bar
            self.baz = baz

    class BadNode2(object):

        def __init__(self, val, bar=-2, baz=-2):
            self.foo = val
            self.bar = bar
            self.baz = baz

    setup_node(
        node_init_func=lambda v: GoodNode(v),
        node_class=GoodNode,
        null_value=null,
        value_attr='foo',
        left_attr='bar',
        right_attr='baz'
    )
    for _ in range(repetitions):
        nodes_to_visit = [tree(height=10)]
        while nodes_to_visit:
            node = nodes_to_visit.pop()

            # Check that the new node class is used
            assert isinstance(node, GoodNode)

            # Check that the original attributes do not exist
            assert not hasattr(node, 'left')
            assert not hasattr(node, 'right')
            assert not hasattr(node, 'value')

            # Check that the new attributes are as expected
            left = attr(node, 'bar')
            right = attr(node, 'baz')
            value = attr(node, 'foo')
            assert isinstance(value, int)

            if left != null:
                assert isinstance(left, GoodNode)
                nodes_to_visit.append(left)
            if right != null:
                assert isinstance(right, GoodNode)
                nodes_to_visit.append(right)

    with pytest.raises(ValueError) as err:
        setup_node(
            node_init_func=lambda v: BadNode1(v),
            node_class=None,
            null_value=-1,
            value_attr='foo',
            left_attr='bar',
            right_attr='baz',
        )
    assert 'Invalid class given' in str(err.value)

    with pytest.raises(ValueError) as err:
        setup_node(
            node_init_func=None,
            node_class=BadNode1,
            null_value=-1,
            value_attr='foo',
            left_attr='bar',
            right_attr='baz',
        )
    assert 'function must be a callable' in str(err.value)

    with pytest.raises(ValueError) as err:
        setup_node(
            node_init_func=lambda v: GoodNode(v),
            node_class=BadNode1,
            null_value=-1,
            value_attr='foo',
            left_attr='bar',
            right_attr='baz',
        )
    assert 'returns an instance of "BadNode1"' in str(err.value)

    with pytest.raises(ValueError) as err:
        setup_node(
            node_init_func=lambda v: GoodNode(v),
            node_class=GoodNode,
            null_value=-1,
            value_attr='value',
            left_attr='bar',
            right_attr='baz',
        )
    assert 'required attribute "value"' in str(err.value)

    with pytest.raises(ValueError) as err:
        setup_node(
            node_init_func=lambda v: GoodNode(v),
            node_class=GoodNode,
            null_value=-1,
            value_attr='foo',
            left_attr='left',
            right_attr='baz',
        )
    assert 'required attribute "left"' in str(err.value)

    with pytest.raises(ValueError) as err:
        setup_node(
            node_init_func=lambda v: GoodNode(v),
            node_class=GoodNode,
            null_value=-1,
            value_attr='foo',
            left_attr='bar',
            right_attr='right',
        )
    assert 'required attribute "right"' in str(err.value)

    with pytest.raises(ValueError) as err:
        setup_node(
            node_init_func=lambda v: BadNode2(v, -2),
            node_class=BadNode2,
            null_value=-1,
            value_attr='foo',
            left_attr='bar',
            right_attr='baz',
        )
    assert (
        'expected null/sentinel value "-1" for its '
        'left child node attribute "bar"'
    ) in str(err.value)

    with pytest.raises(ValueError) as err:
        setup_node(
            node_init_func=lambda v: BadNode2(v, -1, -2),
            node_class=BadNode2,
            null_value=-1,
            value_attr='foo',
            left_attr='bar',
            right_attr='baz',
        )
    assert (
        'expected null/sentinel value "-1" for its '
        'right child node attribute "baz"'
    ) in str(err.value)
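
The checks above only assert that a fragment appears somewhere in str(err.value). The match= keyword of pytest.raises expresses the same check more compactly (it is applied as a regular expression search against the message). A hedged sketch with an invented configure() stand-in for setup_node():

import pytest

def configure(node_class):
    # Invented stand-in for setup_node(), for illustration only.
    if node_class is None:
        raise ValueError('Invalid class given: None')

def test_substring_and_match():
    # Substring check, as in the example above.
    with pytest.raises(ValueError) as err:
        configure(None)
    assert 'Invalid class given' in str(err.value)

    # Equivalent, using match= (a regex searched against the message).
    with pytest.raises(ValueError, match='Invalid class given'):
        configure(None)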

Example 71

Project: bayeslite
Source File: test_metamodels.py
View license
def _test_example(bdb, exname):
    mm, t, t_sql, data_sql, data, p, g, p_bql, g_bql, g_bqlbad0, g_bqlbad1 = \
        examples[exname]
    qt = bql_quote_name(t)
    qg = bql_quote_name(g)

    bayeslite.bayesdb_register_metamodel(bdb, mm())

    # Create a table.
    assert not core.bayesdb_has_table(bdb, t)
    with bdb.savepoint_rollback():
        bdb.sql_execute(t_sql)
        assert core.bayesdb_has_table(bdb, t)
    assert not core.bayesdb_has_table(bdb, t)
    bdb.sql_execute(t_sql)
    assert core.bayesdb_has_table(bdb, t)

    # Insert data into the table.
    assert bdb.execute('SELECT COUNT(*) FROM %s' % (qt,)).fetchvalue() == 0
    for row in data:
        bdb.sql_execute(data_sql, row)
    n = len(data)
    assert bdb.execute('SELECT COUNT(*) FROM %s' % (qt,)).fetchvalue() == n

    # Create a population.
    assert not core.bayesdb_has_population(bdb, p)
    bdb.execute(p_bql)
    p_id = core.bayesdb_get_population(bdb, p)

    # Create a generator.  Make sure savepoints work for this.
    assert not core.bayesdb_has_generator(bdb, p_id, g)
    with pytest.raises(Exception):
        with bdb.savepoint():
            bdb.execute(g_bqlbad0)
    assert not core.bayesdb_has_generator(bdb, p_id, g)
    with pytest.raises(Exception):
        with bdb.savepoint():
            bdb.execute(g_bqlbad1)
    assert not core.bayesdb_has_generator(bdb, p_id, g)
    with bdb.savepoint_rollback():
        bdb.execute(g_bql)
        assert core.bayesdb_has_generator(bdb, p_id, g)
    assert not core.bayesdb_has_generator(bdb, p_id, g)
    bdb.execute(g_bql)
    assert core.bayesdb_has_generator(bdb, p_id, g)
    assert not core.bayesdb_has_generator(bdb, p_id+1, g)
    with pytest.raises(Exception):
        bdb.execute(g_bql)
    assert core.bayesdb_has_generator(bdb, p_id, g)

    gid = core.bayesdb_get_generator(bdb, p_id, g)
    assert not core.bayesdb_generator_has_model(bdb, gid, 0)
    assert [] == core.bayesdb_generator_modelnos(bdb, gid)
    with bdb.savepoint_rollback():
        bdb.execute('INITIALIZE 1 MODEL FOR %s' % (qg,))
        assert core.bayesdb_generator_has_model(bdb, gid, 0)
        assert [0] == core.bayesdb_generator_modelnos(bdb, gid)
    with bdb.savepoint_rollback():
        bdb.execute('INITIALIZE 10 MODELS FOR %s' % (qg,))
        for i in range(10):
            assert core.bayesdb_generator_has_model(bdb, gid, i)
            assert range(10) == core.bayesdb_generator_modelnos(bdb, gid)
    bdb.execute('INITIALIZE 2 MODELS FOR %s' % (qg,))

    # Test dropping things.
    with pytest.raises(bayeslite.BQLError):
        bdb.execute('DROP TABLE %s' % (qt,))
    with bdb.savepoint_rollback():
        # Note that sql_execute does not protect us!
        bdb.sql_execute('DROP TABLE %s' % (qt,))
        assert not core.bayesdb_has_table(bdb, t)
    assert core.bayesdb_has_table(bdb, t)
    # XXX Should we reject dropping a generator when there remain
    # models?  Should we not reject dropping a table when there remain
    # generators?  A table can be dropped when there remain indices.
    #
    # with pytest.raises(bayeslite.BQLError):
    #     # Models remain.
    #     bdb.execute('DROP GENERATOR %s' % (qg,))
    with bdb.savepoint_rollback():
        bdb.execute('DROP GENERATOR %s' % (qg,))
        assert not core.bayesdb_has_generator(bdb, None, g)
    assert core.bayesdb_has_generator(bdb, p_id, g)
    with bdb.savepoint_rollback():
        bdb.execute('DROP GENERATOR %s' % (qg,))
        assert not core.bayesdb_has_generator(bdb, None, g)
        bdb.execute(g_bql)
        assert core.bayesdb_has_generator(bdb, None, g)
    assert core.bayesdb_has_generator(bdb, p_id, g)
    assert core.bayesdb_has_generator(bdb, None, g)
    assert gid == core.bayesdb_get_generator(bdb, p_id, g)

    # Test dropping models.
    with bdb.savepoint_rollback():
        bdb.execute('DROP MODEL 1 FROM %s' % (qg,))
        assert core.bayesdb_generator_has_model(bdb, gid, 0)
        assert not core.bayesdb_generator_has_model(bdb, gid, 1)
        assert [0] == core.bayesdb_generator_modelnos(bdb, gid)

    # Test analyzing models.
    bdb.execute('ANALYZE %s FOR 1 ITERATION WAIT' % (qg,))
    bdb.execute('ANALYZE %s MODEL 0 FOR 1 ITERATION WAIT' % (qg,))
    bdb.execute('ANALYZE %s MODEL 1 FOR 1 ITERATION WAIT' % (qg,))
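
Several checks above wrap a nested bdb.savepoint() context in pytest.raises(Exception): the error may come from anywhere inside the block, and any Exception subclass satisfies the broad type. A minimal sketch of that shape with a toy context manager (the savepoint() below is invented, not the bayeslite API):

import contextlib
import pytest

@contextlib.contextmanager
def savepoint():
    # Toy stand-in for a transactional savepoint; it simply lets the
    # body's exception propagate out to pytest.raises.
    yield

def test_error_inside_nested_context():
    with pytest.raises(Exception):
        with savepoint():
            raise RuntimeError('constraint violated')  # any Exception subclass matches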

Example 72

Project: keraflow
Source File: test_layer_exception.py
View license
def test_feed_exceptions():

    # Forget to feed d1
    with pytest.raises(KError):
        d1 = Dense(1)
        Dense(1)(d1)

    # Forget to feed d1
    with pytest.raises(KError):
        d1 = Dense(1)
        Dense(1)(d1)

    # First layer of sequential should be input
    with pytest.raises(KError):
        s1 = Sequential([Dense(1)])
        s1.compile('sgd', 'mse')

    # Recursive feeding
    with pytest.raises(KError):
        input1 = Input(1)
        d = Dense(1)
        d1 = d(input1)
        d(d1)

    # Recursive feeding
    with pytest.raises(KError):
        i1 = Input(1)
        i2 = Input(1)
        i3 = Input(1)
        i4 = Input(1)
        m = ElementWiseSum()
        m1 = m([i1, i2])
        m2 = m([i3, i4])
        m([m1, m2])  # m'th output feeds to m again

    # shape should be assigned as a tuple, i.e. Input((1,2))
    with pytest.raises(KError):
        input1 = Input(1, 2)

    # You should not feed an Input layer
    with pytest.raises(KError):
        input1 = Input(1)(Input(1))
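
Each block above does its setup inside the with pytest.raises(KError) block, so any KError raised along the way satisfies the check. Keeping only the statement that is expected to fail inside the block makes the intent tighter; a sketch of that refinement with an invented build_input() and a plain ValueError:

import pytest

def build_input(shape):
    # Invented constructor that, like Input above, rejects non-tuple shapes.
    if not isinstance(shape, tuple):
        raise ValueError('shape should be assigned as a tuple')
    return shape

def test_shape_must_be_tuple():
    bad_shape = 1            # setup stays outside the raises block
    with pytest.raises(ValueError):
        build_input(bad_shape)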

Example 73

Project: cocrawler
Source File: test_useragent.py
View license
def test_useragent():

    config = {'UserAgent': {'Style': 'crawler',
                            'MyPrefix': 'something',
                            'URL': 'http://example.com/cocrawler.html'}}

    version = '1.0'

    robotname, ua = useragent.useragent(config, version)

    assert version in ua
    assert 'http://example.com/cocrawler.html' in ua
    assert robotname == 'something-cocrawler'

    config['UserAgent']['Style'] = 'laptopplus'
    robotname, ua = useragent.useragent(config, version)
    assert 'Mozilla/5.0' in ua
    config['UserAgent']['Style'] = 'tabletplus'
    robotname, ua = useragent.useragent(config, version)
    assert 'Mozilla/5.0' in ua
    config['UserAgent']['Style'] = 'phoneplus'
    robotname, ua = useragent.useragent(config, version)
    assert 'Mozilla/5.0' in ua

    bad_config = copy.deepcopy(config)
    bad_config['UserAgent']['Style'] = 'error'
    with pytest.raises(ValueError):
        robotname, ua = useragent.useragent(bad_config, version)

    bad_config = copy.deepcopy(config)
    bad_config['UserAgent']['URL'] = 'ha ha I left this off'
    with pytest.raises(ValueError):
        robotname, ua = useragent.useragent(bad_config, version)

    bad_config = copy.deepcopy(config)
    bad_config['UserAgent']['URL'] = 'http://cocrawler.com/cocrawler.html'
    with pytest.raises(ValueError):
        robotname, ua = useragent.useragent(bad_config, version)

    bad_config = copy.deepcopy(config)
    bad_config['UserAgent']['MyPrefix'] = 'test'
    with pytest.raises(ValueError):
        robotname, ua = useragent.useragent(bad_config, version)

    bad_config = copy.deepcopy(config)
    bad_config['UserAgent']['MyPrefix'] = ''
    with pytest.raises(ValueError):
        robotname, ua = useragent.useragent(bad_config, version)
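
The test above builds a fresh deepcopy of the config for every bad variant and wraps each call in pytest.raises(ValueError). When the variants differ only in one field, pytest.mark.parametrize can express the same table of bad inputs; a sketch with an invented check_prefix() validator (not the cocrawler API):

import pytest

def check_prefix(prefix):
    # Invented validator in the spirit of useragent.useragent().
    if not prefix or prefix == 'test':
        raise ValueError('MyPrefix must be customized')

@pytest.mark.parametrize('prefix', ['', 'test'])
def test_bad_prefixes_are_rejected(prefix):
    with pytest.raises(ValueError):
        check_prefix(prefix)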

Example 74

Project: arctic
Source File: test_version_store.py
View license
def test_prunes_previous_version_append_interaction(library):
    ts = ts1
    ts2 = ts1.append(pd.DataFrame(index=[ts.index[-1] + dtd(days=1),
                                         ts.index[-1] + dtd(days=2), ],
                                  data=[3.7, 3.8],
                                  columns=['near']))
    ts2.index.name = ts1.index.name
    ts3 = ts.append(pd.DataFrame(index=[ts2.index[-1] + dtd(days=1),
                                        ts2.index[-1] + dtd(days=2)],
                                 data=[4.8, 4.9],
                                 columns=['near']))
    ts3.index.name = ts1.index.name
    ts4 = ts
    ts5 = ts2
    ts6 = ts3
    now = dt.utcnow()
    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=130)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=129)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts2, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=128)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts3, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol, as_of=2).data)
    assert_frame_equal(ts3, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=127)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts4, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol, as_of=2).data)
    assert_frame_equal(ts3, library.read(symbol, as_of=3).data)
    assert_frame_equal(ts4, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now - dtd(minutes=126)),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts5, prune_previous_version=False)
    assert_frame_equal(ts, library.read(symbol, as_of=1).data)
    assert_frame_equal(ts2, library.read(symbol, as_of=2).data)
    assert_frame_equal(ts3, library.read(symbol, as_of=3).data)
    assert_frame_equal(ts4, library.read(symbol, as_of=4).data)
    assert_frame_equal(ts5, library.read(symbol).data)

    with patch("bson.ObjectId", return_value=bson.ObjectId.from_datetime(now),
                                from_datetime=bson.ObjectId.from_datetime):
        library.write(symbol, ts6, prune_previous_version=True)

    with pytest.raises(NoDataFoundException):
        library.read(symbol, as_of=1)
    with pytest.raises(NoDataFoundException):
        library.read(symbol, as_of=2)
    with pytest.raises(NoDataFoundException):
        library.read(symbol, as_of=3)
    assert_frame_equal(ts5, library.read(symbol, as_of=5).data)
    assert_frame_equal(ts6, library.read(symbol).data)

Example 75

Project: kapsel
Source File: test_pip_api.py
View license
def test_pip_errors(monkeypatch):
    monkeypatch_conda_not_to_use_links(monkeypatch)

    def do_test(dirname):
        envdir = os.path.join(dirname, "myenv")

        conda_api.create(prefix=envdir, pkgs=['python'])

        # no packages to install
        with pytest.raises(TypeError) as excinfo:
            pip_api.install(prefix=envdir, pkgs=[])
        assert 'must specify a list' in repr(excinfo.value)

        # no packages to remove
        with pytest.raises(TypeError) as excinfo:
            pip_api.remove(prefix=envdir, pkgs=[])
        assert 'must specify a list' in repr(excinfo.value)

        # pip command not installed
        from os.path import exists as real_exists

        def mock_exists(path):
            if path.endswith("pip") or path.endswith("pip.exe"):
                return False
            else:
                return real_exists(path)

        monkeypatch.setattr('os.path.exists', mock_exists)
        with pytest.raises(pip_api.PipNotInstalledError) as excinfo:
            pip_api.install(prefix=envdir, pkgs=['foo'])
        assert 'command is not installed in the environment' in repr(excinfo.value)

        installed = pip_api.installed(prefix=envdir)
        assert dict() == installed  # with pip not installed, no packages are listed.

        # pip command exits nonzero
        error_script = """from __future__ import print_function
import sys
print("TEST_ERROR", file=sys.stderr)
sys.exit(1)
"""

        def get_failed_command(prefix, extra_args):
            return tmp_script_commandline(error_script)

        monkeypatch.setattr('conda_kapsel.internal.pip_api._get_pip_command', get_failed_command)
        with pytest.raises(pip_api.PipError) as excinfo:
            pip_api.install(prefix=envdir, pkgs=['flake8'])
        assert 'TEST_ERROR' in repr(excinfo.value)

        # pip command exits zero printing stuff on stderr
        error_message_but_success_script = """from __future__ import print_function
import sys
print("TEST_ERROR", file=sys.stderr)
sys.exit(0)
"""

        def get_failed_command(prefix, extra_args):
            return tmp_script_commandline(error_message_but_success_script)

        monkeypatch.setattr('conda_kapsel.internal.pip_api._get_pip_command', get_failed_command)
        pip_api.install(prefix=envdir, pkgs=['flake8'])

        # cannot exec pip
        def mock_popen(args, stdout=None, stderr=None):
            raise OSError("failed to exec")

        monkeypatch.setattr('subprocess.Popen', mock_popen)
        with pytest.raises(pip_api.PipError) as excinfo:
            pip_api.install(prefix=envdir, pkgs=['flake8'])
        assert 'failed to exec' in repr(excinfo.value)

    with_directory_contents(dict(), do_test)
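
The test above combines monkeypatch with pytest.raises: a patched subprocess.Popen injects the failure, and the assertion checks repr(excinfo.value) for the expected fragment. A self-contained sketch of that combination (run_tool() and its error wrapping are invented):

import subprocess
import pytest

def run_tool():
    # Hypothetical wrapper that surfaces exec failures as RuntimeError.
    try:
        subprocess.Popen(['some-tool'])
    except OSError as exc:
        raise RuntimeError('failed to exec: %s' % exc)

def test_exec_failure_is_wrapped(monkeypatch):
    def mock_popen(args, stdout=None, stderr=None):
        raise OSError('failed to exec')

    monkeypatch.setattr('subprocess.Popen', mock_popen)
    with pytest.raises(RuntimeError) as excinfo:
        run_tool()
    assert 'failed to exec' in repr(excinfo.value)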

Example 76

Project: brainiak
Source File: test_htfa.py
View license
def test_X():
    from brainiak.factoranalysis.htfa import HTFA
    import numpy as np

    n_voxel = 100
    n_tr = 20
    K = 5
    max_global_iter = 3
    max_local_iter = 3
    max_voxel = n_voxel
    max_tr = n_tr

    R = []
    n_subj = 2
    for s in np.arange(n_subj):
        R.append(np.random.randint(2, high=102, size=(n_voxel, 3)))

    htfa = HTFA(
        K,
        max_global_iter=max_global_iter,
        max_local_iter=max_local_iter,
        max_voxel=max_voxel,
        max_tr=max_tr)

    X = np.random.rand(n_voxel, n_tr)
    # Check that it does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "Input data should be a list" in str(excinfo.value)

    X = []
    # Check that it does NOT run with an empty subject list
    with pytest.raises(ValueError) as excinfo:
        htfa.fit(X, R=R)
    assert "Need at leat one subject to train the model" in str(excinfo.value)

    X = []
    X.append([1, 2, 3])
    # Check that it does NOT run when subject data is not an array
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "data should be an array" in str(excinfo.value)

    X = []
    X.append(np.random.rand(n_voxel))
    # Check that it does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "subject data should be 2D array" in str(excinfo.value)

    X = []
    for s in np.arange(n_subj):
        X.append(np.random.rand(n_voxel, n_tr))
    R = np.random.randint(2, high=102, size=(n_voxel, 3))

    # Check that it does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "Coordinates should be a list" in str(excinfo.value)

    R = []
    R.append([1, 2, 3])
    # Check that it does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "Each scanner coordinate matrix should be an array" in str(excinfo.value)

    R = []
    R.append(np.random.rand(n_voxel))
    # Check that it does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "Each scanner coordinate matrix should be 2D array" in str(excinfo.value)

    R = []
    for s in np.arange(n_subj):
        R.append(np.random.rand(n_voxel - 1, 3))
    # Check that it does NOT run when n_voxel differs between X[idx] and R[idx]
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "n_voxel should be the same in X[idx] and R[idx]" in str(excinfo.value)

Example 77

Project: brainiak
Source File: test_tfa.py
View license
def test_tfa():
    from brainiak.factoranalysis.tfa import TFA
    import numpy as np

    n_voxel = 100
    n_tr = 20
    K = 5
    max_iter = 5
    max_num_voxel = n_voxel
    max_num_tr = n_tr
    sample_scaling = 0.5
    tfa = TFA(
        K=K,
        max_iter=max_iter,
        verbose=True,
        max_num_voxel=max_num_voxel,
        max_num_tr=max_num_tr)
    assert tfa, "Invalid TFA instance!"

    R = np.random.randint(2, high=102, size=(n_voxel, 3))
    X = [1, 2, 3]
    # Check that it does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        tfa.fit(X, R=R)
    assert "Input data should be an array" in str(excinfo.value)

    X = np.random.rand(n_voxel)
    # Check that it does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        tfa.fit(X, R=R)
    assert "Input data should be 2D array" in str(excinfo.value)

    X = np.random.rand(n_voxel, n_tr)
    R = [1, 2, 3]
    # Check that it does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        tfa.fit(X, R=R)
    assert "coordinate matrix should be an array" in str(excinfo.value)

    R = np.random.rand(n_voxel)
    # Check that it does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        tfa.fit(X, R=R)
    assert "coordinate matrix should be 2D array" in str(excinfo.value)

    R = np.random.randint(2, high=102, size=(n_voxel - 1, 3))
    # Check that it does NOT run if n_voxel in X and R does not match
    with pytest.raises(TypeError) as excinfo:
        tfa.fit(X, R=R)
    assert "The number of voxels should be the same in X and R" in str(
        excinfo.value)

    R = np.random.randint(2, high=102, size=(n_voxel, 3))
    tfa.fit(X, R=R)
    assert True, "Success running TFA with one subject!"
    posterior_size = K * (tfa.n_dim + 1)
    assert tfa.local_posterior_.shape[
        0] == posterior_size,\
        "Invalid result of TFA! (wrong # element in local_posterior)"

    weight_method = 'ols'
    tfa = TFA(
        weight_method=weight_method,
        K=K,
        max_iter=max_iter,
        verbose=True,
        max_num_voxel=max_num_voxel,
        max_num_tr=max_num_tr)
    assert tfa, "Invalid TFA instance!"

    X = np.random.rand(n_voxel, n_tr)
    tfa.fit(X, R=R)
    assert True, "Success running TFA with one subject!"

    template_prior, _, _ = tfa.get_template(R)
    tfa.set_K(K)
    tfa.set_seed(200)
    tfa.fit(X, R=R, template_prior=template_prior)
    assert True, "Success running TFA with one subject and template prior!"
    assert tfa.local_posterior_.shape[
        0] == posterior_size,\
        "Invalid result of TFA! (wrong # element in local_posterior)"

    weight_method = 'odd'
    tfa = TFA(
        weight_method=weight_method,
        K=K,
        max_iter=max_iter,
        verbose=True,
        max_num_voxel=max_num_voxel,
        max_num_tr=max_num_tr)
    with pytest.raises(ValueError) as excinfo:
        tfa.fit(X, R=R)
    assert "'rr' and 'ols' are accepted as weight_method!" in str(
        excinfo.value)

Example 78

Project: hydrachain
Source File: test_base.py
View license
def test_LockSet():
    ls = LockSet(num_eligible_votes=len(privkeys))
    assert not ls
    assert len(ls) == 0

    bh = '0' * 32
    r, h = 2, 3
    v1 = VoteBlock(h, r, bh)

    # add not signed
    with pytest.raises(InvalidVoteError):
        ls.add(v1)
    assert not ls
    assert v1 not in ls

    # add signed
    v1.sign(privkeys[0])
    ls.add(v1)

    assert ls
    assert len(ls) == 1
    lsh = ls.hash
    ls.add(v1)
    assert lsh == ls.hash
    assert len(ls) == 1

    # second vote same sender
    v2 = VoteBlock(h, r, bh)
    v2.sign(privkeys[0])
    ls.add(v1)
    ls.add(v2)
    assert lsh == ls.hash
    assert len(ls) == 1

    # third vote
    v3 = VoteBlock(h, r, bh)
    v3.sign(privkeys[1])
    ls.add(v1)
    ls.add(v3)
    assert lsh != ls.hash
    assert len(ls) == 2
    assert v3 in ls

    lsh = ls.hash

    # vote wrong round
    v4 = VoteBlock(h, r + 1, bh)
    v4.sign(privkeys[2])
    with pytest.raises(InvalidVoteError):
        ls.add(v4)
    assert lsh == ls.hash
    assert len(ls) == 2
    assert v4 not in ls

    # vote twice
    v3_2 = VoteBlock(h, r, blockhash='1' * 32)
    v3_2.sign(privkeys[1])
    with pytest.raises(DoubleVotingError):
        ls.add(v3_2)
    assert lsh == ls.hash
    assert len(ls) == 2
    assert v3_2 not in ls

Example 79

Project: hydrachain
Source File: test_base.py
View license
def test_blockproposal():
    s = tester.state()

    # block 1
    s.mine(n=1)
    genesis = s.blocks[0]
    assert genesis.header.number == 0
    blk1 = s.blocks[1]
    assert blk1.header.number == 1
    gls = genesis_signing_lockset(genesis, privkeys[0])
    bp = BlockProposal(height=1, round=0, block=blk1, signing_lockset=gls, round_lockset=None)
    assert bp.lockset == gls
    assert isinstance(bp, Proposal)
    bp.sign(tester.k0)

    with pytest.raises(InvalidProposalError):  # round >0 needs round_lockset
        bp = BlockProposal(height=1, round=1, block=blk1, signing_lockset=gls, round_lockset=None)
    bp.validate_votes(validators, validators[:1])

    # block 2
    s.mine(n=1)
    blk2 = s.blocks[2]
    assert blk2.header.number == 2

    ls = LockSet(len(validators))
    for privkey in privkeys:
        v = VoteBlock(height=1, round=0, blockhash=blk1.hash)
        v.sign(privkey)
        ls.add(v)

    bp = BlockProposal(height=2, round=0, block=blk2, signing_lockset=ls, round_lockset=None)
    assert bp.lockset == ls
    with pytest.raises(InvalidProposalError):  # signature missing
        bp.validate_votes(validators, validators)

    with pytest.raises(InvalidProposalError):
        bp.sign(privkeys[0])  # privkey doesn't match coinbase
        bp.validate_votes(validators, validators)

    with pytest.raises(InvalidSignature):  # already signed
        bp.sign(tester.k0)

    bp.v = 0  # reset sigcheck hack
    bp.sign(tester.k0)

    bp.validate_votes(validators, validators)

    with pytest.raises(InvalidProposalError):  # round >0 needs round_lockset
        bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=gls, round_lockset=None)

    # block 2 round 1, timeout in round=0
    rls = LockSet(len(validators))
    for privkey in privkeys:
        v = VoteNil(height=2, round=0)
        v.sign(privkey)
        rls.add(v)
    bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=ls, round_lockset=rls)
    assert bp.lockset == rls
    bp.sign(tester.k0)
    bp.validate_votes(validators, validators)

    # serialize
    s = rlp.encode(bp)
    dbp = rlp.decode(s, BlockProposal)
    assert dbp.block == blk2

    dbp.validate_votes(validators, validators)

    # check quorumpossible lockset failure
    rls = LockSet(len(validators))
    for i, privkey in enumerate(privkeys):
        if i < 4:
            v = VoteBlock(height=2, round=0, blockhash='0' * 32)
        else:
            v = VoteNil(height=2, round=0)
        v.sign(privkey)
        rls.add(v)
    assert not rls.has_noquorum
    assert rls.has_quorum_possible
    with pytest.raises(InvalidProposalError):  # NoQuorum necessary R0
        bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=ls, round_lockset=rls)

Example 80

Project: gunicorn
Source File: test_config.py
View license
def test_property_access():
    c = config.Config()
    for s in config.KNOWN_SETTINGS:
        getattr(c, s.name)

    # Class was loaded
    assert c.worker_class == SyncWorker

    # logger class was loaded
    assert c.logger_class == glogging.Logger

    # Workers defaults to 1
    assert c.workers == 1
    c.set("workers", 3)
    assert c.workers == 3

    # Address is parsed
    assert c.address == [("127.0.0.1", 8000)]

    # User and group defaults
    assert os.geteuid() == c.uid
    assert os.getegid() == c.gid

    # Proc name
    assert "gunicorn" == c.proc_name

    # Not a config property
    pytest.raises(AttributeError, getattr, c, "foo")
    # Force it not to be an error
    class Baz(object):
        def get(self):
            return 3.14
    c.settings["foo"] = Baz()
    assert c.foo == 3.14

    # Attempt to set a cfg not via c.set
    pytest.raises(AttributeError, setattr, c, "proc_name", "baz")

    # No setting for name
    pytest.raises(AttributeError, c.set, "baz", "bar")
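
Besides the context-manager form, the test above uses the call form pytest.raises(ExcType, func, *args, **kwargs), which invokes func and expects it to raise. A small sketch showing the two forms side by side (the Config class below is invented, not the gunicorn API):

import pytest

class Config(object):
    # Minimal object used only to illustrate the two call styles.
    def set(self, name, value):
        raise AttributeError('No configuration setting for: %s' % name)

def test_two_styles_of_pytest_raises():
    c = Config()

    # Call form: pytest.raises(ExcType, func, *args, **kwargs)
    pytest.raises(AttributeError, c.set, 'baz', 'bar')

    # Equivalent context-manager form.
    with pytest.raises(AttributeError):
        c.set('baz', 'bar')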

Example 81

Project: brainiak
Source File: test_sssrm.py
View license
def test_wrong_input():
    import os
    os.environ['THEANO_FLAGS'] = 'device=cpu, floatX=float64'

    from sklearn.utils.validation import NotFittedError
    import numpy as np
    import brainiak.funcalign.sssrm

    voxels = 100
    align_samples = 400
    samples = 500
    subjects = 2
    features = 3
    n_labels = 4

    model = brainiak.funcalign.sssrm.SSSRM(n_iter=5, features=features, gamma=10.0, alpha=0.1)
    assert model, "Invalid SSSRM instance!"

    # Create a Shared response S with K = 3
    theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)
    z = np.linspace(-2, 2, samples)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)

    S = np.vstack((x, y, z))
    S_align = S[:, :align_samples]
    S_classify = S[:, align_samples:]
    X = []
    Z = []
    Z2 = []
    W = []
    y = []
    Q, R = np.linalg.qr(np.random.random((voxels, features)))
    W.append(Q)
    X.append(Q.dot(S_align) + 0.1 * np.random.random((voxels, align_samples)))
    Z.append(Q.dot(S_classify) + 0.1 * np.random.random((voxels, samples - align_samples)))
    Z2.append(Q.dot(S_classify) + 0.1 * np.random.random((voxels, samples - align_samples)))
    y.append(np.repeat(np.arange(n_labels), (samples - align_samples)/n_labels))

    # Check that transform does NOT run before fitting the model
    with pytest.raises(NotFittedError) as excinfo:
        model.transform(X)
    print("Test: transforming before fitting the model")

    # Check that predict does NOT run before fitting the model
    with pytest.raises(NotFittedError) as excinfo:
        model.predict(X)
    print("Test: predicting before fitting the model")

    # Check that it does NOT run with 1 subject on X
    with pytest.raises(ValueError) as excinfo:
        model.fit(X, y, Z)
    print("Test: running SSSRM with 1 subject (alignment)")

    # Create more subjects align and classification data
    for subject in range(1, subjects):
        Q, R = np.linalg.qr(np.random.random((voxels, features)))
        W.append(Q)
        X.append(Q.dot(S_align) + 0.1 * np.random.random((voxels, align_samples)))
        Z2.append(Q.dot(S_classify) + 0.1 * np.random.random((voxels, samples - align_samples)))

    # Check that it does NOT run with 1 subject on y
    with pytest.raises(ValueError) as excinfo:
        model.fit(X, y, Z)
    print("Test: running SSSRM with 1 subject (labels)")

    # Create more subjects labels data
    for subject in range(1, subjects):
        y.append(np.repeat(np.arange(n_labels), (samples - align_samples)/n_labels))

    # Check that it does NOT run with 1 subject on Z
    with pytest.raises(ValueError) as excinfo:
        model.fit(X, y, Z)
    print("Test: running SSSRM with 1 subject (classif.)")

    # Check that alpha is in (0,1) range
    model_bad = brainiak.funcalign.sssrm.SSSRM(n_iter=1, features=features, gamma=10.0, alpha=1.5)
    assert model_bad, "Invalid SSSRM instance!"
    with pytest.raises(ValueError) as excinfo:
        model_bad.fit(X, y, Z)
    print("Test: running SSSRM with wrong alpha")

    # Check that gamma is positive
    model_bad = brainiak.funcalign.sssrm.SSSRM(n_iter=1, features=features, gamma=-0.1, alpha=0.2)
    assert model_bad, "Invalid SSSRM instance!"
    with pytest.raises(ValueError) as excinfo:
        model_bad.fit(X, y, Z)
    print("Test: running SSSRM with wrong gamma")

Example 82

Project: audiolazy
Source File: test_core.py
View license
  @p("is_delitem", [True, False])
  def test_delitem_delattr(self, is_delitem):
    sd = StrategyDict()
    sd.strategy("sum")(lambda *args: reduce(operator.add, args))
    sd.strategy("prod")(lambda *args: reduce(operator.mul, args))

    # They work...
    assert sd.sum(7, 2, 3) == 12 == sd(7, 2, 3) == sd.default(7, 2, 3)
    assert sd.prod(7, 2, 3) == 42
    assert sd["sum"](2, 3) == 5 == sd(2, 3) == sd.default(2, 3)
    assert sd["prod"](2, 3) == 6
    with pytest.raises(KeyError): # Default isn't an item
      sd["default"](5, 4)

    # Their names are there
    assert set(sd.keys()) == {("sum",), ("prod",)}
    assert "sum" in dir(sd)
    assert "prod" in dir(sd)
    assert "sum" in vars(sd)
    assert "prod" in vars(sd)
    assert "default" in dir(sd)
    assert "default" in vars(sd)

    # Not anymore!
    if is_delitem:
      del sd["sum"]
    else:
      del sd.sum
    assert "sum" not in dir(sd)
    assert "sum" not in vars(sd)
    assert "default" in dir(sd)
    assert "default" not in vars(sd)
    with pytest.raises(AttributeError):
      sd.sum(-1, 2, 3)
    with pytest.raises(KeyError):
      sd["sum"](5, 4)
    with pytest.raises(KeyError): # About this one, nothing changed
      sd["default"](5, 4)

    # But prod is still there
    assert list(sd.keys()) == [("prod",)]
    assert len(sd) == 1
    assert "prod" in dir(sd)
    assert "prod" in vars(sd)
    assert sd.prod(-1, 2, 3) == -6
    assert sd["prod"](5, 4) == 20

    # And now there's no default strategy
    assert sd(3, 2) == NotImplemented == sd.default(3, 2)

Example 83

Project: flask-resty
Source File: test_testing.py
View license
def test_objects():
    complex_object = {
        'a': 1,
        'b': [1, 2, 3],
        'c': [{}, {'a': 1}],
        'd': {
            'a': 1,
            'b': [],
        },
    }

    assert_value(complex_object, complex_object)

    assert_value(complex_object, {})

    assert_value(complex_object, {'a': 1})

    assert_value(complex_object, {
        'b': [1, 2, 3],
        'c': [{}, {}],
    })

    assert_value(complex_object, {
        'd': {
            'a': 1,
        },
    })

    with pytest.raises(AssertionError):
        assert_value(complex_object, [])

    with pytest.raises(AssertionError):
        assert_value(complex_object, None)

    with pytest.raises(AssertionError):
        assert_value(complex_object, {
            'b': [1, 2],
        })

    with pytest.raises(AssertionError):
        assert_value(complex_object, {
            'a': 1,
            'foo': 1,
        })

    with pytest.raises(AssertionError):
        assert_value(complex_object, {
            'c': [{}, {'b': 2}],
        })

    with pytest.raises(AssertionError):
        assert_value(complex_object, {
            'b': [1, 2, 3, 4],
        })
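
assert_value above is itself an assertion helper, so its failure mode is tested by expecting AssertionError like any other exception. A sketch of the same idea with an invented assert_subset() helper:

import pytest

def assert_subset(actual, expected):
    # Hypothetical helper in the spirit of assert_value above.
    for key, value in expected.items():
        assert key in actual and actual[key] == value

def test_assert_subset_failure_is_detectable():
    assert_subset({'a': 1, 'b': 2}, {'a': 1})
    # The helper raises AssertionError on mismatch, so pytest.raises
    # can verify that it really fails when it should.
    with pytest.raises(AssertionError):
        assert_subset({'a': 1}, {'a': 2})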

Example 84

Project: dask
Source File: test_groupby.py
View license
def test_split_apply_combine_on_series():
    pdf = pd.DataFrame({'a': [1, 2, 6, 4, 4, 6, 4, 3, 7],
                        'b': [4, 2, 7, 3, 3, 1, 1, 1, 2]},
                       index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
    ddf = dd.from_pandas(pdf, npartitions=3)

    for ddkey, pdkey in [('b', 'b'), (ddf.b, pdf.b), (ddf.b + 1, pdf.b + 1)]:
        assert_eq(ddf.groupby(ddkey).a.min(), pdf.groupby(pdkey).a.min())
        assert_eq(ddf.groupby(ddkey).a.max(), pdf.groupby(pdkey).a.max())
        assert_eq(ddf.groupby(ddkey).a.count(), pdf.groupby(pdkey).a.count())
        assert_eq(ddf.groupby(ddkey).a.mean(), pdf.groupby(pdkey).a.mean())
        assert_eq(ddf.groupby(ddkey).a.nunique(), pdf.groupby(pdkey).a.nunique())
        assert_eq(ddf.groupby(ddkey).a.size(), pdf.groupby(pdkey).a.size())
        for ddof in [0, 1, 2]:
            assert_eq(ddf.groupby(ddkey).a.var(ddof),
                      pdf.groupby(pdkey).a.var(ddof))
            assert_eq(ddf.groupby(ddkey).a.std(ddof),
                      pdf.groupby(pdkey).a.std(ddof))

        assert_eq(ddf.groupby(ddkey).sum(), pdf.groupby(pdkey).sum())
        assert_eq(ddf.groupby(ddkey).min(), pdf.groupby(pdkey).min())
        assert_eq(ddf.groupby(ddkey).max(), pdf.groupby(pdkey).max())
        assert_eq(ddf.groupby(ddkey).count(), pdf.groupby(pdkey).count())
        assert_eq(ddf.groupby(ddkey).mean(), pdf.groupby(pdkey).mean())
        assert_eq(ddf.groupby(ddkey).size(), pdf.groupby(pdkey).size())
        for ddof in [0, 1, 2]:
            assert_eq(ddf.groupby(ddkey).var(ddof),
                      pdf.groupby(pdkey).var(ddof), check_dtype=False)
            assert_eq(ddf.groupby(ddkey).std(ddof),
                      pdf.groupby(pdkey).std(ddof), check_dtype=False)

    for ddkey, pdkey in [(ddf.b, pdf.b), (ddf.b + 1, pdf.b + 1)]:
        assert_eq(ddf.a.groupby(ddkey).sum(), pdf.a.groupby(pdkey).sum(), check_names=False)
        assert_eq(ddf.a.groupby(ddkey).max(), pdf.a.groupby(pdkey).max(), check_names=False)
        assert_eq(ddf.a.groupby(ddkey).count(), pdf.a.groupby(pdkey).count(), check_names=False)
        assert_eq(ddf.a.groupby(ddkey).mean(), pdf.a.groupby(pdkey).mean(), check_names=False)
        assert_eq(ddf.a.groupby(ddkey).nunique(), pdf.a.groupby(pdkey).nunique(), check_names=False)
        for ddof in [0, 1, 2]:
            assert_eq(ddf.a.groupby(ddkey).var(ddof),
                      pdf.a.groupby(pdkey).var(ddof))
            assert_eq(ddf.a.groupby(ddkey).std(ddof),
                      pdf.a.groupby(pdkey).std(ddof))

    for i in [0, 4, 7]:
        assert_eq(ddf.groupby(ddf.b > i).a.sum(), pdf.groupby(pdf.b > i).a.sum())
        assert_eq(ddf.groupby(ddf.b > i).a.min(), pdf.groupby(pdf.b > i).a.min())
        assert_eq(ddf.groupby(ddf.b > i).a.max(), pdf.groupby(pdf.b > i).a.max())
        assert_eq(ddf.groupby(ddf.b > i).a.count(), pdf.groupby(pdf.b > i).a.count())
        assert_eq(ddf.groupby(ddf.b > i).a.mean(), pdf.groupby(pdf.b > i).a.mean())
        assert_eq(ddf.groupby(ddf.b > i).a.nunique(), pdf.groupby(pdf.b > i).a.nunique())
        assert_eq(ddf.groupby(ddf.b > i).a.size(), pdf.groupby(pdf.b > i).a.size())

        assert_eq(ddf.groupby(ddf.a > i).b.sum(), pdf.groupby(pdf.a > i).b.sum())
        assert_eq(ddf.groupby(ddf.a > i).b.min(), pdf.groupby(pdf.a > i).b.min())
        assert_eq(ddf.groupby(ddf.a > i).b.max(), pdf.groupby(pdf.a > i).b.max())
        assert_eq(ddf.groupby(ddf.a > i).b.count(), pdf.groupby(pdf.a > i).b.count())
        assert_eq(ddf.groupby(ddf.a > i).b.mean(), pdf.groupby(pdf.a > i).b.mean())
        assert_eq(ddf.groupby(ddf.a > i).b.nunique(), pdf.groupby(pdf.a > i).b.nunique())
        assert_eq(ddf.groupby(ddf.b > i).b.size(), pdf.groupby(pdf.b > i).b.size())

        assert_eq(ddf.groupby(ddf.b > i).sum(), pdf.groupby(pdf.b > i).sum())
        assert_eq(ddf.groupby(ddf.b > i).min(), pdf.groupby(pdf.b > i).min())
        assert_eq(ddf.groupby(ddf.b > i).max(), pdf.groupby(pdf.b > i).max())
        assert_eq(ddf.groupby(ddf.b > i).count(), pdf.groupby(pdf.b > i).count())
        assert_eq(ddf.groupby(ddf.b > i).mean(), pdf.groupby(pdf.b > i).mean())
        assert_eq(ddf.groupby(ddf.b > i).size(), pdf.groupby(pdf.b > i).size())

        assert_eq(ddf.groupby(ddf.a > i).sum(), pdf.groupby(pdf.a > i).sum())
        assert_eq(ddf.groupby(ddf.a > i).min(), pdf.groupby(pdf.a > i).min())
        assert_eq(ddf.groupby(ddf.a > i).max(), pdf.groupby(pdf.a > i).max())
        assert_eq(ddf.groupby(ddf.a > i).count(), pdf.groupby(pdf.a > i).count())
        assert_eq(ddf.groupby(ddf.a > i).mean(), pdf.groupby(pdf.a > i).mean())
        assert_eq(ddf.groupby(ddf.a > i).size(), pdf.groupby(pdf.a > i).size())

        for ddof in [0, 1, 2]:
            assert_eq(ddf.groupby(ddf.b > i).std(ddof),
                      pdf.groupby(pdf.b > i).std(ddof))

    for ddkey, pdkey in [('a', 'a'), (ddf.a, pdf.a),
                         (ddf.a + 1, pdf.a + 1), (ddf.a > 3, pdf.a > 3)]:
        assert_eq(ddf.groupby(ddkey).b.sum(), pdf.groupby(pdkey).b.sum())
        assert_eq(ddf.groupby(ddkey).b.min(), pdf.groupby(pdkey).b.min())
        assert_eq(ddf.groupby(ddkey).b.max(), pdf.groupby(pdkey).b.max())
        assert_eq(ddf.groupby(ddkey).b.count(), pdf.groupby(pdkey).b.count())
        assert_eq(ddf.groupby(ddkey).b.mean(), pdf.groupby(pdkey).b.mean())
        assert_eq(ddf.groupby(ddkey).b.nunique(), pdf.groupby(pdkey).b.nunique())
        assert_eq(ddf.groupby(ddkey).b.size(), pdf.groupby(pdkey).b.size())

        assert_eq(ddf.groupby(ddkey).sum(), pdf.groupby(pdkey).sum())
        assert_eq(ddf.groupby(ddkey).min(), pdf.groupby(pdkey).min())
        assert_eq(ddf.groupby(ddkey).max(), pdf.groupby(pdkey).max())
        assert_eq(ddf.groupby(ddkey).count(), pdf.groupby(pdkey).count())
        assert_eq(ddf.groupby(ddkey).mean(), pdf.groupby(pdkey).mean().astype(float))
        assert_eq(ddf.groupby(ddkey).size(), pdf.groupby(pdkey).size())

        for ddof in [0, 1, 2]:
            assert_eq(ddf.groupby(ddkey).b.std(ddof),
                      pdf.groupby(pdkey).b.std(ddof))

    assert (sorted(ddf.groupby('b').a.sum().dask) ==
            sorted(ddf.groupby('b').a.sum().dask))
    assert (sorted(ddf.groupby(ddf.a > 3).b.mean().dask) ==
            sorted(ddf.groupby(ddf.a > 3).b.mean().dask))

    # test raises with incorrect key
    pytest.raises(KeyError, lambda: ddf.groupby('x'))
    pytest.raises(KeyError, lambda: ddf.groupby(['a', 'x']))
    pytest.raises(KeyError, lambda: ddf.groupby('a')['x'])
    pytest.raises(KeyError, lambda: ddf.groupby('a')['b', 'x'])
    pytest.raises(KeyError, lambda: ddf.groupby('a')[['b', 'x']])

    # test graph node labels
    assert_dask_graph(ddf.groupby('b').a.sum(), 'series-groupby-sum')
    assert_dask_graph(ddf.groupby('b').a.min(), 'series-groupby-min')
    assert_dask_graph(ddf.groupby('b').a.max(), 'series-groupby-max')
    assert_dask_graph(ddf.groupby('b').a.count(), 'series-groupby-count')
    assert_dask_graph(ddf.groupby('b').a.var(), 'series-groupby-var')
    # mean consists of sum and count operations
    assert_dask_graph(ddf.groupby('b').a.mean(), 'series-groupby-sum')
    assert_dask_graph(ddf.groupby('b').a.mean(), 'series-groupby-count')
    assert_dask_graph(ddf.groupby('b').a.nunique(), 'series-groupby-nunique')
    assert_dask_graph(ddf.groupby('b').a.size(), 'series-groupby-size')

    assert_dask_graph(ddf.groupby('b').sum(), 'dataframe-groupby-sum')
    assert_dask_graph(ddf.groupby('b').min(), 'dataframe-groupby-min')
    assert_dask_graph(ddf.groupby('b').max(), 'dataframe-groupby-max')
    assert_dask_graph(ddf.groupby('b').count(), 'dataframe-groupby-count')
    # mean consists of sum and count operations
    assert_dask_graph(ddf.groupby('b').mean(), 'dataframe-groupby-sum')
    assert_dask_graph(ddf.groupby('b').mean(), 'dataframe-groupby-count')
    assert_dask_graph(ddf.groupby('b').size(), 'dataframe-groupby-size')
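
The raises checks above pass a lambda to the call form of pytest.raises because indexing and attribute lookups are expressions, not calls. A minimal sketch of both spellings:

import pytest

def test_raises_with_lambda():
    data = {'a': 1, 'b': 2}

    # Indexing is an expression, so wrap it in a lambda for the call form.
    pytest.raises(KeyError, lambda: data['x'])

    # The context-manager form needs no wrapper.
    with pytest.raises(KeyError):
        data['x']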

Example 85

Project: psq
Source File: task_test.py
View license
def test_result():
    t = psq.Task('1', sum, (), {})
    q = MockQueue()
    q.storage.get_task.return_value = None

    with q.queue_context():
        r = psq.TaskResult('1')

    assert r.storage == q.storage

    with pytest.raises(TimeoutError):
        r.result(timeout=0.1)

    q.storage.get_task.return_value = t

    with pytest.raises(TimeoutError):
        r.result(timeout=0.1)

    t.start()

    with pytest.raises(TimeoutError):
        r.result(timeout=0.1)

    t.finish(42)

    assert r.result(timeout=0.1) == 42

    t.fail(ValueError())

    with pytest.raises(ValueError):
        r.result(timeout=0.1)

    # Test without timeout. This is tricky.

    t.reset()

    def side_effect(_):
        t.finish(43)
        q.storage.get_task.return_value = t
        q.storage.get_task.side_effect = None

    q.storage.get_task.side_effect = side_effect

    assert r.result() == 43

Example 86

Project: hansel
Source File: test_utils.py
View license
def test_intersection():

    crumb = Crumb("{base_dir}/raw/{subject_id}/{session_id}/{modality}/{image}")
    base_dir1 = tempfile.mkdtemp(prefix='crumbtest1_')
    tmp_crumb1 = crumb.replace(base_dir=base_dir1)

    base_dir2 = tempfile.mkdtemp(prefix='crumbtest2_')
    tmp_crumb2 = crumb.replace(base_dir=base_dir2)

    assert not op.exists(tmp_crumb1._path)
    assert not op.exists(tmp_crumb2._path)

    assert not tmp_crumb1.has_files()
    assert not tmp_crumb2.has_files()

    values_dict1 = {'session_id': ['session_{:02}'.format(i) for i in range( 2)],
                    'subject_id': ['subj_{:03}'.format(i)    for i in range( 3)],
                    'modality':   ['anat'],
                    'image':      ['mprage1.nii', 'mprage2.nii', 'mprage3.nii'],
                    }

    values_dict2 = {'session_id': ['session_{:02}'.format(i) for i in range(20)],
                    'subject_id': ['subj_{:03}'.format(i)    for i in range(30)],
                    'modality':   ['anat'],
                    'image':      ['mprage1.nii', 'mprage2.nii', 'mprage3.nii'],
                    }

    _ = mktree(tmp_crumb1, list(ParameterGrid(values_dict1)))
    _ = mktree(tmp_crumb2, list(ParameterGrid(values_dict2)))

    assert op.exists(tmp_crumb1.split()[0])
    assert op.exists(tmp_crumb2.split()[0])

    assert intersection(tmp_crumb1, tmp_crumb2, on=['subject_id']) == [(('subject_id', val), ) for val in tmp_crumb1['subject_id']]


    assert intersection(tmp_crumb1, tmp_crumb2,
                        on=['subject_id', 'modality']) == [(('subject_id', 'subj_000'), ('modality', 'anat')),
                                                           (('subject_id', 'subj_001'), ('modality', 'anat')),
                                                           (('subject_id', 'subj_002'), ('modality', 'anat'))]

    han_crumb = tmp_crumb2.replace(subject_id='hansel')
    assert intersection(tmp_crumb1, han_crumb, on=['subject_id']) == []

    s0_crumb = tmp_crumb2.replace(subject_id='subj_000')
    assert intersection(tmp_crumb1, s0_crumb, on=['subject_id']) == [(('subject_id', 'subj_000'), )]

    assert intersection(tmp_crumb1, s0_crumb, on=['subject_id', 'modality']) == [(('subject_id', 'subj_000'), ('modality', 'anat'))]

    assert intersection(tmp_crumb1, s0_crumb, on=['subject_id', 'image']) == [(('subject_id', 'subj_000'), ('image', 'mprage1.nii')),
                                                                              (('subject_id', 'subj_000'), ('image', 'mprage2.nii')),
                                                                              (('subject_id', 'subj_000'), ('image', 'mprage3.nii'))]

    # test raises
    pytest.raises(KeyError, intersection, tmp_crumb1, tmp_crumb2, on=['hansel'])

    pytest.raises(KeyError, intersection, tmp_crumb1, tmp_crumb2, on=['subject_id', 'modality', 'hansel'])

    pytest.raises(KeyError, intersection, tmp_crumb1, Crumb(op.expanduser('~/{files}')))

    pytest.raises(KeyError, intersection, tmp_crumb1, Crumb(op.expanduser('~/{files}')), on=['files'])
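
The call form above also forwards keyword arguments such as on=['hansel'] to the function under test. A small sketch of that with an invented lookup() function (not the hansel API):

import pytest

def lookup(record, on=None):
    # Hypothetical function taking a keyword argument, as in the example above.
    missing = [key for key in (on or []) if key not in record]
    if missing:
        raise KeyError('unknown keys: %s' % ', '.join(missing))

def test_call_form_passes_kwargs_through():
    record = {'subject_id': 'subj_000'}
    # Positional and keyword arguments after the callable are forwarded to it.
    pytest.raises(KeyError, lookup, record, on=['hansel'])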

Example 87

Project: swarmci
Source File: test_task.py
View license
def describe_task():
    def describe_init():
        def given_valid_task_type():
            def sets_task_type_property():
                subject = Task('test', TaskType.JOB, dummy_func)
                assert_that(subject.task_type).is_equal_to(TaskType.JOB)

        def given_invalid_task_type():
            def raises_error():
                with pytest.raises(ValueError) as excinfo:
                    Task('foo', 'mytype', dummy_func)
                assert_that(str(excinfo.value)).is_equal_to('task_type must be of type TaskType')

        def given_valid_name():
            def sets_name_property():
                subject = Task('test', TaskType.JOB, dummy_func)
                assert_that(subject.name).is_equal_to('test')

        def given_invalid_name():
            def raises_error():
                with pytest.raises(ValueError) as excinfo:
                    Task(None, 'mytype', dummy_func)
                assert_that(str(excinfo.value)).is_equal_to('tasks must have a name')

        def given_invalid_func():
            def raises_error():
                with pytest.raises(ValueError) as excinfo:
                    Task('foo', TaskType.JOB, 'foo')
                assert_that(str(excinfo.value)).is_equal_to('exec_func must be a callable')

    def describe_execute():
        def given_callable_exec_func():
            def records_timing_of_the_task_execution():
                parent_mock = Mock()
                parent_mock.time = Mock()
                parent_mock.time.side_effect = [1, 5]
                parent_mock.exec_func = Mock()

                subject = Task('foo', TaskType.JOB, parent_mock.exec_func, tm=parent_mock.time)
                subject.execute()

                parent_mock.assert_has_calls([call.time(), call.exec_func(), call.time()])
                assert_that(subject.runtime).is_equal_to(4)

            def expect_args_kwargs_passed_to_exec_func():
                exec_func_mock = Mock()
                subject = Task('foo', TaskType.JOB, exec_func_mock)
                exp_args = ['hello', 'world']
                exp_kwargs = {'foo': 'bar'}
                subject.execute(*exp_args, **exp_kwargs)
                exec_func_mock.assert_called_once_with(*exp_args, **exp_kwargs)

Example 88

Project: two1-python
Source File: test_paymentchannel.py
View license
def test_paymentchannel_typical():
    # Create mocked dependencies
    bc = mock.MockBlockchain()
    wallet = walletwrapper.Two1WalletWrapper(mock.MockTwo1Wallet(), bc)
    db = database.Sqlite3Database(":memory:")

    # Link the mock blockchain to the mock payment channel server as it is a
    # non-injected dependency.
    mock.MockPaymentChannelServer.blockchain = bc
    # Clear mock payment channel server channels.
    mock.MockPaymentChannelServer.channels = {}

    # Open a payment channel with 100000 deposit, 86400 seconds expiration, and 30000 fee
    pc = paymentchannel.PaymentChannel.open(db, wallet, bc, 'mock://test', 100000, 86400, 30000, False)

    # Assert payment channel properties
    expected_state = {}
    expected_state['url'] = "mock://test/" + pc.deposit_txid
    expected_state['state'] = statemachine.PaymentChannelState.CONFIRMING_DEPOSIT
    expected_state['ready'] = False
    expected_state['balance'] = 100000
    expected_state['deposit'] = 100000
    expected_state['fee'] = 30000
    expected_state['creation_time'] = lambda pc: pc.creation_time > 0
    expected_state['expiration_time'] = int(pc.creation_time + 86400)
    expected_state['expired'] = False
    expected_state['refund_tx'] = lambda pc: pc.refund_tx
    expected_state['refund_txid'] = lambda pc: pc.refund_txid
    expected_state['deposit_tx'] = lambda pc: pc.deposit_tx
    expected_state['deposit_txid'] = lambda pc: pc.deposit_txid
    expected_state['payment_tx'] = None
    expected_state['spend_tx'] = None
    expected_state['spend_txid'] = None
    assert_paymentchannel_state(expected_state, pc)

    # Check database
    with db:
        assert db.list() == [pc.url]
        assert db.read(pc.url)

    # Check blockchain
    assert bc.check_confirmed(pc.deposit_txid) is False
    assert bc.lookup_tx(pc.deposit_txid) == pc.deposit_tx

    # Try premature payment
    with pytest.raises(paymentchannel.NotReadyError):
        pc.pay(1)

    # Try premature close
    with pytest.raises(paymentchannel.NotReadyError):
        pc.close()

    # Sync payment channel
    pc.sync()
    expected_state['state'] = statemachine.PaymentChannelState.CONFIRMING_DEPOSIT
    expected_state['ready'] = False
    assert_paymentchannel_state(expected_state, pc)

    # Confirm deposit
    bc.mock_confirm(pc.deposit_txid)

    # Sync payment channel
    pc.sync()
    expected_state['state'] = statemachine.PaymentChannelState.READY
    expected_state['ready'] = True
    assert_paymentchannel_state(expected_state, pc)

    # Try excess payment
    with pytest.raises(paymentchannel.InsufficientBalanceError):
        pc.pay(pc.balance + 1)

    # Try premature close
    with pytest.raises(paymentchannel.NoPaymentError):
        pc.close()

    # Make regular payments
    assert pc.pay(1500)
    expected_state['payment_tx'] = lambda pc: pc.payment_tx
    expected_state['balance'] = 97000
    assert_paymentchannel_state(expected_state, pc)
    assert pc.pay(1)
    expected_state['payment_tx'] = lambda pc: pc.payment_tx
    expected_state['balance'] = 96999
    assert_paymentchannel_state(expected_state, pc)
    assert pc.pay(15)
    expected_state['payment_tx'] = lambda pc: pc.payment_tx
    expected_state['balance'] = 96984
    assert_paymentchannel_state(expected_state, pc)
    assert pc.pay(20000)
    expected_state['payment_tx'] = lambda pc: pc.payment_tx
    expected_state['balance'] = 76984
    assert_paymentchannel_state(expected_state, pc)

    # Close payment channel
    pc.close()

    # Check payment channel properties
    expected_state['state'] = statemachine.PaymentChannelState.CONFIRMING_SPEND
    expected_state['ready'] = False
    expected_state['spend_txid'] = str(mock.MockPaymentChannelServer.channels[pc.deposit_txid]['payment_tx'].hash)
    assert_paymentchannel_state(expected_state, pc)

    # Sync payment channel
    pc.sync()
    assert_paymentchannel_state(expected_state, pc)

    # Confirm spend
    bc.mock_confirm(pc.spend_txid)

    # Sync payment channel
    pc.sync()
    expected_state['state'] = statemachine.PaymentChannelState.CLOSED
    expected_state['spend_tx'] = mock.MockPaymentChannelServer.channels[pc.deposit_txid]['payment_tx'].to_hex()
    assert_paymentchannel_state(expected_state, pc)

    # Try payment on closed channel
    with pytest.raises(paymentchannel.ClosedError):
        pc.pay(1)

Example 89

Project: marshmallow
Source File: test_serialization.py
View license
    def test_decimal_field(self, user):
        user.m1 = 12
        user.m2 = '12.355'
        user.m3 = decimal.Decimal(1)
        user.m4 = None
        user.m5 = 'abc'
        user.m6 = [1, 2]

        field = fields.Decimal()
        assert isinstance(field.serialize('m1', user), decimal.Decimal)
        assert field.serialize('m1', user) == decimal.Decimal(12)
        assert isinstance(field.serialize('m2', user), decimal.Decimal)
        assert field.serialize('m2', user) == decimal.Decimal('12.355')
        assert isinstance(field.serialize('m3', user), decimal.Decimal)
        assert field.serialize('m3', user) == decimal.Decimal(1)
        assert field.serialize('m4', user) is None
        with pytest.raises(ValidationError):
            field.serialize('m5', user)
        with pytest.raises(ValidationError):
            field.serialize('m6', user)

        field = fields.Decimal(1)
        assert isinstance(field.serialize('m1', user), decimal.Decimal)
        assert field.serialize('m1', user) == decimal.Decimal(12)
        assert isinstance(field.serialize('m2', user), decimal.Decimal)
        assert field.serialize('m2', user) == decimal.Decimal('12.4')
        assert isinstance(field.serialize('m3', user), decimal.Decimal)
        assert field.serialize('m3', user) == decimal.Decimal(1)
        assert field.serialize('m4', user) is None
        with pytest.raises(ValidationError):
            field.serialize('m5', user)
        with pytest.raises(ValidationError):
            field.serialize('m6', user)

        field = fields.Decimal(1, decimal.ROUND_DOWN)
        assert isinstance(field.serialize('m1', user), decimal.Decimal)
        assert field.serialize('m1', user) == decimal.Decimal(12)
        assert isinstance(field.serialize('m2', user), decimal.Decimal)
        assert field.serialize('m2', user) == decimal.Decimal('12.3')
        assert isinstance(field.serialize('m3', user), decimal.Decimal)
        assert field.serialize('m3', user) == decimal.Decimal(1)
        assert field.serialize('m4', user) is None
        with pytest.raises(ValidationError):
            field.serialize('m5', user)
        with pytest.raises(ValidationError):
            field.serialize('m6', user)

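Note: a hedged, self-contained sketch of checking the error message as well as the type; to_decimal() is a hypothetical stand-in for a field's serialization step, not marshmallow code. The match= argument runs a regular-expression search against str() of the raised exception.

import decimal

import pytest

def to_decimal(value):
    # Hypothetical helper: convert or fail with a descriptive message.
    try:
        return decimal.Decimal(value)
    except decimal.InvalidOperation:
        raise ValueError("not a valid number: %r" % (value,))

def test_to_decimal_rejects_garbage():
    with pytest.raises(ValueError, match="not a valid number"):
        to_decimal("abc")
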
Example 90

Project: two1-python
Source File: test_statemachine.py
View license
def test_statemachine_create():
    """Test state machine transitions from initial state OPENING.

        Valid transitions:
            OPENING -> CONFIRMING_DEPOSIT   via create()
            OPENING -> READY                via create()

        Invalid transitions:
            OPENING -> READY                via confirm()
            OPENING -> OUTSTANDING          via pay()
            OPENING -> READY                via pay_ack() or pay_nack()
            OPENING -> CONFIRMING_SPEND     via close()
            OPENING -> CLOSED               via finalize()

    """
    # Create state machine
    model_data = {
        'url': 'test',
    }
    wallet = walletwrapper.Two1WalletWrapper(mock.MockTwo1Wallet(), mock.MockBlockchain())
    model = statemachine.PaymentChannelModel(**model_data)
    sm = statemachine.PaymentChannelStateMachine(model, wallet)

    # Expected state machine state
    expected_state = {}
    expected_state['state'] = statemachine.PaymentChannelState.OPENING
    expected_state['balance_amount'] = None
    expected_state['deposit_amount'] = None
    expected_state['fee_amount'] = None
    expected_state['creation_time'] = None
    expected_state['expiration_time'] = None
    expected_state['deposit_tx_utxo_index'] = None
    expected_state['deposit_tx'] = None
    expected_state['deposit_txid'] = None
    expected_state['deposit_txid_signature'] = None
    expected_state['refund_tx'] = None
    expected_state['refund_txid'] = None
    expected_state['spend_tx'] = None
    expected_state['spend_txid'] = None

    # Assert state machine state
    assert_statemachine_state(expected_state, sm)

    # Check invalid transition OPENING -> READY via confirm()
    with pytest.raises(statemachine.StateTransitionError):
        sm.confirm()
    # Check invalid transition OPENING -> OUTSTANDING via pay()
    with pytest.raises(statemachine.StateTransitionError):
        sm.pay(1)
    # Check invalid transition OPENING -> READY via pay_ack()
    with pytest.raises(statemachine.StateTransitionError):
        sm.pay_ack()
    # Check invalid transition OPENING -> READY via pay_nack()
    with pytest.raises(statemachine.StateTransitionError):
        sm.pay_nack()
    # Check invalid transition OPENING -> CONFIRMING_SPEND via close()
    with pytest.raises(statemachine.StateTransitionError):
        sm.close(None)
    # Check invalid transition OPENING -> CLOSED via finalize()
    with pytest.raises(statemachine.StateTransitionError):
        sm.finalize("")

    # Channel parameters
    merchant_public_key = mock.MockPaymentChannelServer.PRIVATE_KEY.public_key.to_hex()
    deposit_amount = 100000
    expiration_time = 1450223410
    fee_amount = 10000

    # Check valid transition OPENING -> CONFIRMING_DEPOSIT via create()
    (deposit_tx, redeem_script) = sm.create(
        merchant_public_key, deposit_amount, expiration_time, fee_amount, zeroconf=False)
    assert deposit_tx == "010000000119de54dd7043927219cca4c06cc8b94c7c862b6486b0f989ea4c6569fb34383d010000006b483045022100efbcffe9c800c517232c5f4417482a650c8e23a5171a3d02f94961355a8c232a022070bef91a8c956e70b673631806971994e8d0745977961c3972bbbaebc0254957012103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dffffffff0168b901000000000017a9149bc3354ccfd998cf16628449b940e6914210f1098700000000"  # nopep8
    assert redeem_script == "63210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac"  # nopep8
    expected_state['state'] = statemachine.PaymentChannelState.CONFIRMING_DEPOSIT
    expected_state['balance_amount'] = 100000
    expected_state['deposit_amount'] = 100000
    expected_state['fee_amount'] = 10000
    expected_state['creation_time'] = lambda sm: sm.creation_time > 0
    expected_state['expiration_time'] = 1450223410
    expected_state['deposit_tx_utxo_index'] = 0
    expected_state['deposit_tx'] = "010000000119de54dd7043927219cca4c06cc8b94c7c862b6486b0f989ea4c6569fb34383d010000006b483045022100efbcffe9c800c517232c5f4417482a650c8e23a5171a3d02f94961355a8c232a022070bef91a8c956e70b673631806971994e8d0745977961c3972bbbaebc0254957012103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dffffffff0168b901000000000017a9149bc3354ccfd998cf16628449b940e6914210f1098700000000"  # nopep8
    expected_state['deposit_txid'] = "ec822c4539a8b12f80fe921669b79adf6439ede3669ee4d42d1199d1f9868e72"
    expected_state['deposit_txid_signature'] = "3045022100fa23cba0e65d48ddf2e98fa64bb578f9ae3642f416462f3f991162b56dca428702205dbcf1f067034cf31564fa6c665397c2fb1c99a47afe1105fc1847e07c5ceb41"  # nopep8
    expected_state['refund_tx'] = "0100000001728e86f9d199112dd4e49e66e3ed3964df9ab7691692fe802fb1a839452c82ec000000009c473044022025a91aed42aa97486a5592face6e17f0249c90d8ca16d8fcf9db1bf3201e6e4002206374bb2fa72f424afa702e23fc161961c7ba539727093abf0caf079eef38686f0101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dacfeffffff0158920100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056"  # nopep8
    expected_state['refund_txid'] = "5efe2f7db01b74efc71054ead6ad96203d55f6cbf8172039b718c244200f7127"
    assert_statemachine_state(expected_state, sm)

    # Reset state machine
    model = statemachine.PaymentChannelModel(**model_data)
    sm = statemachine.PaymentChannelStateMachine(model, wallet)

    # Check valid transition OPENING -> READY via create() with zeroconf=True
    (deposit_tx, redeem_script) = sm.create(
        merchant_public_key, deposit_amount, expiration_time, fee_amount, zeroconf=True)
    assert deposit_tx == "010000000119de54dd7043927219cca4c06cc8b94c7c862b6486b0f989ea4c6569fb34383d010000006b483045022100efbcffe9c800c517232c5f4417482a650c8e23a5171a3d02f94961355a8c232a022070bef91a8c956e70b673631806971994e8d0745977961c3972bbbaebc0254957012103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dffffffff0168b901000000000017a9149bc3354ccfd998cf16628449b940e6914210f1098700000000"  # nopep8
    assert redeem_script == "63210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac"  # nopep8
    expected_state['state'] = statemachine.PaymentChannelState.READY
    assert_statemachine_state(expected_state, sm)

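Note: assuming the same imports and mocks as test_statemachine.py above, the invalid-transition checks could be collapsed into one loop; this is a sketch, not code from the project.

import pytest

def test_opening_invalid_transitions_sketch():
    # Build the state machine exactly as in the test above.
    wallet = walletwrapper.Two1WalletWrapper(mock.MockTwo1Wallet(), mock.MockBlockchain())
    model = statemachine.PaymentChannelModel(url='test')
    sm = statemachine.PaymentChannelStateMachine(model, wallet)

    # Every call is invalid while the channel is still OPENING.
    invalid_calls = (
        sm.confirm,
        lambda: sm.pay(1),
        sm.pay_ack,
        sm.pay_nack,
        lambda: sm.close(None),
        lambda: sm.finalize(""),
    )
    for invalid_call in invalid_calls:
        with pytest.raises(statemachine.StateTransitionError):
            invalid_call()
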
Example 91

Project: pyethereum
Source File: test_abi.py
View license
def test_encode_int():
    int8 = ('int', '8', [])
    int32 = ('int', '32', [])
    int256 = ('int', '256', [])

    int256_maximum = (
        b'\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
        b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
    )
    int256_minimum = (
        b'\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    )
    int256_128 = (
        b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
        b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80'
    )
    int256_2_to_31 = (
        b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
        b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x00\x00\x00'
    )
    int256_negative_one = (
        b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
        b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
    )

    assert encode_single(int256, int256_minimum) == int256_minimum

    assert encode_single(int8, 0) == zpad(b'\x00', 32)
    assert encode_single(int8, 2 ** 7 - 1) == zpad(b'\x7f', 32)
    assert encode_single(int8, -1) == zpad(b'\xff', 32)
    assert encode_single(int8, -2 ** 7) == zpad(b'\x80', 32)

    with pytest.raises(ValueOutOfBounds):
        encode_single(int8, 128)

    with pytest.raises(ValueOutOfBounds):
        encode_single(int8, -129)

    assert encode_single(int32, 0) == zpad(b'\x00', 32)
    assert encode_single(int32, 2 ** 7 - 1) == zpad(b'\x7f', 32)
    assert encode_single(int32, 2 ** 31 - 1) == zpad(b'\x7f\xff\xff\xff', 32)
    assert encode_single(int32, -1) == zpad(b'\xff\xff\xff\xff', 32)
    assert encode_single(int32, -2 ** 7) == zpad(b'\xff\xff\xff\x80', 32)
    assert encode_single(int32, -2 ** 31) == zpad(b'\x80\x00\x00\x00', 32)

    with pytest.raises(ValueOutOfBounds):
        encode_single(int32, 2 ** 32)

    with pytest.raises(ValueOutOfBounds):
        encode_single(int32, -(2 ** 32))

    assert encode_single(int256, 0) == zpad(b'\x00', 32)
    assert encode_single(int256, 2 ** 7 - 1) == zpad(b'\x7f', 32)
    assert encode_single(int256, 2 ** 31 - 1) == zpad(b'\x7f\xff\xff\xff', 32)
    assert encode_single(int256, 2 ** 255 - 1) == int256_maximum
    assert encode_single(int256, -1) == int256_negative_one
    assert encode_single(int256, -2 ** 7) == int256_128
    assert encode_single(int256, -2 ** 31) == int256_2_to_31
    assert encode_single(int256, -2 ** 255) == int256_minimum

    with pytest.raises(ValueOutOfBounds):
        encode_single(int256, 2 ** 256)

    with pytest.raises(ValueOutOfBounds):
        encode_single(int256, -(2 ** 256))

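Note: a sketch of how the repeated out-of-bounds checks above could be parametrized so each boundary value reports as its own test case; it assumes the same encode_single and ValueOutOfBounds imports as test_abi.py.

import pytest

@pytest.mark.parametrize("typ, value", [
    (('int', '8', []), 128),
    (('int', '8', []), -129),
    (('int', '32', []), 2 ** 32),
    (('int', '32', []), -(2 ** 32)),
    (('int', '256', []), 2 ** 256),
    (('int', '256', []), -(2 ** 256)),
])
def test_encode_int_out_of_bounds(typ, value):
    # Each out-of-range value must be rejected with ValueOutOfBounds.
    with pytest.raises(ValueOutOfBounds):
        encode_single(typ, value)
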
Example 92

Project: two1-python
Source File: test_statemachine.py
View license
def test_statemachine_finalize():
    """Test state machine finalize.

    Valid transitions:
        CONFIRMING_SPEND -> CLOSED  via finalize()
        READY -> CLOSED             via finalize()

    """
    # Create state machine
    model_data = {
        'url': 'test',
        'state': statemachine.PaymentChannelState.CONFIRMING_SPEND,
        'creation_time': 42,
        'deposit_tx': bitcoin.Transaction.from_hex("010000000119de54dd7043927219cca4c06cc8b94c7c862b6486b0f989ea4c6569fb34383d010000006b483045022100c45e5bd8d00caa1cd3ad46e078ec132c9c505b3168d1d1ffe6285cf054f54ed302203ea12c4203ccee8a9de616cc22f081eed47a78660ce0a01cb3a97e302178a573012103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dffffffff0198b101000000000017a9149bc3354ccfd998cf16628449b940e6914210f1098700000000"),  # nopep8
        'refund_tx': bitcoin.Transaction.from_hex("0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c47304402207c866a5d8d46c767975c95b9fa65051578898445c85f367c4d6b56c6b795491102202db45315bfd27aa19bd7156aa70aed48ebe331c88297711ff675da5ff069f7b90101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac0000000001888a0100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056"),  # nopep8
        'payment_tx': bitcoin.Transaction.from_hex("0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c483045022100bd2a89446c9d5985ee711747f35b8e367a90eb13970aec1b3a3ad11e01da7ac602205405fe99d5fe590fb13f0b7698e306e3bbcdd83855e156eb8e9a8901f887229f01514c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dacffffffff020a520000000000001976a914a5f30391271dfccc133d321960ffe1dccc88e1b488ac7e380100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac00000000"),  # nopep8
        'spend_tx': None,
        'spend_txid': None,
        'min_output_amount': 1000,
    }
    wallet = walletwrapper.Two1WalletWrapper(mock.MockTwo1Wallet(), mock.MockBlockchain())
    model = statemachine.PaymentChannelModel(**model_data)
    sm = statemachine.PaymentChannelStateMachine(model, wallet)

    # Assert state machine state
    expected_state = {}
    expected_state['state'] = statemachine.PaymentChannelState.CONFIRMING_SPEND
    expected_state['balance_amount'] = 78998
    expected_state['deposit_amount'] = 100000
    expected_state['fee_amount'] = 10000
    expected_state['creation_time'] = lambda sm: sm.creation_time > 0
    expected_state['expiration_time'] = 1450223410
    expected_state['deposit_tx_utxo_index'] = 0
    expected_state['deposit_tx'] = "010000000119de54dd7043927219cca4c06cc8b94c7c862b6486b0f989ea4c6569fb34383d010000006b483045022100c45e5bd8d00caa1cd3ad46e078ec132c9c505b3168d1d1ffe6285cf054f54ed302203ea12c4203ccee8a9de616cc22f081eed47a78660ce0a01cb3a97e302178a573012103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dffffffff0198b101000000000017a9149bc3354ccfd998cf16628449b940e6914210f1098700000000"  # nopep8
    expected_state['deposit_txid'] = "7e1a558c84abd5aaf57999557f4a7205d4b69241b7c9cab6c0795fdd663a51ef"
    expected_state['deposit_txid_signature'] = "30450221008f51b6565a8ee67c32529ed840116c44e1f60a628c51ac59720cc8c6df1b5eab02204ccc32c89f81425f483c64c6f8dd77e57eefd3b6a5b7548d1875f5ef3f86cf27"  # nopep8
    expected_state['refund_tx'] = "0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c47304402207c866a5d8d46c767975c95b9fa65051578898445c85f367c4d6b56c6b795491102202db45315bfd27aa19bd7156aa70aed48ebe331c88297711ff675da5ff069f7b90101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac0000000001888a0100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056"  # nopep8
    expected_state['refund_txid'] = "e49cef2fbaf7b6590eb502e4b143f24d5d95ca2e255b166f3b40bef786a32bba"
    expected_state['payment_tx'] = "0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c483045022100bd2a89446c9d5985ee711747f35b8e367a90eb13970aec1b3a3ad11e01da7ac602205405fe99d5fe590fb13f0b7698e306e3bbcdd83855e156eb8e9a8901f887229f01514c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dacffffffff020a520000000000001976a914a5f30391271dfccc133d321960ffe1dccc88e1b488ac7e380100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac00000000"  # nopep8
    expected_state['spend_tx'] = None
    expected_state['spend_txid'] = None
    assert_statemachine_state(expected_state, sm)

    # Valid finalize with refund tx
    sm.finalize("0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c47304402207c866a5d8d46c767975c95b9fa65051578898445c85f367c4d6b56c6b795491102202db45315bfd27aa19bd7156aa70aed48ebe331c88297711ff675da5ff069f7b90101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac0000000001888a0100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056")  # nopep8
    expected_state['state'] = statemachine.PaymentChannelState.CLOSED
    expected_state['balance_amount'] = 100000
    expected_state['spend_tx'] = "0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c47304402207c866a5d8d46c767975c95b9fa65051578898445c85f367c4d6b56c6b795491102202db45315bfd27aa19bd7156aa70aed48ebe331c88297711ff675da5ff069f7b90101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac0000000001888a0100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056"  # nopep8
    expected_state['spend_txid'] = "e49cef2fbaf7b6590eb502e4b143f24d5d95ca2e255b166f3b40bef786a32bba"
    assert_statemachine_state(expected_state, sm)

    # Reset state machine
    model = statemachine.PaymentChannelModel(**model_data)
    sm = statemachine.PaymentChannelStateMachine(model, wallet)

    # Valid finalize with payment tx
    sm.finalize("0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e00000000e5483045022100bd2a89446c9d5985ee711747f35b8e367a90eb13970aec1b3a3ad11e01da7ac602205405fe99d5fe590fb13f0b7698e306e3bbcdd83855e156eb8e9a8901f887229f01483045022100ee02cd312b33e78d7dd6d9044f47577a224038fa731ad34ca0ea4870575d6223022073124ecd6c63042ec6a99b34ba6d926524c6491fb1440eaa21177329f542e97501514c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dacffffffff020a520000000000001976a914a5f30391271dfccc133d321960ffe1dccc88e1b488ac7e380100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac00000000")  # nopep8
    expected_state['state'] = statemachine.PaymentChannelState.CLOSED
    expected_state['balance_amount'] = 78998
    expected_state['spend_tx'] = "0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e00000000e5483045022100bd2a89446c9d5985ee711747f35b8e367a90eb13970aec1b3a3ad11e01da7ac602205405fe99d5fe590fb13f0b7698e306e3bbcdd83855e156eb8e9a8901f887229f01483045022100ee02cd312b33e78d7dd6d9044f47577a224038fa731ad34ca0ea4870575d6223022073124ecd6c63042ec6a99b34ba6d926524c6491fb1440eaa21177329f542e97501514c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dacffffffff020a520000000000001976a914a5f30391271dfccc133d321960ffe1dccc88e1b488ac7e380100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac00000000"  # nopep8
    expected_state['spend_txid'] = "247412297242f1849a9fd8ef7b1acabdb07465da32ab5240d8ba425876a43104"
    assert_statemachine_state(expected_state, sm)

    # Reset state machine
    model = statemachine.PaymentChannelModel(**model_data)
    sm = statemachine.PaymentChannelStateMachine(model, wallet)

    # Invalid finalize with wrong refund tx
    with pytest.raises(statemachine.InvalidTransactionError):
        sm.finalize("010000000191efdfed621ebd3b3ad4044097086c5df75589f424261bfec6371e186a86725d010000009c47304402204ececbed85c20f3bae5393d68d1717cb258a9532e9976bf1e75103b1876427010220273c1440fff7b330d41407f83f871fcaf178ea0a413045424a48825821736aae0101004c50632102f1fff97def324ddea032fed4c8249113b8dce12aaf614d11bb833e587072c8a9ad6704a9a07056b1756821026179020dba5ad8275cf6389a85a00c08f3597bb8617af8148f249a4cd719ab39ac0000000001888a0100000000001976a914314d768ce14fc1f5dffdac1e4a0ed13705d4a4a688aca9a07056")  # nopep8

    # Invalid finalize with wrong payment tx
    with pytest.raises(statemachine.InvalidTransactionError):
        sm.finalize("010000000191efdfed621ebd3b3ad4044097086c5df75589f424261bfec6371e186a86725d01000000e4473044022049d1f41a867aa84266a1f5d2f6283d2b1e6ee07d068a098615d7a7868a96ed78022060c798cbb4740277cb095c399fb10ebd2716894c203527b0e6e3ed797400d10701483045022100de7c4c35c263cc2e1df1f2b06925225a72b9cb28e3b8bae4db7b078a1e4ac25c022031103dbe8993e94daa119d4c78e0bf52d3513e152d8b5bc192f8cb26ac3c683901514c50632102f1fff97def324ddea032fed4c8249113b8dce12aaf614d11bb833e587072c8a9ad6704a9a07056b1756821026179020dba5ad8275cf6389a85a00c08f3597bb8617af8148f249a4cd719ab39acffffffff02a0860100000000001976a914ffffb9d45c6cb46133f55a83c2fde9edb1c5f50688ace8030000000000001976a914314d768ce14fc1f5dffdac1e4a0ed13705d4a4a688ac00000000")  # nopep8

    # Invalid finalize with half-signed payment tx
    with pytest.raises(statemachine.InvalidTransactionError):
        sm.finalize("0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c483045022100bd2a89446c9d5985ee711747f35b8e367a90eb13970aec1b3a3ad11e01da7ac602205405fe99d5fe590fb13f0b7698e306e3bbcdd83855e156eb8e9a8901f887229f01514c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dacffffffff020a520000000000001976a914a5f30391271dfccc133d321960ffe1dccc88e1b488ac7e380100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac00000000")  # nopep8

    # Invalid finalize with invalid tx
    with pytest.raises(statemachine.InvalidTransactionError):
        sm.finalize("010000000119de54dd7043927219cca4c06cc8b94c7c862b6486b0f989ea4c6569fb34383d010000006b483045022100c45e5bd8d00caa1cd3ad46e078ec132c9c505b3168d1d1ffe6285cf054f54ed302203ea12c4203ccee8a9de616cc22f081eed47a78660ce0a01cb3a97e302178a573012103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dffffffff0198b101000000000017a9149bc3354ccfd998cf16628449b940e6914210f1098700000000")  # nopep8

    # Valid transition CLOSED -> CLOSED via finalize()
    # Finalize with valid payment
    sm.finalize("0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e00000000e5483045022100bd2a89446c9d5985ee711747f35b8e367a90eb13970aec1b3a3ad11e01da7ac602205405fe99d5fe590fb13f0b7698e306e3bbcdd83855e156eb8e9a8901f887229f01483045022100ee02cd312b33e78d7dd6d9044f47577a224038fa731ad34ca0ea4870575d6223022073124ecd6c63042ec6a99b34ba6d926524c6491fb1440eaa21177329f542e97501514c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dacffffffff020a520000000000001976a914a5f30391271dfccc133d321960ffe1dccc88e1b488ac7e380100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac00000000")  # nopep8
    expected_state['state'] = statemachine.PaymentChannelState.CLOSED
    expected_state['balance_amount'] = 78998
    expected_state['spend_tx'] = "0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e00000000e5483045022100bd2a89446c9d5985ee711747f35b8e367a90eb13970aec1b3a3ad11e01da7ac602205405fe99d5fe590fb13f0b7698e306e3bbcdd83855e156eb8e9a8901f887229f01483045022100ee02cd312b33e78d7dd6d9044f47577a224038fa731ad34ca0ea4870575d6223022073124ecd6c63042ec6a99b34ba6d926524c6491fb1440eaa21177329f542e97501514c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dacffffffff020a520000000000001976a914a5f30391271dfccc133d321960ffe1dccc88e1b488ac7e380100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac00000000"  # nopep8
    expected_state['spend_txid'] = "247412297242f1849a9fd8ef7b1acabdb07465da32ab5240d8ba425876a43104"
    assert_statemachine_state(expected_state, sm)
    # Finalize with refund
    sm.finalize("0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c47304402207c866a5d8d46c767975c95b9fa65051578898445c85f367c4d6b56c6b795491102202db45315bfd27aa19bd7156aa70aed48ebe331c88297711ff675da5ff069f7b90101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac0000000001888a0100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056")  # nopep8
    expected_state['state'] = statemachine.PaymentChannelState.CLOSED
    expected_state['balance_amount'] = 100000
    expected_state['spend_tx'] = "0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c47304402207c866a5d8d46c767975c95b9fa65051578898445c85f367c4d6b56c6b795491102202db45315bfd27aa19bd7156aa70aed48ebe331c88297711ff675da5ff069f7b90101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac0000000001888a0100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056"  # nopep8
    expected_state['spend_txid'] = "e49cef2fbaf7b6590eb502e4b143f24d5d95ca2e255b166f3b40bef786a32bba"
    assert_statemachine_state(expected_state, sm)

    # Invalid transition CLOSED -> OUTSTANDING via pay()
    with pytest.raises(statemachine.StateTransitionError):
        sm.pay(1)
    # Invalid transition CLOSED -> READY via pay_ack()
    with pytest.raises(statemachine.StateTransitionError):
        sm.pay_ack()
    # Invalid transition CLOSED -> READY via pay_nack()
    with pytest.raises(statemachine.StateTransitionError):
        sm.pay_nack()
    # Invalid transition CLOSED -> CONFIRMING_DEPOSIT via close()
    with pytest.raises(statemachine.StateTransitionError):
        sm.close(None)

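Note: besides the "with" form used above, pytest.raises also accepts a callable plus its arguments and returns an ExceptionInfo object. The finalize() helper below is hypothetical, included only to keep the sketch self-contained.

import pytest

def test_callable_form_sketch():
    def finalize(tx_hex):
        # Hypothetical helper, not the state machine method.
        if not tx_hex:
            raise ValueError("empty transaction")
        return tx_hex

    excinfo = pytest.raises(ValueError, finalize, "")
    assert "empty transaction" in str(excinfo.value)
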
Example 93

Project: f5-common-python
Source File: test_unix_ls.py
View license
def test_E_unix_ls(mgmt_root):
    ntf = NamedTemporaryFile(delete=False)
    ntf_basename = os.path.basename(ntf.name)
    ntf.write('text for test file')
    ntf.seek(0)
    mgmt_root.shared.file_transfer.uploads.upload_file(ntf.name)
    tpath_name = '/var/config/rest/downloads/{0}'.format(ntf_basename)

    # create
    fls1 = mgmt_root.tm.util.unix_ls.exec_cmd('run', utilCmdArgs=tpath_name)
    # grab tmos version for later use in version discrepancy
    tmos_ver = fls1._meta_data['bigip']._meta_data['tmos_version']

    # validate object was created
    assert fls1.utilCmdArgs == tpath_name

    # commandResult should be present with successful listing
    assert 'commandResult' in fls1.__dict__

    # commandResult listing should match the file we requested a listing for
    assert '{0}\n'.format(fls1.utilCmdArgs) == fls1.commandResult

    # UtilError should be raised when non-existent file is mentioned
    with pytest.raises(UtilError) as err:
        mgmt_root.tm.util.unix_ls.exec_cmd('run',
                                           utilCmdArgs='/configs/testfile.txt')
    # Check the error after the with block; statements following the raising
    # call inside the block would never run.
    assert 'No such file or directory' in str(err.value)

    # clean up created file
    mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name)

    # test that a bad command option errors out
    if LooseVersion(tmos_ver) < LooseVersion('12.0.0'):
        with pytest.raises(UtilError) as err:
            mgmt_root.tm.util.unix_ls.exec_cmd('run',
                                               utilCmdArgs='-9')
        assert 'invalid option -- 9' in str(err.value)
    else:
        with pytest.raises(iControlUnexpectedHTTPError) as err:
            mgmt_root.tm.util.unix_ls.exec_cmd('run',
                                               utilCmdArgs='-9')
        assert err.value.response.status_code == 400
        assert 'unix-ls does not support' in err.value.response.text

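Note: assertions placed inside a with pytest.raises(...) block after the call that raises never execute, because the exception ends the block; they belong after it and read the exception through err.value. A minimal self-contained sketch with a hypothetical FakeHTTPError class:

import pytest

class FakeHTTPError(Exception):
    # Hypothetical error carrying response-like details, for illustration.
    def __init__(self, status_code, text):
        super(FakeHTTPError, self).__init__(text)
        self.status_code = status_code
        self.text = text

def run_command(arg):
    if arg == "-9":
        raise FakeHTTPError(400, "unix-ls does not support that option")
    return "ok"

def test_bad_option_is_rejected():
    with pytest.raises(FakeHTTPError) as err:
        run_command("-9")
    # These checks run; inside the with block they would have been skipped.
    assert err.value.status_code == 400
    assert "does not support" in err.value.text
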
Example 94

Project: zerodb
Source File: test_ssl.py
View license
def _test_basic(root_cert=True, root_password=False,
                user_cert=True, user_password=False,
                ):

    if root_password:
        root_pwd, _ = kdf.hash_password(
                'root', 'root_password',
                key_file=None, cert_file=None,
                appname='zerodb.com', key=None)

    # zerodb.server took care of setting up a database with a root
    # user and starting a server for it.  The root user's cert is from
    # ZEO.tests.testssl.  The server is using a server cert from ZEO.tests.
    addr, stop = zerodb.server(
        init=dict(
            cert=ZEO.tests.testssl.client_cert if root_cert else None,
            password='root_password' if root_password else None,
            ),
        )

    # Create an admin client.  Admin data aren't encrypted, so we use
    # a regular ZEO client.
    # XXX this should be in zerodb.db
    # XXX along with kdf
    def admin_db_factory():
        return ZEO.DB(
            addr,
            ssl=ZEO.tests.testssl.client_ssl() if root_cert else nobody_ssl(),
            credentials=dict(name='root', password=root_pwd)
            if root_password else None,
            wait_timeout=19999,
            )

    admin_db = admin_db_factory()
    with admin_db.transaction() as conn:

        # The get_admin function gets us an admin object with CRUD methods.
        admin = zerodb.permissions.base.get_admin(conn)
        [root] = admin.users.values()
        if root_cert:
            [root_der] = root.certs

            assert (set(pem.strip()
                        for pem in admin.certs.data.strip().split('\n\n')) ==
                    set(pem.strip()
                        for pem in (nobody_pem, root.certs[root_der]))
                    )
            assert admin.uids[root_der] == root.id
        else:
            assert admin.certs.data.strip() == nobody_pem.strip()
        assert len(admin.uids) == (2 if root_cert else 1)
        assert len(admin.users_by_name) == 1
        assert admin.users_by_name[root.name] is root

        # Let's add a user:
        admin.add_user('user0',
                       pem_data=(pem_data('cert0') if user_cert else None),
                       password=('password0' if user_password else None),
                       )

        [uid0] = [uid for uid in admin.users if uid != root.id]

    admin_db.close()

    # Now, let's try connecting
    def user_db_factory(n='0'):
        return zerodb.DB(
            addr, username='user0', key=user_key,
            cert_file=pem_path('cert' + n) if user_cert else None,
            key_file=pem_path('key' + n) if user_cert else None,
            server_cert=ZEO.tests.testssl.server_cert,
            password='password' + n if user_password else None,
            wait_timeout=1
            )

    db = user_db_factory()

    # we can access the root object.
    assert db._root._p_oid == uid0

    # It's empty now:
    assert len(db._root) == 0

    # Let's put something in it:
    db._root['x'] = 1
    db._root['s'] = db._root.__class__()
    db._root['s']['x'] = 2

    db._connection.transaction_manager.commit()

    # Close the db and reopen:
    db._db.close()

    # Reopen, and make sure the data are there:
    db = user_db_factory()

    assert db._root._p_oid == uid0
    assert len(db._root) == 2
    assert db._root['x'] == 1
    assert db._root['s']['x'] == 2
    db._db.close()

    # The admin user can no longer access the user's folder:
    admin_db = admin_db_factory()
    with admin_db.transaction() as conn:
        admin = zerodb.permissions.base.get_admin(conn)
        user_root = admin.users[uid0].root
        with pytest.raises(ZODB.POSException.StorageError) as exc_info:
            len(user_root)

        assert ('Attempt to access encrypted data of others'
                in str(exc_info.value))

    # Note that we had to close and reopen the admin connection
    # because invalidations aren't sent across users. (Even clearing
    # the cache doesn't work (maybe a misfeature))

    # The user's data are encrypted:
    server_server = zerodb.forker.last_server
    storage = server_server.server.storages['1']
    assert storage.loadBefore(uid0, maxtid)[0].startswith(b'.e')

    # Let's change the user's credentials:

    with admin_db.transaction() as conn:
        admin = zerodb.permissions.base.get_admin(conn)
        admin.change_cert(
            'user0',
            pem_data('cert1') if user_cert else None,
            'password1' if user_password else None,
            )

    # Now login with the old cert will fail:
    with pytest.raises(ZEO.Exceptions.ClientDisconnected):
        user_db_factory()

    # But login with the new one will work:
    db = user_db_factory('1')
    assert len(db._root) == 2
    db._db.close()

    # Finally, let's remove the user:
    with admin_db.transaction() as conn:
        admin = zerodb.permissions.base.get_admin(conn)
        admin.del_user('user0')

    # Now, they can't log in at all:
    for i in '01':
        with pytest.raises(ZEO.Exceptions.ClientDisconnected):
            user_db_factory(i)

    admin_db.close()

    # The admin user can login as an ordinary ZeroDB user:
    db = zerodb.DB(
        addr, username='root', key=root_key,
        cert_file=ZEO.tests.testssl.client_cert if root_cert else None,
        key_file=ZEO.tests.testssl.client_key if root_cert else None,
        server_cert=ZEO.tests.testssl.server_cert,
        password='root_password' if root_password else None,
        wait_timeout=1
        )
    # They have an empty root
    assert len(db._root) == 0

    stop()

Example 95

Project: memsql-python
Source File: test_select_result.py
View license
def test_result_order():
    raw_data = [[random.randint(1, 2 ** 32) for _ in range(len(FIELDS))] for _ in range(256)]
    res = database.SelectResult(FIELDS, raw_data)

    for i, row in enumerate(res):
        reference = dict(zip(FIELDS, raw_data[i]))
        ordered = OrderedDict(zip(FIELDS, raw_data[i]))
        doppel = database.Row(FIELDS, raw_data[i])

        assert doppel == row
        assert row == reference
        assert row == ordered
        assert list(row.keys()) == FIELDS
        assert list(row.values()) == raw_data[i]
        assert sorted(row) == sorted(FIELDS)
        assert list(row.items()) == list(zip(FIELDS, raw_data[i]))
        assert list(row.values()) == raw_data[i]
        assert list(row.keys()) == FIELDS
        assert list(row.items()) == list(zip(FIELDS, raw_data[i]))

        for f in FIELDS:
            assert f in row
            assert f in row
            assert row[f] == reference[f]
            assert row['cloud'] == reference['cloud']
            assert row[f] == ordered[f]
            assert row['cloud'] == ordered['cloud']

        assert dict(row) == reference
        assert dict(row) == dict(ordered)

        with pytest.raises(KeyError):
            row['derp']

        with pytest.raises(AttributeError):
            row.derp

        with pytest.raises(NotImplementedError):
            row.pop()

        with pytest.raises(NotImplementedError):
            reversed(row)

        with pytest.raises(NotImplementedError):
            row.update({'a': 'b'})

        with pytest.raises(NotImplementedError):
            row.setdefault('foo', 'bar')

        with pytest.raises(NotImplementedError):
            row.fromkeys((1,))

        with pytest.raises(NotImplementedError):
            row.clear()

        with pytest.raises(NotImplementedError):
            del row['mega']

        reference['foo'] = 'bar'
        reference['cloud'] = 'blah'
        ordered['foo'] = 'bar'
        ordered['cloud'] = 'blah'
        row['foo'] = 'bar'
        row['cloud'] = 'blah'

        assert row == reference
        assert dict(row) == reference
        assert len(row) == len(reference)
        assert row == ordered
        assert dict(row) == dict(ordered)
        assert len(row) == len(ordered)

        assert row.get('cloud') == reference.get('cloud')
        assert row.get('cloud') == ordered.get('cloud')
        assert row.get('NOPE', 1) == reference.get('NOPE', 1)
        assert row.get('NOPE', 1) == ordered.get('NOPE', 1)

        assert json.dumps(row, sort_keys=True) == json.dumps(reference, sort_keys=True)
        assert json.dumps(row, sort_keys=True) == json.dumps(ordered, sort_keys=True)

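Note: a self-contained sketch of the NotImplementedError checks above, using a hypothetical ReadOnlyRow stand-in instead of database.Row so it runs on its own.

import pytest

class ReadOnlyRow(dict):
    # Hypothetical mapping that rejects mutating dict methods.
    def _blocked(self, *args, **kwargs):
        raise NotImplementedError
    pop = popitem = clear = update = setdefault = _blocked

def test_unsupported_row_operations():
    row = ReadOnlyRow(cloud=1)
    for op in (row.pop,
               row.clear,
               lambda: row.update({'a': 'b'}),
               lambda: row.setdefault('foo', 'bar')):
        with pytest.raises(NotImplementedError):
            op()
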
Example 97

Project: datafari
Source File: test_resources.py
View license
    def testResolve(self):
        ad = pkg_resources.Environment([])
        ws = WorkingSet([])
        # Resolving no requirements -> nothing to install
        assert list(ws.resolve([], ad)) == []
        # Request something not in the collection -> DistributionNotFound
        with pytest.raises(pkg_resources.DistributionNotFound):
            ws.resolve(parse_requirements("Foo"), ad)

        Foo = Distribution.from_filename(
            "/foo_dir/Foo-1.2.egg",
            metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
        )
        ad.add(Foo)
        ad.add(Distribution.from_filename("Foo-0.9.egg"))

        # Request thing(s) that are available -> list to activate
        for i in range(3):
            targets = list(ws.resolve(parse_requirements("Foo"), ad))
            assert targets == [Foo]
            list(map(ws.add, targets))
        with pytest.raises(VersionConflict):
            ws.resolve(parse_requirements("Foo==0.9"), ad)
        ws = WorkingSet([]) # reset

        # Request an extra that causes an unresolved dependency for "Baz"
        with pytest.raises(pkg_resources.DistributionNotFound):
            ws.resolve(parse_requirements("Foo[bar]"), ad)
        Baz = Distribution.from_filename(
            "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
        )
        ad.add(Baz)

        # Activation list now includes resolved dependency
        assert list(ws.resolve(parse_requirements("Foo[bar]"), ad)) == [Foo, Baz]
        # Requests for conflicting versions produce VersionConflict
        with pytest.raises(VersionConflict) as vc:
            ws.resolve(parse_requirements("Foo==1.2\nFoo!=1.2"), ad)

        msg = 'Foo 0.9 is installed but Foo==1.2 is required'
        assert vc.value.report() == msg

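Note: the vc object captured above is a pytest ExceptionInfo; besides .value it also exposes .type and .traceback. A small self-contained sketch:

import pytest

def test_exceptioninfo_attributes_sketch():
    with pytest.raises(ValueError) as excinfo:
        int("not a number")
    # ExceptionInfo exposes the exception class, instance and traceback.
    assert excinfo.type is ValueError
    assert "invalid literal" in str(excinfo.value)
    assert len(excinfo.traceback) > 0
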
Example 98

Project: python-flextls
Source File: test_field.py
View license
    def test_uint16enumfield(self):
        f = UInt16EnumField(
            "test",
            0,
            {
                0: "v_00000",
                32768: "v_32768"
            }
        )

        assert f.value == 0
        assert f.assemble() == b"\x00\x00"
        assert f.get_value_name() == "v_00000"

        f.value = 1
        assert f.assemble() == b"\x00\x01"
        assert f.get_value_name() == "n/a"

        f.value = 32768
        assert f.assemble() == b"\x80\x00"
        assert f.get_value_name() == "v_32768"
        assert f.get_value_name(True).startswith("v_32768")
        assert f.get_value_name() != f.get_value_name(True)

        f.value = "v_00000"
        assert f.value == 0

        with pytest.raises(ValueError):
            f.value = "v_00001"

        assert f.value == 0

        with pytest.raises(TypeError):
            f.value = []

        with pytest.raises(TypeError):
            f.set_value([])

        f.set_value([], True)

        assert f.dissect(b"\x80\x00") == b""
        assert f.value == 32768

        assert f.dissect(b"\x00\x00\x99") == b"\x99"

        with pytest.raises(NotEnoughData):
            f.dissect(b"")

        with pytest.raises(NotEnoughData):
            f.dissect(b"\x00")

Example 99

Project: mongokat
Source File: test_api.py
View license
def test_document_common_methods(Sample):

  from bson import ObjectId
  import collections

  assert Sample.collection.find().count() == 0

  # Instantiate
  new_object = Sample({"name": "XXX", "url": "http://example.com"})

  # Should not save to DB yet.
  assert Sample.collection.find().count() == 0

  # Now save()
  new_object.save()

  # Once the object is in DB, we can't save() it again.
  with pytest.raises(Exception):
    new_object.save()

  assert type(new_object["_id"]) == ObjectId

  assert Sample.collection.find().count() == 1
  db_object = Sample.collection.find_one()
  assert type(db_object) == dict
  assert db_object["name"] == "XXX"

  # test insert()
  inserted_object = Sample.insert({"name": "ZZZ", "url": "http://example2.com", "stats": {"nb_of_products": 2}})
  assert type(inserted_object) == ObjectId

  assert Sample.collection.find().count() == 2

  # Find back with different methods
  orm_object = Sample.find_by_id(db_object["_id"])
  assert orm_object["name"] == "XXX"
  orm_object = Sample.find_by_id(str(db_object["_id"]))
  assert orm_object["name"] == "XXX"
  orm_object = Sample.find_by_id({"_id": db_object["_id"]})
  assert orm_object["name"] == "XXX"
  orm_object = Sample.find_by_id({"_id": str(db_object["_id"])})
  assert orm_object["name"] == "XXX"
  assert isinstance(orm_object, sample_models.SampleDocument)

  # exists()
  assert Sample.exists({"name": "XXX"})

  # Other find styles
  cursor = Sample.find({"name": "XXX"})
  assert "cursor" in str(type(cursor)).lower()
  orm_objects = list(cursor)
  assert len(orm_objects) == 1
  assert isinstance(orm_objects[0], sample_models.SampleDocument)
  assert orm_objects[0]["name"] == "XXX"

  orm_object = Sample.find_one({"_id": db_object["_id"]})
  assert orm_object["name"] == "XXX"
  assert isinstance(orm_object, sample_models.SampleDocument)

  # TODO - should that not work?
  orm_object = Sample.find_one({"_id": str(db_object["_id"])})
  assert orm_object is None

  col_cursor = Sample.iter_column({"name": "XXX"})
  assert isinstance(col_cursor, collections.Iterable)
  assert list(col_cursor) == [new_object["_id"]]

  col = Sample.list_column({"name": "XXX"}, field="name")
  assert col == ["XXX"]
  col = Sample.list_column({"name": "ZZZ"}, field="stats.nb_of_products")
  assert col == [2]

  with pytest.raises(KeyError):
    Sample.list_column({"name": "ZZZ"}, field="inexistent_field")

  # We should be able to fetch & save partial objects.
  orm_object = Sample.find_by_id(db_object["_id"], fields=["url"])
  assert list(dict(orm_object).keys()) == ["url"]
  assert dict(orm_object)["url"] == "http://example.com"

  # If we save() that, it will create a new object because we lack an _id :(
  with pytest.raises(Exception):
    orm_object.save()

  assert Sample.collection.find().count() == 2

  # FIXME not anymore as we are requesting _id for each query
  # orm_object.save(force=True)

  # assert Sample.collection.find().count() == 3

  orm_object = Sample.find_by_id(db_object["_id"], fields=["url", "_id"])
  assert dict(orm_object) == {"url": "http://example.com", "_id": db_object["_id"]}

  # Change the data a bit and save.
  # This would remove "name" from the doc.
  orm_object["url"] = "http://other.example.com"

  # Not authorized!
  with pytest.raises(Exception):
    orm_object.save()

  assert Sample.collection.find().count() == 2
  db_object = Sample.collection.find_one({"_id": db_object["_id"]})
  assert "name" in db_object

  orm_object.save(force=True)

  # Should not add anything new
  assert Sample.collection.find().count() == 2

  db_object = Sample.collection.find_one({"_id": db_object["_id"]})
  assert "name" not in db_object
  assert db_object["url"] == "http://other.example.com"

  orm_object = Sample.find_by_id(db_object["_id"], fields=["_id"])
  orm_object["name"] = "YYY"

  # This one should not overwrite unset fields.
  orm_object.save_partial()

  db_object = Sample.collection.find_one({"_id": db_object["_id"]})
  assert db_object["name"] == "YYY"
  assert db_object["url"] == "http://other.example.com"

  # Test the reload() method by changing the data from somewhere else
  Sample.collection.update({"_id": db_object["_id"]}, {"$set": {"name": "AAA"}})

  assert orm_object["name"] == "YYY"

  orm_object.reload()

  assert orm_object["name"] == "AAA"

  # Test .update() - local dict update()
  orm_object.update({"name": "BBB"})

  assert orm_object["name"] == "BBB"

  # Should not have changed the DB
  db_object = Sample.collection.find_one({"_id": db_object["_id"]})
  assert db_object["name"] == "AAA"

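Note: pytest.raises(Exception), as used in this example, passes on any error; a narrower exception type or a match= pattern keeps the test from passing on unrelated failures. A hypothetical, self-contained sketch:

import pytest

def test_specific_exception_sketch():
    def save(already_saved):
        # Hypothetical helper standing in for a second save() call.
        if already_saved:
            raise RuntimeError("document already saved")

    with pytest.raises(RuntimeError, match="already saved"):
        save(already_saved=True)
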
Example 100

Project: umongo
Source File: test_fields.py
View license
    def test_embedded_document(self):

        @self.instance.register
        class MyEmbeddedDocument(EmbeddedDocument):
            a = fields.IntField(attribute='in_mongo_a')
            b = fields.IntField()

        embedded = MyEmbeddedDocument()
        assert embedded.to_mongo(update=True) is None
        assert not embedded.is_modified()

        @self.instance.register
        class MyDoc(Document):
            embedded = fields.EmbeddedField(MyEmbeddedDocument, attribute='in_mongo_embedded')

        MySchema = MyDoc.Schema

        # Make sure embedded document doesn't have implicit _id field
        assert '_id' not in MyEmbeddedDocument.Schema().fields
        assert 'id' not in MyEmbeddedDocument.Schema().fields

        MyDataProxy = data_proxy_factory('My', MySchema())
        d = MyDataProxy()
        d.from_mongo(data={'in_mongo_embedded': {'in_mongo_a': 1, 'b': 2}})
        assert d.dump() == {'embedded': {'a': 1, 'b': 2}}
        embedded = d.get('embedded')
        assert type(embedded) == MyEmbeddedDocument
        assert embedded.a == 1
        assert embedded.b == 2
        assert embedded.dump() == {'a': 1, 'b': 2}
        assert embedded.to_mongo() == {'in_mongo_a': 1, 'b': 2}
        assert d.to_mongo() == {'in_mongo_embedded': {'in_mongo_a': 1, 'b': 2}}

        d2 = MyDataProxy()
        d2.from_mongo(data={'in_mongo_embedded': {'in_mongo_a': 1, 'b': 2}})
        assert d == d2

        embedded.a = 3
        assert embedded.is_modified()
        assert embedded.to_mongo(update=True) == {'$set': {'in_mongo_a': 3}}
        assert d.to_mongo(update=True) == {'$set': {'in_mongo_embedded': {'in_mongo_a': 3, 'b': 2}}}
        embedded.clear_modified()
        assert embedded.to_mongo(update=True) is None
        assert d.to_mongo(update=True) is None

        del embedded.a
        assert embedded.to_mongo(update=True) == {'$unset': {'in_mongo_a': ''}}
        assert d.to_mongo(update=True) == {'$set': {'in_mongo_embedded': {'b': 2}}}

        d.set('embedded', MyEmbeddedDocument(a=4))
        assert d.get('embedded').to_mongo(update=True) == {'$set': {'in_mongo_a': 4}}
        d.get('embedded').clear_modified()
        assert d.get('embedded').to_mongo(update=True) is None
        assert d.to_mongo(update=True) == {'$set': {'in_mongo_embedded': {'in_mongo_a': 4}}}

        embedded_doc = MyEmbeddedDocument(a=1, b=2)
        assert embedded_doc.a == 1
        assert embedded_doc.b == 2
        assert embedded_doc == {'in_mongo_a': 1, 'b': 2}
        assert embedded_doc == MyEmbeddedDocument(a=1, b=2)
        assert embedded_doc['a'] == 1
        assert embedded_doc['b'] == 2

        embedded_doc.clear_modified()
        embedded_doc.update({'b': 42})
        assert embedded_doc.is_modified()
        assert embedded_doc.a == 1
        assert embedded_doc.b == 42

        with pytest.raises(ValidationError):
            MyEmbeddedDocument(in_mongo_a=1, b=2)

        embedded_doc['a'] = 1
        assert embedded_doc.a == embedded_doc['a'] == 1
        del embedded_doc['a']
        assert embedded_doc.a is embedded_doc['a'] is None

        # Test repr readability
        repr_d = repr(MyEmbeddedDocument(a=1, b=2))
        assert 'tests.test_fields.MyEmbeddedDocument' in repr_d
        assert "'in_mongo_a'" not in repr_d
        assert "'a': 1" in repr_d
        assert "'b': 2" in repr_d

        # Test unknown fields
        with pytest.raises(AttributeError):
            embedded_doc.dummy
        with pytest.raises(AttributeError):
            embedded_doc.dummy = None
        with pytest.raises(AttributeError):
            del embedded_doc.dummy
        with pytest.raises(KeyError):
            embedded_doc['dummy']
        with pytest.raises(KeyError):
            embedded_doc['dummy'] = None
        with pytest.raises(KeyError):
            del embedded_doc['dummy']
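
Note: pytest.raises also accepts a tuple of exception types, which can shorten paired attribute/item checks like the ones above. A self-contained sketch:

import pytest

def test_raises_accepts_exception_tuple():
    class Box(object):
        """Hypothetical empty object for this sketch."""

    box = Box()
    # Either an AttributeError or a KeyError satisfies the tuple form.
    for access in (lambda: box.dummy, lambda: {}["dummy"]):
        with pytest.raises((AttributeError, KeyError)):
            access()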