nose.tools.eq_

Here are examples of the Python API nose.tools.eq_, taken from open source projects. eq_(a, b, msg=None) is nose's shorthand equality assertion: it checks that a == b and raises AssertionError (using the optional msg) when the two values differ.

200 Examples
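To show the call signature before diving into the project code, here is a minimal, illustrative sketch (not taken from any of the projects below). eq_ can be called through the module, as in Example 1, or imported directly, as most of the later examples do.

import nose.tools
from nose.tools import eq_

def test_eq_passes():
    # 2 + 2 == 4, so the assertion passes silently.
    nose.tools.eq_(2 + 2, 4)

def test_eq_fails_with_message():
    # The values differ, so eq_ raises AssertionError carrying the optional msg.
    eq_('spam', 'eggs', msg="expected both values to be 'spam'")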

Example 1

Project: ldsc
Source File: test_munge_sumstats.py
def test_get_compression_gzip():
    y, x = munge.get_compression('foo.gz')
    nose.tools.eq_(x, 'gzip')
    y, x = munge.get_compression('foo.bz2')
    nose.tools.eq_(x, 'bz2')
    y, x = munge.get_compression('foo.bar')
    nose.tools.eq_(x, None)

Example 2

Project: mir_eval
Source File: test_util.py
def test_bipartite_match():
    # This test constructs a graph as follows:
    #   v9 -- (u0)
    #   v8 -- (u0, u1)
    #   v7 -- (u0, u1, u2)
    #   ...
    #   v0 -- (u0, u1, ..., u9)
    #
    # This structure and ordering of this graph should force Hopcroft-Karp to
    # hit each algorithm/layering phase
    #
    G = collections.defaultdict(list)

    u_set = ['u{:d}'.format(_) for _ in range(10)]
    v_set = ['v{:d}'.format(_) for _ in range(len(u_set)+1)]
    for i, u in enumerate(u_set):
        for v in v_set[:-i-1]:
            G[v].append(u)

    matching = util._bipartite_match(G)

    # Make sure that each u vertex is matched
    nose.tools.eq_(len(matching), len(u_set))

    # Make sure that there are no duplicate keys
    lhs = set([k for k in matching])
    rhs = set([matching[k] for k in matching])

    nose.tools.eq_(len(matching), len(lhs))
    nose.tools.eq_(len(matching), len(rhs))

    # Finally, make sure that all detected edges are present in G
    for k in matching:
        v = matching[k]
        assert v in G[k] or k in G[v]

Example 3

Project: Sublime-Bogo
Source File: test_engine.py
    def test_normal_typing(self):
        eq_(process_sequence('v'),     'v')
        eq_(process_sequence('aw'),   'ă')
        eq_(process_sequence('w'),    'ư')
        eq_(process_sequence('ow'),   'ơ')
        eq_(process_sequence('oo'),   'ô')
        eq_(process_sequence('Oo'),   'Ô')
        eq_(process_sequence('dd'),   'đ')
        eq_(process_sequence('muaf'), 'mùa')
        eq_(process_sequence('Doongd'), 'Đông')
        eq_(process_sequence('gif'),  'gì')
        eq_(process_sequence('loAnj'), 'loẠn')
        eq_(process_sequence('muongw'), 'mương')
        eq_(process_sequence('qur'), 'qur')
        eq_(process_sequence('Tosan'), 'Toán')
        eq_(process_sequence('tusnw'), 'tứn')
        eq_(process_sequence('dee'), 'dê')
        eq_(process_sequence('mowis'), 'mới')
        eq_(process_sequence('uwa'), 'ưa')
        eq_(process_sequence('uwo'), 'ưo')
        eq_(process_sequence('ddx'), 'đx')
        eq_(process_sequence('hoacw'), 'hoăc')
        eq_(process_sequence('cuooi'), 'cuôi')

        eq_(process_sequence('tooi'), 'tôi')
        eq_(process_sequence('chuyeenr'), 'chuyển')
        eq_(process_sequence('ddoonjg'), 'động')
        eq_(process_sequence('nheechs'), 'nhếch')

        # uơ related
        eq_(process_sequence('quowr'), 'quở')
        eq_(process_sequence('huow'), 'huơ')
        eq_(process_sequence('thuowr'), 'thuở')
        eq_(process_sequence('QUOWR'), 'QUỞ')
        eq_(process_sequence('HUOW'), 'HUƠ')
        eq_(process_sequence('THUOWR'), 'THUỞ')

        # English words
        eq_(process_key_no_skip('case'), 'cáe')
        eq_(process_key_no_skip('reset'), 'rết')

Example 4

Project: podio-py
Source File: utils.py
def check_client_method():
    """
    Helper to test an API method -- returns a tuple of
    (test_api_client, check_assertions) where check_assertions will
    verify that the API method returned the data from http.request,
    and that http.request was called with the correct arguments.

    check_assertions' signature is:
    def check_assertions(object_returned_from_api,
                         # GET, POST, etc.
                         http_method,
                         # Include the leading /
                         expected_path,
                         # Assert that this string was sent as the request body
                         expected_body,
                         # Assert that the request headers match this dict
                         expected_headers)

    To assert that client.Org().get_all calls URL_BASE/org/ and
    is correctly hooked up to http.request():

        client, check_assertions = check_client_method()
        result = client.Org.get_all()
        check_assertions(result, 'GET', '/org/')

    You can also pass body and headers to check_assertions.
    """
    client, http = get_client_and_http()
    returned_object = {'uuid': uuid4().hex}

    response = Mock()
    response.status = 200
    http.request = Mock(return_value=(
            response, json.dumps(returned_object).encode("utf-8")))

    def check_assertions(actual_returned,
                         http_method,
                         expected_path,
                         expected_body=None,
                         expected_headers=None):
        if expected_headers is None:
            expected_headers = {}

        eq_(returned_object,
            actual_returned,
            "API method didn't return the same object as http.request()")
        http.request.assert_called_once_with(URL_BASE + expected_path,
                                              http_method,
                                              body=expected_body,
                                              headers=expected_headers)

    return client, check_assertions

Example 5

Project: inflect.py
Source File: test_inflections.py
def test_ordinal():
    p = inflect.engine()
    eq_(p.ordinal(0), "0th", msg="0 -> 0th...")
    eq_(p.ordinal(1), "1st")
    eq_(p.ordinal(2), "2nd")
    eq_(p.ordinal(3), "3rd")
    eq_(p.ordinal(4), "4th")
    eq_(p.ordinal(5), "5th")
    eq_(p.ordinal(6), "6th")
    eq_(p.ordinal(7), "7th")
    eq_(p.ordinal(8), "8th")
    eq_(p.ordinal(9), "9th")
    eq_(p.ordinal(10), "10th")
    eq_(p.ordinal(11), "11th")
    eq_(p.ordinal(12), "12th")
    eq_(p.ordinal(13), "13th")
    eq_(p.ordinal(14), "14th")
    eq_(p.ordinal(15), "15th")
    eq_(p.ordinal(16), "16th")
    eq_(p.ordinal(17), "17th")
    eq_(p.ordinal(18), "18th")
    eq_(p.ordinal(19), "19th")
    eq_(p.ordinal(20), "20th")
    eq_(p.ordinal(21), "21st")
    eq_(p.ordinal(22), "22nd")
    eq_(p.ordinal(23), "23rd")
    eq_(p.ordinal(24), "24th")
    eq_(p.ordinal(100), "100th")
    eq_(p.ordinal(101), "101st")
    eq_(p.ordinal(102), "102nd")
    eq_(p.ordinal(103), "103rd")
    eq_(p.ordinal(104), "104th")

    eq_(p.ordinal('zero'), "zeroth", msg="zero -> zeroth...")
    eq_(p.ordinal('one'), "first")
    eq_(p.ordinal('two'), "second")
    eq_(p.ordinal('three'), "third")
    eq_(p.ordinal('four'), "fourth")
    eq_(p.ordinal('five'), "fifth")
    eq_(p.ordinal('six'), "sixth")
    eq_(p.ordinal('seven'), "seventh")
    eq_(p.ordinal('eight'), "eighth")
    eq_(p.ordinal('nine'), "ninth")
    eq_(p.ordinal('ten'), "tenth")
    eq_(p.ordinal('eleven'), "eleventh")
    eq_(p.ordinal('twelve'), "twelfth")
    eq_(p.ordinal('thirteen'), "thirteenth")
    eq_(p.ordinal('fourteen'), "fourteenth")
    eq_(p.ordinal('fifteen'), "fifteenth")
    eq_(p.ordinal('sixteen'), "sixteenth")
    eq_(p.ordinal('seventeen'), "seventeenth")
    eq_(p.ordinal('eighteen'), "eighteenth")
    eq_(p.ordinal('nineteen'), "nineteenth")
    eq_(p.ordinal('twenty'), "twentieth")
    eq_(p.ordinal('twenty-one'), "twenty-first")
    eq_(p.ordinal('twenty-two'), "twenty-second")
    eq_(p.ordinal('twenty-three'), "twenty-third")
    eq_(p.ordinal('twenty-four'), "twenty-fourth")
    eq_(p.ordinal('one hundred'), "one hundredth")
    eq_(p.ordinal('one hundred and one'), "one hundred and first")
    eq_(p.ordinal('one hundred and two'), "one hundred and second")
    eq_(p.ordinal('one hundred and three'), "one hundred and third")
    eq_(p.ordinal('one hundred and four'), "one hundred and fourth")

Example 6

Project: inflect.py
Source File: test_join.py
def test_join():
    p = inflect.engine()

    # Three words...
    words = "apple banana carrot".split()

    eq_(p.join(words),
        "apple, banana, and carrot", msg='plain 3 words')

    eq_(p.join(words, final_sep=''),
        "apple, banana and carrot", msg='3 words, no final sep')

    eq_(p.join(words, final_sep='...'),
        "apple, banana... and carrot", msg='3 words, different final sep')

    eq_(p.join(words, final_sep='...', conj=''),
        "apple, banana... carrot",
        msg='-->%s != %s<--   3 words, different final sep, no conjunction' % (
        p.join(words, final_sep='...', conj=''), "apple, banana... carrot"))

    eq_(p.join(words, conj='or'),
        "apple, banana, or carrot",
        msg='%s != %s    3 words, different conjunction' % (
            p.join(words, conj='or'),
            "apple, banana, or carrot"))

    # Three words with semicolons...
    words = ('apple,fuji', 'banana', 'carrot')

    eq_(p.join(words),
        "apple,fuji; banana; and carrot",
        msg='%s != %s<-- comma-inclusive 3 words' % (
            p.join(words), "apple,fuji, banana; and carrot"))

    eq_(p.join(words, final_sep=''),
        "apple,fuji; banana and carrot",
        msg='join(%s) == "%s" != "%s"' % (
            words,
            p.join(words, final_sep=''),
            "apple,fuji) banana and carrot"))

    eq_(p.join(words, final_sep='...'),
        "apple,fuji; banana... and carrot", msg='comma-inclusive 3 words, different final sep')

    eq_(p.join(words, final_sep='...', conj=''),
        "apple,fuji; banana... carrot",
        msg='comma-inclusive 3 words, different final sep, no conjunction')

    eq_(p.join(words, conj='or'),
        "apple,fuji; banana; or carrot", msg='comma-inclusive 3 words, different conjunction')

    # Two words...
    words = ('apple', 'carrot')

    eq_(p.join(words),
        "apple and carrot", msg='plain 2 words')

    eq_(p.join(words, final_sep=''),
        "apple and carrot", msg='2 words, no final sep')

    eq_(p.join(words, final_sep='...'),
        "apple and carrot", msg='2 words, different final sep')

    eq_(p.join(words, final_sep='...', conj=''),
        "apple carrot", msg="join(%s, final_sep='...', conj='') == %s != %s" % (
            words, p.join(words, final_sep='...', conj=''), 'apple carrot'))

    eq_(p.join(words, final_sep='...', conj='', conj_spaced=False),
        "applecarrot", msg="join(%s, final_sep='...', conj='') == %s != %s" % (
            words, p.join(words, final_sep='...', conj=''), 'applecarrot'))

    eq_(p.join(words, conj='or'),
        "apple or carrot", msg='2 words, different conjunction')

    # One word...
    words = ['carrot']

    eq_(p.join(words),
        "carrot", msg='plain 1 word')

    eq_(p.join(words, final_sep=''),
        "carrot", msg='1 word, no final sep')

    eq_(p.join(words, final_sep='...'),
        "carrot", msg='1 word, different final sep')

    eq_(p.join(words, final_sep='...', conj=''),
        "carrot", msg='1 word, different final sep, no conjunction')

    eq_(p.join(words, conj='or'),
        "carrot", msg='1 word, different conjunction')

Example 7

Project: pyelasticsearch
Source File: client_tests.py
    def test_bulk(self):
        es = self.conn

        # Test index and create and multiple operations in a batch:
        result = es.bulk([es.index_op(dict(title='Pride and Prejudice and Zombies',
                                           pages=200),
                                      id=5),
                          es.index_op(dict(title='Sense and Sensibility and Seamonsters',
                                           pages=200),
                                      id=6),
                          es.index_op(dict(title='San Franscisco Landline Phonebook',
                                           pages=3),
                                      id=7,
                                      overwrite_existing=False)],
                        index='test-index',
                        doc_type='book')
        eq_(result['items'], [{'index': {'_id': '5',
                                         '_index': 'test-index',
                                         '_type': 'book',
                                         '_version': 1,
                                         'status': 201}},
                              {'index': {'_id': '6',
                                         '_index': 'test-index',
                                         '_type': 'book',
                                         '_version': 1,
                                         'status': 201}},
                              {'create': {'_id': '7',
                                         '_index': 'test-index',
                                         '_type': 'book',
                                         '_version': 1,
                                         'status': 201}}])

        # Test the error handling:
        try:
            es.bulk([es.index_op(dict(pages=4),
                                 id=7,
                                 version=2)],
                    index='test-index',
                    doc_type='book')
        except BulkError as exc:
            eq_(exc.successes, [])
            eq_(exc.errors, [{'index': {'status': 409,
                                        '_type': 'book',
                                        '_id': '7',
                                        'error': ANY,
                                        '_index': 'test-index'}}])
        else:
            self.fail("bulk() didn't raise BulkError when a version conflict happened.")

        # Test updating:
        response = es.bulk([es.update_op(doc=dict(pages=4),
                              id=7)],
                           index='test-index',
                           doc_type='book')
        eq_(response['items'], [{'update': {'_id': '7',
                                            '_index': 'test-index',
                                            '_type': 'book',
                                            '_version': 2,
                                            'status': 200}}])

        # Test delete and index=None and doc_type=None:
        response = es.bulk([es.delete_op(index='test-index',
                                         doc_type='book',
                                         id=id) for id in [5, 6, 7]])
        eq_(self.conn.count('*:*', index=['test-index'])['count'], 0)

Example 8

Project: python-vxi11
Source File: test_vxi11.py
def test_parse_visa_resource_string():
    f = parse_visa_resource_string

    res = f('TCPIP::10.0.0.1::INSTR')
    eq_(res['type'], 'TCPIP')
    eq_(res['prefix'], 'TCPIP')
    eq_(res['arg1'], '10.0.0.1')
    eq_(res['suffix'], 'INSTR')

    res = f('TCPIP0::10.0.0.1::INSTR')
    eq_(res['type'], 'TCPIP')
    eq_(res['prefix'], 'TCPIP0')
    eq_(res['arg1'], '10.0.0.1')
    eq_(res['suffix'], 'INSTR')

    res = f('TCPIP::10.0.0.1::gpib,5::INSTR')
    eq_(res['type'], 'TCPIP')
    eq_(res['prefix'], 'TCPIP')
    eq_(res['arg1'], '10.0.0.1')
    eq_(res['suffix'], 'INSTR')

    res = f('TCPIP0::10.0.0.1::gpib,5::INSTR')
    eq_(res['type'], 'TCPIP')
    eq_(res['prefix'], 'TCPIP0')
    eq_(res['arg1'], '10.0.0.1')
    eq_(res['arg2'], 'gpib,5')
    eq_(res['suffix'], 'INSTR')

    res = f('TCPIP0::10.0.0.1::usb0::INSTR')
    eq_(res['type'], 'TCPIP')
    eq_(res['prefix'], 'TCPIP0')
    eq_(res['arg1'], '10.0.0.1')
    eq_(res['arg2'], 'usb0')
    eq_(res['suffix'], 'INSTR')

    res = f('TCPIP0::10.0.0.1::usb0[1234::5678::MYSERIAL::0]::INSTR')
    eq_(res['type'], 'TCPIP')
    eq_(res['prefix'], 'TCPIP0')
    eq_(res['arg1'], '10.0.0.1')
    eq_(res['arg2'], 'usb0[1234::5678::MYSERIAL::0]')
    eq_(res['suffix'], 'INSTR')

Example 9

Project: kazoo
Source File: test_election.py
    def test_election(self):
        elections = {}
        threads = {}
        for _ in range(3):
            contender = "c" + uuid.uuid4().hex
            elections[contender] = self.client.Election(self.path, contender)
            threads[contender] = self._spawn_contender(
                contender, elections[contender])

        # wait for a leader to be elected
        times = 0
        with self.condition:
            while not self.leader_id:
                self.condition.wait(5)
                times += 1
                if times > 5:
                    raise Exception("Still not a leader: lid: %s",
                                    self.leader_id)

        election = self.client.Election(self.path)

        # make sure all contenders are in the pool
        wait(lambda: len(election.contenders()) == len(elections))
        contenders = election.contenders()

        eq_(set(contenders), set(elections.keys()))

        # first one in list should be leader
        first_leader = contenders[0]
        eq_(first_leader, self.leader_id)

        # tell second one to cancel election. should never get elected.
        elections[contenders[1]].cancel()

        # make leader exit. third contender should be elected.
        self.exit_event.set()
        with self.condition:
            while self.leader_id == first_leader:
                self.condition.wait(45)
        eq_(self.leader_id, contenders[2])
        self._check_thread_error()

        # make first contender re-enter the race
        threads[first_leader].join()
        threads[first_leader] = self._spawn_contender(
            first_leader, elections[first_leader])

        # contender set should now be the current leader plus the first leader
        wait(lambda: len(election.contenders()) == 2)
        contenders = election.contenders()
        eq_(set(contenders), set([self.leader_id, first_leader]))

        # make current leader raise an exception. first should be reelected
        self.raise_exception = True
        self.exit_event.set()
        with self.condition:
            while self.leader_id != first_leader:
                self.condition.wait(45)
        eq_(self.leader_id, first_leader)
        self._check_thread_error()

        self.exit_event.set()
        for thread in threads.values():
            thread.join()
        self._check_thread_error()

Example 10

Project: kazoo
Source File: test_lock.py
    def test_lock(self):
        threads = []
        names = ["contender" + str(i) for i in range(5)]

        contender_bits = {}

        for name in names:
            e = self.make_event()
            l = self.client.Lock(self.lockpath, name)
            t = self.make_thread(target=self._thread_lock_acquire_til_event,
                                 args=(name, l, e))
            contender_bits[name] = (t, e)
            threads.append(t)

        # acquire the lock ourselves first to make the others line up
        lock = self.client.Lock(self.lockpath, "test")
        lock.acquire()

        for t in threads:
            t.start()

        # wait for everyone to line up on the lock
        wait = self.make_wait()
        wait(lambda: len(lock.contenders()) == 6)
        contenders = lock.contenders()

        eq_(contenders[0], "test")
        contenders = contenders[1:]
        remaining = list(contenders)

        # release the lock and contenders should claim it in order
        lock.release()

        for contender in contenders:
            thread, event = contender_bits[contender]

            with self.condition:
                while not self.active_thread:
                    self.condition.wait()
                eq_(self.active_thread, contender)

            eq_(lock.contenders(), remaining)
            remaining = remaining[1:]

            event.set()

            with self.condition:
                while self.active_thread:
                    self.condition.wait()
        for thread in threads:
            thread.join()

Example 11

Project: PyUserInput
Source File: test_unix.py
    def test_event(self):
        for size in screen_sizes:
            with Display(visible=VISIBLE, size=size):
                time.sleep(1.0)  # TODO: how long should we wait?
                mouse = PyMouse()
                event = Event()
                event.start()
                # check move
                for p in positions:
                    event.reset()
                    mouse.move(*p)
                    time.sleep(0.01)
                    print('check ', expect_pos(p, size), '=', event.pos)
                    eq_(expect_pos(p, size), event.pos)
                # check buttons
                for btn in buttons:
                    # press
                    event.reset()
                    mouse.press(0, 0, btn)
                    time.sleep(0.01)
                    print("check button", btn, "pressed")
                    eq_(btn, event.button)
                    eq_(True, event.press)
                    # release
                    event.reset()
                    mouse.release(0, 0, btn)
                    time.sleep(0.01)
                    print("check button", btn, "released")
                    eq_(btn, event.button)
                    eq_(False, event.press)
                # check scroll
                def check_scroll(btn, vertical=None, horizontal=None):
                    event.reset()
                    mouse.press(0, 0, btn)
                    time.sleep(0.01)
                    if vertical:
                        eq_(vertical, event.scroll_vertical)
                    elif horizontal:
                        eq_(horizontal, event.scroll_horizontal)
                print("check scroll up")
                check_scroll(4, 1, 0)
                print("check scroll down")
                check_scroll(5, -1, 0)
                print("check scroll left")
                check_scroll(6, 0, 1)
                print("check scroll right")
                check_scroll(7, 0, -1)
                event.stop()

Example 12

Project: neo4django
Source File: model_tests.py
def test_model_pickling():
    """
    Covers issue #46, pickling `NodeModel`s.
    """

    def pickle_and_restore(m):
        import pickle
        return pickle.loads(pickle.dumps(m))

    def pickle_eq(m1, m2):
        eq_(m1.name, m2.name)
        eq_(m1.using, m2.using)
        eq_(m1.id, m2.id)

    # try a simple model
    pete = Person(name="Pete")
    restored_pete = pickle_and_restore(pete)

    pickle_eq(pete, restored_pete)

    # try a saved model

    pete.save()
    restored_saved_pete = pickle_and_restore(pete)

    pickle_eq(pete, restored_saved_pete)

    # try related models

    from .models import IndexedMouse, RelatedCat, LazyCat
    jerry = IndexedMouse.objects.create(name='Jerry')
    tom = RelatedCat(name='Jerry')

    tom.chases = jerry
    tom.save()
    
    restored_tom = pickle_and_restore(tom)

    pickle_eq(tom, restored_tom)
    pickle_eq(jerry, list(restored_tom.chases.all())[0])

    # try a model with a lazy relation
    
    garfield = LazyCat(name='Garfield')
    garfield.chases.add(jerry)

    restored_garfield = pickle_and_restore(garfield)

    pickle_eq(garfield, restored_garfield)
    pickle_eq(jerry, list(restored_garfield.chases.all())[0])

    # and finally a saved model with a lazy relation

    garfield.save()

    restored_saved_garfield = pickle_and_restore(garfield)

    pickle_eq(garfield, restored_saved_garfield)
    pickle_eq(jerry, list(restored_saved_garfield.chases.all())[0])

Example 13

@YouCompleteMeInstance( { 'cache_omnifunc': 1 } )
def OmniCompleter_GetCompletions_Cache_ObjectListObject_Unicode_test( ycm ):
  contents = '†åsty_π.t'
  request_data = BuildRequestWrap( line_num = 1,
                                   column_num = 14,
                                   contents = contents )


  eq_( request_data[ 'query' ], 't' )

  # Make sure there is an omnifunc set up.
  with patch( 'vim.eval', return_value = ToBytesOnPY2( 'test_omnifunc' ) ):
    ycm._omnicomp.OnFileReadyToParse( request_data )

  omnifunc_result = {
    'words': [
      {
        'word': ToBytesOnPY2( 'ålpha∫et' ),
        'abbr': ToBytesOnPY2( 'å∫∫®'),
        'menu': ToBytesOnPY2( 'µ´~¨á' ),
        'info': ToBytesOnPY2( '^~fo' ),
        'kind': ToBytesOnPY2( '˚' )
      },
      {
        'word': ToBytesOnPY2( 'π†´ß†π' ),
        'abbr': ToBytesOnPY2( 'ÅııÂʉÍÊ'),
        'menu': ToBytesOnPY2( '˜‰ˆËʉÍÊ' ),
        'info': ToBytesOnPY2( 'ȈÏØʉÍÊ' ),
        'kind': ToBytesOnPY2( 'Ê' )
      },
      {
        'word': ToBytesOnPY2( 'test' ),
        'abbr': ToBytesOnPY2( 'ÅııÂʉÍÊ'),
        'menu': ToBytesOnPY2( '˜‰ˆËʉÍÊ' ),
        'info': ToBytesOnPY2( 'ȈÏØʉÍÊ' ),
        'kind': ToBytesOnPY2( 'Ê' )
      }
    ]
  }

  # And get the completions
  with patch( 'vim.eval',
              new_callable = ExtendedMock,
              side_effect = [ 6, omnifunc_result ] ) as vim_eval:

    results = ycm._omnicomp.ComputeCandidates( request_data )

    vim_eval.assert_has_exact_calls( [
      call( 'test_omnifunc(1,"")' ),
      call( "test_omnifunc(0,'t')" ),
    ] )

    # Note: the filtered results are all unicode objects (not bytes) because
    # they are passed through the FilterAndSortCandidates machinery
    # (via the server)
    eq_( results, [ {
      'word': 'test',
      'abbr': 'ÅııÂʉÍÊ',
      'menu': '˜‰ˆËʉÍÊ',
      'info': 'ȈÏØʉÍÊ',
      'kind': 'Ê'
    } ] )

Example 14

Project: retools
Source File: test_cache.py
    def test_unicode_keys(self):
        keys = [
            # arabic (egyptian)
            u"\u0644\u064a\u0647\u0645\u0627\u0628\u062a\u0643\u0644\u0645" \
            u"\u0648\u0634\u0639\u0631\u0628\u064a\u061f",
            # Chinese (simplified)
            u"\u4ed6\u4eec\u4e3a\u4ec0\u4e48\u4e0d\u8bf4\u4e2d\u6587",
            # Chinese (traditional)
            u"\u4ed6\u5011\u7232\u4ec0\u9ebd\u4e0d\u8aaa\u4e2d\u6587",
            # czech
            u"\u0050\u0072\u006f\u010d\u0070\u0072\u006f\u0073\u0074\u011b" \
            u"\u006e\u0065\u006d\u006c\u0075\u0076\u00ed\u010d\u0065\u0073" \
            u"\u006b\u0079",
            # hebrew
            u"\u05dc\u05de\u05d4\u05d4\u05dd\u05e4\u05e9\u05d5\u05d8\u05dc" \
            u"\u05d0\u05de\u05d3\u05d1\u05e8\u05d9\u05dd\u05e2\u05d1\u05e8" \
            u"\u05d9\u05ea",
            # Hindi (Devanagari)
            u"\u092f\u0939\u0932\u094b\u0917\u0939\u093f\u0928\u094d\u0926" \
            u"\u0940\u0915\u094d\u092f\u094b\u0902\u0928\u0939\u0940\u0902" \
            u"\u092c\u094b\u0932\u0938\u0915\u0924\u0947\u0939\u0948\u0902",
            # Japanese (kanji and hiragana)
            u"\u306a\u305c\u307f\u3093\u306a\u65e5\u672c\u8a9e\u3092\u8a71" \
            u"\u3057\u3066\u304f\u308c\u306a\u3044\u306e\u304b",
            # Russian (Cyrillic)
            u"\u043f\u043e\u0447\u0435\u043c\u0443\u0436\u0435\u043e\u043d" \
            u"\u0438\u043d\u0435\u0433\u043e\u0432\u043e\u0440\u044f\u0442" \
            u"\u043f\u043e\u0440\u0443\u0441\u0441\u043a\u0438",
            # Spanish
            u"\u0050\u006f\u0072\u0071\u0075\u00e9\u006e\u006f\u0070\u0075" \
            u"\u0065\u0064\u0065\u006e\u0073\u0069\u006d\u0070\u006c\u0065" \
            u"\u006d\u0065\u006e\u0074\u0065\u0068\u0061\u0062\u006c\u0061" \
            u"\u0072\u0065\u006e\u0045\u0073\u0070\u0061\u00f1\u006f\u006c",
            # Vietnamese
            u"\u0054\u1ea1\u0069\u0073\u0061\u006f\u0068\u1ecd\u006b\u0068" \
            u"\u00f4\u006e\u0067\u0074\u0068\u1ec3\u0063\u0068\u1ec9\u006e" \
            u"\u00f3\u0069\u0074\u0069\u1ebf\u006e\u0067\u0056\u0069\u1ec7" \
            u"\u0074",
            # Japanese
            u"\u0033\u5e74\u0042\u7d44\u91d1\u516b\u5148\u751f",
            # Japanese
            u"\u5b89\u5ba4\u5948\u7f8e\u6075\u002d\u0077\u0069\u0074\u0068" \
            u"\u002d\u0053\u0055\u0050\u0045\u0052\u002d\u004d\u004f\u004e" \
            u"\u004b\u0045\u0059\u0053",
            # Japanese
            u"\u0048\u0065\u006c\u006c\u006f\u002d\u0041\u006e\u006f\u0074" \
            u"\u0068\u0065\u0072\u002d\u0057\u0061\u0079\u002d\u305d\u308c" \
            u"\u305e\u308c\u306e\u5834\u6240",
            # Japanese
            u"\u3072\u3068\u3064\u5c4b\u6839\u306e\u4e0b\u0032",
            # Japanese
            u"\u004d\u0061\u006a\u0069\u3067\u004b\u006f\u0069\u3059\u308b" \
            u"\u0035\u79d2\u524d",
            # Japanese
            u"\u30d1\u30d5\u30a3\u30fc\u0064\u0065\u30eb\u30f3\u30d0",
            # Japanese
            u"\u305d\u306e\u30b9\u30d4\u30fc\u30c9\u3067",
            # greek
            u"\u03b5\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac",
            # Maltese (Malti)
            u"\u0062\u006f\u006e\u0121\u0075\u0073\u0061\u0127\u0127\u0061",
            # Russian (Cyrillic)
            u"\u043f\u043e\u0447\u0435\u043c\u0443\u0436\u0435\u043e\u043d" \
            u"\u0438\u043d\u0435\u0433\u043e\u0432\u043e\u0440\u044f\u0442" \
            u"\u043f\u043e\u0440\u0443\u0441\u0441\u043a\u0438"
        ]
        mock_redis = Mock(spec=redis.client.Redis)
        mock_pipeline = Mock(spec=redis.client.Pipeline)
        results = ['0', (None, '0')]

        def side_effect(*args, **kwargs):
            return results.pop()
        mock_redis.pipeline.return_value = mock_pipeline
        mock_pipeline.execute.side_effect = side_effect
        mock_redis.hgetall.return_value = {}

        def dummy_func(arg):
            return "This is a value: %s" % time.time()

        for key in keys:
            with patch('retools.global_connection._redis', mock_redis):
                CR = self._makeOne()
                CR.add_region('short_term', 60)
                decorated = self._decorateFunc(dummy_func, 'short_term')
                value = decorated(key)
                assert 'This is a value' in value
                exec_calls = [x for x in mock_pipeline.method_calls \
                      if x[0] == 'execute']
                eq_(len(exec_calls), 2)
            mock_pipeline.reset_mock()
            results.extend(['0', (None, '0')])

        for key in keys:
            with patch('retools.global_connection._redis', mock_redis):
                CR = self._makeOne()
                CR.add_region('short_term', 60)

                class DummyClass(object):
                    def dummy_func(self, arg):
                        return "This is a value: %s" % time.time()
                    dummy_func = self._decorateFunc(dummy_func, 'short_term')
                cl_inst = DummyClass()
                value = cl_inst.dummy_func(key)
                assert 'This is a value' in value
                exec_calls = [x for x in mock_pipeline.method_calls \
                      if x[0] == 'execute']
                eq_(len(exec_calls), 2)
            mock_pipeline.reset_mock()
            results.extend(['0', (None, '0')])

Example 15

Project: retools
Source File: test_queue.py
    def test_enqueue_job_by_name(self):
        mock_redis = Mock(spec=redis.Redis)
        mock_pipeline = Mock(spec=redis.client.Pipeline)
        mock_redis.pipeline.return_value = mock_pipeline
        qm = self._makeQM(redis=mock_redis)

        job_id = qm.enqueue('retools.tests.jobs:echo_default',
                            default='hi there')
        meth, args, kw = mock_pipeline.method_calls[0]
        eq_('rpush', meth)
        eq_(kw, {})
        queue_name, job_body = args
        job_data = json.loads(job_body)
        eq_(job_data['job_id'], job_id)
        eq_(job_data['kwargs'], {"default": "hi there"})
        mock_redis.llen = Mock(return_value=1)

        created = time.time()

        # trying get_jobs/get_job
        job = json.dumps({'job_id': job_id,
                          'job': 'retools.tests.jobs:echo_default',
                          'kwargs': {},
                          'state': '',
                          'events': {},
                          'metadata': {'created': created}
                         })

        mock_redis.lindex = Mock(return_value=job)

        jobs = list(qm.get_jobs())
        self.assertEqual(len(jobs), 1)
        my_job = qm.get_job(job_id)
        self.assertEqual(my_job.job_name, 'retools.tests.jobs:echo_default')
        self.assertEqual(my_job.metadata['created'], created)

        # testing the Worker class methods
        from retools.queue import Worker
        mock_redis = Mock(spec=redis.Redis)
        mock_pipeline = Mock(spec=redis.client.Pipeline)
        mock_redis.pipeline.return_value = mock_pipeline
        mock_redis.smembers = Mock(return_value=[])

        workers = list(Worker.get_workers(redis=mock_redis))
        self.assertEqual(len(workers), 0)

        worker = Worker(queues=['main'])
        mock_redis.smembers = Mock(return_value=[worker.worker_id])
        worker.register_worker()
        try:
            workers = list(Worker.get_workers(redis=mock_redis))
            self.assertEqual(len(workers), 1, workers)
            ids = Worker.get_worker_ids(redis=mock_redis)
            self.assertEqual(ids, [worker.worker_id])
        finally:
            worker.unregister_worker()

Example 16

Project: routes
Source File: test_resources.py
    def test_resource(self):
        m = Mapper()
        m.resource('person', 'people')
        m.create_regs(['people'])

        con = request_config()
        con.mapper = m
        def test_path(path, method):
            env = dict(HTTP_HOST='example.com', PATH_INFO=path, REQUEST_METHOD=method)
            con.mapper_dict = {}
            con.environ = env

        test_path('/people', 'GET')
        eq_({'controller':'people', 'action':'index'}, con.mapper_dict)
        test_path('/people.xml', 'GET')
        eq_({'controller':'people', 'action':'index', 'format':'xml'}, con.mapper_dict)

        test_path('/people', 'POST')
        eq_({'controller':'people', 'action':'create'}, con.mapper_dict)
        test_path('/people.html', 'POST')
        eq_({'controller':'people', 'action':'create', 'format':'html'}, con.mapper_dict)

        test_path('/people/2.xml', 'GET')
        eq_({'controller':'people', 'action':'show', 'id':'2', 'format':'xml'}, con.mapper_dict)
        test_path('/people/2', 'GET')
        eq_({'controller':'people', 'action':'show', 'id':'2'}, con.mapper_dict)

        test_path('/people/2/edit', 'GET')
        eq_({'controller':'people', 'action':'edit', 'id':'2'}, con.mapper_dict)
        test_path('/people/2/edit.xml', 'GET')
        eq_({'controller':'people', 'action':'edit', 'id':'2', 'format':'xml'}, con.mapper_dict)

        test_path('/people/2', 'DELETE')
        eq_({'controller':'people', 'action':'delete', 'id':'2'}, con.mapper_dict)

        test_path('/people/2', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'2'}, con.mapper_dict)
        test_path('/people/2.json', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'2', 'format':'json'}, con.mapper_dict)

        # Test for dots in urls
        test_path('/people/2\.13', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'2\.13'}, con.mapper_dict)
        test_path('/people/2\.13.xml', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'2\.13', 'format':'xml'}, con.mapper_dict)
        test_path('/people/user\.name', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'user\.name'}, con.mapper_dict)
        test_path('/people/user\.\.\.name', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'user\.\.\.name'}, con.mapper_dict)
        test_path('/people/user\.name\.has\.dots', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'user\.name\.has\.dots'}, con.mapper_dict)
        test_path('/people/user\.name\.is\.something.xml', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'user\.name\.is\.something', 'format':'xml'}, con.mapper_dict)
        test_path('/people/user\.name\.ends\.with\.dot\..xml', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'user\.name\.ends\.with\.dot\.', 'format':'xml'}, con.mapper_dict)
        test_path('/people/user\.name\.ends\.with\.dot\.', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'user\.name\.ends\.with\.dot\.'}, con.mapper_dict)
        test_path('/people/\.user\.name\.starts\.with\.dot', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'\.user\.name\.starts\.with\.dot'}, con.mapper_dict)
        test_path('/people/user\.name.json', 'PUT')
        eq_({'controller':'people', 'action':'update', 'id':'user\.name', 'format':'json'}, con.mapper_dict)

Example 17

Project: routes
Source File: test_resources.py
    def test_resource_created_with_parent_resource(self):
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'))
        m.create_regs(['locations'])

        con = request_config()
        con.mapper = m
        def test_path(path, method):
            env = dict(HTTP_HOST='example.com', PATH_INFO=path,
                       REQUEST_METHOD=method)
            con.mapper_dict = {}
            con.environ = env

        test_path('/regions/13/locations', 'GET')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'action': 'index'})
        url = url_for('region_locations', region_id=13)
        eq_(url, '/regions/13/locations')

        test_path('/regions/13/locations', 'POST')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'action': 'create'})
        # new
        url = url_for('region_new_location', region_id=13)
        eq_(url, '/regions/13/locations/new')
        # create
        url = url_for('region_locations', region_id=13)
        eq_(url, '/regions/13/locations')

        test_path('/regions/13/locations/60', 'GET')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'id': '60', 'action': 'show'})
        url = url_for('region_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60')

        test_path('/regions/13/locations/60/edit', 'GET')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'id': '60', 'action': 'edit'})
        url = url_for('region_edit_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60/edit')

        test_path('/regions/13/locations/60', 'DELETE')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'id': '60', 'action': 'delete'})
        url = url_for('region_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60')

        test_path('/regions/13/locations/60', 'PUT')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'id': '60', 'action': 'update'})
        url = url_for('region_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60')

        # Make sure ``path_prefix`` overrides work
        # empty ``path_prefix`` (though I'm not sure why someone would do this)
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='')
        url = url_for('region_locations')
        eq_(url, '/locations')
        # different ``path_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='areas/:area_id')
        url = url_for('region_locations', area_id=51)
        eq_(url, '/areas/51/locations')

        # Make sure ``name_prefix`` overrides work
        # empty ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   name_prefix='')
        url = url_for('locations', region_id=51)
        eq_(url, '/regions/51/locations')
        # different ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   name_prefix='area_')
        url = url_for('area_locations', region_id=51)
        eq_(url, '/regions/51/locations')

        # Make sure ``path_prefix`` and ``name_prefix`` overrides work together
        # empty ``path_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='',
                   name_prefix='place_')
        url = url_for('place_locations')
        eq_(url, '/locations')
        # empty ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='areas/:area_id',
                   name_prefix='')
        url = url_for('locations', area_id=51)
        eq_(url, '/areas/51/locations')
        # different ``path_prefix`` and ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='areas/:area_id',
                   name_prefix='place_')
        url = url_for('place_locations', area_id=51)
        eq_(url, '/areas/51/locations')

Example 18

Project: routes
Source File: test_resources.py
    def test_resource_created_with_parent_resource_nomin(self):
        m = Mapper()
        m.minimization = False
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'))
        m.create_regs(['locations'])

        con = request_config()
        con.mapper = m
        def test_path(path, method):
            env = dict(HTTP_HOST='example.com', PATH_INFO=path,
                       REQUEST_METHOD=method)
            con.mapper_dict = {}
            con.environ = env

        test_path('/regions/13/locations', 'GET')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'action': 'index'})
        url = url_for('region_locations', region_id=13)
        eq_(url, '/regions/13/locations')

        test_path('/regions/13/locations', 'POST')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'action': 'create'})
        # new
        url = url_for('region_new_location', region_id=13)
        eq_(url, '/regions/13/locations/new')
        # create
        url = url_for('region_locations', region_id=13)
        eq_(url, '/regions/13/locations')

        test_path('/regions/13/locations/60', 'GET')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'id': '60', 'action': 'show'})
        url = url_for('region_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60')

        test_path('/regions/13/locations/60/edit', 'GET')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'id': '60', 'action': 'edit'})
        url = url_for('region_edit_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60/edit')

        test_path('/regions/13/locations/60', 'DELETE')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'id': '60', 'action': 'delete'})
        url = url_for('region_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60')

        test_path('/regions/13/locations/60', 'PUT')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                                   'id': '60', 'action': 'update'})
        url = url_for('region_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60')

        # Make sure ``path_prefix`` overrides work
        # empty ``path_prefix`` (though I'm not sure why someone would do this)
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='/')
        url = url_for('region_locations')
        eq_(url, '/locations')
        # different ``path_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='areas/:area_id')
        url = url_for('region_locations', area_id=51)
        eq_(url, '/areas/51/locations')

        # Make sure ``name_prefix`` overrides work
        # empty ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   name_prefix='')
        url = url_for('locations', region_id=51)
        eq_(url, '/regions/51/locations')
        # different ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   name_prefix='area_')
        url = url_for('area_locations', region_id=51)
        eq_(url, '/regions/51/locations')

        # Make sure ``path_prefix`` and ``name_prefix`` overrides work together
        # empty ``path_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='',
                   name_prefix='place_')
        url = url_for('place_locations')
        eq_(url, '/locations')
        # empty ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='areas/:area_id',
                   name_prefix='')
        url = url_for('locations', area_id=51)
        eq_(url, '/areas/51/locations')
        # different ``path_prefix`` and ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='areas/:area_id',
                   name_prefix='place_')
        url = url_for('place_locations', area_id=51)
        eq_(url, '/areas/51/locations')

Example 19

Project: dh-virtualenv
Source File: test_deployment.py
def check_shebangs_fix(interpreter, path):
    """Checks shebang substitution for the given interpreter"""
    deployment = Deployment('test')
    temp = tempfile.NamedTemporaryFile()
    # We cheat here a little. The fix_shebangs walks through the
    # project directory, however we can just point to a single
    # file, as the underlying mechanism is just grep -r.
    deployment.bin_dir = temp.name
    expected_shebang = '#!' + os.path.join(path, 'bin/python') + '\n'

    with open(temp.name, 'w') as f:
        f.write('#!/usr/bin/{0}\n'.format(interpreter))

    deployment.fix_shebangs()

    with open(temp.name) as f:
        eq_(f.read(), expected_shebang)

    with open(temp.name, 'w') as f:
        f.write('#!/usr/bin/env {0}\n'.format(interpreter))

    deployment.fix_shebangs()

    with open(temp.name) as f:
        eq_(f.readline(), expected_shebang)
        
    # Additional test to check for paths wrapped in quotes because they contained a space.
    # Example:
    #           #!"/some/local/path/dest/path/bin/python"
    # was changed to:
    #           #!/dest/path/bin/python"
    # which caused an "interpreter not found" error.
    
    with open(temp.name, 'w') as f:
        f.write('#!"/usr/bin/{0}"\n'.format(interpreter))

    deployment.fix_shebangs()

    with open(temp.name) as f:
        eq_(f.readline(), expected_shebang)

Example 20

Project: utter-pool
Source File: test_css.py
def test_style_hang():
    """The sanitizer should not hang on any inline styles"""
    # TODO: Neaten this up. It's copypasta from MDN/Kuma to repro the bug
    style = ("""margin-top: 0px; margin-right: 0px; margin-bottom: 1.286em; """
             """margin-left: 0px; padding-top: 15px; padding-right: 15px; """
             """padding-bottom: 15px; padding-left: 15px; border-top-width: """
             """1px; border-right-width: 1px; border-bottom-width: 1px; """
             """border-left-width: 1px; border-top-style: dotted; """
             """border-right-style: dotted; border-bottom-style: dotted; """
             """border-left-style: dotted; border-top-color: rgb(203, 200, """
             """185); border-right-color: rgb(203, 200, 185); """
             """border-bottom-color: rgb(203, 200, 185); border-left-color: """
             """rgb(203, 200, 185); background-image: initial; """
             """background-attachment: initial; background-origin: initial; """
             """background-clip: initial; background-color: """
             """rgb(246, 246, 242); overflow-x: auto; overflow-y: auto; """
             """font: normal normal normal 100%/normal 'Courier New', """
             """'Andale Mono', monospace; background-position: initial """
             """initial; background-repeat: initial initial;""")
    html = '<p style="%s">Hello world</p>' % style
    styles = [
        'border', 'float', 'overflow', 'min-height', 'vertical-align',
        'white-space',
        'margin', 'margin-left', 'margin-top', 'margin-bottom', 'margin-right',
        'padding', 'padding-left', 'padding-top', 'padding-bottom', 'padding-right',
        'background',
        'background-color',
        'font', 'font-size', 'font-weight', 'text-align', 'text-transform',
    ]

    expected = ("""<p style="margin-top: 0px; margin-right: 0px; """
                """margin-bottom: 1.286em; margin-left: 0px; padding-top: """
                """15px; padding-right: 15px; padding-bottom: 15px; """
                """padding-left: 15px; background-color: """
                """rgb(246, 246, 242); font: normal normal normal """
                """100%/normal 'Courier New', 'Andale Mono', monospace;">"""
                """Hello world</p>""")

    result = clean(html, styles=styles)
    eq_(expected, result)

Example 21

Project: proxmoxer
Source File: https_tests.py
    def test_post(self):
        node = self.proxmox.nodes('proxmox')
        node.openvz.create(vmid=800,
                           ostemplate='local:vztmpl/debian-6-turnkey-core_12.0-1_i386.tar.gz',
                           hostname='test',
                           storage='local',
                           memory=512,
                           swap=512,
                           cpus=1,
                           disk=4,
                           password='secret',
                           ip_address='10.0.100.222')
        eq_(self.session.request.call_args[0], ('POST', 'https://proxmox:123/api2/json/nodes/proxmox/openvz'))
        ok_('data' in self.session.request.call_args[1])
        data = self.session.request.call_args[1]['data']
        eq_(data['cpus'], 1)
        eq_(data['disk'], 4)
        eq_(data['hostname'], 'test')
        eq_(data['ip_address'], '10.0.100.222')
        eq_(data['memory'], 512)
        eq_(data['ostemplate'], 'local:vztmpl/debian-6-turnkey-core_12.0-1_i386.tar.gz')
        eq_(data['password'], 'secret')
        eq_(data['storage'], 'local')
        eq_(data['swap'], 512)
        eq_(data['vmid'], 800)

        node = self.proxmox.nodes('proxmox1')
        node.openvz.post(vmid=900,
                         ostemplate='local:vztmpl/debian-7-turnkey-core_12.0-1_i386.tar.gz',
                         hostname='test1',
                         storage='local1',
                         memory=1024,
                         swap=1024,
                         cpus=2,
                         disk=8,
                         password='secret1',
                         ip_address='10.0.100.111')
        eq_(self.session.request.call_args[0], ('POST', 'https://proxmox:123/api2/json/nodes/proxmox1/openvz'))
        ok_('data' in self.session.request.call_args[1])
        data = self.session.request.call_args[1]['data']
        eq_(data['cpus'], 2)
        eq_(data['disk'], 8)
        eq_(data['hostname'], 'test1')
        eq_(data['ip_address'], '10.0.100.111')
        eq_(data['memory'], 1024)
        eq_(data['ostemplate'], 'local:vztmpl/debian-7-turnkey-core_12.0-1_i386.tar.gz')
        eq_(data['password'], 'secret1')
        eq_(data['storage'], 'local1')
        eq_(data['swap'], 1024)
        eq_(data['vmid'], 900)

Example 22

Project: Flask-SuperAdmin
Source File: test_django.py
def test_list():
    class Person(models.Model):
        name = models.CharField(max_length=255)
        age = models.IntegerField()

        def __unicode__(self):
            return self.name

    # Create tables in the database if they don't exist
    try:
        install_models(Person)
    except DatabaseError, e:
        if 'already exists' not in e.message:
            raise

    Person.objects.all().delete()

    view = CustomModelView(Person)
    admin.add_view(view)

    eq_(view.model, Person)
    eq_(view.name, 'Person')
    eq_(view.endpoint, 'person')
    eq_(view.url, '/admin/person')

    # Verify form
    with app.test_request_context():
        Form = view.get_form()
        ok_(isinstance(Form()._fields['name'], wtforms.TextField))
        ok_(isinstance(Form()._fields['age'], wtforms.IntegerField))

    # Make some test clients
    client = app.test_client()

    resp = client.get('/admin/person/')
    eq_(resp.status_code, 200)

    resp = client.get('/admin/person/add/')
    eq_(resp.status_code, 200)

    resp = client.post('/admin/person/add/',
                     data=dict(name='name', age='18'))
    eq_(resp.status_code, 302)

    person = Person.objects.all()[0]
    eq_(person.name, 'name')
    eq_(person.age, 18)

    resp = client.get('/admin/person/')
    eq_(resp.status_code, 200)
    ok_(person.name in resp.data)

    resp = client.get('/admin/person/%s/' % person.pk)
    eq_(resp.status_code, 200)

    resp = client.post('/admin/person/%s/' % person.pk, data=dict(name='changed'))
    eq_(resp.status_code, 302)

    person = Person.objects.all()[0]
    eq_(person.name, 'changed')
    eq_(person.age, 18)

    resp = client.post('/admin/person/%s/delete/' % person.pk)
    eq_(resp.status_code, 200)
    eq_(Person.objects.count(), 1)

    resp = client.post('/admin/person/%s/delete/' % person.pk, data={'confirm_delete': True})
    eq_(resp.status_code, 302)
    eq_(Person.objects.count(), 0)
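
The example drives the admin views through Flask's test client and checks each response with eq_ on the status code and ok_ on the body. A minimal self-contained sketch of that pattern with a throwaway Flask app (not the Flask-SuperAdmin fixture; assumes Flask is installed):

from flask import Flask
from nose.tools import eq_, ok_

app = Flask(__name__)


@app.route('/admin/person/')
def person_list():
    # Stand-in for the rendered list view.
    return 'name'


client = app.test_client()
resp = client.get('/admin/person/')
eq_(resp.status_code, 200)
# resp.data is bytes under Python 3, hence the b'name' literal.
ok_(b'name' in resp.data)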

Example 24

View license
def test_model():
    app, admin = setup()

    class Person(Document):
        name = StringField()
        age = IntField()

    Person.drop_collection()

    view = CustomModelView(Person)
    admin.add_view(view)

    eq_(view.model, Person)
    eq_(view.name, 'Person')
    eq_(view.endpoint, 'person')
    eq_(view.url, '/admin/person')

    # Verify form
    with app.test_request_context():
        Form = view.get_form()
        ok_(isinstance(Form()._fields['name'], wtforms.TextAreaField))
        ok_(isinstance(Form()._fields['age'], wtforms.IntegerField))

    # Make some test clients
    client = app.test_client()

    resp = client.get('/admin/person/')
    eq_(resp.status_code, 200)

    resp = client.get('/admin/person/add/')
    eq_(resp.status_code, 200)

    resp = client.post('/admin/person/add/',
                     data=dict(name='name', age='18'))
    eq_(resp.status_code, 302)

    person = Person.objects.first()
    eq_(person.name, 'name')
    eq_(person.age, 18)

    resp = client.get('/admin/person/')
    eq_(resp.status_code, 200)
    ok_(str(person.pk) in resp.data)

    resp = client.get('/admin/person/%s/' % person.pk)
    eq_(resp.status_code, 200)

    resp = client.post('/admin/person/%s/' % person.pk, data=dict(name='changed'))
    eq_(resp.status_code, 302)

    person = Person.objects.first()
    eq_(person.name, 'changed')
    eq_(person.age, 18)

    resp = client.post('/admin/person/%s/delete/' % person.pk)
    eq_(resp.status_code, 200)
    eq_(Person.objects.count(), 1)

    resp = client.post('/admin/person/%s/delete/' % person.pk, data={'confirm_delete': True})
    eq_(resp.status_code, 302)
    eq_(Person.objects.count(), 0)

Example 25

Project: Flask-SuperAdmin
Source File: test_sqlamodel.py
View license
def test_model():
    app, db, admin = setup()
    Model1, Model2 = create_models(db)
    db.create_all()

    view = CustomModelView(Model1, db.session)
    admin.add_view(view)

    eq_(view.model, Model1)
    eq_(view.name, 'Model1')
    eq_(view.endpoint, 'model1')

    eq_(view._primary_key, 'id')

    # Verify form
    with app.test_request_context():
        Form = view.get_form()
        ok_(isinstance(Form()._fields['test1'], wtforms.TextField))
        ok_(isinstance(Form()._fields['test2'], wtforms.TextField))
        ok_(isinstance(Form()._fields['test3'], wtforms.TextAreaField))
        ok_(isinstance(Form()._fields['test4'], wtforms.TextAreaField))

    # Make some test clients
    client = app.test_client()

    resp = client.get('/admin/model1/')
    eq_(resp.status_code, 200)

    resp = client.get('/admin/model1/add/')
    eq_(resp.status_code, 200)

    resp = client.post('/admin/model1/add/',
                       data=dict(test1='test1large', test2='test2'))
    eq_(resp.status_code, 302)

    model = db.session.query(Model1).first()
    eq_(model.test1, 'test1large')
    eq_(model.test2, 'test2')
    eq_(model.test3, '')
    eq_(model.test4, '')

    resp = client.get('/admin/model1/')
    eq_(resp.status_code, 200)
    ok_('test1large' in resp.data)

    resp = client.get('/admin/model1/%s/' % model.id)
    eq_(resp.status_code, 200)

    resp = client.post('/admin/model1/%s/' % model.id, data=dict(test1='test1small', test2='test2large'))
    eq_(resp.status_code, 302)

    model = db.session.query(Model1).first()
    eq_(model.test1, 'test1small')
    eq_(model.test2, 'test2large')
    eq_(model.test3, '')
    eq_(model.test4, '')

    resp = client.post('/admin/model1/%s/delete/' % model.id)
    eq_(resp.status_code, 200)
    eq_(db.session.query(Model1).count(), 1)

    resp = client.post('/admin/model1/%s/delete/' % model.id, data={'confirm_delete': True})
    eq_(resp.status_code, 302)
    eq_(db.session.query(Model1).count(), 0)

Example 26

Project: nbdiff
Source File: test_diff.py
View license
def test_lcs():
    grid = [
        [False, False, True, False, True, False],
        [False, True, False, True, False, False],
        [True, False, False, False, False, True],
        [False, False, True, False, True, False],
        [False, True, False, True, False, False],
        [False, True, False, True, False, False],
        [False, False, True, False, True, False]
    ]
    result = lcs(grid)
    expected = [(1, 1), (3, 2), (4, 3), (6, 4)]
    eq_(result, expected)

    grid = [
        [False, False, True, False, True, False],
        [False, False, False, True, False, False],
        [True, False, False, False, False, True],
        [False, False, True, False, True, False],
        [False, True, False, True, False, False],
        [False, True, False, True, False, False],
        [False, False, True, False, True, False]
    ]
    result = lcs(grid)
    expected = [(2, 0), (3, 2), (4, 3), (6, 4)]
    eq_(result, expected)

    grid = [
        [True, True, True, True, True, True],
        [True, True, True, True, True, True],
        [True, True, True, True, True, True],
        [True, True, True, True, True, True],
        [True, True, True, True, True, True],
        [True, True, True, True, True, True],
        [True, True, True, True, True, True]
    ]
    result = lcs(grid)
    expected = [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]
    eq_(result, expected)

    grid = [
        [False, True, True],
        [False, True, True],
        [False, True, True],
        [False, True, True],
        [False, True, True],
        [False, True, True],
        [False, True, True]
    ]
    result = lcs(grid)
    expected = [(0, 1), (1, 2)]
    eq_(result, expected)

Example 27

View license
def _check_file_handler(
        file_entry,
        file_requested,
        root_dir='python',
        expected_checksum=None,
        http_get_exception=None,
        parse_index_exception=None):

    pypi_base_url = 'http://dumb_url.com'

    parser_response = OrderedDict([
        ('nose-1.2.0.tar.gz', IndexRow(
            download_url='http://some_url.com/nose/nose-1.2.0.tar.gz',
            checksums=Checksums(
                md5='MD5-nose-1.2.0.tar.gz',
                sha1=None))),
        (file_entry.filename, file_entry.index_row),
        ('nose-1.2.1.egg', IndexRow(
            download_url='http://some_url.com/nose/nose-1.2.1.egg',
            checksums=Checksums(
                md5='MD5-nose-1.2.1.egg',
                sha1=None))),
    ])

    html_get_response = 'be dumb html'

    html_get_stub = FunctionStub(
        name='HTML Get',
        dummy_result=html_get_response,
        dummy_exception=http_get_exception)

    parser_stub = FunctionStub(
        name='Parser',
        dummy_result=parser_response,
        dummy_exception=parse_index_exception)

    handler = FileHandler(
        pypi_base_url=pypi_base_url,
        http_get_fn=html_get_stub,
        parse_index_fn=parser_stub)

    request = RequestStub(is_index=False)
    response = ResponseStub()

    # When not retrieving a checksum, we expect a redirection exception to be
    # thrown here. Asserting correct redirect behavior is performed in the
    # calling test function.
    response_str = handler.handle(
        path=[root_dir, file_entry.pkg_name, file_requested],
        request=request,
        response=response)

    expected_headers = {'Content-Type': 'application/x-checksum'}

    eq_(response.headers, expected_headers,
        msg='Response headers did not match the expected headers')

    eq_(response_str, expected_checksum,
        msg='Response checksum did not match the expected checksum')

    html_get_stub.assert_single_kw_call(expected_kwargs={
        'url': '{}/{}/'.format(pypi_base_url, file_entry.pkg_name)})

    parser_stub.assert_single_kw_call(expected_kwargs={
        'base_url': pypi_base_url,
        'package_path': file_entry.pkg_name,
        'html_str': html_get_response})
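
Unlike most of the examples here, these eq_ calls pass a msg keyword, which replaces the default "a != b" failure text. A minimal sketch of that behaviour using nothing beyond nose.tools:

from nose.tools import eq_

try:
    eq_({'Content-Type': 'text/html'},
        {'Content-Type': 'application/x-checksum'},
        msg='Response headers did not match the expected headers')
except AssertionError as error:
    # The custom message becomes the AssertionError text.
    assert str(error) == 'Response headers did not match the expected headers'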

Example 28

View license
def _check_main_index_path(
        path,
        is_index,
        http_get_exception=None,
        parse_index_exception=None):

    pypi_base_url = 'http://dumb_url.com'
    builder_response = 'be dumb builder'
    parser_response = 'be dumb parser'
    html_get_response = 'be dumb html'
    py, package_path = path

    html_get_stub = FunctionStub(
        name='HTML Get',
        dummy_result=html_get_response,
        dummy_exception=http_get_exception)

    parser_stub = FunctionStub(
        name='Parser',
        dummy_result=parser_response,
        dummy_exception=parse_index_exception)

    builder_stub = FunctionStub(
        name='Builder',
        dummy_result=builder_response)

    handler = PyPIIndexHandler(
        pypi_base_url=pypi_base_url,
        http_get_fn=html_get_stub,
        parse_index_fn=parser_stub,
        build_index_fn=builder_stub)

    request = RequestStub(is_index=is_index)
    response = ResponseStub()

    response_str = handler.handle(
        path=path,
        request=request,
        response=response)

    eq_(response.headers, {},
        msg='Headers are expected to be unaffected')

    eq_(response_str, builder_response,
        msg='Handler did not return builder result')

    builder_stub.assert_single_kw_call(expected_kwargs={
        'index_rows': parser_response})

    parser_stub.assert_single_kw_call(expected_kwargs={
        'base_url': pypi_base_url,
        'package_path': package_path,
        'html_str': html_get_response})

Example 29

Project: python-vagrant
Source File: test_vagrant.py
View license
@with_setup(make_setup_vm(), teardown_vm)
def test_vm_sandbox_mode():
    '''
    Test methods for enabling/disabling the sandbox mode
    and committing/rolling back changes.

    This depends on the Sahara plugin.
    '''
    # Only test Sahara if it is installed.
    # This leaves the testing of Sahara to people who care.
    sahara_installed = _plugin_installed(vagrant.Vagrant(TD), 'sahara')
    if not sahara_installed:
        return

    v = vagrant.SandboxVagrant(TD)

    sandbox_status = v.sandbox_status()
    assert sandbox_status == "unknown", "Before the VM goes up the status should be 'unknown', " + "got:'{}'".format(sandbox_status)

    v.up()
    sandbox_status = v.sandbox_status()
    assert sandbox_status == "off", "After the VM goes up the status should be 'off', " + "got:'{}'".format(sandbox_status)

    v.sandbox_on()
    sandbox_status = v.sandbox_status()
    assert sandbox_status == "on", "After enabling the sandbox mode the status should be 'on', " + "got:'{}'".format(sandbox_status)

    v.sandbox_off()
    sandbox_status = v.sandbox_status()
    assert sandbox_status == "off", "After disabling the sandbox mode the status should be 'off', " + "got:'{}'".format(sandbox_status)

    v.sandbox_on()
    v.halt()
    sandbox_status = v.sandbox_status()
    assert sandbox_status == "on", "After halting the VM the status should be 'on', " + "got:'{}'".format(sandbox_status)

    v.up()
    sandbox_status = v.sandbox_status()
    assert sandbox_status == "on", "After bringing the VM up again the status should be 'on', " + "got:'{}'".format(sandbox_status)

    test_file_contents = _read_test_file(v)
    print(test_file_contents)
    eq_(test_file_contents, None, "There should be no test file")

    _write_test_file(v, "foo")
    test_file_contents = _read_test_file(v)
    print(test_file_contents)
    eq_(test_file_contents, "foo", "The test file should read 'foo'")

    v.sandbox_rollback()
    time.sleep(10)  # https://github.com/jedi4ever/sahara/issues/16

    test_file_contents = _read_test_file(v)
    print(test_file_contents)
    eq_(test_file_contents, None, "There should be no test file")

    _write_test_file(v, "foo")
    test_file_contents = _read_test_file(v)
    print(test_file_contents)
    eq_(test_file_contents, "foo", "The test file should read 'foo'")
    v.sandbox_commit()
    _write_test_file(v, "bar")
    test_file_contents = _read_test_file(v)
    print(test_file_contents)
    eq_(test_file_contents, "bar", "The test file should read 'bar'")

    v.sandbox_rollback()
    time.sleep(10)  # https://github.com/jedi4ever/sahara/issues/16

    test_file_contents = _read_test_file(v)
    print(test_file_contents)
    eq_(test_file_contents, "foo", "The test file should read 'foo'")

    sandbox_status = v._parse_vagrant_sandbox_status("Usage: ...")
    eq_(sandbox_status, "not installed", "When 'vagrant sandbox status'" +
        " outputs vagrant help status should be 'not installed', " +
        "got:'{}'".format(sandbox_status))

    v.destroy()
    sandbox_status = v.sandbox_status()
    assert sandbox_status == "unknown", "After destroying the VM the status should be 'unknown', " + "got:'{}'".format(sandbox_status)

Example 30

Project: pyspotlight
Source File: tests.py
View license
def test_single_candidate():
    # Test with a single returned candidate, as was reported by issue #3.
    # Thanks to aolieman for the awesome test data!
    data = """
{
   "annotation":{
      "@text":"Industrial Design at the Technische Universiteit Delft",
      "surfaceForm":{
         "@name":"Technische Universiteit Delft",
         "@offset":"25",
         "resource":[
            {
               "@label":"Technische Universiteit Delft",
               "@uri":"Technische_Universiteit_Delft",
               "@contextualScore":"0.9991813164782087",
               "@percentageOfSecondRank":"0.1422872887244497",
               "@support":"3",
               "@priorScore":"2.8799662606192636E-8",
               "@finalScore":"0.8754365122251001",
               "@types":""
            },
            {
               "@label":"Delft University of Technology",
               "@uri":"Delft_University_of_Technology",
               "@contextualScore":"8.186418452925803E-4",
               "@percentageOfSecondRank":"0.0",
               "@support":"521",
               "@priorScore":"5.001541405942121E-6",
               "@finalScore":"0.12456348777489806",
               "@types":"DBpedia:Agent, Schema:Organization, DBpedia:Organisation, Schema:EducationalOrganization, DBpedia:EducationalInstitution, Schema:CollegeOrUniversity, DBpedia:University"
            }
         ]
      }
   }
}
    """
    candidates = spotlight.candidates('http://localhost', 'asdasdasd',
                                      headers={'fake_response': data})
    expected_out = [
        {u'resource':
            [
                {
                    u'finalScore': 0.8754365122251001,
                    u'support': 3,
                    u'uri': u'Technische_Universiteit_Delft',
                    u'label': u'Technische Universiteit Delft',
                    u'types': u'',
                    u'percentageOfSecondRank': 0.1422872887244497,
                    u'priorScore': 2.8799662606192636e-08,
                    u'contextualScore': 0.9991813164782087
                },
                {
                    u'finalScore': 0.12456348777489806,
                    u'support': 521,
                    u'uri': u'Delft_University_of_Technology',
                    u'label': u'Delft University of Technology',
                    u'types': u'DBpedia:Agent, Schema:Organization, DBpedia:Organisation, Schema:EducationalOrganization, DBpedia:EducationalInstitution, Schema:CollegeOrUniversity, DBpedia:University',
                    u'percentageOfSecondRank': 0.0,
                    u'priorScore': 5.001541405942121e-06,
                    u'contextualScore': 0.0008186418452925803
                },
             ],
         u'name': u'Technische Universiteit Delft',
         u'offset': 25
        }
    ]
    eq_(candidates, expected_out)

Example 31

Project: Ultros
Source File: test_permissions.py
View license
    def test_reads(self):
        """
        PERMS | Test permissions handler reading functions\n
        """
        with self.data:
            self.data["groups"] = {}
            self.data["users"] = {}

            self.data["groups"]["default"] = {
                "options": {"lerp": "gerp"},
                "permissions": ["nose.test"],
                "protocols": {"nose-test": {
                    "permissions": [
                        "nose.test2",
                        "/g[A-Za-z].*2002/"
                    ],
                    "sources": {
                        "#nose": ["nose.test3"]
                    }
                }}
            }

            self.data["groups"]["inherits"] = {
                "options": {},
                "permissions": [],
                "protocols": {},
                "inherit": "default"
            }

            self.data["users"]["test"] = {
                "group": "default",
                "options": {
                    "superadmin": False
                },
                "permissions": ["nose.test"],
                "protocols": {"nose-test": {
                    "permissions": ["nose.test2"],
                    "sources": {
                        "#nose": ["nose.test3"]
                    }
                }}
            }

            self.logger.debug("[READING] Dummy data set up.")

        # Group tests

        self.logger.debug("[READING] Testing: Group options")

        nosetools.eq_(self.handler.get_group_option("default", "derp"),
                      None)
        nosetools.eq_(self.handler.get_group_option("default", "lerp"),
                      "gerp")
        nosetools.eq_(self.handler.get_group_option("herp", "derp"),
                      False)

        self.logger.debug("[READING] Testing: Group inheritance")

        nosetools.eq_(self.handler.get_group_inheritance("default"),
                      None)
        nosetools.eq_(self.handler.get_group_inheritance("inherits"),
                      "default")
        nosetools.eq_(self.handler.get_group_inheritance("herp"),
                      False)

        self.logger.debug("[READING] Testing: Group permissions")

        self.logger.debug("[READING] -- Permissions in their containers")
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "nose.test"),
                      True)
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "nose.test2",
                                                        "nose-test"),
                      True)
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "gDroid2002",
                                                        "nose-test"),
                      True)
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "nose.test3",
                                                        "nose-test",
                                                        "#nose"),
                      True)

        self.logger.debug("[READING] -- Cascading permissions")
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "nose.test",
                                                        "nose-test"),
                      True)
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "nose.test",
                                                        "nose-test",
                                                        "#nose"),
                      True)
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "nose.test2",
                                                        "nose-test",
                                                        "#nose"),
                      True)

        self.logger.debug("[READING] -- False permissions")
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "nose.untest"),
                      False)
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "nose.untest",
                                                        "nose-test"),
                      False)
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "g100d2002",
                                                        "nose-test"),
                      False)
        nosetools.eq_(self.handler.group_has_permission("default",
                                                        "nose.untest",
                                                        "nose-test",
                                                        "#nose"),
                      False)

        self.logger.debug("[READING] -- Inherited permissions")
        self.logger.debug("[READING]    -- Permissions in their containers")
        nosetools.eq_(self.handler.group_has_permission("inherits",
                                                        "nose.test"),
                      True)
        nosetools.eq_(self.handler.group_has_permission("inherits",
                                                        "nose.test2",
                                                        "nose-test"),
                      True)
        nosetools.eq_(self.handler.group_has_permission("inherits",
                                                        "nose.test3",
                                                        "nose-test",
                                                        "#nose"),
                      True)
        self.logger.debug("[READING]    -- Cascading permissions")
        nosetools.eq_(self.handler.group_has_permission("inherits",
                                                        "nose.test",
                                                        "nose-test"),
                      True)
        nosetools.eq_(self.handler.group_has_permission("inherits",
                                                        "nose.test",
                                                        "nose-test",
                                                        "#nose"),
                      True)
        nosetools.eq_(self.handler.group_has_permission("inherits",
                                                        "nose.test2",
                                                        "nose-test",
                                                        "#nose"),
                      True)

        self.logger.debug("[READING]    -- False permissions")
        nosetools.eq_(self.handler.group_has_permission("inherits",
                                                        "nose.untest"),
                      False)
        nosetools.eq_(self.handler.group_has_permission("inherits",
                                                        "nose.untest",
                                                        "nose-test"),
                      False)
        nosetools.eq_(self.handler.group_has_permission("inherits",
                                                        "nose.untest",
                                                        "nose-test",
                                                        "#nose"),
                      False)

        self.logger.debug("[READING] -- Impossible permissions")
        nosetools.eq_(self.handler.group_has_permission("gerp",
                                                        "nose.test"),
                      False)
        nosetools.eq_(self.handler.group_has_permission("gerp",
                                                        "nose.test",
                                                        "nose-test"),
                      False)
        nosetools.eq_(self.handler.group_has_permission("gerp",
                                                        "nose.test",
                                                        "nose-test",
                                                        "#nose"),
                      False)

        self.logger.debug("[READING] Tests complete.")

Example 32

Project: MatchTagAlways
Source File: mta_core_test.py
View license
def TAG_REGEX_Works_test():
  eq_(
    {
      'start_slash' : None,
      'tag_name' : 'div',
      'end_slash' : None,
    },
    mta_core.TAG_REGEX.match( "<div>" ).groupdict() )

  eq_(
    {
      'start_slash' : None,
      'tag_name' : 'p',
      'end_slash' : None,
    },
    mta_core.TAG_REGEX.match( "< p \n\n id='xx' \nclass='b'>" ).groupdict() )

  eq_(
    {
      'start_slash' : None,
      'tag_name' : 'foo:bar-goo',
      'end_slash' : None,
    },
    mta_core.TAG_REGEX.match( "<foo:bar-goo>" ).groupdict() )

  eq_(
    {
      'start_slash' : '/',
      'tag_name' : 'p',
      'end_slash' : None,
    },
    mta_core.TAG_REGEX.match( "</p>" ).groupdict() )

  eq_(
    {
      'start_slash' : '/',
      'tag_name' : 'p',
      'end_slash' : None,
    },
    mta_core.TAG_REGEX.match( "<\n/  p>" ).groupdict() )

  eq_(
    {
      'start_slash' : None,
      'tag_name' : 'br',
      'end_slash' : '/',
    },
    mta_core.TAG_REGEX.match( "< br \n\n id='xx' \nclass='b' />" ).groupdict() )
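
Each assertion compares a literal dict against re.Match.groupdict(), so every named group is checked at once. A minimal sketch of the same comparison with a deliberately simplified tag regex (a hypothetical stand-in, not the real mta_core.TAG_REGEX):

import re
from nose.tools import eq_

SIMPLE_TAG = re.compile(r'<(?P<start_slash>/)?(?P<tag_name>[\w:-]+)\s*(?P<end_slash>/)?>')

eq_(
    {'start_slash': None, 'tag_name': 'div', 'end_slash': None},
    SIMPLE_TAG.match('<div>').groupdict())

eq_(
    {'start_slash': '/', 'tag_name': 'p', 'end_slash': None},
    SIMPLE_TAG.match('</p>').groupdict())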

Example 33

View license
@YouCompleteMeInstance( { 'cache_omnifunc': 1 } )
def OmniCompleter_GetCompletions_Cache_ObjectListObject_Unicode_test( ycm ):
  contents = '†åsty_π.t'
  request_data = BuildRequestWrap( line_num = 1,
                                   column_num = 14,
                                   contents = contents )


  eq_( request_data[ 'query' ], 't' )

  # Make sure there is an omnifunc set up.
  with patch( 'vim.eval', return_value = ToBytesOnPY2( 'test_omnifunc' ) ):
    ycm._omnicomp.OnFileReadyToParse( request_data )

  omnifunc_result = {
    'words': [
      {
        'word': ToBytesOnPY2( 'ålpha∫et' ),
        'abbr': ToBytesOnPY2( 'å∫∫®'),
        'menu': ToBytesOnPY2( 'µ´~¨á' ),
        'info': ToBytesOnPY2( '^~fo' ),
        'kind': ToBytesOnPY2( '˚' )
      },
      {
        'word': ToBytesOnPY2( 'π†´ß†π' ),
        'abbr': ToBytesOnPY2( 'ÅııÂʉÍÊ'),
        'menu': ToBytesOnPY2( '˜‰ˆËʉÍÊ' ),
        'info': ToBytesOnPY2( 'ȈÏØʉÍÊ' ),
        'kind': ToBytesOnPY2( 'Ê' )
      },
      {
        'word': ToBytesOnPY2( 'test' ),
        'abbr': ToBytesOnPY2( 'ÅııÂʉÍÊ'),
        'menu': ToBytesOnPY2( '˜‰ˆËʉÍÊ' ),
        'info': ToBytesOnPY2( 'ȈÏØʉÍÊ' ),
        'kind': ToBytesOnPY2( 'Ê' )
      }
    ]
  }

  # And get the completions
  with patch( 'vim.eval',
              new_callable = ExtendedMock,
              side_effect = [ 6, omnifunc_result ] ) as vim_eval:

    results = ycm._omnicomp.ComputeCandidates( request_data )

    vim_eval.assert_has_exact_calls( [
      call( 'test_omnifunc(1,"")' ),
      call( "test_omnifunc(0,'t')" ),
    ] )

    # Note: the filtered results are all unicode objects (not bytes) because
    # they are passed through the FilterAndSortCandidates machinery
    # (via the server)
    eq_( results, [ {
      'word': 'test',
      'abbr': 'ÅııÂʉÍÊ',
      'menu': '˜‰ˆËʉÍÊ',
      'info': 'ȈÏØʉÍÊ',
      'kind': 'Ê'
    } ] )
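
The test patches vim.eval with a side_effect list, so consecutive calls return consecutive values, and then compares the filtered completions with eq_. assert_has_exact_calls comes from the project's ExtendedMock helper; a minimal sketch of the same flow using only the standard unittest.mock API:

from unittest import mock
from nose.tools import eq_

fake_eval = mock.Mock(side_effect=[6, {'words': ['test']}])

# Each call consumes the next item from the side_effect list.
eq_(fake_eval('test_omnifunc(1,"")'), 6)
eq_(fake_eval("test_omnifunc(0,'t')"), {'words': ['test']})

fake_eval.assert_has_calls([
    mock.call('test_omnifunc(1,"")'),
    mock.call("test_omnifunc(0,'t')"),
])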

Example 34

Project: apns-proxy-server
Source File: test_server.py
View license
def test_dispatch_known_app():
    server = APNSProxyServer(dummy_setting)
    server.create_workers({
        "myApp1": {
            "application_id": "myApp1",
            "name": "My App1",
            "sandbox": False,
            "cert_file": "sample.cert",
            "key_file": "sample.key"
        },
        "myApp2": {
            "application_id": "myApp2",
            "name": "My App2",
            "sandbox": False,
            "cert_file": "sample.cert",
            "key_file": "sample.key"
        },
    }, 1)

    token = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    eq_(server.dispatch_queue(json.dumps({
        "token": token,
        "appid": "myApp1",
        "test": True,
        "aps": {
            "alert": "This is test",
            "badge": 1,
            "sound": "default"
        },
        "expiry": None
        })), True, "Dispatch should be success")

    eq_(server.dispatch_queue(json.dumps({
        "token": token,
        "appid": "myApp2",
        "test": True,
        "aps": {
            "alert": "This is test",
            "badge": 1,
            "sound": "default",
        },
        "expiry": None
        })), True, "Dispatch should be success")

Example 35

Project: vbench
Source File: test_bench.py
View license
def test_benchmarkrunner():
    from vbench.api import BenchmarkRunner
    from suite import *

    # Just to make sure there are no left-overs
    shutil.rmtree(TMP_DIR)
    if exists(DB_PATH):
        os.unlink(DB_PATH)
    ok_(not exists(DB_PATH))

    runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL,
                             BUILD, DB_PATH, TMP_DIR, PREPARE,
                             clean_cmd=CLEAN,
                             run_option='all', run_order='normal',
                             start_date=START_DATE,
                             module_dependencies=DEPENDENCIES)
    revisions_to_run = runner._get_revisions_to_run()
    eq_(len(revisions_to_run), 4)                # we had 4 so far

    revisions_ran = runner.run()
    # print "D1: ", revisions_ran
    assert_array_equal([x[0] for x in revisions_ran],
                       revisions_to_run)
    # First revision
    eq_(revisions_ran[0][1], (False, 3))    # no functions were available at that point
    eq_(revisions_ran[1][1], (True, 3))     # all 3 tests were available in the first rev

    ok_(exists(TMP_DIR))
    ok_(exists(DB_PATH))

    eq_(len(runner.blacklist), 0)

    # Run 2nd time and verify that all are still listed BUT none new succeeds
    revisions_ran = runner.run()
    #print "D2: ", revisions_ran
    for rev, v in revisions_ran:
        eq_(v, (False, 0))

    # What if we expand list of benchmarks and run 3rd time
    runner.benchmarks = collect_benchmarks(['vb_sins', 'vb_sins2'])
    revisions_ran = runner.run()
    # for that single added benchmark there were still no functions
    eq_(revisions_ran[0][1], (False, 1))
    # all others should have "succeeded" on that single one
    for rev, v in revisions_ran[1:]:
        eq_(v, (True, 1))

    # and on 4th run -- nothing new
    revisions_ran = runner.run()
    for rev, v in revisions_ran:
        eq_(v, (False, 0))

    # Now let's smoke test generation of the .rst files
    from vbench.reports import generate_rst_files
    rstdir = pjoin(TMP_DIR, 'sources')
    generate_rst_files(runner.benchmarks, DB_PATH, rstdir, """VERY LONG DESCRIPTION""")

    # Verify that it all looks close to the desired
    image_files = [basename(x) for x in glob(pjoin(rstdir, 'vbench/figures/*.png'))]
    target_image_files = [b.name + '.png' for b in runner.benchmarks]
    eq_(set(image_files), set(target_image_files))

    rst_files = [basename(x) for x in glob(pjoin(rstdir, 'vbench/*.rst'))]
    target_rst_files = [b.name + '.rst' for b in runner.benchmarks]
    eq_(set(rst_files), set(target_rst_files))

    module_files = [basename(x) for x in glob(pjoin(rstdir, '*.rst'))]
    target_module_files = list(set(['vb_' + b.module_name + '.rst' for b in runner.benchmarks]))
    eq_(set(module_files), set(target_module_files + ['index.rst']))

    #print TMP_DIR
    shutil.rmtree(TMP_DIR)
    shutil.rmtree(dirname(DB_PATH))
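
Wrapping both file listings in set() before calling eq_ makes the comparison independent of the order in which glob returns paths. A minimal sketch of that idiom without the vbench fixtures:

from nose.tools import eq_

image_files = ['vb_sins.png', 'vb_sins2.png']
target_image_files = ['vb_sins2.png', 'vb_sins.png']

# The lists differ in order, but the sets compare equal.
eq_(set(image_files), set(target_image_files))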

Example 36

Project: ores
Source File: test_statsd.py
View license
def test_statsd():
    class StatsClient:

        def __init__(self):
            self.messages = []

        def incr(self, name, count=1):
            self.messages.append(("INCR", name, count))

        def timing(self, name, duration):
            self.messages.append(("TIMING", name, duration))

        @contextmanager
        def pipeline(self):
            yield self

    fake_client = StatsClient()

    collector = Statsd(fake_client)
    collector.precache_request("foo", {"bar", "derp"}, 100)
    collector.scores_request("foo", {"bar"}, 50, 150)
    collector.datasources_extracted("foo", {"bar"}, 10, 25)
    collector.score_processed("foo", {"bar"}, 1.1)
    collector.score_timed_out("foo", {"bar"}, 15.1)
    collector.score_cache_miss("foo", "derp")
    collector.score_cache_hit("foo", "bar")
    collector.score_errored("foo", {"bar"})

    eq_(set(fake_client.messages) -
        {('TIMING', 'precache_request.foo.derp', 100000),
         ('TIMING', 'precache_request.foo.bar', 100000),
         ('TIMING', 'precache_request.foo', 100000),
         ('TIMING', 'precache_request', 100000),
         ('TIMING', 'scores_request.foo.bar.50', 150000),
         ('TIMING', 'scores_request.foo.bar', 150000),
         ('TIMING', 'scores_request.foo', 150000),
         ('TIMING', 'scores_request', 150000),
         ('INCR', 'revision_scored.foo.bar', 50),
         ('INCR', 'revision_scored.foo', 50),
         ('INCR', 'revision_scored', 50),
         ('TIMING', 'datasources_extracted.foo.bar.10', 25000),
         ('TIMING', 'datasources_extracted.foo.bar', 25000),
         ('TIMING', 'datasources_extracted.foo', 25000),
         ('TIMING', 'datasources_extracted', 25000),
         ('TIMING', 'score_processed.foo.bar', 1100.0),
         ('TIMING', 'score_processed.foo', 1100.0),
         ('TIMING', 'score_processed', 1100.0),
         ('TIMING', 'score_timed_out.foo.bar', 15100.0),
         ('TIMING', 'score_timed_out.foo', 15100.0),
         ('TIMING', 'score_timed_out', 15100.0),
         ('INCR', 'score_cache_miss.foo.derp', 1),
         ('INCR', 'score_cache_miss.foo', 1),
         ('INCR', 'score_cache_miss', 1),
         ('INCR', 'score_cache_hit.foo.bar', 1),
         ('INCR', 'score_cache_hit.foo', 1),
         ('INCR', 'score_cache_hit', 1),
         ('INCR', 'score_errored.foo.bar', 1),
         ('INCR', 'score_errored.foo', 1),
         ('INCR', 'score_errored', 1)},
        set())
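
This final eq_ does not assert strict equality of the message list: subtracting the expected set from the recorded messages and comparing the remainder to the empty set only proves that every recorded message appears among the expected ones. A minimal sketch of the same subset idiom, independent of the ores Statsd collector:

from nose.tools import eq_

recorded = {('INCR', 'score_cache_hit', 1), ('TIMING', 'score_processed', 1100.0)}
expected = {('INCR', 'score_cache_hit', 1),
            ('INCR', 'score_cache_miss', 1),
            ('TIMING', 'score_processed', 1100.0)}

# Any recorded message missing from `expected` would survive the difference
# and make the eq_ call fail.
eq_(recorded - expected, set())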

Example 37

Project: ores
Source File: test_scoring_context.py
View license
def test_scoring_context():
    from revscoring.datasources import Datasource
    from revscoring.dependencies import Dependent
    from revscoring.features import Feature

    fake_data = Datasource("fake_data", lambda: "fake")
    len_func = Dependent("len_func")
    literal_fake = Dependent("literal_fake")
    characters = Feature("characters", lambda word, len: len(word),
                         returns=int,
                         depends_on=[fake_data, len_func])
    is_fake = Feature("is_fake", lambda word, fake: word == fake,
                      returns=bool,
                      depends_on=[fake_data, literal_fake])

    FakeExtractor = namedtuple("Extractor", ['extract', 'solve', 'language'])

    def fake_extract(rev_ids, dependents, caches=None):
        caches = caches if caches is not None else {}
        for rev_id in rev_ids:
            if rev_id % 5 != 0:
                cache = caches.get(rev_id, {})
                values = dependencies.solve(dependents,
                                            context={len_func: lambda: len},
                                            cache=cache)
                values = list(values)
                caches[rev_id] = cache
                yield None, values
            else:
                yield RuntimeError("extract"), None

    def fake_solve(dependents, cache=None):
        cache = cache if cache is not None else {}
        cache.update({len_func: len,
                      literal_fake: "fake"})
        return dependencies.solve(dependents, cache=cache)

    extractor = FakeExtractor(fake_extract, fake_solve, None)

    FakeScorerModel = namedtuple("FakeScorerModel",
                                 ['score', 'version', 'language', 'features'])
    scorer_model = FakeScorerModel(lambda fvs: {"prediction": "generated"},
                                   "1", None, [characters, is_fake])

    scoring_context = ScoringContext("fakewiki", {"fake": scorer_model},
                                     extractor)

    rev_ids = [1, 2, 3, 4, 5]
    root_ds_caches, errors = scoring_context.extract_root_dependency_caches(
        ["fake"], rev_ids)
    print(root_ds_caches)
    print(errors)
    eq_(len(root_ds_caches), 4)
    eq_(len(errors), 1)
    eq_(root_ds_caches[1][fake_data], "fake")
    assert 5 in errors

    score = scoring_context.process_model_scores(
        ["fake"], {characters: 10, is_fake: False})
    eq_(score['fake']['score']['prediction'], "generated")

Example 38

Project: revscoring
Source File: test_revision.py
View license
def test_item():
    eq_(solve(revision.datasources.item, cache={r_text: None}).claims, {})

    solve(revision.datasources.item, cache={r_text: ALAN_TEXT})

    eq_(pickle.loads(pickle.dumps(revision.datasources.item)),
        revision.datasources.item)

    eq_(solve(revision.properties, cache={r_text: ALAN_TEXT}), 57)
    eq_(solve(revision.datasources.properties,
              cache={r_text: ALAN_TEXT}).keys(),
        {'P1430', 'P906', 'P1816', 'P570', 'P31', 'P1343', 'P2021', 'P535',
         'P800', 'P569', 'P373', 'P1819', 'P108', 'P227', 'P185', 'P910',
         'P1273', 'P69', 'P244', 'P20', 'P101', 'P106', 'P18', 'P1563', 'P25',
         'P646', 'P1296', 'P214', 'P950', 'P463', 'P1006', 'P268', 'P21',
         'P1417', 'P22', 'P1207', 'P19', 'P91', 'P735', 'P1412', 'P166',
         'P269', 'P1741', 'P1196', 'P27', 'P140', 'P512', 'P1415', 'P691',
         'P345', 'P949', 'P1263', 'P549', 'P184', 'P935', 'P349', 'P213'})

    eq_(solve(revision.claims, cache={r_text: ALAN_TEXT}), 71)
    eq_(solve(revision.datasources.claims, cache={r_text: ALAN_TEXT}),
        {('P646', '/m/0n00'), ('P101', 'Q897511'), ('P20', 'Q2011497'),
         ('P166', 'Q10762848'), ('P800', 'Q20895949'), ('P950', 'XX945020'),
         ('P1816', 'mp18700'), ('P1563', 'Turing'),
         ('P569', '+1912-06-23T00:00:00Z'), ('P19', 'Q122744'),
         ('P691', 'jn19990008646'), ('P185', 'Q249984'), ('P1343', 'Q2627728'),
         ('P512', 'Q21578'), ('P69', 'Q2278254'), ('P101', 'Q21198'),
         ('P800', 'Q772056'), ('P108', 'Q230899'), ('P25', 'Q20895935'),
         ('P1263', '952/000023883'), ('P214', '41887917'),
         ('P1296', '0067958'), ('P106', 'Q82594'), ('P106', 'Q4964182'),
         ('P1273', 'a11455408'), ('P1412', 'Q1860'), ('P1207', 'n98045497'),
         ('P910', 'Q9384007'), ('P140', 'Q7066'), ('P1430', '368'),
         ('P69', 'Q924289'),
         ('P2021', 'WbQuantity(amount=5, upperBound=5, lowerBound=5, unit=1)'),
         ('P463', 'Q123885'), ('P166', 'Q15631401'),
         ('P373', 'Alan Turing'), ('P549', '8014'),
         ('P213', '0000 0001 1058 9902'), ('P1417', '609739'),
         ('P27', 'Q145'), ('P21', 'Q6581097'), ('P268', '12205670t'),
         ('P184', 'Q92741'), ('P1196', 'Q10737'), ('P244', 'n83171546'),
         ('P22', 'Q20895930'), ('P269', '030691621'), ('P1741', '226316'),
         ('P106', 'Q81096'), ('P935', 'Alan Turing'), ('P1006', '070580685'),
         ('P69', 'Q21578'), ('P227', '118802976'), ('P906', '254262'),
         ('P349', '00621580'), ('P535', '12651680'), ('P91', 'Q6636'),
         ('P106', 'Q11513337'), ('P345', 'nm6290133'), ('P31', 'Q5'),
         ('P570', '+1954-06-07T00:00:00Z'), ('P18', 'Alan Turing Aged 16.jpg'),
         ('P735', 'Q294833'), ('P512', 'Q230899'), ('P1343', 'Q17329836'),
         ('P1415', '101036578'), ('P106', 'Q170790'), ('P1819', 'I00586443'),
         ('P949', '000133188'), ('P19', 'Q20895942'), ('P800', 'Q20895966'),
         ('P108', 'Q220798')})
    eq_(solve(revision.aliases, cache={r_text: ALAN_TEXT}), 9)
    eq_(solve(revision.datasources.aliases, cache={r_text: ALAN_TEXT}),
        {'de': ['Alan Mathison Turing'], 'en': ['Alan Mathison Turing'],
         'fr': ['Alan Mathison Turing'], 'ru': ['Тьюринг, Алан'],
         'jbo': ['alan turin'], 'it': ['Alan Mathison Turing'],
         'ko': ['앨런 매티슨 튜링'],
         'be-tarask': ["Элан Т'юрынг", 'Алан Цюрынг', "Т'юрынг"],
         'ja': ['アラン・テューリング']})
    eq_(solve(revision.sources, cache={r_text: ALAN_TEXT}), 53)
    eq_(solve(revision.datasources.sources, cache={r_text: ALAN_TEXT}),
        {('P19', 'Q122744', 0), ('P570', '+1954-06-07T00:00:00Z', 1),
         ('P19', 'Q122744', 1), ('P570', '+1954-06-07T00:00:00Z', 2),
         ('P570', '+1954-06-07T00:00:00Z', 3), ('P535', '12651680', 0),
         ('P108', 'Q220798', 0), ('P214', '41887917', 0),
         ('P906', '254262', 0), ('P1273', 'a11455408', 0),
         ('P25', 'Q20895935', 0), ('P800', 'Q20895949', 0),
         ('P106', 'Q4964182', 0), ('P69', 'Q924289', 0),
         ('P214', '41887917', 1), ('P108', 'Q230899', 0),
         ('P21', 'Q6581097', 0), ('P18', 'Alan Turing Aged 16.jpg', 0),
         ('P269', '030691621', 0), ('P935', 'Alan Turing', 0),
         ('P214', '41887917', 2), ('P21', 'Q6581097', 1),
         ('P646', '/m/0n00', 0), ('P244', 'n83171546', 0),
         ('P244', 'n83171546', 1), ('P800', 'Q20895966', 0),
         ('P27', 'Q145', 0), ('P20', 'Q2011497', 0), ('P69', 'Q2278254', 0),
         ('P800', 'Q772056', 0), ('P1412', 'Q1860', 0), ('P106', 'Q82594', 0),
         ('P31', 'Q5', 2), ('P213', '0000 0001 1058 9902', 0),
         ('P569', '+1912-06-23T00:00:00Z', 3), ('P31', 'Q5', 1),
         ('P22', 'Q20895930', 0), ('P569', '+1912-06-23T00:00:00Z', 2),
         ('P227', '118802976', 1), ('P31', 'Q5', 0), ('P19', 'Q20895942', 0),
         ('P512', 'Q230899', 0), ('P512', 'Q21578', 0),
         ('P569', '+1912-06-23T00:00:00Z', 1), ('P227', '118802976', 0),
         ('P349', '00621580', 0), ('P569', '+1912-06-23T00:00:00Z', 0),
         ('P549', '8014', 0), ('P1196', 'Q10737', 0), ('P91', 'Q6636', 0),
         ('P268', '12205670t', 0), ('P570', '+1954-06-07T00:00:00Z', 0),
         ('P1563', 'Turing', 0)})
    eq_(solve(revision.qualifiers, cache={r_text: ALAN_TEXT}), 6)
    eq_(solve(revision.datasources.qualifiers, cache={r_text: ALAN_TEXT}),
        {('P1343', 'Q17329836', 'P854'), ('P1343', 'Q2627728', 'P854'),
        ('P69', 'Q2278254', 'P580'), ('P108', 'Q220798', 'P582'),
        ('P108', 'Q220798', 'P580'), ('P108', 'Q230899', 'P580')})
    eq_(solve(revision.badges, cache={r_text: ALAN_TEXT}), 5)
    eq_(solve(revision.datasources.badges, cache={r_text: ALAN_TEXT}),
        {'aswiki': ['Q17437798'], 'ruwiki': ['Q17437798'],
         'azwiki': ['Q17437796'], 'lawiki': ['Q17437796'],
         'enwiki': ['Q17437798']})
    eq_(solve(revision.labels, cache={r_text: ALAN_TEXT}), 126)
    eq_(solve(revision.datasources.labels, cache={r_text: ALAN_TEXT}),
        {'th': 'แอลัน ทัวริง', 'is': 'Alan Turing', 'ku': 'Alan Turing',
         'sgs': 'Alans Tiorėngs', 'ar': 'آلان تورنج', 'kk': 'Алан Тьюринг',
         'yue': '圖靈', 'ta': 'அலன் டூரிங்', 'cs': 'Alan Turing',
         'li': 'Alan Turing', 'bn': 'অ্যালান টুরিং', 'sl': 'Alan Turing',
         'gsw': 'Alan Turing', 'sv': 'Alan Turing', 'hif': 'Alan Turing',
         'en-gb': 'Alan Turing', 'en': 'Alan Turing', 'az': 'Alan Türinq',
         'ja': 'アラン・チューリング', 'oc': 'Alan Turing',
         'pt-br': 'Alan Turing', 'da': 'Alan Turing', 'ca': 'Alan Turing',
         'eo': 'Alan TURING', 'el': 'Άλαν Τούρινγκ', 'yi': 'עלן טיורינג',
         'nan': 'Alan Turing', 'sh': 'Alan Turing', 'as': 'এলান ট্যুৰিং',
         'hy': 'Ալան Թյուրինգ', 'fa': 'آلن تورینگ', 'en-ca': 'Alan Turing',
         'tr': 'Alan Turing', 'mn': 'Алан Матисон Тюринг',
         'he': 'אלן טיורינג', 'scn': 'Alan Turing', 'vo': 'Alan Turing',
         'yo': 'Alan Turing', 'et': 'Alan Turing', 'ur': 'ایلن تورنگ',
         'fo': 'Alan Turing', 'io': 'Alan Turing', 'ilo': 'Alan Turing',
         'ru': 'Алан Тьюринг', 'gl': 'Alan Turing', 'war': 'Alan Turing',
         'kn': 'ಅಲೆನ್ ಟ್ಯೂರಿಂಗ್', 'uz': 'Tyuring', 'de': 'Alan Turing',
         'zh-cn': '艾伦·图灵', 'la': 'Alanus Mathison Turing',
         'sk': 'Alan Mathison Turing', 'mk': 'Алан Тјуринг',
         'hr': 'Alan Turing', 'uk': 'Алан Тюрінг', 'pl': 'Alan Turing',
         'ro': 'Alan Turing', 'nl': 'Alan Turing', 'nb': 'Alan Turing',
         'br': 'Alan Turing', 'fr': 'Alan Turing', 'mt': 'Alan Turing',
         'it': 'Alan Turing', 'ce': 'Тьюринг, Алан',
         'te': 'అలాన్ ట్యూరింగ్\u200c', 'fi': 'Alan Turing',
         'pa': 'ਅਲਾਨ ਟੂਰਿੰਗ',
         'nn': 'Alan Turing', 'zh-hans': '艾伦·图灵', 'af': 'Alan Turing',
         'be': 'Алан Матысан Цьюрынг', 'ga': 'Alan Turing',
         'ckb': 'ئالان تیورینگ', 'es': 'Alan Turing', 'arz': 'الان تورينج',
         'new': 'एलेन त्युरिङ्ग', 'tt': 'Alan Tyuring', 'ht': 'Alan Turing',
         'cy': 'Alan Turing', 'mwl': 'Alan Turing', 'or': 'ଆଲାନ ଟ୍ୟୁରିଙ୍ଗ',
         'jbo': '.alan turin', 'ml': 'അലൻ ട്യൂറിംഗ്', 'sa': 'एलेन ट्यूरिंग',
         'bs': 'Alan Turing', 'tg': 'Алан Тюринг', 'ms': 'Alan Turing',
         'lv': 'Alans Tjūrings', 'fur': 'Alan Turing', 'sco': 'Alan Turing',
         'sah': 'Алан Матисон Тьюринг', 'lmo': 'Alan Turing',
         'mr': 'ॲलन ट्युरिंग', 'pnb': 'الان ٹورنگ', 'eu': 'Alan Turing',
         'zh': '艾伦·图灵', 'de-ch': 'Alan Turing', 'gu': 'ઍલન ટ્યુરિંગ',
         'gan': '圖靈', 'sw': 'Alan Turing', 'mg': 'Alan Turing',
         'be-tarask': 'Элан Т’юрынг', 'hu': 'Alan Turing',
         'lij': 'Alan Turing', 'an': 'Alan Turing', 'pt': 'Alan Turing',
         'pms': 'Alan Turing', 'gd': 'Alan Turing', 'lt': 'Alan Turing',
         'jv': 'Alan Turing', 'fy': 'Alan Turing', 'sq': 'Alan Turing',
         'ka': 'ალან ტიურინგი', 'vi': 'Alan Turing', 'sr': 'Алан Тјуринг',
         'pam': 'Alan Turing', 'ast': 'Alan Turing', 'co': 'Alanu Turing',
         'ko': '앨런 튜링', 'tl': 'Alan Turing', 'rue': 'Алан Тюрінґ',
         'lb': 'Alan M. Turing', 'id': 'Alan Turing', 'bg': 'Алън Тюринг',
         'ba': 'Алан Тьюринг', 'hi': 'एलेन ट्यूरिंग'})
    eq_(solve(revision.sitelinks, cache={r_text: ALAN_TEXT}), 134)
    eq_(solve(revision.datasources.sitelinks, cache={r_text: ALAN_TEXT}),
        {'mrwiki': 'ॲलन ट्युरिंग', 'warwiki': 'Alan Turing',
         'mkwiki': 'Алан Тјуринг', 'bawiki': 'Алан Тьюринг',
         'mnwiki': 'Алан Матисон Тюринг', 'mgwiki': 'Alan Turing',
         'tawiki': 'அலன் டூரிங்', 'yowiki': 'Alan Turing',
         'ttwiki': 'Alan Tyuring', 'ruewiki': 'Алан Тюрінґ',
         'gdwiki': 'Alan Turing', 'liwiki': 'Alan Turing', 'pamwiki':
         'Alan Turing', 'scnwiki': 'Alan Turing', 'scowiki': 'Alan Turing',
         'fowiki': 'Alan Turing', 'fywiki': 'Alan Turing',
         'bnwiki': 'অ্যালান টুরিং', 'jbowiki': '.alan turin',
         'guwiki': 'ઍલન ટ્યુરિંગ', 'knwiki': 'ಅಲೆನ್ ಟ್ಯೂರಿಂಗ್',
         'dewiki': 'Alan Turing', 'be_x_oldwiki': 'Элан Т’юрынг',
         'eswiki': 'Alan Turing', 'hrwiki': 'Alan Turing',
         'mwlwiki': 'Alan Turing', 'afwiki': 'Alan Turing',
         'sqwiki': 'Alan Turing', 'mtwiki': 'Alan Turing',
         'cawiki': 'Alan Turing', 'zh_min_nanwiki': 'Alan Turing',
         'trwiki': 'Alan Turing', 'hiwiki': 'एलेन ट्यूरिंग',
         'nlwiki': 'Alan Turing', 'cswikiquote': 'Alan Turing',
         'azwiki': 'Alan Türinq', 'kkwiki': 'Алан Тьюринг',
         'plwikiquote': 'Alan Turing', 'hywiki': 'Ալան Թյուրինգ',
         'cewiki': 'Тьюринг, Алан', 'nnwiki': 'Alan Turing',
         'ruwikiquote': 'Алан Матисон Тьюринг', 'tgwiki': 'Алан Тюринг',
         'commonswiki': 'Alan Turing', 'lawiki': 'Alanus Mathison Turing',
         'itwiki': 'Alan Turing', 'eowiki': 'Alan Turing',
         'dawiki': 'Alan Turing', 'kowiki': '앨런 튜링',
         'bewiki': 'Алан Матысан Цьюрынг', 'rowiki': 'Alan Turing',
         'ocwiki': 'Alan Turing', 'newwiki': 'एलेन त्युरिङ्ग',
         'lbwiki': 'Alan M. Turing', 'pawiki': 'ਅਲਾਨ ਟੂਰਿੰਗ',
         'enwikiquote': 'Alan Turing', 'hifwiki': 'Alan Turing',
         'mlwiki': 'അലൻ ട്യൂറിംഗ്', 'jawiki': 'アラン・チューリング',
         'viwiki': 'Alan Turing', 'htwiki': 'Alan Turing',
         'furwiki': 'Alan Turing', 'zhwikiquote': '艾伦·图灵',
         'lijwiki': 'Alan Turing', 'plwiki': 'Alan Turing',
         'vowiki': 'Alan Turing', 'bswiki': 'Alan Turing',
         'tewiki': 'అలాన్ ట్యూరింగ్\u200c', 'sawiki': 'एलेन ट्यूरिंग',
         'ptwiki': 'Alan Turing', 'urwiki': 'ایلن تورنگ',
         'arwiki': 'آلان تورنج', 'iswiki': 'Alan Turing',
         'huwiki': 'Alan Turing', 'tlwiki': 'Alan Turing',
         'uzwiki': 'Alan Tyuring', 'frwikiquote': 'Alan Turing',
         'zh_yuewiki': '圖靈', 'pnbwiki': 'الان ٹورنگ',
         'dewikiquote': 'Alan Turing', 'swwiki': 'Alan Turing',
         'itwikiquote': 'Alan Turing', 'lvwiki': 'Alans Tjūrings',
         'anwiki': 'Alan Turing', 'aswiki': 'এলান ট্যুৰিং',
         'arzwiki': 'الان تورينج', 'srwiki': 'Алан Тјуринг',
         'eswikiquote': 'Alan Mathison Turing', 'elwiki': 'Άλαν Τούρινγκ',
         'frwiki': 'Alan Turing', 'brwiki': 'Alan Turing',
         'fiwiki': 'Alan Turing', 'fawiki': 'آلن تورینگ',
         'ilowiki': 'Alan Turing', 'cswiki': 'Alan Turing',
         'kawiki': 'ალან ტიურინგი', 'yiwiki': 'עלן טיורינג',
         'gawiki': 'Alan Turing', 'skwiki': 'Alan Turing',
         'shwiki': 'Alan Turing', 'sahwiki': 'Тьюринг Алан Матисон',
         'ukwiki': 'Алан Тюрінг', 'bat_smgwiki': 'Alans Tiorėngs',
         'hewiki': 'אלן טיורינג', 'enwiki': 'Alan Turing',
         'bgwiki': 'Алън Тюринг', 'svwiki': 'Alan Turing',
         'orwiki': 'ଆଲାନ ଟ୍ୟୁରିଙ୍ଗ', 'lmowiki': 'Alan Turing',
         'glwiki': 'Alan Turing', 'mswiki': 'Alan Turing',
         'zhwiki': '艾伦·图灵', 'alswiki': 'Alan Turing',
         'etwiki': 'Alan Turing', 'jvwiki': 'Alan Turing',
         'hewikiquote': 'אלן טיורינג', 'astwiki': 'Alan Turing',
         'kuwiki': 'Alan Turing', 'cywikiquote': 'Alan Turing',
         'idwiki': 'Alan Turing', 'thwiki': 'แอลัน ทัวริง',
         'pmswiki': 'Alan Turing', 'ruwiki': 'Тьюринг, Алан',
         'iowiki': 'Alan Turing', 'nowiki': 'Alan Turing',
         'cywiki': 'Alan Turing', 'euwiki': 'Alan Turing',
         'ltwiki': 'Alan Turing', 'cawikiquote': 'Alan Turing',
         'simplewiki': 'Alan Turing', 'cowiki': 'Alanu Turing',
         'ganwiki': '圖靈', 'ckbwiki': 'ئالان تیورینگ', 'slwiki': 'Alan Turing'})
    eq_(solve(revision.descriptions, cache={r_text: ALAN_TEXT}), 22)
    eq_(solve(revision.datasources.descriptions, cache={r_text: ALAN_TEXT}),
        {'da': 'britisk informatiker, matematiker og ingeniør',
         'ko': '영국의 수학자, 논리학자, 암호해독학자, 컴퓨터 과학자',
         'it': 'matematico, logico e crittografo britannico',
         'fr': 'mathématicien britannique',
         'nn': 'britisk informatikar, matematikar og ingeniør',
         'gl': 'matemático, filósofo e criptógrafo británico',
         'pam': 'Computer scientist, mathematician, and cryptographer',
         'nl': 'Brits wiskundige',
         'de': 'britischer Logiker, Mathematiker und Kryptoanalytiker',
         'zh-cn': '英国数学家,逻辑学家,密码学家和计算机科学家',
         'en': 'British mathematician, logician, cryptanalyst, and computer ' +
               'scientist',
         'as': 'Computer scientist, mathematician, and cryptographer',
         'zh': '英国数学家,逻辑学家,密码学家和计算机科学家',
         'ru': 'английский математик, логик, криптограф',
         'pl': 'angielski matematyk',
         'sv': 'brittisk datavetare, matematiker och ingenjör',
         'es': 'matemático, filósofo y criptógrafo británico',
         'sk': 'britský matematik, logik, kryptograf a vojnový hrdina',
         'ilo': 'Britaniko a matematiko, lohiko, kriptoanalista, ken ' +
                'sientista ti kompiuter',
         'zh-hans': '英国数学家,逻辑学家,密码学家和计算机科学家',
         'fa': 'دانشمند کامپیوتر، رمزشکن، منطق\u200cدان و ریاضی' +
               '\u200cدان بریتانیایی',
         'nb': 'britisk informatiker, matematiker og ingeniør'})

Example 39

Project: revscoring
Source File: test_tokenized.py
View license
def test_diff():
    diff = revision.diff

    cache = {p_text: "This is some tokens text with TOKENS.",
             r_text: "This is some TOKENS text tokens tokens!"}

    eq_(solve(diff.datasources.token_delta, cache=cache),
        {'tokens': 1, 'with': -1, '.': -1, '!': 1})
    eq_(solve(diff.datasources.token_prop_delta, cache=cache),
        {'tokens': 1 / 2, 'with': -1, '.': -1, '!': 1})
    eq_(round(solve(diff.token_prop_delta_sum, cache=cache), 2), -0.5)
    eq_(round(solve(diff.token_prop_delta_increase, cache=cache), 2), 1.5)
    eq_(round(solve(diff.token_prop_delta_decrease, cache=cache), 2), -2.0)

    eq_(solve(diff.datasources.word_delta, cache=cache),
        {'tokens': 1, 'with': -1})
    eq_(solve(diff.datasources.word_prop_delta, cache=cache),
        {'tokens': 1 / 3, 'with': -1})
    eq_(round(solve(diff.word_prop_delta_sum, cache=cache), 2), -0.67)
    eq_(round(solve(diff.word_prop_delta_increase, cache=cache), 2), 0.33)
    eq_(round(solve(diff.word_prop_delta_decrease, cache=cache), 2), -1.0)

    eq_(solve(diff.datasources.uppercase_word_delta, cache=cache),
        {})
    eq_(solve(diff.datasources.uppercase_word_prop_delta, cache=cache),
        {})
    eq_(round(solve(diff.uppercase_word_prop_delta_sum, cache=cache), 2), 0)
    eq_(round(solve(diff.uppercase_word_prop_delta_increase, cache=cache), 2),
        0)
    eq_(round(solve(diff.uppercase_word_prop_delta_decrease, cache=cache), 2),
        0)

    cache = {p_text: "This is 45 72 tokens 23 72.",
             r_text: "This is 45 72 hats pants 85 72 72."}
    eq_(solve(diff.datasources.number_delta, cache=cache),
        {'72': 1, '23': -1, '85': 1})
    eq_(solve(diff.datasources.number_prop_delta, cache=cache),
        {'72': 1 / 3, '23': -1, '85': 1})
    eq_(round(solve(diff.number_prop_delta_sum, cache=cache), 2), 0.33)
    eq_(round(solve(diff.number_prop_delta_increase, cache=cache), 2), 1.33)
    eq_(round(solve(diff.number_prop_delta_decrease, cache=cache), 2), -1.0)

    eq_(pickle.loads(pickle.dumps(diff.token_delta_sum)),
        diff.token_delta_sum)
    eq_(pickle.loads(pickle.dumps(diff.token_delta_increase)),
        diff.token_delta_increase)
    eq_(pickle.loads(pickle.dumps(diff.token_delta_decrease)),
        diff.token_delta_decrease)

    eq_(pickle.loads(pickle.dumps(diff.token_prop_delta_sum)),
        diff.token_prop_delta_sum)
    eq_(pickle.loads(pickle.dumps(diff.token_prop_delta_increase)),
        diff.token_prop_delta_increase)
    eq_(pickle.loads(pickle.dumps(diff.token_prop_delta_decrease)),
        diff.token_prop_delta_decrease)

    eq_(pickle.loads(pickle.dumps(diff.number_delta_sum)),
        diff.number_delta_sum)
    eq_(pickle.loads(pickle.dumps(diff.number_delta_increase)),
        diff.number_delta_increase)
    eq_(pickle.loads(pickle.dumps(diff.number_delta_decrease)),
        diff.number_delta_decrease)

    eq_(pickle.loads(pickle.dumps(diff.number_prop_delta_sum)),
        diff.number_prop_delta_sum)
    eq_(pickle.loads(pickle.dumps(diff.number_prop_delta_increase)),
        diff.number_prop_delta_increase)
    eq_(pickle.loads(pickle.dumps(diff.number_prop_delta_decrease)),
        diff.number_prop_delta_decrease)
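
The second half of the test asserts that each feature survives a pickle round trip, i.e. pickle.loads(pickle.dumps(obj)) compares equal to the original. A minimal sketch of that check with a plain picklable value rather than a revscoring feature:

import pickle

from nose.tools import eq_

token_delta = {'tokens': 1, 'with': -1, '.': -1, '!': 1}
eq_(pickle.loads(pickle.dumps(token_delta)), token_delta)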

Example 40

Project: revscoring
Source File: test_stopwords.py
View license
def test_stopwords():
    cache = {p_text: "My hat is the king of France.",
             r_text: "My waffle is the king of Normandy and the king of York."}

    eq_(solve(my_stops.revision.datasources.stopwords, cache=cache),
        ['My', 'is', 'the', 'of', 'and', 'the', 'of'])
    eq_(solve(my_stops.revision.parent.datasources.stopwords, cache=cache),
        ['My', 'is', 'the', 'of'])

    eq_(solve(my_stops.revision.datasources.non_stopwords, cache=cache),
        ['waffle', 'king', 'Normandy', 'king', 'York'])
    eq_(solve(my_stops.revision.parent.datasources.non_stopwords, cache=cache),
        ['hat', 'king', 'France'])

    eq_(solve(my_stops.revision.datasources.stopword_frequency, cache=cache),
        {'my': 1, 'is': 1, 'the': 2, 'and': 1, 'of': 2})
    eq_(solve(my_stops.revision.datasources.non_stopword_frequency,
        cache=cache),
        {'waffle': 1, 'king': 2, 'normandy': 1, 'york': 1})
    eq_(solve(my_stops.revision.parent.datasources.stopword_frequency,
        cache=cache),
        {'my': 1, 'is': 1, 'the': 1, 'of': 1})
    eq_(solve(my_stops.revision.parent.datasources.non_stopword_frequency,
        cache=cache),
        {'hat': 1, 'king': 1, 'france': 1})

    diff = my_stops.revision.diff
    eq_(solve(diff.datasources.stopword_delta, cache=cache),
        {'of': 1, 'the': 1, 'and': 1})
    pd = solve(diff.datasources.stopword_prop_delta, cache=cache)
    eq_(pd.keys(), {'of', 'the', 'and'})
    eq_(round(pd['of'], 2), 0.50)
    eq_(round(pd['the'], 2), 0.50)
    eq_(round(pd['and'], 2), 1)

    eq_(solve(diff.datasources.non_stopword_delta, cache=cache),
        {'hat': -1, 'waffle': 1, 'king': 1, 'normandy': 1, 'york': 1,
         'france': -1})
    pd = solve(diff.datasources.non_stopword_prop_delta, cache=cache)
    eq_(pd.keys(), {'hat', 'waffle', 'king', 'normandy', 'york', 'france'})
    eq_(round(pd['hat'], 2), -1)
    eq_(round(pd['waffle'], 2), 1)
    eq_(round(pd['king'], 2), 0.50)
    eq_(round(pd['normandy'], 2), 1)
    eq_(round(pd['york'], 2), 1)

    eq_(solve(my_stops.revision.stopwords, cache=cache), 7)
    eq_(solve(my_stops.revision.parent.stopwords, cache=cache), 4)
    eq_(solve(my_stops.revision.non_stopwords, cache=cache), 5)
    eq_(solve(my_stops.revision.parent.non_stopwords, cache=cache), 3)

    eq_(solve(diff.stopword_delta_sum, cache=cache), 3)
    eq_(solve(diff.stopword_delta_increase, cache=cache), 3)
    eq_(solve(diff.stopword_delta_decrease, cache=cache), 0)
    eq_(solve(diff.non_stopword_delta_sum, cache=cache), 2)
    eq_(solve(diff.non_stopword_delta_increase, cache=cache), 4)
    eq_(solve(diff.non_stopword_delta_decrease, cache=cache), -2)

    eq_(round(solve(diff.stopword_prop_delta_sum, cache=cache), 2), 2)
    eq_(round(solve(diff.stopword_prop_delta_increase, cache=cache), 2), 2)
    eq_(round(solve(diff.stopword_prop_delta_decrease, cache=cache), 2), 0)
    eq_(round(solve(diff.non_stopword_prop_delta_sum, cache=cache), 2), 1.5)
    eq_(round(solve(diff.non_stopword_prop_delta_increase, cache=cache), 2),
        3.5)
    eq_(round(solve(diff.non_stopword_prop_delta_decrease, cache=cache), 2),
        -2)
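
Note the eq_(pd.keys(), {...}) checks above: under Python 3, dict views are set-like and compare equal to a set with the same members. A minimal sketch of that behaviour with a stand-in dict:

from nose.tools import eq_


def test_keys_compare_equal_to_sets():
    pd = {'of': 0.5, 'the': 0.5, 'and': 1.0}
    # dict.keys() returns a view that compares equal to a set of the same keys (Python 3)
    eq_(pd.keys(), {'of', 'the', 'and'})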

Example 41

Project: wikiclass
Source File: test_enwiki.py
View license
def test_extractor():

    Revision = namedtuple("Revisions", ['id', 'timestamp', 'sha1', 'text'])

    class Page:

        def __init__(self, title, namespace, revisions):
            self.title = title
            self.namespace = namespace
            self.revisions = revisions

        def __iter__(self):
            return iter(self.revisions)

    revisions = [
        Revision(
            1, Timestamp(0), "aaa",
            "{{talk page}}{{WikiProject Medicine|class=Stub}}..."
        ),
        Revision(
            2, Timestamp(1), "bbb",
            "{{talk page}}{{WikiProject Medicine|class=B}}..."
        ),
        Revision(
            3, Timestamp(2), "aaa",
            "{{talk page}}{{WikiProject Medicine|class=Stub<!--" +
            " test HTML comment -->}}..."
        ),
        Revision(
            4, Timestamp(3), "ccc",
            "{{talk page}}{{WikiProject Medicine|class=C}}..."
        ),
        Revision(
            5, Timestamp(4), "aaa",
            "{{talk page}}{{WikiProject Medicine|class=Stub}}..."
        ),
        Revision(
            6, Timestamp(4), "ccc",
            "{{talk page}}{{WikiProject Medicine|class= C}}..."
        ),
        Revision(
            7, Timestamp(5), "ddd",
            "{{talk page}}{{WikiProject Medicine|class=B}}\n" +
            "{{WP_Hats|class=B}}..."
        )
    ]
    page = Page("Foobar", 1, revisions)

    observations = enwiki.extract(page)
    project_labels = {(ob['project'], ob['wp10']): ob
                      for ob in observations}

    expected = [("medicine", "stub", Timestamp(0)),
                ("medicine", "c", Timestamp(3)),
                ("medicine", "b", Timestamp(5)),
                ("hats", "b", Timestamp(5))]

    print(project_labels)
    for proj, lab, timestamp in expected:
        ob = project_labels[(proj, lab)]
        eq_(ob['timestamp'], timestamp)

Example 42

Project: wikiclass
Source File: test_frwiki.py
View license
def test_extractor():

    Revision = namedtuple("Revisions", ['id', 'timestamp', 'sha1', 'text'])

    class Page:

        def __init__(self, title, namespace, revisions):
            self.title = title
            self.namespace = namespace
            self.revisions = revisions

        def __iter__(self):
            return iter(self.revisions)

    revisions = [
        Revision(
            1, Timestamp(0), "aaa",
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|avancement=e}}"
        ),
        Revision(
            2, Timestamp(1), "bbb",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|avancement=AdQ}}"
        ),
        Revision(
            3, Timestamp(2), "aaa",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|avancement=e}}"
        ),
        Revision(
            4, Timestamp(3), "ccc",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement=Ébauche}}"
        ),
        Revision(
            5, Timestamp(4), "aaa",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|avancement=e}}"
        ),
        Revision(
            6, Timestamp(4), "ccc",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement=Ébauche}}"
        ),
        Revision(
            7, Timestamp(5), "ddd",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= bd }}"
        ),
        Revision(
            8, Timestamp(6), "eee",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= Bon début }}"
        ),
        Revision(
            9, Timestamp(6), "eee",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= b }}"
        ),
        Revision(
            10, Timestamp(7), "fff",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= a }}"
        ),
        Revision(
            11, Timestamp(8), "fff",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= ba }}"
        ),
        Revision(
            12, Timestamp(9), "fff",
            "{{talk page}}" +
            "{{Wikiprojet\n" +
            "|Seconde Guerre mondiale|maximum\n" +
            "|Japon|maximum\n" +
            "|Forces armées des États-Unis|maximum\n" +
            "|Nucléaire|maximum\n" +
            "|Sélection transversale|faible\n" +
            "|avancement= AdQ }}"
        )
    ]
    page = Page("Foobar", 1, revisions)

    observations = frwiki.extract(page)
    project_labels = {(ob['project'], ob['wp10']): ob
                      for ob in observations}

    expected = [("wikiprojet", "e", Timestamp(0)),
                ("wikiprojet", "bd", Timestamp(5)),
                ("wikiprojet", "b", Timestamp(6)),
                ("wikiprojet", "a", Timestamp(7)),
                ("wikiprojet", "ba", Timestamp(8)),
                ("wikiprojet", "adq", Timestamp(9))]

    print(project_labels)
    for proj, lab, timestamp in expected:
        ob = project_labels[(proj, lab)]
        eq_(ob['timestamp'], timestamp)

Example 43

Project: wikiclass
Source File: test_ruwiki.py
View license
def test_extractor():

    Revision = namedtuple("Revisions", ['id', 'timestamp', 'sha1', 'text'])

    class Page:

        def __init__(self, title, namespace, revisions):
            self.title = title
            self.namespace = namespace
            self.revisions = revisions

        def __iter__(self):
            return iter(self.revisions)

    revisions = [
        Revision(
            1, Timestamp(0), "aaa",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=IV\n<!-- HTML test comment -->}}"
        ),
        Revision(
            2, Timestamp(1), "bbb",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=III}}"
        ),
        Revision(
            3, Timestamp(2), "aaa",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=IV<!-- HTML test comment -->}}"
        ),
        Revision(
            4, Timestamp(3), "bbb",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=III}}"
        ),
        Revision(
            5, Timestamp(4), "ccc",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=II}}"
        ),
        Revision(
            6, Timestamp(5), "bbb",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=III}}"
        ),
        Revision(
            7, Timestamp(6), "ccc",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=II}}"
        ),
        Revision(
            8, Timestamp(7), "ddd",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=I}}"
        ),
        Revision(
            9, Timestamp(8), "eee",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=ХС}}"
        ),
        Revision(
            10, Timestamp(9), "fff",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=дс}}"
        ),
        Revision(
            11, Timestamp(10), "eee",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=ХС}}"
        ),
        Revision(
            12, Timestamp(11), "fff",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=дс}}"
        ),
        Revision(
            13, Timestamp(12), "ggg",
            "{{Статья проекта WikiProject\n" +
            "|важность=высшая\n" +
            "|уровень=ИС<!-- HTML test comment -->}}"
        )
    ]
    page = Page("Foobar", 1, revisions)

    observations = ruwiki.extract(page)
    project_labels = {(ob['project'], ob['wp10']):
                      ob for ob in observations}

    expected = [("wikiproject", "IV", Timestamp(0)),
                ("wikiproject", "III", Timestamp(1)),
                ("wikiproject", "II", Timestamp(4)),
                ("wikiproject", "I", Timestamp(7)),
                ("wikiproject", "ХС", Timestamp(8)),
                ("wikiproject", "ДС", Timestamp(9)),
                ("wikiproject", "ИС", Timestamp(12))]

    print(project_labels)
    for proj, lab, timestamp in expected:
        ob = project_labels[(proj, lab)]
        eq_(ob['timestamp'], timestamp)
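
Examples 41-43 share one fixture pattern: a namedtuple stands in for a dump revision and a tiny Page class only needs to be iterable. A minimal, self-contained sketch of that pattern, with a hypothetical extract() in place of the enwiki/frwiki/ruwiki extractors:

import collections

from nose.tools import eq_

Revision = collections.namedtuple("Revision", ['id', 'timestamp', 'sha1', 'text'])


class Page:

    def __init__(self, title, namespace, revisions):
        self.title = title
        self.namespace = namespace
        self.revisions = revisions

    def __iter__(self):
        return iter(self.revisions)


def extract(page):
    # Hypothetical extractor: yields one observation per revision
    for revision in page:
        yield {'id': revision.id, 'timestamp': revision.timestamp}


def test_fixture_pattern():
    page = Page("Foobar", 1, [Revision(1, 0, "aaa", "..."),
                              Revision(2, 1, "bbb", "...")])
    observations = list(extract(page))
    eq_(len(observations), 2)
    eq_(observations[0]['timestamp'], 0)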

Example 44

Project: bigmler
Source File: reify_steps.py
View license
def i_check_output_file(step, output=None, check_file=None):
    if check_file is None or output is None:
        assert False
    check_file = res_filename(check_file)
    output_file = os.path.join(world.directory, "reify.py")
    with open(check_file, open_mode("r")) as check_file_handler:
        check_contents = check_file_handler.read().strip("\n")
        check_contents_lines = check_contents.split("\n")
        for index, line in enumerate(check_contents_lines):
            if line:
                check_contents_lines[index] = INDENT + line
        check_contents = "\n".join(check_contents_lines)
    # remove unicode mark for strings if Python3
    if PYTHON3:
        check_contents = check_contents.replace( \
            " u'", " '").replace("{u'", "{'").replace( \
            ' u"', ' "').replace('u\\\'', '\\\'')
    with open(output_file, open_mode("r")) as output_file:
        output_file_contents = output_file.read()
    #strip comments at the beginning of the file
    output_file_contents = re.sub(r'#!.*def\smain\(\):\n', '',
                                  output_file_contents,
                                  flags=re.S).strip("\n")
    output_file_contents = output_file_contents.replace( \
        '\n\nif __name__ == "__main__":\n    main()', '')

    #strip internally added project id information
    prefix = "" if PYTHON3 else "u"
    p_str = r',\s\\\n%s\{\'project\':\s%s\'project/[a-f0-9]{24}\'\}\)' \
        % (INDENT * 2, prefix)
    output_file_contents = re.sub(p_str,
                                  ')', output_file_contents,
                                  flags=re.S).strip("\n")
    p_str = r',\s\\\n%s\s\'project\':\s%s\'project/[a-f0-9]{24}\'\}\)' \
        % (INDENT * 2, prefix)
    output_file_contents = re.sub(p_str,
                                  ')', output_file_contents,
                                  flags=re.S).strip("\n")
    p_str = r',\n%s\s\'project\':\s%s\'project/[a-f0-9]{24}\'\}\)' \
        % (INDENT * 2, prefix)
    output_file_contents = re.sub(p_str,
                                  '})', output_file_contents,
                                  flags=re.S).strip("\n")
    p_str = r',\s\'project\':\s%s\'project/[a-f0-9]{24}\'' % prefix
    output_file_contents = re.sub(p_str,
                                  '', output_file_contents,
                                  flags=re.S).strip("\n")
    if check_contents == output_file_contents:
        assert True
    else:
        if PYTHON3:
            # look for an alternative in PYTHON3
            check_contents = python3_contents(check_file, check_contents)
            if check_contents != output_file_contents:
                check_contents = python3_contents(
                    check_file, check_contents, alternative="_1")
        eq_(check_contents, output_file_contents)
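
The step above normalizes both scripts (stripping generated comments and volatile project ids) before a single eq_ on the whole text. A minimal sketch of the normalize-then-compare pattern; the id format and the create_source call are illustrative, not the actual bigmler output:

import re

from nose.tools import eq_


def normalize(script):
    # Mask the volatile resource id so only meaningful differences remain
    return re.sub(r"project/[a-f0-9]{24}", "project/<id>", script).strip("\n")


def test_normalize_then_compare():
    expected = "api.create_source('project/<id>')"
    generated = "api.create_source('project/0123456789abcdef01234567')\n"
    eq_(normalize(generated), normalize(expected))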

Example 45

Project: redis-shard
Source File: test_pipeline.py
View license
    def test_pipeline(self):
        self.client.set('test', '1')
        pipe = self.client.pipeline()
        pipe.set('test', '2')
        pipe.zadd('testzset', 'first', 1)
        pipe.zincrby('testzset', 'first')
        pipe.zadd('testzset', 'second', 2)
        pipe.execute()
        pipe.reset()
        eq_(self.client.get('test'), b'2')
        eq_(self.client.zscore('testzset', 'first'), 2.0)
        eq_(self.client.zscore('testzset', 'second'), 2.0)

        with self.client.pipeline() as pipe:
            pipe.set('test', '3')
            pipe.zadd('testzset', 'first', 4)
            pipe.zincrby('testzset', 'first')
            pipe.zadd('testzset', 'second', 5)
            pipe.execute()
        eq_(self.client.get('test'), b'3')
        eq_(self.client.zscore('testzset', 'first'), 5.0)
        eq_(self.client.zscore('testzset', 'second'), 5.0)

        with self.client.pipeline() as pipe:
            pipe.watch('test')
            eq_(self.client.get('test'), b'3')
            pipe.multi()
            pipe.incr('test')
            eq_(pipe.execute(), [4])
        eq_(self.client.get('test'), b'4')

        with self.client.pipeline() as pipe:
            pipe.watch('test')
            pipe.multi()
            pipe.incr('test')
            self.client.decr('test')
            self.assertRaises(WatchError, pipe.execute)
        eq_(self.client.get('test'), b'3')

        keys_of_names = {}
        with self.client.pipeline() as pipe:
            for key in xrange(100):
                key = str(key)
                name = pipe.shard_api.get_server_name(key)
                if name not in keys_of_names:
                    keys_of_names[name] = key
                else:
                    key1 = key
                    key2 = keys_of_names[name]

                    pipe.watch(key1, key2)
                    pipe.multi()
                    pipe.set(key1, 1)
                    pipe.set(key2, 2)
                    pipe.execute()

                    eq_(self.client.get(key1), b'1')
                    eq_(self.client.get(key2), b'2')
                    break
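
The b'...' expectations above matter: redis-py returns bytes under Python 3, and bytes never compare equal to str, so eq_ against a plain string would fail. A minimal illustration that needs no Redis server:

from nose.tools import eq_, ok_


def test_bytes_expectations():
    value = u'3'.encode('ascii')  # what a redis client typically hands back on Python 3
    eq_(value, b'3')
    ok_(value != '3')  # bytes != str under Python 3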

Example 46

Project: NeuroM
Source File: test_neurolucida.py
View license
def test__flatten_section():
    #[X, Y, Z, R, TYPE, ID, PARENT_ID]
    subsection = [['0', '0', '0', '0'],
                  ['1', '1', '1', '1'],
                  ['2', '2', '2', '2'],
                  ['3', '3', '3', '3'],
                  ['4', '4', '4', '4'],
                  'Generated',
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    #correct parents
    ok_(np.allclose(ret[:, COLS.P], np.arange(-1, 4)))
    ok_(np.allclose(ret[:, COLS.ID], np.arange(0, 5)))

    subsection = [['-1', '-1', '-1', '-1'],
                  [['0', '0', '0', '0'],
                   ['1', '1', '1', '1'],
                   ['2', '2', '2', '2'],
                   ['3', '3', '3', '3'],
                   ['4', '4', '4', '4'],
                   '|',
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'],
                   ['1', '2', '3', '4'], ]
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    #correct parents
    eq_(ret[0, COLS.P], -1.)
    eq_(ret[1, COLS.P], 0.0)
    eq_(ret[6, COLS.P], 0.0)
    ok_(np.allclose(ret[:, COLS.ID], np.arange(0, 11))) #correct ID

    #Try a non-standard bifurcation, ie: missing '|' separator
    subsection = [['-1', '-1', '-1', '-1'],
                  [['0', '0', '0', '0'],
                   ['1', '1', '1', '1'], ]
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    eq_(ret.shape, (3, 7))

    #try multifurcation
    subsection = [['-1', '-1', '-1', '-1'],
                  [['0', '0', '0', '0'],
                   ['1', '1', '1', '1'],
                   '|',
                   ['2', '2', '2', '2'],
                   ['3', '3', '3', '3'],
                   '|',
                   ['4', '4', '4', '4'],
                   ['5', '5', '5', '5'], ]
                  ]
    ret = np.array([row for row in nasc._flatten_subsection(subsection, 0, offset=0, parent=-1)])
    #correct parents
    eq_(ret[0, COLS.P], -1.)
    eq_(ret[1, COLS.P], 0.0)
    eq_(ret[3, COLS.P], 0.0)
    eq_(ret[5, COLS.P], 0.0)
    ok_(np.allclose(ret[:, COLS.ID], np.arange(0, 7))) #correct ID
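
The test above splits its assertions: eq_ for exact results such as shapes and single parent ids, ok_(np.allclose(...)) for whole float columns. A minimal sketch of that split, assuming only numpy and nose:

import numpy as np

from nose.tools import eq_, ok_


def test_exact_and_approximate_checks():
    ret = np.array([[-1.0, 0.0], [0.0, 1.0], [0.0, 2.0]])
    # eq_ is fine for exact values such as the shape tuple or a single cell
    eq_(ret.shape, (3, 2))
    eq_(ret[0, 0], -1.0)
    # float columns are compared approximately
    ok_(np.allclose(ret[:, 1], np.arange(0, 3)))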

Example 47

Project: ibus-bogo
Source File: test_utils.py
View license
def test_separate():
    eq_(separate(''), ['', '', ''])

    eq_(separate('a'), ['', 'a', ''])
    eq_(separate('b'), ['b', '', ''])

    eq_(separate('aa'), ['', 'aa', ''])
    eq_(separate('ae'), ['', 'ae', ''])

    eq_(separate('bb'), ['bb', '', ''])
    eq_(separate('bc'), ['bc', '', ''])

    eq_(separate('ba'), ['b', 'a', ''])
    eq_(separate('baa'), ['b', 'aa', ''])
    eq_(separate('bba'), ['bb', 'a', ''])
    eq_(separate('bbaa'), ['bb', 'aa', ''])

    eq_(separate('bac'), ['b', 'a', 'c'])
    eq_(separate('baac'), ['b', 'aa', 'c'])
    eq_(separate('bbac'), ['bb', 'a', 'c'])
    eq_(separate('bbaacc'), ['bb', 'aa', 'cc'])

    eq_(separate('baca'), ['bac', 'a', ''])
    eq_(separate('bacaa'), ['bac', 'aa', ''])
    eq_(separate('bacaacaeb'), ['bacaac', 'ae', 'b'])

    eq_(separate('long'), ['l', 'o', 'ng'])
    eq_(separate('HoA'), ['H', 'oA', ''])
    eq_(separate('TruoNg'), ['Tr', 'uo', 'Ng'])
    eq_(separate('QuyÊn'), ['Qu', 'yÊ', 'n'])
    eq_(separate('Trùng'), ['Tr', 'ù', 'ng'])
    eq_(separate('uông'), ['', 'uô', 'ng'])
    eq_(separate('giƯờng'), ['gi', 'Ườ', 'ng'])
    eq_(separate('gi'), ['g', 'i', ''])
    eq_(separate('aoe'), ['', 'aoe', ''])
    eq_(separate('uo'), ['', 'uo', ''])
    eq_(separate('uong'), ['', 'uo', 'ng'])
    eq_(separate('nhếch'), ['nh', 'ế', 'ch'])
    eq_(separate('ếch'), ['', 'ế', 'ch'])
    eq_(separate('xẻng'), ['x', 'ẻ', 'ng'])
    eq_(separate('xoáy'), ['x', 'oáy', ''])
    eq_(separate('quây'), ['qu', 'ây', ''])

Example 48

Project: gh-mirror
Source File: tests.py
View license
@patch("ghm.urlopen")
def test_repos(urlopen_mock):
    files = [open(relative("output/repos/%d.json" % i), "rb") for i in range(1, 3)]
    urlopen_mock.side_effect = files

    repos = ghm.repos("!?")

    eq_(
        list(repos),
        [{'description': 'An Apache handler to dynamically generate ETags.',
          'url': 'git://github.com/cdown/Apache-ETag.git'},
         {'description': 'Simple AUR download utility.',
          'url': 'git://github.com/cdown/aurdl.git'},
         {'description': 'Simple AUR search utility.',
          'url': 'git://github.com/cdown/aursc.git'},
         {'description': 'Simple battery status tool in POSIX sh.',
          'url': 'git://github.com/cdown/bats.git'},
         {'description': 'Minimalist configuration management.',
          'url': 'git://github.com/cdown/ceci.git'},
         {'description': 'Personal ceci configs.',
          'url': 'git://github.com/cdown/ceci-configs.git'},
         {'description': 'Battery status in the console (Linux only).',
          'url': 'git://github.com/cdown/cellout.git'},
         {'description': 'Simple "correct horse battery staple"-style generator.',
          'url': 'git://github.com/cdown/chbs.git'},
         {'description': 'A tiny wrapper around the Arch checkupdates script for use in a cronjob.',
          'url': 'git://github.com/cdown/checkupdates-cron.git'},
         {'description': 'Simple URL shortener.',
          'url': 'git://github.com/cdown/chopurl.git'},
         {'description': 'The code powering chrisdown.name.',
          'url': 'git://github.com/cdown/chrisdown.name.git'},
         {'description': 'Concise IRC client library for node.js.',
          'url': 'git://github.com/cdown/circl.git'},
         {'description': 'Digital signage suite (deprecated, use osmo instead).',
          'url': 'git://github.com/cdown/clarity.git'},
         {'description': 'Personal configuration files.',
          'url': 'git://github.com/cdown/dotfiles.git'},
         {'description': 'Downloads the original individual images from Flickr sets.',
          'url': 'git://github.com/cdown/download-flickr-set.git'},
         {'description': 'Dynamic window manager for X.',
          'url': 'git://github.com/cdown/dwm.git'},
         {'description': 'Databaseless digital signage client (deprecated: use osmo instead).',
          'url': 'git://github.com/cdown/elision.git'},
         {'description': 'Simple URL shortener with support for custom identities.',
          'url': 'git://github.com/cdown/fwd.git'},
         {'description': 'Mirror all GitHub repositories for a user, maintaining metadata.',
          'url': 'git://github.com/cdown/gh-mirror.git'},
         {'description': 'Gets direct URLs to images from imgur albums.',
          'url': 'git://github.com/cdown/imurl.git'},
         {'description': "A sane initscript template. Ideally, edit two variables and you're done.",
          'url': 'git://github.com/cdown/initscript-template.git'},
         {'description': 'Remove mouse acceleration on Mac OSX.',
          'url': 'git://github.com/cdown/mac-cel.git'},
         {'description': 'dmenu frontend to MPD.',
          'url': 'git://github.com/cdown/mpdmenu.git'},
         {'description': 'Script to migrate OpenVZ containers to LXC.',
          'url': 'git://github.com/cdown/openvz-to-lxc.git'},
         {'description': 'Digital signage for minimalists.',
          'url': 'git://github.com/cdown/osmo.git'},
         {'description': 'Simple CLI Pushover interface.',
          'url': 'git://github.com/cdown/pushover-cli.git'},
         {'description': 'Arch User Repository interface.',
          'url': 'git://github.com/cdown/pyaur.git'},
         {'description': 'Really awesome deployment on your local machines.',
          'url': 'git://github.com/cdown/rad.git'},
         {'description': 'Web interface to RADIUS.',
          'url': 'git://github.com/cdown/radiusweb.git'},
         {'description': 'Rebuild a Debian ISO with preseed/custom files.',
          'url': 'git://github.com/cdown/rebuild-debian-iso.git'},
         {'description': 'Gets direct URLs to streaming media from SoundCloud song pages. ',
          'url': 'git://github.com/cdown/scurl.git'},
         {'description': 'IRC bot to help finding a TF2 merc/ringer.',
          'url': 'git://github.com/cdown/tf2mercbot.git'},
         {'description': 'Set the system timezone based on IP geolocation.',
          'url': 'git://github.com/cdown/tzupdate.git'},
         {'description': 'Automatically log in to captive portals.',
          'url': 'git://github.com/cdown/wifilogin.git'},
         {'description': 'Get direct URLs to YouTube videos.',
          'url': 'git://github.com/cdown/yturl.git'}],
    )
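
The test above loads urlopen_mock.side_effect with a sequence so each call returns the next canned page. A minimal sketch of that mock pattern; the payloads and URLs are illustrative only:

from unittest.mock import MagicMock

from nose.tools import eq_


def test_side_effect_sequence():
    # Each call to the mock returns the next item from side_effect, mimicking paged responses
    fetch = MagicMock(side_effect=[b'{"page": 1}', b'{"page": 2}'])
    eq_(fetch("https://example.invalid/repos?page=1"), b'{"page": 1}')
    eq_(fetch("https://example.invalid/repos?page=2"), b'{"page": 2}')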

Example 49

Project: inferno
Source File: test_rule.py
View license
    def test_keysets(self):
#        # no key sets
#        rule = InfernoRule()
#        eq_(rule.params.keysets, {})

        # one key set
        rule = InfernoRule(
            key_parts=['id'],
            value_parts=['count'],
            table='some_table',
            column_mappings={'id': 'some_id'})
        keysets = {
            '_default': {
                'column_mappings': {'id': 'some_id'},
                'table': 'some_table',
                'value_parts': ['count'],
                'key_parts': ['_keyset', 'id'],
                'parts_preprocess': [],
                'parts_postprocess': []}}
        eq_(rule.params.keysets, keysets)

        # many key sets
        rule = InfernoRule(
            keysets={
                'keyset1': Keyset(
                    key_parts=['id1'],
                    value_parts=['count1'],
                    column_mappings={'id1': 'some_id1'},
                    table='some_table1'),
                'keyset2': Keyset(
                    key_parts=['id2'],
                    value_parts=['count2'],
                    column_mappings={'id2': 'some_id2'},
                    table='some_table2')})
        keysets = {
            'keyset1': {
                'column_mappings': {'id1': 'some_id1'},
                'table': 'some_table1',
                'value_parts': ['count1'],
                'key_parts': ['_keyset', 'id1'],
                'parts_preprocess': [],
                'parts_postprocess': [],
            },
            'keyset2': {
                'column_mappings': {'id2': 'some_id2'},
                'table': 'some_table2',
                'value_parts': ['count2'],
                'key_parts': ['_keyset', 'id2'],
                'parts_preprocess': [],
                'parts_postprocess': [],
            },
        }
        eq_(rule.params.keysets, keysets)
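
Because eq_ only asserts ==, the entire expected keysets structure can be compared in one call; nested dicts and lists are checked by value. A minimal sketch with a stand-in structure shaped like the one above:

from nose.tools import eq_


def test_nested_structures_compare_by_value():
    built = {'_default': {'key_parts': ['_keyset', 'id'], 'value_parts': ['count']}}
    # one eq_ covers the whole nested structure
    eq_(built, {'_default': {'key_parts': ['_keyset', 'id'], 'value_parts': ['count']}})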

Example 50

Project: chemlab
Source File: test_base.py
View license
    def test_subdimension(self):
        b = B.from_arrays(type_array=['A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C'],
                          bonds=[[0, 1], [0, 2], [3, 4], [3, 5], [6, 7], [6, 8]],
                          maps={('x', 'a'): [0, 0, 0, 1, 1, 1, 2, 2, 2],
                                ('y', 'a'): [0, 0, 1, 1, 2, 2]})
        
        result = b._propagate_dim([0, 1, 2, 3, 4], 'x')
        assert_npequal(result['x'], [0, 1, 2, 3, 4]) 
        assert_npequal(result['a'], [0, 1]) 
        assert_npequal(result['y'], [0, 1, 2]) 
        
        result = b._propagate_dim([0], 'x')
        assert_npequal(result['x'], [0]) 
        assert_npequal(result['a'], [0]) 
        assert_npequal(result['y'], []) 

        result = b._propagate_dim([2, 6], 'x')
        assert_npequal(result['x'], [2, 6]) 
        assert_npequal(result['a'], [0, 2]) 
        assert_npequal(result['y'], []) 

        result = b._propagate_dim([False, True, False], 'a')
        assert_npequal(result['x'], [3, 4, 5]) 
        assert_npequal(result['a'], [1]) 
        assert_npequal(result['y'], [2, 3]) 
        
        c = b.sub_dimension([False, True, False], 'a')
        eq_(c.dimensions['x'], 3)
        eq_(c.dimensions['y'], 2)
        eq_(c.dimensions['a'], 1)
        assert_npequal(c.bonds, [[0, 1], [0, 2]])
        assert_npequal(c.type_array, ['A', 'B', 'C'])
        assert_npequal(c.maps['x', 'a'].value, [0, 0, 0])
        assert_npequal(c.maps['y', 'a'].value, [0, 0])
        
        result = b._propagate_dim([True, True, False, False, False, False], 'y')
        assert_npequal(result['x'], [0, 1, 2])
        assert_npequal(result['y'], [0, 1])
        assert_npequal(result['a'], [0])
        
        # Now we do similar tests without cascading through the dependencies
        c = b.sub_dimension([True, True, False, False, False, False], 'y', propagate=False)
        assert_npequal(c.bonds, [[0, 1], [0, 2]])
        assert_npequal(c.type_array, ['A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C'])
        assert_npequal(c.maps['x', 'a'].value, [0, 0, 0, 1, 1, 1, 2, 2, 2])
        assert_npequal(c.maps['y', 'a'].value, [0, 0])
    
        # Try without bonds
        b = B.from_arrays(type_array=['A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C'],
                          maps={('x', 'a'): [0, 1, 2, 3, 4, 5, 6, 6, 6]})
        
    
        c = b.sub_dimension([0, 1], 'x')
        assert_npequal(c.type_array, ['A', 'B'])