pytest.mark.parametrize

Here are examples of the Python API pytest.mark.parametrize, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.

200 Examples (page 7)

Example 1

View license
@pytest.mark.parametrize('flash_type', ['default', 'success', 'info', 'warning', 'danger', 'well', 'modal'])
@pytest.mark.parametrize('flash_count', range(1, 11))
@pytest.mark.parametrize('message_size', ['small', 'medium', 'large'])
def test_good_requests(message_size, flash_count, flash_type):
    """Exercise the alerts/modal example endpoint over every combination of
    message size, flash count, and flash type, and check that the flashed
    message shows up exactly ``flash_count`` times in the response body."""
    url = ('/examples/alerts/modal?message_size={}&flash_count={}'
           '&flash_type={}').format(message_size, flash_count, flash_type)
    response = current_app.test_client().get(url, follow_redirects=True)
    assert response.status == '200 OK'

    # Representative message text the view renders for each size.
    expected_messages = {
        'small': 'This is a sample message.',
        'medium': 'Built-in functions, exceptions, and other objects.',
        'large': 'gaierror: [Errno 8] nodename nor servname provided, or not known',
    }
    expected = expected_messages[message_size]
    assert expected in response.data
    assert response.data.count(expected) == flash_count

Example 2

View license
@mock.patch('sys.platform', 'win32')
@mock.patch('os.startfile', create=True)
@pytest.mark.parametrize('configuration', abjad_configurations)
@pytest.mark.parametrize('file_path', test_files)
@pytest.mark.parametrize('application', applications)
def test_systemtools_IOManager__open_file_03(startfile_mock,
                                             configuration,
                                             file_path,
                                             application):
    """On a (mocked) win32 platform, opening a file must delegate to
    ``os.startfile`` with the requested path, for every configuration,
    file, and application combination."""
    with mock.patch(*configuration):
        systemtools.IOManager.open_file(file_path=file_path,
                                        application=application)
        # os.startfile is the Windows-only open mechanism; the decorator
        # patches it with create=True since it may not exist off-Windows.
        startfile_mock.assert_called_with(file_path)

Example 3

View license
@pytest.mark.parametrize('nullable', [True, False])
@pytest.mark.parametrize('required', [True, False])
@pytest.mark.parametrize('property_type, value',
                         [('string', 'y'),
                          ('object', {'y': 'z'}),
                          ('array', ['one', 'two', 'three'])])
def test_nullable_with_value(empty_swagger_spec, nullable, required,
                             property_type, value):
    """A property holding an actual (non-None) value must unmarshal to
    exactly that value, whatever the required/nullable flags say."""
    spec = nullable_spec_factory(required, nullable, property_type)
    payload = {'x': value}
    # Deep-copy before the call so we compare against the pristine input.
    snapshot = copy.deepcopy(payload)
    unmarshaled = unmarshal_object(empty_swagger_spec, spec, payload)
    assert unmarshaled == snapshot

Example 4

Project: vcli
Source File: test_vexecute.py
View license
@dbtest
@pytest.mark.parametrize('command', ['di', 'dv', 'ds', 'df', 'dT'])
@pytest.mark.parametrize('verbose', ['', '+'])
@pytest.mark.parametrize('pattern', ['', 'x', '*.*', 'x.y', 'x.*', '*.y'])
def test_describe_special(executor, command, verbose, pattern):
    """Smoke-test the backslash "describe" commands.

    We don't have any tests for the output of any of the special commands,
    but we can at least make sure they run without error.
    """
    # Build e.g. r'\di+ x.*'.  Explicit keyword arguments replace the
    # original ``.format(**locals())``, which silently depended on the
    # exact set of local variable names and breaks on any rename.
    sql = '\\{command}{verbose} {pattern}'.format(
        command=command, verbose=verbose, pattern=pattern)
    executor.run(sql)

Example 5

Project: dit
Source File: test_generalized_divergences.py
View license
@pytest.mark.parametrize('args', [
    [d4, d1, None, None],
    [d4, d2, None, None],
    [d4, d3, None, None],
    [d1, d2, [0, 1], None],
    [d3, d4, [1], None],
    [d5, d1, [0], [1]],
    [d4, d3, [1], [0]]
])
@pytest.mark.parametrize('divergence', divergences)
@pytest.mark.parametrize('alpha', [0, 1, 2, 0.5])
def test_exceptions(args, divergence, alpha):
    """
    Test that when p has outcomes that q doesn't have, that we raise an exception.
    """
    p, q, rvs, crvs = args
    with pytest.raises(ditException):
        divergence(p, q, alpha, rvs, crvs)

Example 6

Project: hedge
Source File: test_parallel.py
View license
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("flux_type", StrongAdvectionOperator.flux_types)
@pytest.mark.parametrize("random_partition", [True, False])
@pytest.mark.parametrize("mesh_gen", [my_box_mesh])
def test_hedge_parallel(dtype, flux_type, random_partition, mesh_gen):
    from pytools.mpi import run_with_mpi_ranks
    run_with_mpi_ranks(__file__, 2,
            run_convergence_test_advec,
                (dtype, flux_type, random_partition, mesh_gen))

Example 7

View license
@pytest.mark.parametrize("minimum", [1, 0.01, 0.000000001, 0.0000000001, 1.000000000000001])
@pytest.mark.parametrize("skew_ratio", [0, 1, -1])
@pytest.mark.parametrize("additive", [True, False])
@pytest.mark.benchmark(max_time=0, min_rounds=1, calibration_precision=100)
def test_calibrate_stuck(benchmark, minimum, additive, skew_ratio):
    # if skew_ratio:
    #     ratio += skew_ratio * SKEW
    if skew_ratio > 0:
        ratio = 50 * 1.000000000000001
    elif skew_ratio < 0:
        ratio = 50 / 1.000000000000001
    else:
        ratio = 50
    t = timer(ratio, minimum, additive)
    benchmark._timer = partial(next, t)
    benchmark._min_time = minimum
    benchmark(t.send, True)

Example 8

Project: OCRmyPDF
Source File: test_main.py
View license
@pytest.mark.parametrize(
    "pdf",
    ['palette.pdf', 'cmyk.pdf', 'ccitt.pdf', 'jbig2.pdf', 'lichtenstein.pdf'])
@pytest.mark.parametrize("renderer", ['hocr', 'tesseract'])
@pytest.mark.parametrize("output_type", ['pdf', 'pdfa'])
def test_exotic_image(spoof_tesseract_cache, pdf, renderer, output_type):
    """OCR PDFs with unusual image encodings through each renderer and
    output type, using the cached-Tesseract spoof for speed."""
    output_name = 'test_{0}_{1}.pdf'.format(pdf, renderer)
    args = [
        pdf,
        output_name,
        '-dc',
        '-v', '1',
        '--output-type', output_type,
        '--pdf-renderer', renderer,
    ]
    check_ocrmypdf(*args, env=spoof_tesseract_cache)

Example 9

Project: flask
Source File: test_basic.py
View license
@pytest.mark.parametrize('debug', [True, False])
@pytest.mark.parametrize('use_debugger', [True, False])
@pytest.mark.parametrize('use_reloader', [True, False])
@pytest.mark.parametrize('propagate_exceptions', [None, True, False])
def test_werkzeug_passthrough_errors(monkeypatch, debug, use_debugger,
                                     use_reloader, propagate_exceptions):
    """``app.run()`` must forward a ``passthrough_errors`` keyword to
    werkzeug's ``run_simple``.

    Bug fix: the original test captured the kwarg into ``rv`` but never
    asserted anything, so it could not fail.  The exact expected boolean
    depends on Flask's internal passthrough logic, so we assert only that
    run_simple was invoked with the kwarg present.
    """
    rv = {}

    # Mocks werkzeug.serving.run_simple method
    def run_simple_mock(*args, **kwargs):
        rv['passthrough_errors'] = kwargs.get('passthrough_errors')

    app = flask.Flask(__name__)
    monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
    app.config['PROPAGATE_EXCEPTIONS'] = propagate_exceptions
    app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)

    # run_simple must actually have been called with the kwarg.
    # TODO(review): pin the expected value per Flask's documented behavior.
    assert 'passthrough_errors' in rv

Example 10

Project: atomic-reactor
Source File: test_add_labels.py
View license
@pytest.mark.parametrize('df_old_as_plugin_arg', [True, False])
@pytest.mark.parametrize('df_new_as_plugin_arg', [True, False])
# Truth table: (label in base image old/new, label in Dockerfile old/new)
# -> (expected final old/new values, expected log fragment).
@pytest.mark.parametrize('base_old, base_new, df_old, df_new, expected_old, expected_new, expected_log', [
    (None,  None,  None,  None,  None,  None, None                             ),
    (None,  None,  None,  'A',   None,  'A',  None                             ),
    (None,  None,  'A',   None,  'A',   'A',  'as an alias for label'          ),
    (None,  None,  'A',   'A',   'A',   'A',  'already exists'                 ),
    (None,  None,  'A',   'B',   'A',   'B',  'should probably have same value'),
    (None,  'A',   None,  None,  None,  'A',  None                             ),
    (None,  'A',   None,  'A',   None,  'A',  None                             ),
    (None,  'A',   None,  'B',   None,  'B',  None                             ),
    (None,  'A',   'A',   None,  'A',   'A',  'as an alias for label'          ),
    (None,  'A',   'B',   None,  'B',   'B',  'as an alias for label'          ),
    (None,  'A',   'A',   'A',   'A',   'A',  'already exists'                 ),
    (None,  'A',   'A',   'B',   'A',   'B',  'should probably have same value'),
    (None,  'A',   'B',   'A',   'B',   'A',  'should probably have same value'),
    (None,  'A',   'B',   'B',   'B',   'B',  'already exists'                 ),
    (None,  'A',   'B',   'C',   'B',   'C',  'should probably have same value'),
    ('A',   None,  None,  None,  'A',   'A',  'as an alias for label'          ),
    ('A',   None,  None,  'A',   'A',   'A',  'already exists'                 ),
    ('A',   None,  None,  'B',   'B',   'B',  'as an alias for label'          ),
    ('A',   None,  'A',   None,  'A',   'A',  'as an alias for label'          ),
    ('A',   None,  'B',   None,  'B',   'B',  'as an alias for label'          ),
    ('A',   None,  'A',   'A',   'A',   'A',  'already exists'                 ),
    ('A',   None,  'A',   'B',   'A',   'B',  'should probably have same value'),
    ('A',   None,  'B',   'A',   'B',   'A',  'should probably have same value'),
    ('A',   None,  'B',   'B',   'B',   'B',  'already exists'                 ),
    ('A',   None,  'B',   'C',   'B',   'C',  'should probably have same value'),
    ('A',   'A',   None,  None,  'A',   'A',  'as an alias for label'          ),
    ('A',   'A',   None,  'A',   'A',   'A',  'already exists'                 ),
    ('A',   'A',   None,  'B',   'B',   'B',  'as an alias for label'          ),
    ('A',   'A',   'A',   None,  'A',   'A',  'as an alias for label'          ),
    ('A',   'A',   'B',   None,  'B',   'B',  'as an alias for label'          ),
    ('A',   'A',   'A',   'A',   'A',   'A',  'already exists'                 ),
    ('A',   'A',   'A',   'B',   'A',   'B',  'should probably have same value'),
    ('A',   'A',   'B',   'A',   'B',   'A',  'should probably have same value'),
    ('A',   'A',   'B',   'B',   'B',   'B',  'already exists'                 ),
    ('A',   'A',   'B',   'C',   'B',   'C',  'should probably have same value'),
    ('A',   'B',   None,  None,  'B',   'B',  'as an alias for label'          ), #really?
    ('A',   'B',   None,  'A',   'A',   'A',  'already exists'                 ),
    ('A',   'B',   None,  'B',   'B',   'B',  'as an alias for label'          ),
    ('A',   'B',   None,  'C',   'C',   'C',  'as an alias for label'          ),
    ('A',   'B',   'A',   None,  'A',   'A',  'as an alias for label'          ),
    ('A',   'B',   'B',   None,  'B',   'B',  'as an alias for label'          ),
    ('A',   'B',   'C',   None,  'C',   'C',  'as an alias for label'          ),
    ('A',   'B',   'A',   'A',   'A',   'A',  'already exists'                 ),
    ('A',   'B',   'A',   'B',   'A',   'B',  'should probably have same value'),
    ('A',   'B',   'A',   'C',   'A',   'C',  'should probably have same value'),
    ('A',   'B',   'B',   'A',   'B',   'A',  'should probably have same value'),
    ('A',   'B',   'B',   'B',   'B',   'B',  'already exists'                 ),
    ('A',   'B',   'B',   'C',   'B',   'C',  'should probably have same value'),
    ('A',   'B',   'C',   'A',   'C',   'A',  'should probably have same value'),
    ('A',   'B',   'C',   'B',   'C',   'B',  'should probably have same value'),
    ('A',   'B',   'C',   'C',   'C',   'C',  'already exists'                 ),
    ('A',   'B',   'C',   'D',   'C',   'D',  'should probably have same value'),
])
def test_add_labels_aliases(tmpdir, docker_tasker, caplog,
                            df_old_as_plugin_arg, df_new_as_plugin_arg,
                            base_old, base_new, df_old, df_new, expected_old, expected_new, expected_log):
    """Verify AddLabelsPlugin resolves the label_old -> label_new alias.

    The label may arrive via the base image, the Dockerfile, or a plugin
    argument; the table above pins the expected final value of each label
    and the log fragment (if any) the plugin should emit.
    """
    if MOCK:
        mock_docker()

    # Inject the 'old' and 'new' label either as a plugin argument or as
    # a LABEL instruction in the Dockerfile, per the parametrized flags.
    df_content = "FROM fedora\n"
    plugin_labels = {}
    if df_old:
        if df_old_as_plugin_arg:
            plugin_labels["label_old"] = df_old
        else:
            df_content += 'LABEL label_old="{0}"\n'.format(df_old)
    if df_new:
        if df_new_as_plugin_arg:
            plugin_labels["label_new"] = df_new
        else:
            df_content += 'LABEL label_new="{0}"\n'.format(df_new)

    # Simulated base-image inspection data holding the inherited labels.
    base_labels = {INSPECT_CONFIG: {"Labels": {}}}
    if base_old:
        base_labels[INSPECT_CONFIG]["Labels"]["label_old"] = base_old
    if base_new:
        base_labels[INSPECT_CONFIG]["Labels"]["label_new"] = base_new

    df = DockerfileParser(str(tmpdir))
    df.content = df_content

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    setattr(workflow, 'builder', X)
    flexmock(workflow, base_image_inspect=base_labels)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {
                'labels': plugin_labels,
                'dont_overwrite': [],
                'auto_labels': [],
                'aliases': {"label_old": "label_new"},
            }
        }]
    )

    runner.run()
    assert AddLabelsPlugin.key is not None
    # Dockerfile labels take precedence; fall back to the base image's.
    result_old = df.labels.get("label_old") or base_labels[INSPECT_CONFIG]["Labels"].get("label_old")
    result_new = df.labels.get("label_new") or base_labels[INSPECT_CONFIG]["Labels"].get("label_new")
    assert result_old == expected_old
    assert result_new == expected_new

    if expected_log:
        # NOTE(review): caplog.text is a property (not callable) in modern
        # pytest; this call form assumes the old pytest-capturelog API.
        assert expected_log in caplog.text()

Example 11

Project: atomic-reactor
Source File: test_util.py
View license
@pytest.mark.parametrize('insecure', [
    True,
    False,
])
@pytest.mark.parametrize('versions', [
    (('v1', 'v2')),
    (('v1',)),
    (('v2',)),
    (tuple()),
    None,
])
@pytest.mark.parametrize('creds', [
    ('user1', 'pass'),
    (None, 'pass'),
    ('user1', None),
    None,
])
@pytest.mark.parametrize('image,registry,url', [
    ('not-used.com/spam:latest', 'localhost.com',
     'https://localhost.com/v2/spam/manifests/latest'),

    ('not-used.com/food/spam:latest', 'http://localhost.com',
     'http://localhost.com/v2/food/spam/manifests/latest'),

    ('not-used.com/spam', 'https://localhost.com',
     'https://localhost.com/v2/spam/manifests/latest'),
])
@responses.activate
def test_get_manifest_digests(tmpdir, image, registry, insecure, creds,
                              versions, url):
    """Exercise get_manifest_digests() against a stubbed registry.

    Checks URL construction, credential forwarding from a .dockercfg
    file, and that the requested manifest versions map to the expected
    per-version digests (or RuntimeError when no versions are requested).
    """
    kwargs = {}

    image = ImageName.parse(image)
    kwargs['image'] = image

    if creds:

        # Write a temporary .dockercfg so the registry client picks up
        # the (username, password) pair for this image's registry.
        temp_dir = mkdtemp(dir=str(tmpdir))
        with open(os.path.join(temp_dir, '.dockercfg'), 'w+') as dockerconfig:
            dockerconfig.write(json.dumps({
                image.registry: {
                    'username': creds[0], 'password': creds[1]
                }
            }))
        kwargs['dockercfg_path'] = temp_dir

    kwargs['registry'] = registry

    if insecure is not None:
        kwargs['insecure'] = insecure

    if versions is not None:
        kwargs['versions'] = versions

    def request_callback(request):
        # When full credentials were configured, the request must carry
        # an Authorization header.
        if creds and creds[0] and creds[1]:
            assert request.headers['Authorization']

        # Answer with a digest derived from the Accept'ed manifest version.
        media_type = request.headers['Accept']
        if media_type.endswith('v2+json'):
            digest = 'v2-digest'
        elif media_type.endswith('v1+json'):
            digest = 'v1-digest'
        else:
            raise ValueError('Unexpected media type {}'.format(media_type))

        media_type_prefix = media_type.split('+')[0]
        headers = {
            'Content-Type': '{}+jsonish'.format(media_type_prefix),
            'Docker-Content-Digest': digest
        }
        return (200, headers, '')

    responses.add_callback(responses.GET, url, callback=request_callback)

    expected_versions = versions
    if versions is None:
        # Test default versions value
        expected_versions = ('v1', 'v2')

    expected_result = dict(
        (version, '{}-digest'.format(version))
        for version in expected_versions)

    if expected_versions:
        actual_digests = get_manifest_digests(**kwargs)
        assert actual_digests.v1 == expected_result.get('v1')
        assert actual_digests.v2 == expected_result.get('v2')
    else:
        # An empty versions tuple leaves nothing to fetch -> RuntimeError.
        with pytest.raises(RuntimeError):
            get_manifest_digests(**kwargs)

Example 12

Project: raiden
Source File: test_blockchainservice.py
View license
@pytest.mark.parametrize('privatekey_seed', ['blockchain:{}'])
@pytest.mark.parametrize('number_of_nodes', [3])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_assets', [0])
def test_new_netting_contract(raiden_network, asset_amount, settle_timeout):
    """End-to-end check of netting-channel creation and funding.

    Deploys an asset, opens two channels (peer0<->peer1, peer0<->peer2),
    and verifies channel bookkeeping plus deposit behavior: deposits
    without an ERC20 approve do nothing, single-funded channels open,
    and double funding is reflected in both participants' balances.
    """
    # pylint: disable=line-too-long,too-many-statements,too-many-locals

    app0, app1, app2 = raiden_network
    peer0_address = app0.raiden.address
    peer1_address = app1.raiden.address
    peer2_address = app2.raiden.address

    blockchain_service0 = app0.raiden.chain

    # Deploy a fresh ERC20-style token and register it as a Raiden asset.
    asset_address = blockchain_service0.deploy_and_register_asset(
        contract_name='HumanStandardToken',
        contract_file='HumanStandardToken.sol',
        constructor_parameters=(asset_amount, 'raiden', 2, 'Rd'),
    )

    # Distribute tokens evenly so every node can fund channels later.
    asset0 = blockchain_service0.asset(asset_address)
    for transfer_to in raiden_network[1:]:
        asset0.transfer(
            privatekey_to_address(transfer_to.raiden.privkey),
            asset_amount // len(raiden_network),
        )

    manager0 = blockchain_service0.manager_by_asset(asset_address)

    # sanity
    assert manager0.channels_addresses() == []
    assert manager0.channels_by_participant(peer0_address) == []
    assert manager0.channels_by_participant(peer1_address) == []
    assert manager0.channels_by_participant(peer2_address) == []

    # create one channel
    netting_address_01 = manager0.new_netting_channel(
        peer0_address,
        peer1_address,
        settle_timeout,
    )

    # check contract state
    netting_channel_01 = blockchain_service0.netting_channel(netting_address_01)
    assert netting_channel_01.isopen() is False
    assert netting_channel_01.partner(peer0_address) == peer1_address
    assert netting_channel_01.partner(peer1_address) == peer0_address

    # check channels
    channel_list = manager0.channels_addresses()
    assert sorted(channel_list[0]) == sorted([peer0_address, peer1_address])

    assert manager0.channels_by_participant(peer0_address) == [netting_address_01]
    assert manager0.channels_by_participant(peer1_address) == [netting_address_01]
    assert manager0.channels_by_participant(peer2_address) == []

    # TODO:
    # cant recreate the existing channel
    # with pytest.raises(Exception):
    #     manager0.new_netting_channel(
    #         peer0_address,
    #         peer1_address,
    #         settle_timeout,
    #     )

    # create other chanel
    netting_address_02 = manager0.new_netting_channel(
        peer0_address,
        peer2_address,
        settle_timeout,
    )

    netting_channel_02 = blockchain_service0.netting_channel(netting_address_02)

    assert netting_channel_02.isopen() is False
    assert netting_channel_02.partner(peer0_address) == peer2_address
    assert netting_channel_02.partner(peer2_address) == peer0_address

    # Both channels must now be registered, each pair in either order.
    channel_list = manager0.channels_addresses()
    expected_channels = [
        sorted([peer0_address, peer1_address]),
        sorted([peer0_address, peer2_address]),
    ]

    for channel in channel_list:
        assert sorted(channel) in expected_channels

    assert sorted(manager0.channels_by_participant(peer0_address)) == sorted([netting_address_01, netting_address_02])
    assert manager0.channels_by_participant(peer1_address) == [netting_address_01]
    assert manager0.channels_by_participant(peer2_address) == [netting_address_02]

    # deposit without approve should fail
    netting_channel_01.deposit(peer0_address, 100)
    assert netting_channel_01.isopen() is False
    assert netting_channel_02.isopen() is False
    assert netting_channel_01.detail(peer0_address)['our_balance'] == 0
    assert netting_channel_01.detail(peer1_address)['our_balance'] == 0

    # single-funded channel
    app0.raiden.chain.asset(asset_address).approve(netting_address_01, 100)
    netting_channel_01.deposit(peer0_address, 100)
    assert netting_channel_01.isopen() is True
    assert netting_channel_02.isopen() is False

    assert netting_channel_01.detail(peer0_address)['our_balance'] == 100
    assert netting_channel_01.detail(peer1_address)['our_balance'] == 0

    # with pytest.raises(Exception):
    #    blockchain_service0.deposit(asset_address, netting_address_01, peer0_address, 100)

    # double-funded channel
    app0.raiden.chain.asset(asset_address).approve(netting_address_02, 70)
    netting_channel_02.deposit(peer0_address, 70)
    assert netting_channel_01.isopen() is True
    assert netting_channel_02.isopen() is True

    assert netting_channel_02.detail(peer0_address)['our_balance'] == 70
    assert netting_channel_02.detail(peer2_address)['our_balance'] == 0

    # Fund the second side of channel 02 from peer2's own app.
    app2.raiden.chain.asset(asset_address).approve(netting_address_02, 130)
    app2.raiden.chain.netting_channel(netting_address_02).deposit(peer2_address, 130)
    assert netting_channel_01.isopen() is True
    assert netting_channel_02.isopen() is True

    assert netting_channel_02.detail(peer0_address)['our_balance'] == 70
    assert netting_channel_02.detail(peer2_address)['our_balance'] == 130

Example 13

Project: raiden
Source File: test_blockchainservice.py
View license
@pytest.mark.parametrize('blockchain_type', ['geth'])
@pytest.mark.parametrize('privatekey_seed', ['blockchain:{}'])
@pytest.mark.parametrize('number_of_nodes', [3])
def test_blockchain(blockchain_backend, private_keys, number_of_nodes, poll_timeout):
    """Deploy token + registry contracts against a geth backend and check
    that addAsset/newChannel each emit exactly one log event with the
    expected decoded payload.

    NOTE(review): ``.decode('hex')`` is the Python 2 str API; this test
    predates Python 3 support.
    """
    # pylint: disable=too-many-locals
    addresses = [
        privatekey_to_address(priv)
        for priv in private_keys
    ]

    privatekey = private_keys[0]
    address = privatekey_to_address(privatekey)
    total_asset = 100

    jsonrpc_client = JSONRPCClient(
        privkey=privatekey,
        print_communication=False,
    )
    patch_send_transaction(jsonrpc_client)

    # Deploy the token contract holding the full supply at `address`.
    humantoken_path = get_contract_path('HumanStandardToken.sol')
    humantoken_contracts = compile_file(humantoken_path, libraries=dict())
    token_proxy = jsonrpc_client.deploy_solidity_contract(
        address,
        'HumanStandardToken',
        humantoken_contracts,
        dict(),
        (total_asset, 'raiden', 2, 'Rd'),
        timeout=poll_timeout,
    )

    registry_path = get_contract_path('Registry.sol')
    registry_contracts = compile_file(registry_path)
    registry_proxy = jsonrpc_client.deploy_solidity_contract(
        address,
        'Registry',
        registry_contracts,
        dict(),
        tuple(),
        timeout=poll_timeout,
    )

    # Contract deployment itself must not have produced any logs.
    log_list = jsonrpc_client.call(
        'eth_getLogs',
        {
            'fromBlock': '0x0',
            'toBlock': 'latest',
            'topics': [],
        },
    )
    assert len(log_list) == 0

    # pylint: disable=no-member

    assert token_proxy.balanceOf(address) == total_asset
    # Registering the asset should emit the first log event.
    transaction_hash = registry_proxy.addAsset.transact(
        token_proxy.address,
        gasprice=denoms.wei,
    )
    jsonrpc_client.poll(transaction_hash.decode('hex'), timeout=poll_timeout)

    assert len(registry_proxy.assetAddresses.call()) == 1

    log_list = jsonrpc_client.call(
        'eth_getLogs',
        {
            'fromBlock': '0x0',
            'toBlock': 'latest',
            'topics': [],
        },
    )
    assert len(log_list) == 1

    channel_manager_address_encoded = registry_proxy.channelManagerByAsset.call(token_proxy.address)
    channel_manager_address = channel_manager_address_encoded.decode('hex')

    # Decode the AssetAdded event and cross-check its payload against the
    # addresses we already know.
    log = log_list[0]
    log_topics = [
        decode_topic(topic)
        for topic in log['topics']  # pylint: disable=invalid-sequence-index
    ]
    log_data = log['data']
    event = registry_proxy.translator.decode_event(
        log_topics,
        log_data[2:].decode('hex'),
    )

    assert channel_manager_address == event['channel_manager_address'].decode('hex')
    assert token_proxy.address == event['asset_address'].decode('hex')

    channel_manager_proxy = jsonrpc_client.new_contract_proxy(
        registry_contracts['ChannelManagerContract']['abi'],
        channel_manager_address,
    )

    # Opening a channel should emit the second log event.
    transaction_hash = channel_manager_proxy.newChannel.transact(
        addresses[1],
        10,
        gasprice=denoms.wei,
    )
    jsonrpc_client.poll(transaction_hash.decode('hex'), timeout=poll_timeout)

    log_list = jsonrpc_client.call(
        'eth_getLogs',
        {
            'fromBlock': '0x0',
            'toBlock': 'latest',
            'topics': [],
        },
    )
    assert len(log_list) == 2

Example 14

Project: raiden
Source File: test_events.py
View license
@pytest.mark.parametrize('privatekey_seed', ['event_new_channel:{}'])
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [0])
def test_event_new_channel(raiden_chain, deposit, settle_timeout, events_poll_timeout):
    """Both nodes must learn about a new channel through blockchain events
    and track its state through: created-unfunded -> single-funded ->
    double-funded.

    NOTE(review): ``.values()[0]`` relies on Python 2's list-returning
    dict.values(); under Python 3 this would need ``list(...)[0]``.
    """
    app0, app1 = raiden_chain  # pylint: disable=unbalanced-tuple-unpacking

    asset_address = app0.raiden.chain.default_registry.asset_addresses()[0]

    # Starting point: no channels known on either side.
    assert len(app0.raiden.managers_by_asset_address[asset_address].address_channel) == 0
    assert len(app1.raiden.managers_by_asset_address[asset_address].address_channel) == 0

    asset0 = app0.raiden.chain.asset(asset_address)
    manager0 = app0.raiden.chain.manager_by_asset(asset_address)

    asset1 = app1.raiden.chain.asset(asset_address)

    netcontract_address = manager0.new_netting_channel(
        app0.raiden.address,
        app1.raiden.address,
        settle_timeout,
    )

    netting_channel0 = app0.raiden.chain.netting_channel(netcontract_address)
    netting_channel1 = app1.raiden.chain.netting_channel(netcontract_address)

    # Give the event pollers time to pick up the on-chain event.
    gevent.sleep(events_poll_timeout)

    # channel is created but not opened and without funds
    assert len(app0.raiden.managers_by_asset_address[asset_address].address_channel) == 1
    assert len(app1.raiden.managers_by_asset_address[asset_address].address_channel) == 1

    channel0 = app0.raiden.managers_by_asset_address[asset_address].address_channel.values()[0]
    channel1 = app1.raiden.managers_by_asset_address[asset_address].address_channel.values()[0]

    assert_synched_channels(
        channel0, 0, [],
        channel1, 0, [],
    )

    # Fund only app0's side (approve first, then deposit).
    asset0.approve(netcontract_address, deposit)
    netting_channel0.deposit(app0.raiden.address, deposit)

    gevent.sleep(events_poll_timeout)

    # channel is open but single funded
    assert len(app0.raiden.managers_by_asset_address[asset_address].address_channel) == 1
    assert len(app1.raiden.managers_by_asset_address[asset_address].address_channel) == 1

    channel0 = app0.raiden.managers_by_asset_address[asset_address].address_channel.values()[0]
    channel1 = app1.raiden.managers_by_asset_address[asset_address].address_channel.values()[0]

    assert_synched_channels(
        channel0, deposit, [],
        channel1, 0, [],
    )

    # Fund app1's side as well.
    asset1.approve(netcontract_address, deposit)
    netting_channel1.deposit(app1.raiden.address, deposit)

    gevent.sleep(events_poll_timeout)

    # channel is open and funded by both participants
    assert len(app0.raiden.managers_by_asset_address[asset_address].address_channel) == 1
    assert len(app1.raiden.managers_by_asset_address[asset_address].address_channel) == 1

    channel0 = app0.raiden.managers_by_asset_address[asset_address].address_channel.values()[0]
    channel1 = app1.raiden.managers_by_asset_address[asset_address].address_channel.values()[0]

    assert_synched_channels(
        channel0, deposit, [],
        channel1, deposit, [],
    )

Example 15

Project: raiden
Source File: test_channel.py
View license
@pytest.mark.parametrize('blockchain_type', ['mock'])
@pytest.mark.parametrize('deposit', [2 ** 30])
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('number_of_transfers', [100])
def test_interwoven_transfers(number_of_transfers, raiden_network,
                              settle_timeout):
    """ Can keep doing transaction even if not all secrets have been released. """
    def log_state():
        # Snapshot which locked amounts are still unclaimed vs claimed,
        # using the bookkeeping lists from the enclosing scope.
        unclaimed = [
            transfer.lock.amount
            for pos, transfer in enumerate(transfers_list)
            if not transfers_claimed[pos]
        ]

        claimed = [
            transfer.lock.amount
            for pos, transfer in enumerate(transfers_list)
            if transfers_claimed[pos]
        ]
        log.info(
            'interwoven',
            claimed_amount=claimed_amount,
            distributed_amount=distributed_amount,
            claimed=claimed,
            unclaimed=unclaimed,
        )

    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking

    # NOTE(review): .values()[0] is Python 2 dict API (list-returning).
    channel0 = app0.raiden.managers_by_asset_address.values()[0].partneraddress_channel.values()[0]
    channel1 = app1.raiden.managers_by_asset_address.values()[0].partneraddress_channel.values()[0]

    contract_balance0 = channel0.contract_balance
    contract_balance1 = channel1.contract_balance

    # Test bookkeeping: every issued transfer, whether it was claimed,
    # and the locks still outstanding.
    unclaimed_locks = []
    transfers_list = []
    transfers_claimed = []

    # start at 1 because we can't use amount=0
    transfers_amount = [i for i in range(1, number_of_transfers + 1)]
    transfers_secret = [str(i) for i in range(number_of_transfers)]

    claimed_amount = 0
    distributed_amount = 0

    for i, (amount, secret) in enumerate(zip(transfers_amount, transfers_secret)):
        expiration = app0.raiden.chain.block_number() + settle_timeout - 1
        locked_transfer = channel0.create_lockedtransfer(
            amount=amount,
            identifier=1,  # TODO: fill in identifier
            expiration=expiration,
            hashlock=sha3(secret),
        )

        # synchronized registration
        app0.raiden.sign(locked_transfer)
        channel0.register_transfer(locked_transfer)
        channel1.register_transfer(locked_transfer)

        # update test state
        distributed_amount += amount
        transfers_claimed.append(False)
        transfers_list.append(locked_transfer)
        unclaimed_locks.append(locked_transfer.lock)

        log_state()

        # test the synchronization and values
        assert_synched_channels(
            channel0, contract_balance0 - claimed_amount, [],
            channel1, contract_balance1 + claimed_amount, unclaimed_locks,
        )
        assert channel0.distributable == contract_balance0 - distributed_amount

        # claim a transaction at every other iteration, leaving the current one
        # in place
        if i > 0 and i % 2 == 0:
            transfer = transfers_list[i - 1]
            secret = transfers_secret[i - 1]

            # synchronized clamining
            channel0.release_lock(secret)
            channel1.withdraw_lock(secret)

            # update test state
            claimed_amount += transfer.lock.amount
            transfers_claimed[i - 1] = True
            unclaimed_locks = [
                unclaimed_transfer.lock
                for pos, unclaimed_transfer in enumerate(transfers_list)
                if not transfers_claimed[pos]
            ]

            log_state()

            # test the state of the channels after the claim
            assert_synched_channels(
                channel0, contract_balance0 - claimed_amount, [],
                channel1, contract_balance1 + claimed_amount, unclaimed_locks,
            )
            assert channel0.distributable == contract_balance0 - distributed_amount

Example 16

Project: raiden
Source File: test_transfer.py
View license
@pytest.mark.parametrize('blockchain_type', ['mock'])
@pytest.mark.parametrize('channels_per_node', [2])
@pytest.mark.parametrize('number_of_nodes', [10])
def test_mediated_transfer(raiden_network):
    """End-to-end mediated transfer over a two-hop path A -> B -> C.

    Picks a two-hop path out of the channel graph, transfers `amount`
    from Alice to Charlie, then asserts that each channel along the path
    shifted its balance by exactly `amount` in the right direction.
    """

    def get_channel(from_, to_):
        # NOTE: closes over `ams_by_address` and `asset_address`, which are
        # only assigned further down in this test body; must not be called
        # before those assignments run.
        return ams_by_address[from_][asset_address].partneraddress_channel[to_]

    alice_app = raiden_network[0]
    setup_messages_cb()

    # Python 2 idiom: dict.values() returns a list, so [0] indexing works.
    asset_manager = alice_app.raiden.managers_by_asset_address.values()[0]
    asset_address = asset_manager.asset_address

    # search for a path of length=2 A > B > C
    num_hops = 2
    initiator_address = alice_app.raiden.address

    paths_length_2 = asset_manager.channelgraph.get_paths_of_length(
        initiator_address,
        num_hops,
    )

    # every returned path must start at the initiator and have exactly 3 nodes
    assert len(paths_length_2)
    for path in paths_length_2:
        assert len(path) == num_hops + 1
        assert path[0] == initiator_address

    path = paths_length_2[0]

    alice_address, bob_address, charlie_address = path

    # the chosen two-hop path must also be among the shortest A -> C paths
    shortest_paths = list(asset_manager.channelgraph.get_shortest_paths(
        initiator_address,
        charlie_address,
    ))

    assert path in shortest_paths
    assert min(len(path) for path in shortest_paths) == num_hops + 1

    ams_by_address = dict(
        (app.raiden.address, app.raiden.managers_by_asset_address)
        for app in raiden_network
    )

    # channels (alice <-> bob <-> charlie)
    channel_ab = get_channel(alice_address, bob_address)
    channel_ba = get_channel(bob_address, alice_address)
    channel_bc = get_channel(bob_address, charlie_address)
    channel_cb = get_channel(charlie_address, bob_address)

    initial_balance_ab = channel_ab.balance
    initial_balance_ba = channel_ba.balance
    initial_balance_bc = channel_bc.balance
    initial_balance_cb = channel_cb.balance

    amount = 10

    alice_app.raiden.api.transfer(
        asset_address,
        amount,
        charlie_address,
    )

    # give the asynchronous transfer time to settle on all nodes
    gevent.sleep(1.)

    # sender side of each hop loses `amount`, receiver side gains it
    assert initial_balance_ab - amount == channel_ab.balance
    assert initial_balance_ba + amount == channel_ba.balance
    assert initial_balance_bc - amount == channel_bc.balance
    assert initial_balance_cb + amount == channel_cb.balance

Example 17

Project: terminaltables
Source File: test_gen_table.py
View license
@pytest.mark.parametrize('inner_heading_row_border', [True, False])
@pytest.mark.parametrize('inner_footing_row_border', [True, False])
@pytest.mark.parametrize('inner_row_border', [True, False])
def test_inner_row_borders(inner_heading_row_border, inner_footing_row_border, inner_row_border):
    """Verify all combinations of heading/footing/per-row borders.

    :param bool inner_heading_row_border: Passed to table.
    :param bool inner_footing_row_border: Passed to table.
    :param bool inner_row_border: Passed to table.
    """
    table_data = [
        ['Name', 'Color', 'Type'],
        ['Avocado', 'green', 'nut'],
        ['Tomato', 'red', 'fruit'],
        ['Lettuce', 'green', 'vegetable'],
    ]
    table = BaseTable(table_data)
    table.inner_heading_row_border = inner_heading_row_border
    table.inner_footing_row_border = inner_footing_row_border
    table.inner_row_border = inner_row_border
    inner_widths, inner_heights, outer_widths = max_dimensions(table_data, table.padding_left, table.padding_right)[:3]
    actual = flatten(table.gen_table(inner_widths, inner_heights, outer_widths))

    # Assemble the expected output from shared line fragments rather than
    # spelling out every full table variant separately.
    separator = '+---------+-------+-----------+'
    heading = '| Name    | Color | Type      |'
    body_rows = [
        '| Avocado | green | nut       |',
        '| Tomato  | red   | fruit     |',
        '| Lettuce | green | vegetable |',
    ]

    if inner_row_border:
        # A border between every row supersedes the heading/footing flags.
        lines = [separator, heading]
        for row in body_rows:
            lines += [separator, row]
        lines.append(separator)
    else:
        lines = [separator, heading]
        if inner_heading_row_border:
            lines.append(separator)
        lines += body_rows[:2]
        if inner_footing_row_border:
            # Footing border sits just above the last row.
            lines.append(separator)
        lines += [body_rows[2], separator]

    assert actual == '\n'.join(lines)

Example 18

Project: terminaltables
Source File: test_build_border.py
View license
@pytest.mark.parametrize('outer_widths,intersect,expected', [
    ([12], '+', '\x1b[34mTEST\x1b[0m--------'),
    ([12], '', '\x1b[34mTEST\x1b[0m--------'),
    ([7, 5], '+', '\x1b[34mTEST\x1b[0m---+-----'),
    ([7, 5], '', '\x1b[34mTEST\x1b[0m--------'),
    ([4], '+', '\x1b[34mTEST\x1b[0m'),
    ([4], '', '\x1b[34mTEST\x1b[0m'),
    ([4, 1], '+', '\x1b[34mTEST\x1b[0m+-'),
    ([4, 1], '', '\x1b[34mTEST\x1b[0m-'),
    ([4, 0], '+', '\x1b[34mTEST\x1b[0m+'),
    ([4, 0], '', '\x1b[34mTEST\x1b[0m'),
    ([12], '', '\x1b[34mTEST\x1b[0m--------'),
    ([6, 6], '', '\x1b[34mTEST\x1b[0m--------'),
    ([3, 3, 3, 3], '', '\x1b[34mTEST\x1b[0m--------'),
    ([2, 1, 2, 1, 2, 1, 2, 1], '', '\x1b[34mTEST\x1b[0m--------'),
    ([1] * 12, '', '\x1b[34mTEST\x1b[0m--------'),
    ([2, 4], '', '\x1b[34mTEST\x1b[0m--'),
    ([1, 4], '', '\x1b[34mTEST\x1b[0m-'),
    ([1, 3], '', '\x1b[34mTEST\x1b[0m'),
    ([1, 2], '', '---'),
    ([12], '+', '\x1b[34mTEST\x1b[0m--------'),
    ([0, 12], '+', '\x1b[34mTEST\x1b[0m---------'),
    ([12, 0], '+', '\x1b[34mTEST\x1b[0m--------+'),
    ([0, 0, 12], '+', '\x1b[34mTEST\x1b[0m----------'),
    ([12, 0, 0], '+', '\x1b[34mTEST\x1b[0m--------++'),
    ([3, 3], '+', '\x1b[34mTEST\x1b[0m---'),
    ([4, 2], '+', '\x1b[34mTEST\x1b[0m+--'),
    ([5, 1], '+', '\x1b[34mTEST\x1b[0m-+-'),
    ([3, 3, 3, 3], '+', '\x1b[34mTEST\x1b[0m---+---+---'),
    ([2, 2, 4, 4], '+', '\x1b[34mTEST\x1b[0m-+----+----'),
    ([1, 1, 5, 5], '+', '\x1b[34mTEST\x1b[0m-----+-----'),
    ([2, 2, 2, 2], '+', '\x1b[34mTEST\x1b[0m-+--+--'),
    ([1, 1, 1, 1, 1], '+', '\x1b[34mTEST\x1b[0m-+-+-'),
    ([0, 0, 0, 0, 0, 0, 0], '+', '\x1b[34mTEST\x1b[0m++'),
    ([1, 1], '+', '-+-'),
])
@pytest.mark.parametrize('left,right', [('', ''), ('<', '>')])
# Same title rendered via four different color libraries: raw ANSI,
# colorclass, colorama, and termcolor — all must measure as 4 visible chars.
@pytest.mark.parametrize('title', [
    '\x1b[34mTEST\x1b[0m',
    Color('{blue}TEST{/all}'),
    Fore.BLUE + 'TEST' + Style.RESET_ALL,
    colored('TEST', 'blue'),
])
def test_colors(outer_widths, left, intersect, right, title, expected):
    """Test with color title characters.

    Expected values assume the ANSI escape sequences contribute zero
    display width to the border.

    :param iter outer_widths: List of integers representing column widths with padding.
    :param str left: Left border.
    :param str intersect: Column separator.
    :param str right: Right border.
    :param title: Title in border with color codes.
    :param str expected: Expected output.
    """
    # Expected strings in the table above omit the outer border characters;
    # add them when this parametrization supplies them.
    if left and right:
        expected = left + expected + right
    actual = build_border(outer_widths, '-', left, intersect, right, title=title)
    assert ''.join(actual) == expected

Example 19

Project: schematics
Source File: test_conversion.py
View license
@pytest.mark.parametrize('variant', (None, 'noerrors'))
@pytest.mark.parametrize('partial', (True, False))
@pytest.mark.parametrize('import_, two_pass, input_instance, input_init, init',
                       [( True,    False,    False,          None,       True),
                        ( True,    False,    False,          None,       False),
                        ( True,    False,    True,           False,      True),
                        ( True,    False,    True,           False,      False),
                        ( True,    False,    True,           True,       True),
                        ( True,    False,    True,           True,       False),
                        ( True,    True,     False,          None,       True),
                        ( True,    True,     False,          None,       False),
                        ( True,    True,     True,           False,      True),
                        ( True,    True,     True,           False,      False),
                        ( True,    True,     True,           True,       True),
                        ( True,    True,     True,           True,       False),
                        ( False,   None,     True,           False,      True),
                        ( False,   None,     True,           False,      False),
                        ( False,   None,     True,           True,       True),
                        ( False,   None,     True,           True,       False)])
def test_conversion_with_validation(input, import_, two_pass, input_instance, input_init, init,
                                    partial, variant):
    """Exercise model conversion plus validation over many option combos.

    With variant == 'noerrors' the fixture data is valid: validation must
    succeed, the input must be left unmodified, and importing must deep-copy
    nested model data.  With the default variant the data has known
    problems: the raised DataError's errors and surviving partial_data are
    checked exhaustively.
    """

    # Either init flag makes untouched fields materialize as explicit None
    # entries in the expected partial data constructed below.
    init_to_none = input_init or init

    if variant == 'noerrors':

        orig_input = copy(input)

        if input_instance:
            assert input.modelfield is orig_input.modelfield

        if import_:
            if two_pass:
                m = M(input, init=init)
                m.validate(partial=partial)
            else:
                m = M(input, init=init, partial=partial, validate=True)
        else:
            input.validate(init_values=init, partial=partial)
            m = input

        # validation must not have mutated the caller's input
        assert input == orig_input

        if input_instance:
            if import_:
                # importing deep-copies nested model data
                assert m.modelfield is not input.modelfield
                assert m._data['modelfield'] is not input._data['modelfield']
                assert m.modelfield.listfield is not input.modelfield.listfield
            else:
                # validating in place shares the nested objects
                assert m.modelfield is input.modelfield
                assert m._data['modelfield'] is input._data['modelfield']
                assert m.modelfield.listfield is input.modelfield.listfield

        return

    if init_to_none:
        partial_data = {
            'intfield': 1,
            'reqfield': u'foo',
            'matrixfield': None,
            'modelfield': {
                'intfield': None,
                'reqfield': u'bar',
                'matrixfield': None,
                'modelfield': {
                    'reqfield': None,
                    'listfield': None,
                    'modelfield': M({
                        'intfield': 0,
                        'reqfield': u'foo',
                        'listfield': None})}}}
    else:
        partial_data = {
            'intfield': 1,
            'reqfield': u'foo',
            'modelfield': {
                'reqfield': u'bar',
                'modelfield': {
                    'listfield': None,
                    'modelfield': M({
                        'intfield': 0,
                        'reqfield': u'foo',
                        'listfield': None}, init=False)}}}

    with pytest.raises(DataError) as excinfo:
        if import_:
            if two_pass:
                m = M(input, init=init)
                m.validate(partial=partial)
            else:
                M(input, init=init, partial=partial, validate=True)
        else:
            input.validate(init_values=init, partial=partial)

    errors = excinfo.value.errors

    # pop() each expected error so the final equality asserts prove that
    # nothing unexpected is left behind
    err_list = errors.pop('listfield')
    assert type(err_list) is ValidationError
    assert len(err_list) == 1

    err_list = errors['modelfield'].pop('listfield')
    assert type(err_list) is ValidationError
    assert len(err_list) == 2

    err_list = errors['modelfield']['modelfield'].pop('intfield')
    assert len(err_list) == 1

    if not partial:
        # a full (non-partial) validation also reports the missing required field
        err_list = errors['modelfield']['modelfield'].pop('reqfield')
        assert len(err_list) == 1
        if init_to_none:
            partial_data['modelfield']['modelfield'].pop('reqfield')

    err_dict = errors['modelfield']['modelfield'].pop('matrixfield')
    sub_err_dict = err_dict.pop(1)
    assert list((k, type(v)) for k, v in sub_err_dict.items()) \
        == [(2, ValidationError), (3, ValidationError)]
    assert err_dict == {}

    # every remaining error container must now be empty
    assert errors['modelfield'].pop('modelfield') == {}
    assert errors.pop('modelfield') == {}
    assert errors == {}

    assert excinfo.value.partial_data == partial_data
Example 20

Project: hydrachain
Source File: test_sim_joins.py
View license
# `list(range(...))` instead of `range(...) + [10]`: concatenating a range
# is a TypeError on Python 3, where range() is a lazy sequence, not a list.
@pytest.mark.parametrize('validators', list(range(4, 7)) + [10])
@pytest.mark.parametrize('late', range(1, 3))
@pytest.mark.parametrize('delay', [2])
# run this test with `tox -- -rx -k test_late_joins`
def test_late_joins(validators, late, delay):
    """In this test, we spawn a network with a number of
    `validators` validator nodes, where a number of `late` nodes stay
    offline until after a certain delay:

    >>> initial sync_time = delay * (validators - late)

    Now the "late-joiners" come online and we let them sync until
    the networks head block is at `num_initial_blocks` (default: 10).

    Since in some configurations the late-joiners don't manage to catch up
    at that point, we inject a transaction (leading to a new block) into
    the now fully online network.

    Now all nodes must be at the same block-height: `(num_initial_blocks + 1)`.
    """
    network = Network(num_nodes=validators, simenv=True)
    # keep the last `late` nodes offline for the initial sync period
    for node in network.nodes[validators - late:]:
        node.isactive = False
    network.connect_nodes()
    network.normvariate_base_latencies()
    network.start()
    network.run(delay * (validators - late))
    # bring the late-joiners online and let everyone sync
    for node in network.nodes[validators - late:]:
        node.isactive = True
    network.connect_nodes()
    network.normvariate_base_latencies()
    network.start()
    network.run(max(10, validators * delay))

    r = network.check_consistency()

    # now majority must be at block 10
    # late-joiners may be at block 9 or even still at block 0
    assert_heightdistance(r, max_distance=10)
    assert r['heights'][10] >= (validators - late)

    # after a new block, all nodes should be up-to-date:
    chainservice = network.nodes[0].services.chainservice

    sender = chainservice.chain.coinbase
    to = 'x' * 20
    nonce = chainservice.chain.head.get_nonce(sender)
    gas = 21000
    gasprice = 1
    value = 1
    # sender must be able to cover gas plus the transferred value
    assert chainservice.chain.head.get_balance(sender) > gas * gasprice + value
    tx = Transaction(nonce, gasprice, gas, to, value, data='')
    network.nodes[0].services.accounts.sign_tx(sender, tx)
    assert tx.sender == sender

    success = chainservice.add_transaction(tx)
    assert success

    # run in ever longer bursts until we're at height 11
    for i in range(1, 10):
        network.connect_nodes()
        network.normvariate_base_latencies()
        network.start()
        network.run(2 * i)
        r = network.check_consistency()
        if r['heights'][11] == validators:
            break

    assert_heightdistance(r)
    assert r['heights'][11] == validators
Example 21

Project: concierge
Source File: test_endpoints_app.py
View license
@pytest.mark.parametrize("no_templater", (True, False))
@pytest.mark.parametrize("boring_syntax", (True, False))
@pytest.mark.parametrize("add_header", (True, False))
def test_get_new_config(monkeypatch, cliargs_default, template_render,
                        mock_get_content, no_templater, boring_syntax,
                        add_header):
    """Render a new config under every combination of app flags."""
    # Uppercasing makes templater involvement visible in the output.
    template_render.side_effect = lambda text: text.upper()
    mock_get_content.return_value = """\
Compression yes

Host q
    HostName e

    Host b
        HostName lalala
    """

    app = get_app()
    app.no_templater = no_templater
    app.boring_syntax = boring_syntax
    app.add_header = add_header

    if not no_templater and not boring_syntax:
        # Templating combined with the hierarchical Host syntax must fail.
        with pytest.raises(Exception):
            app.get_new_config()
        return

    new_config = app.get_new_config()

    # The templater uppercases everything; without it the text is untouched.
    if no_templater:
        assert "Compression yes" in new_config
    else:
        assert "COMPRESSION YES" in new_config

    # Nested Host entries collapse into "Host qb" only with fancy syntax.
    if boring_syntax:
        assert "Host qb" not in new_config
    else:
        assert "Host qb" in new_config

    # A header comment is prepended exactly when requested.
    assert new_config.startswith("#") == add_header
Example 22

View license
@pytest.mark.parametrize('convert_titles', [True, False])
@pytest.mark.parametrize('use_schema', [True, False])
@pytest.mark.parametrize('root_id,root_id_kwargs', ROOT_ID_PARAMS)
@pytest.mark.parametrize('comment,input_list,expected_output_list', testdata)
def test_unflatten(convert_titles, use_schema, root_id, root_id_kwargs, input_list, expected_output_list, recwarn, comment):
    """Round-trip a sheet through unflatten() and compare against testdata."""
    # root_id_kwargs intentionally takes precedence over convert_titles.
    kwargs = {'convert_titles': convert_titles}
    kwargs.update(root_id_kwargs)

    sheet_rows = [inject_root_id(root_id, row) for row in input_list]
    spreadsheet_input = ListInput(sheets={'custom_main': sheet_rows}, **kwargs)
    spreadsheet_input.read_sheets()

    schema_dict = create_schema(root_id) if use_schema else {"properties": {}}
    parser = SchemaParser(root_schema_dict=schema_dict, root_id=root_id, rollup=True)
    parser.parse()
    spreadsheet_input.parser = parser

    expected = [inject_root_id(root_id, item) for item in expected_output_list]
    if expected == [{}]:
        # An empty row unflattens to nothing, not to an empty dictionary.
        expected = []
    assert list(spreadsheet_input.unflatten()) == expected

    # No warnings expected; unclear which warnings convert_titles produces (TODO).
    if not convert_titles:
        assert recwarn.list == []

Example 23

View license
@pytest.mark.parametrize("enabled", [["node-enabled-1", "node-enabled-2"],
                                     ["node-enabled-1"]])
@pytest.mark.parametrize("disabled", [["node-disabled-1", "node-disabled-2"],
                                      ["node-disabled-1"]])
@pytest.mark.parametrize("node_fqdn", ["node-disabled-1", "node-enabled-1"])
@pytest.mark.parametrize("nodes_in_error_state", [True, False])
@pytest.mark.parametrize("fuel_version", ["7.0", "8.0"])
@pytest.mark.parametrize("instances", [["instance_1", "instance_2"]])
def test_evacuate_host(mocker, enabled, disabled, node_fqdn,
                       nodes_in_error_state, fuel_version, instances):
    env = mock.Mock()
    controller = mock.Mock()
    node = mock.Mock()
    node.env = env
    node.env.data = {"fuel_version": fuel_version}

    mock_get_compute_list = mocker.patch("octane.util.nova.get_compute_lists",
                                         return_value=(enabled, disabled))

    mock_get_one_controller = mocker.patch(
        "octane.util.env.get_one_controller", return_value=controller)

    run_nova_cmd = mocker.patch("octane.util.nova.run_nova_cmd")
    get_node_fqdn_mock = mocker.patch("octane.util.node.get_nova_node_handle",
                                      return_value=node_fqdn)
    mock_is_nova_state = mocker.patch(
        "octane.util.nova.do_nova_instances_exist",
        return_value=nodes_in_error_state)

    get_instances_mock = mocker.patch(
        "octane.util.nova.get_active_instances", return_value=instances)

    mock_waiting = mocker.patch(
        "octane.util.nova.waiting_for_status_completed")

    handler = compute.ComputeUpgrade(node, env, False, False)
    if [node_fqdn] == enabled:
        with pytest.raises(Exception):
            handler.evacuate_host()
        error = True
    elif nodes_in_error_state:
        with pytest.raises(Exception):
            handler.evacuate_host()
        error = True
    else:
        handler.evacuate_host()
        error = False
    nova_calls = []
    if node_fqdn not in disabled:
        nova_calls.append(mock.call(
            ["nova", "service-disable", node_fqdn, "nova-compute"],
            controller, False))
    for instance in instances:
        nova_calls.append(mock.call(
            ["nova", "live-migration", instance], controller, False))
    if error:
        assert not run_nova_cmd.called
        assert not mock_waiting.called
        assert not get_instances_mock.called
    else:
        assert run_nova_cmd.call_args_list == nova_calls
        get_instances_mock.assert_called_once_with(controller, node_fqdn)
        waiting_calls = [mock.call(controller, node_fqdn, "MIGRATING")
                         for i in instances]
        assert waiting_calls == mock_waiting.call_args_list
    if [node_fqdn] == enabled:
        assert not mock_is_nova_state.called
    else:
        if error:
            mock_is_nova_state.assert_called_once_with(
                controller, node_fqdn, "ERROR")
        else:
            assert [
                mock.call(controller, node_fqdn, "ERROR"),
                mock.call(controller, node_fqdn),
            ] == mock_is_nova_state.call_args_list
    get_node_fqdn_mock.assert_called_once_with(node)
    mock_get_compute_list.assert_called_once_with(controller)
    mock_get_one_controller.assert_called_once_with(env)

Example 24

Project: fuel-octane
Source File: test_upgrade_ceph.py
View license
@pytest.mark.parametrize("fsid", ["fsid_value"])
@pytest.mark.parametrize("conf_file", ["/conf/file/path"])
@pytest.mark.parametrize("edit_conf,expected_conf", [(
    [
        "[global]\n",
        "fsid = 2f496dc5-f9df-4c03-9dd6-f7dd5997bd4b\n",
        "mon_initial_members = node-1 node-3 node-2\n",
        "mon_host = 10.21.7.3 10.21.7.5 10.21.7.4\n",
        "auth_cluster_required = cephx\n",
        "auth_service_required = cephx\n",
        "auth_client_required = cephx\n",
        "filestore_xattr_use_omap = true\n",
        "log_to_syslog_level = info\n",
        "log_to_syslog = True\n",
        "osd_pool_default_size = 2\n",
        "osd_pool_default_min_size = 1\n",
        "log_file = /var/log/ceph/radosgw.log\n",
        "osd_pool_default_pg_num = 128\n",
        "public_network = 10.21.7.0/24\n",
        "log_to_syslog_facility = LOG_LOCAL0\n",
        "osd_journal_size = 2048\n",
        "auth_supported = cephx\n",
        "osd_pool_default_pgp_num = 128\n",
        "osd_mkfs_type = xfs\n",
        "cluster_network = 10.21.9.0/24\n",
        "osd_recovery_max_active = 1\n",
        "osd_max_backfills = 1\n",
        "\n",
        "\n",
        "[client]\n",
        "rbd cache writethrough until flush = True\n",
        "rbd cache = True\n",
        "rbd_cache_writethrough_until_flush = True\n",
        "rbd_cache = True",
    ],
    [

        "[global]\n",
        "fsid = {fsid_value}\n",
        "mon_initial_members = node-1 node-3 node-2\n",
        "mon_host = 10.21.7.3 10.21.7.5 10.21.7.4\n",
        "auth_cluster_required = cephx\n",
        "auth_service_required = cephx\n",
        "auth_client_required = cephx\n",
        "filestore_xattr_use_omap = true\n",
        "log_to_syslog_level = info\n",
        "log_to_syslog = True\n",
        "osd_pool_default_size = 2\n",
        "osd_pool_default_min_size = 1\n",
        "log_file = /var/log/ceph/radosgw.log\n",
        "osd_pool_default_pg_num = 128\n",
        "public_network = 10.21.7.0/24\n",
        "log_to_syslog_facility = LOG_LOCAL0\n",
        "osd_journal_size = 2048\n",
        "auth_supported = cephx\n",
        "osd_pool_default_pgp_num = 128\n",
        "osd_mkfs_type = xfs\n",
        "cluster_network = 10.21.9.0/24\n",
        "osd_recovery_max_active = 1\n",
        "osd_max_backfills = 1\n",
        "\n",
        "\n",
        "[client]\n",
        "rbd cache writethrough until flush = True\n",
        "rbd cache = True\n",
        "rbd_cache_writethrough_until_flush = True\n",
        "rbd_cache = True",
    ]
)])
def test_change_fsid(mocker, node, fsid, edit_conf, expected_conf, conf_file):
    sftp_mock = mocker.patch("octane.util.ssh.sftp")
    new_mock = mock.Mock()
    update_file_mock = mocker.patch("octane.util.ssh.update_file")
    update_file_mock.return_value.__enter__.return_value = (
        edit_conf, new_mock)
    upgrade_ceph.change_fsid(conf_file, node, fsid)
    write_calls = [mock.call(l.format(fsid_value=fsid)) for l in expected_conf]
    assert write_calls == new_mock.write.call_args_list
    sftp_mock.assert_called_once_with(node)
    update_file_mock.assert_called_once_with(sftp_mock.return_value, conf_file)

Example 25

Project: vdirsyncer
Source File: test_sync.py
View license
@pytest.mark.parametrize('partial_sync', ['error', 'ignore', 'revert', None])
def test_partial_sync(tmpdir, runner, partial_sync):
    """Deleting an item that is mirrored from a read-only storage.

    With partial_sync = "error" the sync must abort with a message; with
    "ignore" the read-only copy is simply kept; with "revert" (and the
    default, unset) the deleted item is restored on the writable side.
    """
    runner.write_with_general(dedent('''
    [pair foobar]
    a = "foo"
    b = "bar"
    collections = null
    {partial_sync}

    [storage foo]
    type = "filesystem"
    fileext = ".txt"
    path = "{base}/foo"

    [storage bar]
    type = "filesystem"
    read_only = true
    fileext = ".txt"
    path = "{base}/bar"
    '''.format(
        partial_sync=('partial_sync = {}\n'.format(partial_sync)
                      if partial_sync else ''),
        base=str(tmpdir)
    )))

    foo = tmpdir.mkdir('foo')
    bar = tmpdir.mkdir('bar')

    foo.join('other.txt').write('UID:other')
    bar.join('other.txt').write('UID:other')

    # item that exists only on the read-only side initially
    baritem = bar.join('lol.txt')
    baritem.write('UID:lol')

    r = runner.invoke(['discover'])
    assert not r.exception

    # first sync copies lol.txt over to the writable side
    r = runner.invoke(['sync'])
    assert not r.exception

    # delete the writable copy; syncing would have to propagate the
    # deletion to the read-only storage, which is not allowed
    fooitem = foo.join('lol.txt')
    fooitem.remove()

    r = runner.invoke(['sync'])

    if partial_sync == 'error':
        assert r.exception
        assert 'Attempted change' in r.output
    elif partial_sync == 'ignore':
        assert baritem.exists()
        r = runner.invoke(['sync'])
        assert not r.exception
        assert baritem.exists()
    else:
        # 'revert' and the default: the deleted item reappears in foo
        assert baritem.exists()
        r = runner.invoke(['sync'])
        assert not r.exception
        assert baritem.exists()
        assert fooitem.exists()
Example 26

Project: ethereum-datetime
Source File: test_minute.py
View license
# (timestamp, expected minute) pairs covering the first and last second of
# every minute of an hour, plus the boundaries just outside it.
@pytest.mark.parametrize(
    'timestamp,minute',
    (
        (63071999, 59),
        (63072000, 0),
        (63072059, 0),
        (63072060, 1),
        (63072119, 1),
        (63072120, 2),
        (63072179, 2),
        (63072180, 3),
        (63072239, 3),
        (63072240, 4),
        (63072299, 4),
        (63072300, 5),
        (63072359, 5),
        (63072360, 6),
        (63072419, 6),
        (63072420, 7),
        (63072479, 7),
        (63072480, 8),
        (63072539, 8),
        (63072540, 9),
        (63072599, 9),
        (63072600, 10),
        (63072659, 10),
        (63072660, 11),
        (63072719, 11),
        (63072720, 12),
        (63072779, 12),
        (63072780, 13),
        (63072839, 13),
        (63072840, 14),
        (63072899, 14),
        (63072900, 15),
        (63072959, 15),
        (63072960, 16),
        (63073019, 16),
        (63073020, 17),
        (63073079, 17),
        (63073080, 18),
        (63073139, 18),
        (63073140, 19),
        (63073199, 19),
        (63073200, 20),
        (63073259, 20),
        (63073260, 21),
        (63073319, 21),
        (63073320, 22),
        (63073379, 22),
        (63073380, 23),
        (63073439, 23),
        (63073440, 24),
        (63073499, 24),
        (63073500, 25),
        (63073559, 25),
        (63073560, 26),
        (63073619, 26),
        (63073620, 27),
        (63073679, 27),
        (63073680, 28),
        (63073739, 28),
        (63073740, 29),
        (63073799, 29),
        (63073800, 30),
        (63073859, 30),
        (63073860, 31),
        (63073919, 31),
        (63073920, 32),
        (63073979, 32),
        (63073980, 33),
        (63074039, 33),
        (63074040, 34),
        (63074099, 34),
        (63074100, 35),
        (63074159, 35),
        (63074160, 36),
        (63074219, 36),
        (63074220, 37),
        (63074279, 37),
        (63074280, 38),
        (63074339, 38),
        (63074340, 39),
        (63074399, 39),
        (63074400, 40),
        (63074459, 40),
        (63074460, 41),
        (63074519, 41),
        (63074520, 42),
        (63074579, 42),
        (63074580, 43),
        (63074639, 43),
        (63074640, 44),
        (63074699, 44),
        (63074700, 45),
        (63074759, 45),
        (63074760, 46),
        (63074819, 46),
        (63074820, 47),
        (63074879, 47),
        (63074880, 48),
        (63074939, 48),
        (63074940, 49),
        (63074999, 49),
        (63075000, 50),
        (63075059, 50),
        (63075060, 51),
        (63075119, 51),
        (63075120, 52),
        (63075179, 52),
        (63075180, 53),
        (63075239, 53),
        (63075240, 54),
        (63075299, 54),
        (63075300, 55),
        (63075359, 55),
        (63075360, 56),
        (63075419, 56),
        (63075420, 57),
        (63075479, 57),
        (63075480, 58),
        (63075539, 58),
        (63075540, 59),
        (63075599, 59),
        (63075600, 0),
    ),
)
def test_get_minute_from_timestamp(deployed_contracts, timestamp, minute):
    """The contract's getMinute must return the minute component of the timestamp."""
    crontab = deployed_contracts.DateTime
    assert crontab.getMinute(timestamp) == minute

Example 27

Project: ethereum-datetime
Source File: test_second.py
View license
# (timestamp, expected second) pairs covering a full minute, plus the
# boundary seconds just before and after it.
@pytest.mark.parametrize(
    'timestamp,second',
    (
        (63071999, 59),
        (63072000, 0),
        (63072001, 1),
        (63072002, 2),
        (63072003, 3),
        (63072004, 4),
        (63072005, 5),
        (63072006, 6),
        (63072007, 7),
        (63072008, 8),
        (63072009, 9),
        (63072010, 10),
        (63072011, 11),
        (63072012, 12),
        (63072013, 13),
        (63072014, 14),
        (63072015, 15),
        (63072016, 16),
        (63072017, 17),
        (63072018, 18),
        (63072019, 19),
        (63072020, 20),
        (63072021, 21),
        (63072022, 22),
        (63072023, 23),
        (63072024, 24),
        (63072025, 25),
        (63072026, 26),
        (63072027, 27),
        (63072028, 28),
        (63072029, 29),
        (63072030, 30),
        (63072031, 31),
        (63072032, 32),
        (63072033, 33),
        (63072034, 34),
        (63072035, 35),
        (63072036, 36),
        (63072037, 37),
        (63072038, 38),
        (63072039, 39),
        (63072040, 40),
        (63072041, 41),
        (63072042, 42),
        (63072043, 43),
        (63072044, 44),
        (63072045, 45),
        (63072046, 46),
        (63072047, 47),
        (63072048, 48),
        (63072049, 49),
        (63072050, 50),
        (63072051, 51),
        (63072052, 52),
        (63072053, 53),
        (63072054, 54),
        (63072055, 55),
        (63072056, 56),
        (63072057, 57),
        (63072058, 58),
        (63072059, 59),
        (63072060, 0),
    ),
)
def test_get_second_from_timestamp(deployed_contracts, timestamp, second):
    """The contract's getSecond must return the second component of the timestamp."""
    crontab = deployed_contracts.DateTime
    assert crontab.getSecond(timestamp) == second

Example 28

Project: ethereum-datetime
Source File: test_year.py
View license
def _year_start(year):
    """Unix timestamp (UTC) of 00:00:00 on 1 January of *year* (>= 1970).

    Uses the Gregorian leap-year rule: a year is leap when divisible by 4,
    except centuries, which are leap only when divisible by 400.
    """
    # Leap days that occurred strictly before *year* but on/after 1970;
    # 477 == leap years strictly before 1970 (1969//4 - 1969//100 + 1969//400).
    leap_days = (year - 1) // 4 - (year - 1) // 100 + (year - 1) // 400 - 477
    return ((year - 1970) * 365 + leap_days) * 86400


@pytest.mark.parametrize(
    'timestamp,year',
    # The epoch itself, then for every year 1971..2399 two boundary probes:
    # the first second of the year and the last second of the previous year.
    # This reproduces the original hand-written table of 859 pairs, e.g.
    # (0, 1970), (31536000, 1971), (31535999, 1970), ...,
    # (13537929600, 2399), (13537929599, 2398).
    [(0, 1970)] + [
        pair
        for y in range(1971, 2400)
        for pair in ((_year_start(y), y), (_year_start(y) - 1, y - 1))
    ],
)
def test_get_year_from_timestamp(deployed_contracts, timestamp, year):
    """getYear must map a Unix timestamp to its UTC calendar year."""
    # ``deployed_contracts`` is a pytest fixture exposing the deployed
    # DateTime contract under test.
    crontab = deployed_contracts.DateTime
    assert crontab.getYear(timestamp) == year

Example 29

View license
# (id, value) pairs inserted to build the tree under test.  This data was
# previously only present as a commented-out block inside the parametrize
# call, leaving ``tree_nodes`` undefined at function run time (NameError).
tree_nodes = (
    ('a', 8),
    ('b', 6),
    ('c', 7),
    ('d', 10),
    ('e', 2),
    ('f', 12),
    ('g', 3),
    ('h', 15),
    ('i', 13),
    ('j', 5),
    ('k', 19),
    ('l', 11),
    ('m', 17),
    ('n', 0),
    ('o', 4),
    ('p', 14),
    ('q', 18),
    ('r', 9),
    ('s', 16),
    ('t', 1),
)


@pytest.mark.parametrize(
    'ids_to_remove,expected_states',
    (
        # id, value, parent, left, right, height
        # Leaf Nodes
        (
            ['n'], {
                ('t', 1, 'g', None, 'e', 2),
            }
        ),
        (
            ['l'], {
                ('f', 12, 'd', None, None, 1),
            }
        ),
        (
            ['q'], {
                ('k', 19, 'm', None, None, 1),
            }
        ),
        (
            ['b'], {
                ('j', 5, 'g', 'o', None, 2),
            }
        ),
        # One from bottom.
        (
            ['t'], {
                ('n', 0, 'g', None, 'e', 2),
                ('e', 2, 'n', None, None, 1),
                ('g', 3, 'c', 'n', 'j', 3),
            }
        ),
        (
            ['a'], {
                ('r', 9, 'd', None, None, 1),
                ('d', 10, 'i', 'r', 'f', 3),
            }
        ),
        (
            ['a', 'r'], {
                ('l', 11, 'i', 'd', 'f', 2),
                ('d', 10, 'l', None, None, 1),
                ('f', 12, 'l', None, None, 1),
                ('i', 13, 'c', 'l', 'm', 4),
            }
        ),
        (
            ['f'], {
                ('l', 11, 'd', None, None, 1),
                ('d', 10, 'i', 'a', 'l', 3),
            }
        ),
        (
            ['f', 'l'], {
                ('r', 9, 'i', 'a', 'd', 2),
                ('a', 8, 'r', None, None, 1),
                ('d', 10, 'r', None, None, 1),
                ('i', 13, 'c', 'r', 'm', 4),
            }
        ),
        # Mid tree
        (
            ['g'], {
                ('e', 2, 'c', 't', 'j', 3),
                ('t', 1, 'e', 'n', None, 2),
                ('j', 5, 'e', 'o', 'b', 2),
                ('c', 7, None, 'e', 'i', 5),
            }
        ),
        (
            ['i'], {
                ('f', 12, 'c', 'd', 'm', 4),
                ('d', 10, 'f', 'a', 'l', 3),
                ('l', 11, 'd', None, None, 1),
                ('m', 17, 'f', 'h', 'k', 3),
                ('c', 7, None, 'g', 'f', 5),
            }
        ),
        # Root
        (
            ['c'], {
                ('b', 6, None, 'g', 'i', 5),
                ('j', 5, 'g', 'o', None, 2),
                ('g', 3, 'b', 't', 'j', 3),
                ('i', 13, 'b', 'd', 'm', 4),
            }
        ),
    ),
)
def test_removing_nodes(deploy_coinbase, deployed_contracts, ids_to_remove, expected_states):
    """Removing nodes from a Grove index rebalances the tree as expected.

    Builds an index from ``tree_nodes``, removes *ids_to_remove* one by one
    (checking existence flips from True to False each time), then verifies
    that every node listed in *expected_states* reports the expected
    ``(id, value, parent, left, right, height)`` tuple.
    """
    # Unique index name per case so parametrized runs don't share state.
    index_name = "test-{0}".format(''.join(ids_to_remove))

    grove = deployed_contracts.Grove

    for _id, value in tree_nodes:
        grove.insert(index_name, _id, value)

    index_id = grove.computeIndexId(deploy_coinbase, index_name)

    for _id in ids_to_remove:
        # removed: unused ``node_id = grove.computeNodeId(...)`` assignment
        assert grove.exists(index_id, _id) is True
        grove.remove(index_name, _id)
        assert grove.exists(index_id, _id) is False

    actual_states = set()

    for _id, _, _, _, _, _ in expected_states:
        node_id = grove.computeNodeId(index_id, _id)
        value = grove.getNodeValue(node_id)
        parent = grove.getNodeParent(node_id)
        left = grove.getNodeLeftChild(node_id)
        right = grove.getNodeRightChild(node_id)
        height = grove.getNodeHeight(node_id)

        actual_states.add((_id, value, parent, left, right, height))

    assert expected_states == actual_states

Example 30

Project: ethereum-grove
Source File: test_querying.py
View license
@pytest.mark.parametrize(
    'operator,value,expected',
    (
        # expected is (value, parent, left, right, height) or None when the
        # query should find no matching node.
        ("==", 7, (7, None, 4, 16, 6)),
        ("==", 6, (6, 4, 5, 6, 2)),
        ("==", 11, (11, 11, 7, None, 2)),
        ("==", 17, (17, 16, 16, 18, 4)),
        ("==", 15, None),
        ("==", 2, None),
        ("==", 8, None),
        # LT
        ("<", 4, (3, 3, None, None, 1)),
        ("<", 5, (4, 7, 3, 6, 4)),
        ("<", 8, (7, 11, None, None, 1)),
        ("<", 14, (13, 14, None, None, 1)),
        ("<", 16, (14, 12, 13, None, 2)),
        ("<", 17, (16, 16, None, None, 1)),
        ("<", 18, (17, 17, None, None, 1)),
        ("<", -1, None),
        ("<", 0, None),
        # GT
        (">", 6, (7, None, 4, 16, 6)),
        (">", 3, (4, 7, 3, 6, 4)),
        (">", 16, (17, 16, 16, 18, 4)),
        (">", 18, None),
        (">", 19, None),
        # LTE
        ("<=", 4, (4, 7, 3, 6, 4)),
        ("<=", 5, (5, 6, None, None, 1)),
        ("<=", 8, (7, 11, None, None, 1)),
        ("<=", 7, (7, 11, None, None, 1)),
        ("<=", 6, (6, 6, None, None, 1)),
        ("<=", 16, (16, 16, None, None, 1)),
        ("<=", 15, (14, 12, 13, None, 2)),
        ("<=", 0, (0, 1, None, None, 1)),
        ("<=", 18, (18, 18, None, None, 1)),
        ("<=", 19, (18, 18, None, None, 1)),
        ("<=", -1, None),
        # GTE
        (">=", 0, (0, 1, None, None, 1)),
        (">=", 3, (3, 4, 1, 3, 3)),
        (">=", 13, (13, 14, None, None, 1)),
        (">=", 14, (14, 12, 13, None, 2)),
        (">=", 15, (16, 7, 11, 17, 5)),
        (">=", 17, (17, 16, 16, 18, 4)),
        (">=", 18, (18, 17, 17, 18, 3)),
        (">=", 19, None),
    )
)
def test_tree_querying(deploy_coinbase, big_tree, operator, value, expected):
    """Querying the tree with each comparison operator finds the right node."""
    index_id = big_tree.computeIndexId(deploy_coinbase, "test-querying")

    def value_of(child_id):
        # Absent children/parents come back as None and stay None.
        if child_id is None:
            return None
        return big_tree.getNodeValue(big_tree.computeNodeId(index_id, child_id))

    match_id = big_tree.query(index_id, operator, value)

    # Guard clause: a miss must have been predicted as a miss.
    if match_id is None:
        assert expected is None
        return

    node_id = big_tree.computeNodeId(index_id, match_id)
    observed = (
        big_tree.getNodeValue(node_id),
        value_of(big_tree.getNodeParent(node_id)),
        value_of(big_tree.getNodeLeftChild(node_id)),
        value_of(big_tree.getNodeRightChild(node_id)),
        big_tree.getNodeHeight(node_id),
    )
    assert observed == expected

Example 31

View license
@pytest.mark.parametrize(
    'value,expected_imports,expected_value',
    (
        # Bare Operation: serializes to a no-argument constructor call.
        (
            Operation(),
            {'populus.migrations.operations'},
            "populus.migrations.operations.Operation()\n",
        ),
        # SendTransaction with an empty transaction dict.
        (
            SendTransaction({}),
            {'populus.migrations.operations'},
            """populus.migrations.operations.SendTransaction(
    transaction={},
)\n""",
        ),
        # Deferred Address inside the transaction adds the 'deferred'
        # module to the expected import set.
        (
            SendTransaction({
                'to': Address.defer(key='contracts/Math'),
            }, timeout=1234),
            {'populus.migrations.operations', 'populus.migrations.deferred'},
            """populus.migrations.operations.SendTransaction(
    timeout=1234,
    transaction={
        'to': populus.migrations.deferred.Address.defer(
            key='contracts/Math',
        ),
    },
)\n""",
        ),
        # TransactContract with every keyword populated; note the expected
        # output lists keyword arguments in sorted order.
        (
            TransactContract(
                contract_name='Math',
                method_name='increment',
                arguments=[3],
                contract_address=Address.defer(key='contracts/Math'),
                timeout=1234,
            ),
            {'populus.migrations.operations', 'populus.migrations.deferred'},
            """populus.migrations.operations.TransactContract(
    arguments=[
        3,
    ],
    contract_address=populus.migrations.deferred.Address.defer(
        key='contracts/Math',
    ),
    contract_name='Math',
    method_name='increment',
    timeout=1234,
    transaction={},
)\n""",
        ),
    ),
)
def test_serialization_of_operation_instances(value, expected_imports,
                                              expected_value):
    """serialize_deconstructable must emit both the source text that would
    recreate *value* and the set of modules that source needs to import."""
    actual_imports, actual_value = serialize_deconstructable(value)

    assert actual_value == expected_value
    assert actual_imports == expected_imports

Example 32

Project: web3.py
Source File: test_formatter.py
View license
@pytest.mark.parametrize(
    "value,expected",
    # Each pair is (raw transaction dict, formatted dict).  Across the cases:
    # integer fields (value/nonce/gas/gasPrice) are hex-encoded, bare
    # 40-char addresses gain an '0x' prefix, and ICAP-style 'XE..' addresses
    # are resolved to their hex form.  Optional keys absent from the input
    # stay absent from the output.
    [({
            "data": '0x34234bf23bf4234',
            "value": 100,
            "from": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "to": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "nonce": 1000,
            "gas": 1000,
            "gasPrice": 1000
        },
        {
            "data": '0x34234bf23bf4234',
            "value": hex(100),
            "from": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "to": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "nonce": hex(1000),
            "gas": hex(1000),
            "gasPrice": hex(1000),
        }
    ),({
            "data": '0x34234bf23bf4234',
            "value": 100,
            "from": '00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "to": '00c5496aee77c1ba1f0854206a26dda82a81d6d8',
        },
        {
            "data": '0x34234bf23bf4234',
            "value": hex(100),
            "from": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "to": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
        }
    ),({
            "data": '0x34234bf23bf4234',
            "value": 100,
            "from": '00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "to": '00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "gas": 1000,
            "gasPrice": 1000
        },
        {
            "data": '0x34234bf23bf4234',
            "value": hex(100),
            "from": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "to": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "gas": hex(1000),
            "gasPrice": hex(1000),
        },
    ), ({
            "data": '0x34234bf23bf4234',
            "value": 100,
            "from": 'XE7338O073KYGTWWZN0F2WZ0R8PX5ZPPZS',
            "to": 'XE7338O073KYGTWWZN0F2WZ0R8PX5ZPPZS',
            "gas": 1000,
            "gasPrice": 1000
        },
        {
            "data": '0x34234bf23bf4234',
            "value": hex(100),
            "from": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "to": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "gas": hex(1000),
            "gasPrice": hex(1000),
        },
    ), ({
            "data": '0x34234bf23bf4234',
            "value": 100,
            "from": 'XE7338O073KYGTWWZN0F2WZ0R8PX5ZPPZS',
            "gas": 1000,
            "gasPrice": 1000
        },
        {
            "data": '0x34234bf23bf4234',
            "value": hex(100),
            "from": '0x00c5496aee77c1ba1f0854206a26dda82a81d6d8',
            "gas": hex(1000),
            "gasPrice": hex(1000),
        }
    )]
)
def test_input_transaction_formatter(web3_tester, value, expected):
    """input_transaction_formatter must normalize a raw transaction dict."""
    assert formatters.input_transaction_formatter(web3_tester.eth, value) == expected

Example 33

Project: web3.py
Source File: test_formatter.py
View license
@pytest.mark.parametrize(
    "value,expected",
    [
        # Fully-populated block: hex quantity strings (difficulty, number,
        # gas fields, timestamp, size) are decoded to ints; hash-like hex
        # strings pass through unchanged.
        ({
                "hash": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "parentHash": '0x83ffb245cfced97ccc5c75253d6960376d6c6dea93647397a543a72fdaea5265',
                "miner": '0xdcc6960376d6c6dea93647383ffb245cfced97cf',
                "stateRoot": '0x54dda68af07643f68739a6e9612ad157a26ae7e2ce81f77842bb5835fbcde583',
                "sha3Uncles": '0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347',
                "bloom": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "difficulty": '0x3e8',
                "totalDifficulty": '0x3e8',
                "number": '0x3e8',
                "gasLimit": '0x3e8',
                "gasUsed": '0x3e8',
                "timestamp": '0x3e8',
                "extraData": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "nonce": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "size": '0x3e8'
            }, {
                "hash": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "parentHash": '0x83ffb245cfced97ccc5c75253d6960376d6c6dea93647397a543a72fdaea5265',
                "miner": '0xdcc6960376d6c6dea93647383ffb245cfced97cf',
                "stateRoot": '0x54dda68af07643f68739a6e9612ad157a26ae7e2ce81f77842bb5835fbcde583',
                "sha3Uncles": '0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347',
                "bloom": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "difficulty": 1000,
                "totalDifficulty": 1000,
                "number": 1000,
                "gasLimit": 1000,
                "gasUsed": 1000,
                "timestamp": 1000,
                "extraData": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "nonce": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "size": 1000
        }),

        # Pending block: None in hash/miner/number/nonce (unmined) must be
        # preserved as None, not decoded.
        ({
                "hash": None,
                "parentHash": '0x83ffb245cfced97ccc5c75253d6960376d6c6dea93647397a543a72fdaea5265',
                "miner": None,
                "stateRoot": '0x54dda68af07643f68739a6e9612ad157a26ae7e2ce81f77842bb5835fbcde583',
                "sha3Uncles": '0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347',
                "bloom": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "difficulty": '0x3e8',
                "totalDifficulty": '0x3e8',
                "number": None,
                "gasLimit": '0x3e8',
                "gasUsed": '0x3e8',
                "timestamp": '0x3e8',
                "extraData": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "nonce": None,
                "size": '0x3e8'
            }, {
                "hash": None,
                "parentHash": '0x83ffb245cfced97ccc5c75253d6960376d6c6dea93647397a543a72fdaea5265',
                "miner": None,
                "stateRoot": '0x54dda68af07643f68739a6e9612ad157a26ae7e2ce81f77842bb5835fbcde583',
                "sha3Uncles": '0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347',
                "bloom": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "difficulty": 1000,
                "totalDifficulty": 1000,
                "number": None,
                "gasLimit": 1000,
                "gasUsed": 1000,
                "timestamp": 1000,
                "extraData": '0xd6960376d6c6dea93647383ffb245cfced97ccc5c7525397a543a72fdaea5265',
                "nonce": None,
                "size": 1000
        })
    ]
)
def test_output_block_formatter(value, expected):
    """output_block_formatter must decode RPC block dicts to native types."""
    assert formatters.output_block_formatter(value) == expected

Example 34

Project: ploy
Source File: test_common.py
View license
@pytest.mark.parametrize("text, keyinfo", [
    (
        textwrap.dedent("""
            ec2: -----BEGIN SSH HOST KEY FINGERPRINTS-----
            ec2: 2048 a6:7f:6a:a5:8a:7c:26:45:46:ca:d9:d9:8c:f2:64:27 /etc/ssh/ssh_host_key.pub
            ec2: 2048 b6:57:b7:52:4e:36:94:ab:9c:ec:a1:b3:56:71:80:e0 /etc/ssh/ssh_host_rsa_key.pub
            ec2: 1024 62:47:49:82:83:9a:d8:1d:b8:c6:8f:dd:4d:d8:9a:2e /etc/ssh/ssh_host_dsa_key.pub
            ec2: -----END SSH HOST KEY FINGERPRINTS-----
            """),
        [
            SSHKeyFingerprint(keylen=2048, keytype='rsa1', fingerprint='a6:7f:6a:a5:8a:7c:26:45:46:ca:d9:d9:8c:f2:64:27'),
            SSHKeyFingerprint(keylen=2048, keytype='rsa', fingerprint='b6:57:b7:52:4e:36:94:ab:9c:ec:a1:b3:56:71:80:e0'),
            SSHKeyFingerprint(keylen=1024, keytype='dsa', fingerprint='62:47:49:82:83:9a:d8:1d:b8:c6:8f:dd:4d:d8:9a:2e')]),
    (
        textwrap.dedent("""
            -----BEGIN SSH HOST KEY FINGERPRINTS-----
            2048 2e:68:49:26:49:07:67:31:f1:33:92:18:09:c3:6a:ae /etc/ssh/ssh_host_rsa_key.pub (RSA)
            1024 4b:99:0e:4a:a4:3e:b4:e5:ef:42:5e:43:07:93:91:a0 /etc/ssh/ssh_host_dsa_key.pub (DSA)
            -----END SSH HOST KEY FINGERPRINTS-----
            """),
        [
            SSHKeyFingerprint(keylen=2048, keytype='rsa', fingerprint='2e:68:49:26:49:07:67:31:f1:33:92:18:09:c3:6a:ae'),
            SSHKeyFingerprint(keylen=1024, keytype='dsa', fingerprint='4b:99:0e:4a:a4:3e:b4:e5:ef:42:5e:43:07:93:91:a0')]),
    (
        textwrap.dedent("""
            2048 MD5:cd:be:b8:a2:57:bf:71:5c:ed:14:b8:27:e8:e1:4a:a6 ~/.ssh/id_dsa.pub (DSA)
            2048 SHA256:maRuD3fpz+6JXV5RZK/g5/rToUH9XrxyKgl7yewS6ZY ~/.ssh/id_dsa.pub (DSA)
            """),
        [
            SSHKeyFingerprint(keylen=2048, keytype='dsa', fingerprint='cd:be:b8:a2:57:bf:71:5c:ed:14:b8:27:e8:e1:4a:a6'),
            SSHKeyFingerprint(keylen=2048, keytype='dsa', fingerprint='SHA256:maRuD3fpz+6JXV5RZK/g5/rToUH9XrxyKgl7yewS6ZY')]),
    (
        textwrap.dedent("""
            ec2: #############################################################
            ec2: -----BEGIN SSH HOST KEY FINGERPRINTS-----
            ec2: 1024 7b:0d:a3:0d:9e:fc:f3:97:bb:a8:d2:1d:05:3f:d5:f9  [email protected] (DSA)
            ec2: 256 96:c6:3c:47:7b:11:eb:8a:ca:78:ed:20:d6:21:f2:b7  [email protected] (ECDSA)
            ec2: 256 56:0f:1a:4d:cc:66:0a:9e:90:d5:1d:98:3a:03:ef:b6  [email protected] (ED25519)
            ec2: 2048 b6:8a:43:51:72:af:49:88:a5:d6:c5:7f:3c:fd:91:70  [email protected] (RSA1)
            ec2: 2048 ef:85:3d:e6:ab:c4:18:88:81:63:08:0f:32:8a:9d:e0  [email protected] (RSA)
            ec2: -----END SSH HOST KEY FINGERPRINTS-----
            ec2: #############################################################
            """),
        [
            SSHKeyFingerprint(keylen=1024, keytype='dsa', fingerprint='7b:0d:a3:0d:9e:fc:f3:97:bb:a8:d2:1d:05:3f:d5:f9'),
            SSHKeyFingerprint(keylen=256, keytype='ecdsa', fingerprint='96:c6:3c:47:7b:11:eb:8a:ca:78:ed:20:d6:21:f2:b7'),
            SSHKeyFingerprint(keylen=256, keytype='ed25519', fingerprint='56:0f:1a:4d:cc:66:0a:9e:90:d5:1d:98:3a:03:ef:b6'),
            SSHKeyFingerprint(keylen=2048, keytype='rsa1', fingerprint='b6:8a:43:51:72:af:49:88:a5:d6:c5:7f:3c:fd:91:70'),
            SSHKeyFingerprint(keylen=2048, keytype='rsa', fingerprint='ef:85:3d:e6:ab:c4:18:88:81:63:08:0f:32:8a:9d:e0')])])
def test_parse_ssh_keygen(text, keyinfo):
    """Parse console/ssh-keygen fingerprint output and compare to expected keys.

    Bug fix: the original single `assert all(a.match(b) for a, b in zip(...))`
    passed vacuously when parse_ssh_keygen() returned fewer entries than
    expected (including zero), because zip() truncates to the shorter
    sequence. Assert the counts match before the element-wise comparison.
    """
    parsed = list(parse_ssh_keygen(text))
    # zip() would silently drop unmatched entries; require exact count first.
    assert len(parsed) == len(keyinfo)
    assert all(a.match(b) for a, b in zip(parsed, keyinfo))

Example 35

Project: pre-commit
Source File: run_test.py
View license
@pytest.mark.parametrize(
    ('hook_stage', 'stage_for_first_hook', 'stage_for_second_hook',
     'expected_output'),
    (
        ('push', ['commit'], ['commit'], [b'', b'']),
        ('push', ['commit', 'push'], ['commit', 'push'],
         [b'hook 1', b'hook 2']),
        ('push', [], [], [b'hook 1', b'hook 2']),
        ('push', [], ['commit'], [b'hook 1', b'']),
        ('push', ['push'], ['commit'], [b'hook 1', b'']),
        ('push', ['commit'], ['push'], [b'', b'hook 2']),
        ('commit', ['commit', 'push'], ['commit', 'push'],
         [b'hook 1', b'hook 2']),
        ('commit', ['commit'], ['commit'], [b'hook 1', b'hook 2']),
        ('commit', [], [], [b'hook 1', b'hook 2']),
        ('commit', [], ['commit'], [b'hook 1', b'hook 2']),
        ('commit', ['push'], ['commit'], [b'', b'hook 2']),
        ('commit', ['commit'], ['push'], [b'hook 1', b'']),
    )
)
def test_local_hook_for_stages(
        repo_with_passing_hook, mock_out_store_directory,
        stage_for_first_hook,
        stage_for_second_hook,
        hook_stage,
        expected_output
):
    """Run two local hooks restricted to the given git stages and verify
    which of them produce output when running at `hook_stage`."""
    # First hook: flake8 over python files, limited to `stage_for_first_hook`.
    flake8_hook = OrderedDict((
        ('id', 'flake8'),
        ('name', 'hook 1'),
        ('entry', 'python -m flake8.__main__'),
        ('language', 'system'),
        ('files', r'\.py$'),
        ('stages', stage_for_first_hook),
    ))
    # Second hook: pcre grep for a forbidden marker, limited to
    # `stage_for_second_hook`.
    do_not_commit_hook = OrderedDict((
        ('id', 'do_not_commit'),
        ('name', 'hook 2'),
        ('entry', 'DO NOT COMMIT'),
        ('language', 'pcre'),
        ('files', '^(.*)$'),
        ('stages', stage_for_second_hook),
    ))
    add_config_to_repo(
        repo_with_passing_hook,
        OrderedDict((
            ('repo', 'local'),
            ('hooks', (flake8_hook, do_not_commit_hook)),
        )),
    )

    # Stage a file matching both hooks' `files` patterns so each hook that is
    # active for this stage has something to run on.
    with io.open('dummy.py', 'w') as staged_file:
        staged_file.write('"""TODO: something"""\n')
    cmd_output('git', 'add', 'dummy.py')

    _test_run(
        repo_with_passing_hook,
        {'hook_stage': hook_stage},
        expected_outputs=expected_output,
        expected_ret=0,
        stage=False,
    )

Example 36

Project: mrq
Source File: test_interrupts.py
View license
@pytest.mark.parametrize(["p_flags"], PROCESS_CONFIGS)
def test_interrupt_worker_sigkill(worker, p_flags):
    """ Test what happens when we interrupt a running worker with 1 SIGKILL.

        SIGKILLs can't be intercepted by the process so the job should still be in 'started' state.
        Afterwards, verify that the RequeueStartedJobs cleaning task requeues the
        job only once its (faked) datestarted exceeds the given timeout.
    """

    start_time = time.time()

    worker.start(
        flags=p_flags + " --config tests/fixtures/config-shorttimeout.py")

    # Sanity-check that the worker picked up the short-timeout config.
    cfg = json.loads(
        worker.send_task("tests.tasks.general.GetConfig", {}, block=True))

    assert cfg["tasks"]["tests.tasks.general.Add"]["timeout"] == 200

    job_id = worker.send_task(
        "tests.tasks.general.Add", {"a": 41, "b": 1, "sleep": 10}, block=False)

    time.sleep(1)

    # SIGKILL the worker while the job is sleeping.
    worker.stop(block=True, sig=9, deps=False)

    time.sleep(1)

    # This is a bit tricky, but when getting the job from the current python environment, its timeout should
    # be the default 3600 and not 200 because we didn't configure ourselves
    # with config-shorttimeout.py
    job = Job(job_id).fetch().data
    assert Job(job_id).fetch().timeout == 3600

    # SIGKILL can't be caught, so the job was never marked failed/interrupted.
    assert job["status"] == "started"

    assert time.time() - start_time < 6

    # Then try the cleaning task that requeues started jobs

    # We need to fake the datestarted
    worker.mongodb_jobs.mrq_jobs.update({"_id": ObjectId(job_id)}, {"$set": {
        "datestarted": datetime.datetime.utcnow() - datetime.timedelta(seconds=300)
    }})

    assert Queue("default").size() == 0

    worker.start(queues="cleaning", deps=False, flush=False,
                 flags=" --config tests/fixtures/config-shorttimeout.py")

    # Timeout larger than the faked job age (300s ago vs 110s? no: 110 < 300
    # would requeue) — per the observed result, 110 here does NOT requeue.
    res = worker.send_task("mrq.basetasks.cleaning.RequeueStartedJobs", {
                           "timeout": 110}, block=True, queue="cleaning")

    assert res["requeued"] == 0
    assert res["started"] == 2  # current job should count too

    assert Queue("default").size() == 0

    job = Job(job_id).fetch().data
    assert job["status"] == "started"
    assert job["queue"] == "default"

    # Now do it again with a small enough timeout
    res = worker.send_task("mrq.basetasks.cleaning.RequeueStartedJobs", {
                           "timeout": 90}, block=True, queue="cleaning")

    assert res["requeued"] == 1
    assert res["started"] == 2  # current job should count too
    assert Queue("default").size() == 1

    # BUG FIX: this comparison was a bare expression (no `assert`) in the
    # original, so it could never fail the test.
    assert Queue("default").list_job_ids() == [str(job_id)]

    job = Job(job_id).fetch().data
    assert job["status"] == "queued"
    assert job["queue"] == "default"

Example 37

Project: mrq
Source File: test_jobaction.py
View license
@pytest.mark.parametrize(["p_query"], OPTS)
def test_cancel_by_path(worker, p_query):
    """Cancel queued jobs matched by a JobAction query, then requeue them.

    `p_query` is a (query_params, expected_count) pair: the query selects a
    subset of the jobs sent below, and `expected_count` is how many of them
    the cancel/requeue actions are expected to touch.
    """

    expected_action_jobs = p_query[1]

    # Start the worker with only one greenlet so that tasks execute
    # sequentially
    worker.start(flags="--greenlets 1", queues="default q1 q2")

    job_ids = []
    # First job runs immediately (sleep 2) while the rest are still queued.
    job_ids.append(worker.send_task("tests.tasks.general.Add", {
                   "a": 41, "b": 1, "sleep": 2}, queue="default", block=False))

    params = {
        "action": "cancel",
        "status": "queued"
    }
    params.update(p_query[0])

    # JobAction cancels whatever queued jobs match the query at execution time.
    requeue_job = worker.send_task(
        "mrq.basetasks.utils.JobAction", params, block=False)

    job_ids.append(worker.send_task(
        "tests.tasks.general.MongoInsert", {"a": 42}, queue="q1", block=False))
    job_ids.append(worker.send_task(
        "tests.tasks.general.MongoInsert", {"a": 42}, queue="q2", block=False))
    job_ids.append(worker.send_task(
        "tests.tasks.general.MongoInsert", {"a": 43}, queue="q2", block=False))
    job_ids.append(worker.send_task(
        "tests.tasks.general.MongoInsert2", {"a": 44}, queue="q1", block=False))

    Job(job_ids[-1]).wait(poll_interval=0.01)

    # Leave some time to unqueue job_id4 without executing.
    time.sleep(1)
    worker.stop(deps=False)

    jobs = [Job(job_id).fetch().data for job_id in job_ids]

    # The first job was already running, so it must not have been cancelled.
    assert jobs[0]["status"] == "success"
    assert jobs[0]["result"] == 42

    assert Job(requeue_job).fetch().data["result"][
        "cancelled"] == expected_action_jobs

    # Check that the right number of jobs ran.
    assert worker.mongodb_jobs.tests_inserts.count() == len(
        job_ids) - 1 - expected_action_jobs

    # Cancelled jobs must carry no result.
    action_jobs = list(worker.mongodb_jobs.mrq_jobs.find({"status": "cancel"}))
    assert len(action_jobs) == expected_action_jobs
    assert set([x.get("result") for x in action_jobs]) == set([None])

    assert Queue("default").size() == 0
    assert Queue("q1").size() == 0
    assert Queue("q2").size() == 0

    worker.mongodb_jobs.tests_inserts.remove({})

    # Then requeue the same jobs
    params = {
        "action": "requeue"
    }
    params.update(p_query[0])

    # NOTE(review): this uses "--gevent 1" while the first start used
    # "--greenlets 1" — presumably an older alias for the same flag; confirm.
    worker.start(flags="--gevent 1", start_deps=False, queues="default", flush=False)

    ret = worker.send_task("mrq.basetasks.utils.JobAction", params, block=True)

    assert ret["requeued"] == expected_action_jobs

    worker.stop(deps=False)

    # Requeued jobs are back in 'queued' state across the three queues.
    assert worker.mongodb_jobs.mrq_jobs.find(
        {"status": "queued"}).count() == expected_action_jobs

    assert Queue("default").size() + Queue("q1").size() + \
        Queue("q2").size() == expected_action_jobs

    worker.stop_deps()

Example 38

Project: mrq
Source File: test_raw.py
View license
@pytest.mark.parametrize(["p_queue", "p_pushback", "p_timed", "p_flags"], [
    ["test_timed_set", False, True, "--greenlets 10"],
    ["pushback_timed_set", True, True, "--greenlets 10"],
    ["test_sorted_set", False, False, "--greenlets 1"]
])
def test_raw_sorted(worker, p_queue, p_pushback, p_timed, p_flags):
    """Exercise raw sorted/timed-set queues.

    - sorted set (p_timed=False): members are dequeued immediately in score
      order regardless of the timestamp-like score.
    - timed set (p_timed=True): members become eligible only once their score
      (a unix timestamp) is in the past; with pushback they are re-scheduled
      instead of removed.
    """

    worker.start(flags="%s --config tests/fixtures/config-raw1.py" %
                 p_flags, queues=p_queue)

    test_collection = worker.mongodb_logs.tests_inserts
    jobs_collection = worker.mongodb_jobs.mrq_jobs

    current_time = int(time.time())

    assert jobs_collection.count() == 0

    assert Queue(p_queue).size() == 0

    # Schedule one in the past, one in the future
    worker.send_raw_tasks(p_queue, {
        "aaa": current_time - 10,
        "bbb": current_time + 2,
        "ccc": current_time + 5
    }, block=False)

    # Re-schedule
    worker.send_raw_tasks(p_queue, {
        "ccc": current_time + 2
    }, block=False)

    time.sleep(1)

    if not p_timed:

        # Plain sorted set: everything is consumed right away.
        assert Queue(p_queue).size() == 0
        assert test_collection.count() == 3
        assert list(test_collection.find(projection={"params": 1, "_id": 0}).limit(1)) == [
            {"params": {"sorted_set": "aaa"}}
        ]
        return

    if p_pushback:
        # With pushback, consumed members are re-added to the set.
        assert Queue(p_queue).size() == 3
        assert set(Queue(p_queue).list_raw_jobs()) == set(["bbb", "ccc", "aaa"])
    else:
        assert Queue(p_queue).size() == 2
        assert set(Queue(p_queue).list_raw_jobs()) == set(["bbb", "ccc"])

    # The second one should not yet even exist in mrq_jobs
    assert jobs_collection.count() == 1
    assert list(jobs_collection.find())[0]["status"] == "success"

    assert list(test_collection.find(projection={"params": 1, "_id": 0})) == [
        {"params": {"timed_set": "aaa"}}
    ]

    # Then wait for the second job to be done
    time.sleep(2)

    if p_pushback:
        assert Queue(p_queue).size() == 3
    else:
        assert Queue(p_queue).size() == 0

    assert jobs_collection.count() == 3
    assert list(jobs_collection.find())[1]["status"] == "success"
    assert list(jobs_collection.find())[2]["status"] == "success"

    assert list(jobs_collection.find())[2]["worker"]

    assert test_collection.count() == 3
Example 39

Project: atomic-reactor
Source File: test_koji_promote.py
View license
    @pytest.mark.parametrize(('apis',
                              'docker_registry',
                              'pulp_registries',
                              'metadata_only',
                              'blocksize',
                              'target'), [
        ('v1-only',
         False,
         1,
         False,
         None,
         'images-docker-candidate'),

        ('v1+v2',
         True,
         2,
         False,
         10485760,
         None),

        ('v2-only',
         True,
         1,
         True,
         None,
         None),

        ('v1+v2',
         True,
         0,
         False,
         10485760,
         None),

    ])
    @pytest.mark.parametrize('has_config', [
        True,
        False,
    ])
    def test_koji_promote_success(self, tmpdir, apis, docker_registry,
                                  pulp_registries,
                                  metadata_only, blocksize, target, os_env, has_config):
        """Happy-path koji promote: run the plugin against a mocked koji
        session and validate the structure of the uploaded build metadata
        (build / buildroots / output), the uploaded files, and (when a
        target is given) the build tagging.
        """
        session = MockedClientSession('')
        component = 'component'
        name = 'ns/name'
        version = '1.0'
        release = '1'

        if has_config and not docker_registry:
            # Not a valid combination
            has_config = False

        tasker, workflow = mock_environment(tmpdir,
                                            session=session,
                                            name=name,
                                            component=component,
                                            version=version,
                                            release=release,
                                            docker_registry=docker_registry,
                                            pulp_registries=pulp_registries,
                                            blocksize=blocksize,
                                            has_config=has_config)
        runner = create_runner(tasker, workflow, metadata_only=metadata_only,
                               blocksize=blocksize, target=target)
        runner.run()

        data = session.metadata
        # Keys that only appear on metadata-only imports; subtract this set
        # from expectations when metadata_only is False.
        if metadata_only:
            mdonly = set()
        else:
            mdonly = set(['metadata_only'])

        assert set(data.keys()) == set([
            'metadata_version',
            'build',
            'buildroots',
            'output',
        ])

        assert data['metadata_version'] in ['0', 0]

        build = data['build']
        assert isinstance(build, dict)

        buildroots = data['buildroots']
        assert isinstance(buildroots, list)
        assert len(buildroots) > 0

        output_files = data['output']
        assert isinstance(output_files, list)

        assert set(build.keys()) == set([
            'name',
            'version',
            'release',
            'source',
            'start_time',
            'end_time',
            'extra',          # optional but always supplied
            'metadata_only',  # only when True
        ]) - mdonly

        assert build['name'] == component
        assert build['version'] == version
        assert build['release'] == release
        assert build['source'] == 'git://hostname/path#123456'
        start_time = build['start_time']
        assert isinstance(start_time, int) and start_time
        end_time = build['end_time']
        assert isinstance(end_time, int) and end_time
        if metadata_only:
            assert isinstance(build['metadata_only'], bool)
            assert build['metadata_only']

        extra = build['extra']
        assert isinstance(extra, dict)
        assert 'image' in extra
        image = extra['image']
        assert isinstance(image, dict)

        for buildroot in buildroots:
            self.validate_buildroot(buildroot)

            # Unique within buildroots in this metadata
            assert len([b for b in buildroots
                        if b['id'] == buildroot['id']]) == 1

        for output in output_files:
            self.validate_output(output, metadata_only, has_config,
                                 expect_digest=docker_registry)
            buildroot_id = output['buildroot_id']

            # References one of the buildroots
            assert len([buildroot for buildroot in buildroots
                        if buildroot['id'] == buildroot_id]) == 1

            if metadata_only:
                assert isinstance(output['metadata_only'], bool)
                assert output['metadata_only']

        files = session.uploaded_files

        # There should be a file in the list for each output
        # except for metadata-only imports, in which case there
        # will be no upload for the image itself
        assert isinstance(files, list)
        expected_uploads = len(output_files)
        if metadata_only:
            expected_uploads -= 1

        assert len(files) == expected_uploads

        # The correct blocksize argument should have been used
        if blocksize is not None:
            assert blocksize == session.blocksize

        build_id = runner.plugins_results[KojiPromotePlugin.key]
        assert build_id == "123"

        if target is not None:
            # With a target the build must end up tagged into the destination
            # tag and the tag task must have completed.
            assert session.build_tags[build_id] == session.DEST_TAG
            assert session.tag_task_state == 'CLOSED'
Example 40

Project: atomic-reactor
Source File: test_tag_and_push.py
View license
@pytest.mark.parametrize("use_secret", [
    True,
    False,
])
@pytest.mark.parametrize(("image_name", "logs", "should_raise", "has_config"), [
    (TEST_IMAGE, PUSH_LOGS_1_X, False, False),
    (TEST_IMAGE, PUSH_LOGS_1_9, False, False),
    (TEST_IMAGE, PUSH_LOGS_1_10, False, True),
    (TEST_IMAGE, PUSH_LOGS_1_10_NOT_IN_STATUS, False, False),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_X, True, False),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_9, True, False),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_10, True, True),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_10_NOT_IN_STATUS, True, True),
    (TEST_IMAGE, PUSH_ERROR_LOGS, True, False),
])
def test_tag_and_push_plugin(
        tmpdir, monkeypatch, image_name, logs, should_raise, has_config, use_secret):
    """Run the TagAndPush plugin against a mocked docker client and a mocked
    v2 registry, covering several docker push log formats, with and without a
    .dockercfg push secret, and verify the recorded digests/config.
    """

    if MOCK:
        # Mock out the actual docker push/login so the plugin consumes the
        # parametrized `logs` instead of talking to a daemon.
        mock_docker()
        flexmock(docker.Client, push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        # Write a throwaway .dockercfg and point the plugin's registry
        # 'secret' at its directory.
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "user", "email": "[email protected]", "password": "mypassword"}}
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    # Canned v2 manifest returned by the mocked registry.
    response_config_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [
            {
                'digest': 'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 71907148
            },
            {
                'digest': 'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 3945724
            }
        ],
        'mediaType': media_type,
        'schemaVersion': 2
    }

    # Canned image config blob; set to None when the scenario has no config.
    response_json = {
        'config': {
            'Size': 12509448,
            'architecture': 'amd64',
            'author': 'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created': '2016-10-07T10:20:05.38595Z',
            'docker_version': '1.9.1',
            'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os': 'linux',
            'parent': '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    if not has_config:
        response_json = None

    config_latest_url = "https://{}/v2/{}/manifests/latest".format(LOCALHOST_REGISTRY, TEST_IMAGE,)
    config_url = "https://{}/v2/{}/manifests/{}".format(LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    config_response_config_v1 = requests.Response()
    (flexmock(config_response_config_v1,
              raise_for_status=lambda: None,
              json=response_config_json,
              headers={
                'Content-Type': 'application/vnd.docker.distribution.manifest.v1+json',
                'Docker-Content-Digest': DIGEST_V1
              }
    ))

    config_response_config_v2 = requests.Response()
    (flexmock(config_response_config_v2,
              raise_for_status=lambda: None,
              json=response_config_json,
              headers={
                'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json',
                'Docker-Content-Digest': DIGEST_V2
              }
    ))

    blob_config = requests.Response()
    (flexmock(blob_config, raise_for_status=lambda: None, json=response_json))

    def custom_get(url, headers, **kwargs):
        # Route the plugin's registry GETs to the canned responses above,
        # honoring the Accept header for manifest schema v1 vs v2.
        if url == config_latest_url:
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v1+json':
                return config_response_config_v1

            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                return config_response_config_v2

        if url == config_url:
            return config_response_config_v2

        if url == blob_url:
            return blob_config

    (flexmock(requests)
        .should_receive('get')
        .replace_with(custom_get)
    )

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    if should_raise:
        with pytest.raises(Exception):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            expected_digest = ManifestDigest(v1=DIGEST_V1, v2=DIGEST_V2)
            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == expected_digest.v2

            if has_config:
                assert isinstance(workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
Example 41

Project: atomic-reactor
Source File: test_inner.py
View license
@pytest.mark.parametrize(('plugins', 'should_fail', 'should_log'), [
    # No 'name' key, prebuild
    ({
        'prebuild_plugins': [{'args': {}},
                             {'name': 'pre_watched',
                              'args': {
                                  'watcher': Watcher(),
                              }
                             }],
      },
     True,  # is fatal
     True,  # logs error
    ),

    # No 'name' key, postbuild
    ({
        'postbuild_plugins': [{'args': {}},
                              {'name': 'post_watched',
                               'args': {
                                   'watcher': Watcher(),
                               }
                              }],
      },
     True,  # is fatal
     True,  # logs error
    ),

    # No 'name' key, prepub
    ({
        'prepublish_plugins': [{'args': {}},
                               {'name': 'prepub_watched',
                                'args': {
                                    'watcher': Watcher(),
                                },
                               }]},
     True,  # is fatal
     True,  # logs error
    ),
     

    # No 'name' key, exit
    ({
        'exit_plugins': [{'args': {}},
                         {'name': 'exit_watched',
                          'args': {
                              'watcher': Watcher(),
                          }
                         }]
      },
     False,  # not fatal
     True,   # logs error
    ),

    # No 'args' key, prebuild
    ({'prebuild_plugins': [{'name': 'pre'},
                           {'name': 'pre_watched',
                            'args': {
                                'watcher': Watcher(),
                            }
                           }]},
     False,  # not fatal
     False,  # no error logged
    ),

    # No 'args' key, postbuild
    ({'postbuild_plugins': [{'name': 'post'},
                            {'name': 'post_watched',
                             'args': {
                                 'watcher': Watcher(),
                             }
                            }]},
     False,  # not fatal,
     False,  # no error logged
    ),

    # No 'args' key, prepub
    ({'prepublish_plugins': [{'name': 'prepub'},
                             {'name': 'prepub_watched',
                              'args': {
                                  'watcher': Watcher(),
                              }
                            }]},
     False,  # not fatal,
     False,  # no error logged
    ),

    # No 'args' key, exit
    ({'exit_plugins': [{'name': 'exit'},
                       {'name': 'exit_watched',
                        'args': {
                            'watcher': Watcher(),
                        }
                       }]},
     False,  # not fatal
     False,  # no error logged
    ),

    # No such plugin, prebuild
    ({'prebuild_plugins': [{'name': 'no plugin',
                            'args': {}},
                           {'name': 'pre_watched',
                            'args': {
                                'watcher': Watcher(),
                            }
                           }]},
     True,  # is fatal
     True,  # logs error
    ),

    # No such plugin, postbuild
    ({'postbuild_plugins': [{'name': 'no plugin',
                             'args': {}},
                            {'name': 'post_watched',
                             'args': {
                                 'watcher': Watcher(),
                             }
                            }]},
     True,  # is fatal
     True,  # logs error
    ),

    # No such plugin, prepub
    ({'prepublish_plugins': [{'name': 'no plugin',
                              'args': {}},
                             {'name': 'prepub_watched',
                              'args': {
                                  'watcher': Watcher(),
                              }
                             }]},
     True,  # is fatal
     True,  # logs error
    ),

    # No such plugin, exit
    ({'exit_plugins': [{'name': 'no plugin',
                        'args': {}},
                       {'name': 'exit_watched',
                        'args': {
                            'watcher': Watcher(),
                        }
                       }]},
     False,  # not fatal
     True,   # logs error
    ),

    # No such plugin, prebuild, not required
    ({'prebuild_plugins': [{'name': 'no plugin',
                            'args': {},
                            'required': False},
                           {'name': 'pre_watched',
                            'args': {
                                'watcher': Watcher(),
                            }
                           }]},
     False,  # not fatal
     False,  # does not log error
    ),

    # No such plugin, postbuild, not required
    ({'postbuild_plugins': [{'name': 'no plugin',
                             'args': {},
                             'required': False},
                            {'name': 'post_watched',
                             'args': {
                                 'watcher': Watcher(),
                             }
                            }]},
     False,  # not fatal
     False,  # does not log error
    ),

    # No such plugin, prepub, not required
    ({'prepublish_plugins': [{'name': 'no plugin',
                              'args': {},
                              'required': False},
                             {'name': 'prepub_watched',
                              'args': {
                                  'watcher': Watcher(),
                              }
                             }]},
     False,  # not fatal
     False,  # does not log error
    ),

    # No such plugin, exit, not required
    ({'exit_plugins': [{'name': 'no plugin',
                        'args': {},
                        'required': False},
                       {'name': 'exit_watched',
                        'args': {
                            'watcher': Watcher(),
                        }
                       }]},
     False,  # not fatal
     False,  # does not log error
    ),
])
def test_plugin_errors(request, plugins, should_fail, should_log):
    """
    Try bad plugin configuration.

    Runs a workflow built from the parametrized plugin configuration and
    checks three things: whether the build raises PluginFailedException,
    whether the 'watcher' plugin was actually executed, and whether any
    errors were written to the plugin logger.
    """

    this_file = inspect.getfile(PreRaises)
    mock_docker()
    fake_builder = MockInsideBuilder()
    flexmock(InsideBuilder).new_instances(fake_builder)
    fake_logger = FakeLogger()

    # Swap in a fake logger so logged errors can be counted; the finalizer
    # restores the real module-level logger after the test.
    existing_logger = atomic_reactor.plugin.logger

    def restore_logger():
        atomic_reactor.plugin.logger = existing_logger

    request.addfinalizer(restore_logger)
    atomic_reactor.plugin.logger = fake_logger

    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
                                   plugin_files=[this_file],
                                   **plugins)

    # Find the 'watcher' parameter: exactly one plugin in each parametrized
    # configuration carries a Watcher in its args.
    watchers = [conf.get('args', {}).get('watcher')
                for plugin in plugins.values()
                for conf in plugin]
    watcher = [x for x in watchers if x][0]

    if should_fail:
        with pytest.raises(PluginFailedException):
            workflow.build_docker_image()

        # A fatal configuration error must prevent the watched plugin
        # from ever running.
        assert not watcher.was_called()
    else:
        workflow.build_docker_image()
        assert watcher.was_called()

    if should_log:
        assert len(fake_logger.errors) > 0
    else:
        assert len(fake_logger.errors) == 0

Example 42

Project: osbs-client
Source File: test_api.py
View license
    @pytest.mark.parametrize(('kind', 'expect_name'), [
        ('ImageStreamTag', 'registry:5000/buildroot:latest'),
        ('DockerImage', 'buildroot:latest'),
    ])
    def test_scratch_build_config(self, kind, expect_name):
        """Check that a scratch build is submitted directly as a Build.

        A scratch build must never create or update a BuildConfig; the
        rendered JSON gets a unique timestamped name, the 'scratch' label,
        the 'builder' service account, and an ImageStreamTag build root is
        resolved to a concrete DockerImage reference.
        """
        config = Configuration()
        osbs = OSBS(config, config)

        # Minimal build JSON that the stubbed BuildRequest will render.
        build_json = {
            'apiVersion': osbs.os_conf.get_openshift_api_version(),

            'metadata': {
                'name': 'build',
                'labels': {
                    'git-repo-name': 'reponame',
                    'git-branch': 'branch',
                },
            },

            'spec': {
                'strategy': {
                    'customStrategy': {
                        'from': {
                            'kind': kind,
                            'name': 'buildroot:latest',
                        },
                    },
                },
            },
        }

        # Stand-in for a BuildRequest marked as a scratch build.
        build_request = flexmock(
            render=lambda: build_json,
            is_auto_instantiated=lambda: False,
            scratch=True)

        # What _create_scratch_build is expected to actually POST.
        updated_build_json = copy.deepcopy(build_json)
        updated_build_json['kind'] = 'Build'
        updated_build_json['metadata']['labels']['scratch'] = 'true'
        updated_build_json['spec']['serviceAccount'] = 'builder'
        img = updated_build_json['spec']['strategy']['customStrategy']['from']
        img['kind'] = 'DockerImage'
        img['name'] = expect_name
        # NOTE(review): the expected name uses now() to the second, so this
        # comparison could flake if the call under test crosses a second
        # boundary — confirm acceptable.
        build_name = 'scratch-%s' % datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        updated_build_json['metadata']['name'] = build_name

        # An ImageStreamTag build root must be looked up exactly once and
        # resolved to its dockerImageReference; a DockerImage needs no lookup.
        if kind == 'ImageStreamTag':
            (flexmock(osbs.os)
                .should_receive('get_image_stream_tag')
                .with_args('buildroot:latest')
                .once()
                .and_return(flexmock(json=lambda: {
                    "apiVersion": "v1",
                    "kind": "ImageStreamTag",
                    "image": {
                        "dockerImageReference": expect_name,
                    },
                })))
        else:
            (flexmock(osbs.os)
                .should_receive('get_image_stream_tag')
                .never())

        (flexmock(osbs.os)
            .should_receive('create_build')
            .with_args(updated_build_json)
            .once()
            .and_return(flexmock(json=lambda: {'spam': 'maps'})))

        # Scratch builds must not touch build configs at all.
        (flexmock(osbs.os)
            .should_receive('create_build_config')
            .never())

        (flexmock(osbs.os)
            .should_receive('update_build_config')
            .never())

        build_response = osbs._create_scratch_build(build_request)
        assert build_response.json == {'spam': 'maps'}

Example 43

Project: osbs-client
Source File: test_conf.py
View license
    @pytest.mark.parametrize(('config', 'kwargs', 'cli_args',
                              'login', 'expected'), [
        # Config file only: token wins over token_file.
        ({'default': {'token': 'conf'}},
         {},
         {},
         None,
         'conf'),

        ({'default': {'token_file': 'conf_file'}},
         {},
         {},
         None,
         'conf_file'),

        ({'default': {'token': 'conf',
                      'token_file': 'conf_file'}},
         {},
         {},
         None,
         'conf'),

        # Keyword arguments only: token wins over token_file.
        ({'default': {}},
         {'token': 'kw'},
         {},
         None,
         'kw'),

        ({'default': {}},
         {'token_file': 'kw_file'},
         {},
         None,
         'kw_file'),

        ({'default': {}},
         {'token': 'kw',
          'token_file': 'kw_file'},
         {},
         None,
         'kw'),

        # Keyword arguments override the config file.
        ({'default': {'token': 'conf'}},
         {'token': 'kw'},
         {},
         None,
         'kw'),

        ({'default': {'token_file': 'conf_file'}},
         {'token': 'kw'},
         {},
         None,
         'kw'),

        ({'default': {'token': 'conf'}},
         {'token_file': 'kw_file'},
         {},
         None,
         'kw_file'),

        ({'default': {'token_file': 'conf_file'}},
         {'token_file': 'kw_file'},
         {},
         None,
         'kw_file'),

        # CLI arguments only: token wins over token_file.
        ({'default': {}},
         {},
         {'token': 'cli'},
         None,
         'cli'),

        ({'default': {}},
         {},
         {'token_file': 'cli_file'},
         None,
         'cli_file'),

        ({'default': {}},
         {},
         {'token': 'cli',
          'token_file': 'cli_file'},
         None,
         'cli'),

        # CLI arguments override the config file.
        ({'default': {'token': 'conf'}},
         {},
         {'token': 'cli'},
         None,
         'cli'),

        ({'default': {'token_file': 'conf_file'}},
         {},
         {'token': 'cli'},
         None,
         'cli'),

        ({'default': {'token': 'conf'}},
         {},
         {'token_file': 'cli_file'},
         None,
         'cli_file'),

        ({'default': {'token_file': 'conf_file'}},
         {},
         {'token_file': 'cli_file'},
         None,
         'cli_file'),

        # A saved login token is only used when nothing else supplies one.
        ({'default': {'token_file': 'conf_file'}},
         {},
         {},
         'login_file',
         'conf_file'),

        ({'default': {}},
         {},
         {'token_file': 'cli_file'},
         'login_file',
         'cli_file'),

        ({'default': {}},
         {},
         {},
         'login_file',
         'login_file'),
    ])
    def test_oauth2_token(self, config, kwargs, cli_args, login, expected):
        """Verify the precedence of oauth2 token sources.

        Expected order (highest first): CLI args, keyword args, config
        file, saved login token file; a literal token beats a token_file
        at the same level.
        """
        # token_file kwargs name a real temp file whose content is the token.
        if 'token_file' in kwargs:
            tmpf = self.tmpfile_with_content(kwargs['token_file'])
            kwargs['token_file'] = tmpf.name

        if login:
            login_tmpf = self.tmpfile_with_content(login)

        # Only stub the saved-login lookup when the login file is expected
        # to actually be read.
        if 'login_file' == expected:
            (flexmock(utils)
                .should_receive('get_instance_token_file_name')
                .with_args('default')
                .and_return(login_tmpf.name))


        with self.build_cli_args(cli_args) as args:
            with self.config_file(config) as config_file:
                conf = Configuration(conf_file=config_file, cli_args=args,
                                     **kwargs)

                assert conf.get_oauth2_token() == expected

Example 44

Project: hamster-lib
Source File: test_time.py
View license
    @pytest.mark.parametrize(('timeframe', 'expectation'), [
        (
            time_helpers.TimeFrame(
                start_date=None,
                start_time=None,
                end_date=None,
                end_time=None,
                offset=datetime.timedelta(minutes=90)
            ),
            (
                datetime.datetime(2015, 12, 10, 11, 0, 0),
                None
            ),
        ),
        (
            time_helpers.TimeFrame(
                start_date=None,
                start_time=None,
                end_date=None,
                end_time=None,
                offset=None
            ),
            (
                None,
                None
            ),
        ),
        (
            time_helpers.TimeFrame(
                start_date=datetime.date(2015, 12, 1),
                start_time=None,
                end_date=datetime.date(2015, 12, 4),
                end_time=None,
                offset=None
            ),
            (
                datetime.datetime(2015, 12, 1, 5, 30, 0),
                datetime.datetime(2015, 12, 5, 5, 29, 59)
            ),
        ),
        (
            time_helpers.TimeFrame(
                start_date=None,
                start_time=datetime.time(18, 55),
                end_date=None,
                end_time=datetime.time(23, 2),
                offset=None
            ),
            (
                datetime.datetime(2015, 12, 10, 18, 55, 0),
                datetime.datetime(2015, 12, 10, 23, 2, 0)
            ),
        ),
        (
            time_helpers.TimeFrame(
                start_date=datetime.date(2015, 12, 1),
                start_time=None,
                end_date=None,
                end_time=datetime.time(17, 0, 0),
                offset=None
            ),
            (
                datetime.datetime(2015, 12, 1, 5, 30, 0),
                datetime.datetime(2015, 12, 10, 17, 0, 0)
            ),
        ),
        (
            time_helpers.TimeFrame(
                start_date=datetime.date(2015, 12, 1),
                start_time=None,
                end_date=None,
                end_time=datetime.time(2, 0, 0),
                offset=None
            ),
            (
                datetime.datetime(2015, 12, 1, 5, 30, 0),
                datetime.datetime(2015, 12, 10, 2, 0, 0)
            ),
        ),
    ])
    @freeze_time('2015-12-10 12:30')
    def test_various_valid_timeframes_partial(self, base_config, timeframe, expectation):
        """Test that completing timeframe only where some info is present works.

        With partial=True, fields that cannot be derived from the given
        information stay None instead of being filled with defaults.
        """
        # Fix: the docstring previously carried a trailing comma, which made
        # it a tuple expression rather than the function's docstring.
        result = time_helpers.complete_timeframe(timeframe, base_config,
                                                 partial=True)
        assert result == expectation

Example 45

Project: hamster-lib
Source File: test_time.py
View license
    @pytest.mark.parametrize(('timeframe', 'expectation'), [
        (
            time_helpers.TimeFrame(
                start_date=None,
                start_time=None,
                end_date=None,
                end_time=None,
                offset=datetime.timedelta(minutes=90)
            ),
            (
                datetime.datetime(2015, 12, 10, 11, 0, 0),
                datetime.datetime(2015, 12, 11, 5, 29, 59)
            ),
        ),
        (
            time_helpers.TimeFrame(
                start_date=None,
                start_time=None,
                end_date=None,
                end_time=None,
                offset=None
            ),
            (
                datetime.datetime(2015, 12, 10, 5, 30, 0),
                datetime.datetime(2015, 12, 11, 5, 29, 59)
            ),
        ),
        (
            time_helpers.TimeFrame(
                start_date=datetime.date(2015, 12, 1),
                start_time=None,
                end_date=datetime.date(2015, 12, 4),
                end_time=None,
                offset=None
            ),
            (
                datetime.datetime(2015, 12, 1, 5, 30, 0),
                datetime.datetime(2015, 12, 5, 5, 29, 59)
            ),
        ),
        (
            time_helpers.TimeFrame(
                start_date=None,
                start_time=datetime.time(18, 55),
                end_date=None,
                end_time=datetime.time(23, 2),
                offset=None
            ),
            (
                datetime.datetime(2015, 12, 10, 18, 55, 0),
                datetime.datetime(2015, 12, 10, 23, 2, 0)
            ),
        ),
        (
            time_helpers.TimeFrame(
                start_date=datetime.date(2015, 12, 1),
                start_time=None,
                end_date=None,
                end_time=datetime.time(17, 0, 0),
                offset=None
            ),
            (
                datetime.datetime(2015, 12, 1, 5, 30, 0),
                datetime.datetime(2015, 12, 10, 17, 0, 0)
            ),
        ),
        (
            time_helpers.TimeFrame(
                start_date=datetime.date(2015, 12, 1),
                start_time=None,
                end_date=None,
                end_time=datetime.time(2, 0, 0),
                offset=None
            ),
            (
                datetime.datetime(2015, 12, 1, 5, 30, 0),
                datetime.datetime(2015, 12, 10, 2, 0, 0)
            ),
        ),
    ])
    @freeze_time('2015-12-10 12:30')
    def test_various_valid_timeframes(self, base_config, timeframe, expectation):
        """Test that completing an partial timeframe results in expected results.

        Without partial=True, missing fields are filled from the frozen
        "now" and the day-start given by base_config.
        """
        # Fix: the docstring previously carried a trailing comma, which made
        # it a tuple expression rather than the function's docstring.
        assert time_helpers.complete_timeframe(timeframe, base_config) == expectation

Example 46

Project: cryptography
Source File: test_ec.py
View license
    @pytest.mark.parametrize(
        "vector",
        load_vectors_from_file(
            os.path.join(
                "asymmetric", "ECDH",
                "KASValidityTest_ECCStaticUnified_NOKC_ZZOnly_init.fax"),
            load_kasvs_ecdh_vectors
        )
    )
    def test_key_exchange_with_vectors(self, backend, vector):
        """Run the KASVS ECDH validity vectors against the backend.

        Each vector supplies an IUT key pair, a peer (CAVS) public key and
        an expected shared value Z. Vectors flagged as failures must either
        be rejected when the keys are loaded, or yield a Z that does not
        match the vector's value.
        """
        _skip_exchange_algorithm_unsupported(
            backend, ec.ECDH(), ec._CURVE_TYPES[vector['curve']]
        )

        # The implementation-under-test (IUT) side: our own private key.
        key_numbers = vector['IUT']
        private_numbers = ec.EllipticCurvePrivateNumbers(
            key_numbers['d'],
            ec.EllipticCurvePublicNumbers(
                key_numbers['x'],
                key_numbers['y'],
                ec._CURVE_TYPES[vector['curve']]()
            )
        )
        # Errno 5 and 6 indicates a bad public key, this doesn't test the ECDH
        # code at all
        if vector['fail'] and vector['errno'] in [5, 6]:
            with pytest.raises(ValueError):
                private_numbers.private_key(backend)
            return
        else:
            private_key = private_numbers.private_key(backend)

        # The CAVS side: the peer's public key.
        peer_numbers = vector['CAVS']
        public_numbers = ec.EllipticCurvePublicNumbers(
            peer_numbers['x'],
            peer_numbers['y'],
            ec._CURVE_TYPES[vector['curve']]()
        )
        # Errno 1 and 2 indicates a bad public key, this doesn't test the ECDH
        # code at all
        if vector['fail'] and vector['errno'] in [1, 2]:
            with pytest.raises(ValueError):
                public_numbers.public_key(backend)
            return
        else:
            peer_pubkey = public_numbers.public_key(backend)

        # Compare the derived shared secret as an integer against Z.
        z = private_key.exchange(ec.ECDH(), peer_pubkey)
        z = int(hexlify(z).decode('ascii'), 16)
        # At this point fail indicates that one of the underlying keys was
        # changed. This results in a non-matching derived key.
        if vector['fail']:
            assert z != vector['Z']
        else:
            assert z == vector['Z']

Example 47

Project: flake8
Source File: test_checker.py
View license
@pytest.mark.parametrize('results, expected_order', [
    # No entries should be added
    ([], []),
    # Results are correctly ordered
    ([('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 2, 1, 'placeholder error', PLACEHOLDER_CODE)], [0, 1]),
    # Reversed order of lines
    ([('A101', 2, 1, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE)], [1, 0]),
    # Columns are not ordered correctly (when reports are ordered correctly)
    ([('A101', 1, 2, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 2, 1, 'placeholder error', PLACEHOLDER_CODE)], [1, 0, 2]),
    ([('A101', 2, 1, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 1, 2, 'placeholder error', PLACEHOLDER_CODE)], [1, 2, 0]),
    ([('A101', 1, 2, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 2, 2, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 2, 1, 'placeholder error', PLACEHOLDER_CODE)], [0, 2, 1]),
    ([('A101', 1, 3, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 2, 2, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 3, 1, 'placeholder error', PLACEHOLDER_CODE)], [0, 1, 2]),
    ([('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 1, 3, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 2, 2, 'placeholder error', PLACEHOLDER_CODE)], [0, 1, 2]),
    # Previously sort column and message (so reversed) (see bug 196)
    ([('A101', 1, 1, 'placeholder error', PLACEHOLDER_CODE),
      ('A101', 2, 1, 'charlie error', PLACEHOLDER_CODE)], [0, 1]),
])
def test_report_order(results, expected_order):
    """Verify the order in which results are reported.

    Feeds a list of reports through a Manager with a single stubbed file
    checker and asserts the handled results come out sorted, regardless of
    the order the checker produced them in.
    """
    def report_counter(name, sorted_results):
        """Side effect standing in for the result handler; reports all."""
        return len(sorted_results)

    # Build the expected (sorted) list by indexing into the raw reports,
    # so the parametrized cases only need to list indexes.
    expected_results = [results[index] for index in expected_order]

    checker_stub = mock.Mock(spec=['results', 'display_name'])
    checker_stub.results = results
    checker_stub.display_name = 'placeholder'

    options_stub = mock.Mock(spec=['options'])

    # A bare manager with no arguments or plugins; its only checker is the
    # stub that supplies the raw results.
    manager = checker.Manager(options_stub, [], [])
    manager.checkers = [checker_stub]

    # _handle_results is the first place that receives the sorted results.
    # Should something non-private be mocked instead?
    results_handler = mock.Mock(side_effect=report_counter)
    manager._handle_results = results_handler

    assert manager.report() == (len(results), len(results))
    results_handler.assert_called_once_with('placeholder', expected_results)

Example 48

Project: pytest
Source File: test_assertion.py
View license
    @pytest.mark.parametrize('mode', ['plain', 'rewrite'])
    @pytest.mark.parametrize('plugin_state', ['development', 'installed'])
    def test_installed_plugin_rewrite(self, testdir, mode, plugin_state):
        # Make sure the hook is installed early enough so that plugins
        # installed via setuptools are re-written.
        #
        # Creates a fake setuptools-distributed plugin (both a module and a
        # package) plus a wrapper script that monkeypatches
        # pkg_resources.iter_entry_points before invoking pytest.main(), then
        # checks the assertion output matches the requested assert mode.
        testdir.tmpdir.join('hampkg').ensure(dir=1)
        contents = {
            'hampkg/__init__.py': """
                import pytest

                @pytest.fixture
                def check_first2():
                    def check(values, value):
                        assert values.pop(0) == value
                    return check
            """,
            'spamplugin.py': """
            import pytest
            from hampkg import check_first2

            @pytest.fixture
            def check_first():
                def check(values, value):
                    assert values.pop(0) == value
                return check
            """,
            'mainwrapper.py': """
            import pytest, pkg_resources

            plugin_state = "{plugin_state}"

            class DummyDistInfo:
                project_name = 'spam'
                version = '1.0'

                def _get_metadata(self, name):
                    # 'RECORD' meta-data only available in installed plugins
                    if name == 'RECORD' and plugin_state == "installed":
                        return ['spamplugin.py,sha256=abc,123',
                                'hampkg/__init__.py,sha256=abc,123']
                    # 'SOURCES.txt' meta-data only available for plugins in development mode
                    elif name == 'SOURCES.txt' and plugin_state == "development":
                        return ['spamplugin.py',
                                'hampkg/__init__.py']
                    return []

            class DummyEntryPoint:
                name = 'spam'
                module_name = 'spam.py'
                attrs = ()
                extras = None
                dist = DummyDistInfo()

                def load(self, require=True, *args, **kwargs):
                    import spamplugin
                    return spamplugin

            def iter_entry_points(name):
                yield DummyEntryPoint()

            pkg_resources.iter_entry_points = iter_entry_points
            pytest.main()
            """.format(plugin_state=plugin_state),
            'test_foo.py': """
            def test(check_first):
                check_first([10, 30], 30)

            def test2(check_first2):
                check_first([10, 30], 30)
            """,
        }
        testdir.makepyfile(**contents)
        # Run via the wrapper so the entry-point monkeypatching happens
        # before pytest starts.
        result = testdir.run(sys.executable, 'mainwrapper.py', '-s', '--assert=%s' % mode)
        if mode == 'plain':
            expected = 'E       AssertionError'
        elif mode == 'rewrite':
            # Rewritten assertions show the introspected values.
            expected = '*assert 10 == 30*'
        else:
            assert 0
        result.stdout.fnmatch_lines([expected])

Example 49

Project: pytest
Source File: test_terminal.py
View license
@pytest.mark.parametrize("exp_color, exp_line, stats_arg", [
    # The method under test only cares about the length of each
    # dict value, not the actual contents, so tuples of anything
    # suffice

    # Important statuses -- the highest priority of these always wins
    ("red", "1 failed", {"failed": (1,)}),
    ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}),

    ("red", "1 error", {"error": (1,)}),
    ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}),

    # (a status that's not known to the code)
    ("yellow", "1 weird", {"weird": (1,)}),
    ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}),

    ("yellow", "1 pytest-warnings", {"warnings": (1,)}),
    ("yellow", "1 passed, 1 pytest-warnings", {"warnings": (1,),
                                               "passed": (1,)}),

    ("green", "5 passed", {"passed": (1,2,3,4,5)}),


    # "Boring" statuses.  These have no effect on the color of the summary
    # line.  Thus, if *every* test has a boring status, the summary line stays
    # at its default color, i.e. yellow, to warn the user that the test run
    # produced no useful information
    ("yellow", "1 skipped", {"skipped": (1,)}),
    ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}),

    ("yellow", "1 deselected", {"deselected": (1,)}),
    ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}),

    ("yellow", "1 xfailed", {"xfailed": (1,)}),
    ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}),

    ("yellow", "1 xpassed", {"xpassed": (1,)}),
    ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}),

    # Likewise if no tests were found at all
    ("yellow", "no tests ran", {}),

    # Test the empty-key special case
    ("yellow", "no tests ran", {"": (1,)}),
    ("green", "1 passed", {"": (1,), "passed": (1,)}),


    # A couple more complex combinations
    ("red", "1 failed, 2 passed, 3 xfailed",
        {"passed": (1,2), "failed": (1,), "xfailed": (1,2,3)}),

    ("green", "1 passed, 2 skipped, 3 deselected, 2 xfailed",
        {"passed": (1,),
        "skipped": (1,2),
        "deselected": (1,2,3),
        "xfailed": (1,2)}),
])
def test_summary_stats(exp_line, exp_color, stats_arg):
    """Check the summary line text and color built from a stats dict."""
    print("Based on stats: %s" % stats_arg)
    print("Expect summary: \"%s\"; with color \"%s\"" % (exp_line, exp_color))
    summary_line, summary_color = build_summary_stats_line(stats_arg)
    print("Actually got:   \"%s\"; with color \"%s\"" % (summary_line, summary_color))
    assert summary_line == exp_line
    assert summary_color == exp_color

Example 50

Project: pytest-bdd
Source File: test_multiline.py
View license
@pytest.mark.parametrize(["feature_text", "expected_text"], [
    (
        textwrap.dedent("""
        Scenario: Multiline step using sub indentation
            Given I have a step with:
                Some

                Extra
                Lines
            Then the text should be parsed with correct indentation
        """),
        textwrap.dedent("""
        Some

        Extra
        Lines
        """)[1: -1]
    ),
    (
        textwrap.dedent("""
        Scenario: Multiline step using sub indentation
            Given I have a step with:
                Some

              Extra
             Lines

            Then the text should be parsed with correct indentation
        """),
        textwrap.dedent("""
           Some

         Extra
        Lines
        """)[1:-1]
    ),
    (
        textwrap.dedent("""
        Feature:
        Scenario: Multiline step using sub indentation
            Given I have a step with:
                Some
                Extra
                Lines

        """),
        textwrap.dedent("""
        Some
        Extra
        Lines
        """)[1:-1]
    ),
])
def test_multiline(request, tmpdir, feature_text, expected_text):
    """Check multiline step text keeps its relative indentation when parsed.

    Writes the parametrized feature to a temp file, binds an inner test to
    its scenario, and runs it to assert the parsed step text.
    """
    # Each parametrized case gets its own feature file on disk.
    file_name = tmpdir.join('test.feature')
    with file_name.open('w') as fd:
        fd.write(feature_text)

    # The inner function is the actual scenario body; calling it below
    # executes the scenario against the freshly written feature file.
    @scenario(file_name.strpath, 'Multiline step using sub indentation')
    def test_multiline(request):
        assert get_fixture_value(request, 'i_have_text') == expected_text
    test_multiline(request)