os.path.join

Here are examples of the Python API os.path.join, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

169 Examples
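Before the project examples, here is a minimal sketch of what os.path.join itself does; the path components below are made up purely for illustration.

import os

# os.path.join combines path components with the OS-specific separator;
# an absolute component (e.g. "/tmp") discards everything joined before it.
print(os.path.join("project", "settings", "config.ini"))  # project/settings/config.ini on POSIX
print(os.path.join("/tmp", "logs", "run.log"))            # /tmp/logs/run.log
print(os.path.join("data", "/etc", "hosts"))              # /etc/hosts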

Example 151

Project: chumpy Source File: api_compatibility.py
def main():
    
    #sample_array
    
    ###############################
    hd2('Array Creation Routines')
    
    hd3('Ones and zeros')

    r('empty', {'shape': (2,4,2)}, {'dtype': np.uint8, 'order': 'C'})
    r('empty_like', {'prototype': np.empty((2,4,2))}, {'dtype': np.float64, 'order': 'C'})
    r('eye', {'N': 10}, {'M': 5, 'k': 0, 'dtype': np.float64})
    r('identity', {'n': 10}, {'dtype': np.float64})
    r('ones', {'shape': (2,4,2)}, {'dtype': np.uint8, 'order': 'C'})
    r('ones_like', {'a': np.empty((2,4,2))}, {'dtype': np.float64, 'order': 'C'})
    r('zeros', {'shape': (2,4,2)}, {'dtype': np.uint8, 'order': 'C'})
    r('zeros_like', {'a': np.empty((2,4,2))}, {'dtype': np.float64, 'order': 'C'})
    
    hd3('From existing data')
    r('array', {'object': [1,2,3]}, {'dtype': np.float64, 'order': 'C', 'subok': False, 'ndmin': 2})
    r('asarray', {'a': np.array([1,2,3])}, {'dtype': np.float64, 'order': 'C'})
    r('asanyarray', {'a': np.array([1,2,3])}, {'dtype': np.float64, 'order': 'C'})
    r('ascontiguousarray', {'a': np.array([1,2,3])}, {'dtype': np.float64})
    r('asmatrix', {'data': np.array([1,2,3])}, {'dtype': np.float64})
    r('copy', (np.array([1,2,3]),), {})
    r('frombuffer', {'buffer': np.array([1,2,3])}, {})
    m('fromfile')
    r('fromfunction', {'function': lambda i, j: i + j, 'shape': (3, 3)}, {'dtype': np.float64})
    # function, shape, **kwargs
    # lambda i, j: i + j, (3, 3), dtype=int
    r('fromiter', {'iter': [1,2,3,4], 'dtype': np.float64}, {'count': 2})
    r('fromstring', {'string': '\x01\x02', 'dtype': np.uint8}, {})
    r('loadtxt', {'fname': StringIO("0 1\n2 3")}, {})

    hd3('Creating record arrays (wont be implemented)')
    hd3('Creating character arrays (wont be implemented)')

    hd3('Numerical ranges')
    r('arange', {'start': 0, 'stop': 10}, {'step': 2, 'dtype': np.float64})
    r('linspace', {'start': 0, 'stop': 10}, {'num': 2, 'endpoint': 10, 'retstep': 1})
    r('logspace', {'start': 0, 'stop': 10}, {'num': 2, 'endpoint': 10, 'base': 1})
    r('meshgrid', ([1,2,3], [4,5,6]), {})
    m('mgrid')
    m('ogrid')
    
    hd3('Building matrices')
    r('diag', {'v': np.arange(9).reshape((3,3))}, {'k': 0})
    r('diagflat', {'v': [[1,2], [3,4]]}, {})
    r('tri', {'N': 3}, {'M': 5, 'k': 2, 'dtype': np.float64})
    r('tril', {'m': [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]}, {'k': -1})
    r('triu', {'m': [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]}, {'k': -1})
    r('vander', {'x': np.array([1, 2, 3, 5])}, {'N': 3})
    
    ###############################
    hd2('Array manipulation routines')
    
    hd3('Basic operations')
    r('copyto', {'dst': np.eye(3), 'src': np.eye(3)}, {})
    
    hd3('Changing array shape')
    r('reshape', {'a': np.eye(3), 'newshape': (9,)}, {'order' : 'C'})
    r('ravel', {'a': np.eye(3)}, {'order' : 'C'})
    m('flat')
    m('flatten')
    
    hd3('Transpose-like operations')
    r('rollaxis', {'a': np.ones((3,4,5,6)), 'axis': 3}, {'start': 0})
    r('swapaxes', {'a': np.array([[1,2,3]]), 'axis1': 0, 'axis2': 1}, {})
    r('transpose', {'a': np.arange(4).reshape((2,2))}, {'axes': (1,0)})
    
    hd3('Changing number of dimensions')
    r('atleast_1d', (np.eye(3),), {})
    r('atleast_2d', (np.eye(3),), {})
    r('atleast_3d', (np.eye(3),), {})
    m('broadcast')
    m('broadcast_arrays')
    r('expand_dims', (np.array([1,2]),2), {})
    r('squeeze', {'a': (np.array([[[1,2,3]]]))}, {})
    
    hd3('Changing kind of array')
    r('asarray', {'a': np.array([1,2,3])}, {'dtype': np.float64, 'order': 'C'})
    r('asanyarray', {'a': np.array([1,2,3])}, {'dtype': np.float64, 'order': 'C'})
    r('asmatrix', {'data': np.array([1,2,3])}, {})
    r('asfarray', {'a': np.array([1,2,3])}, {})
    r('asfortranarray', {'a': np.array([1,2,3])}, {})
    r('asscalar', {'a': np.array([24])}, {})
    r('require', {'a': np.array([24])}, {})
    
    hd3('Joining arrays')
    m('column_stack')
    r('concatenate', ((np.eye(3), np.eye(3)),1), {})
    r('dstack', ((np.eye(3), np.eye(3)),), {})
    r('hstack', ((np.eye(3), np.eye(3)),), {})
    r('vstack', ((np.eye(3), np.eye(3)),), {})

    hd3('Splitting arrays')
    m('array_split')
    m('dsplit')
    m('hsplit')
    m('split')
    m('vsplit')

    hd3('Tiling arrays')
    r('tile', (np.array([0, 1, 2]),2), {})
    r('repeat', (np.array([[1,2],[3,4]]), 3), {'axis': 1})

    hd3('Adding and removing elements')
    m('delete')
    m('insert')
    m('append')
    m('resize')
    m('trim_zeros')
    m('unique')
    
    hd3('Rearranging elements')
    r('fliplr', (np.eye(3),), {})
    r('flipud', (np.eye(3),), {})
    r('reshape', {'a': np.eye(3), 'newshape': (9,)}, {'order' : 'C'})
    r('roll', (np.arange(10), 2), {})
    r('rot90', (np.arange(4).reshape((2,2)),), {})
        
    ###############################
    hd2('Linear algebra (numpy.linalg)')
    
    extra_args = {'nplib': numpy.linalg, 'chlib': ch.linalg}
    
    hd3('Matrix and dot products')
    r('dot', {'a': np.eye(3), 'b': np.eye(3)}, {})
    r('dot', {'a': np.eye(3).ravel(), 'b': np.eye(3).ravel()}, {})
    r('vdot', (np.eye(3).ravel(), np.eye(3).ravel()), {})
    r('inner', (np.eye(3).ravel(), np.eye(3).ravel()), {})
    r('outer', (np.eye(3).ravel(), np.eye(3).ravel()), {})
    r('tensordot', {'a': np.eye(3), 'b': np.eye(3)}, {})
    m('einsum')
    r('matrix_power', {'M': np.eye(3), 'n': 2}, {}, **extra_args)
    r('kron', {'a': np.eye(3), 'b': np.eye(3)}, {})
        
    hd3('Decompositions')
    r('cholesky', {'a': np.eye(3)}, {}, **extra_args)
    r('qr', {'a': np.eye(3)}, {}, **extra_args)
    r('svd', (np.eye(3),), {}, **extra_args)
    
    hd3('Matrix eigenvalues')
    r('eig', (np.eye(3),), {}, **extra_args)
    r('eigh', (np.eye(3),), {}, **extra_args)
    r('eigvals', (np.eye(3),), {}, **extra_args)
    r('eigvalsh', (np.eye(3),), {}, **extra_args)
    
    hd3('Norms and other numbers')
    r('norm', (np.eye(3),), {}, **extra_args)
    r('cond', (np.eye(3),), {}, **extra_args)
    r('det', (np.eye(3),), {}, **extra_args)
    r('slogdet', (np.eye(3),), {}, **extra_args)
    r('trace', (np.eye(3),), {})
    
    hd3('Solving equations and inverting matrices')
    r('solve', (np.eye(3),np.ones(3)), {}, **extra_args)
    r('tensorsolve', (np.eye(3),np.ones(3)), {}, **extra_args)
    r('lstsq', (np.eye(3),np.ones(3)), {}, **extra_args)
    r('inv', (np.eye(3),), {}, **extra_args)
    r('pinv', (np.eye(3),), {}, **extra_args)
    r('tensorinv', (np.eye(4*6).reshape((4,6,8,3)),), {'ind': 2}, **extra_args)
    
    hd2('Mathematical functions')

    hd3('Trigonometric functions')
    r('sin', (np.arange(3),), {})
    r('cos', (np.arange(3),), {})
    r('tan', (np.arange(3),), {})
    r('arcsin', (np.arange(3)/3.,), {})
    r('arccos', (np.arange(3)/3.,), {})
    r('arctan', (np.arange(3)/3.,), {})
    r('hypot', (np.arange(3),np.arange(3)), {})
    r('arctan2', (np.arange(3),np.arange(3)), {})
    r('degrees', (np.arange(3),), {})
    r('radians', (np.arange(3),), {})
    r('unwrap', (np.arange(3),), {})
    r('deg2rad', (np.arange(3),), {})
    r('rad2deg', (np.arange(3),), {})
    
    hd3('Hyperbolic functions')
    r('sinh', (np.arange(3),), {})
    r('cosh', (np.arange(3),), {})
    r('tanh', (np.arange(3),), {})
    r('arcsinh', (np.arange(3)/9.,), {})
    r('arccosh', (-np.arange(3)/9.,), {})
    r('arctanh', (np.arange(3)/9.,), {})
    
    hd3('Rounding')
    r('around', (np.arange(3),), {})
    r('round_', (np.arange(3),), {})
    r('rint', (np.arange(3),), {})
    r('fix', (np.arange(3),), {})
    r('floor', (np.arange(3),), {})
    r('ceil', (np.arange(3),), {})
    r('trunc', (np.arange(3),), {})
    
    hd3('Sums, products, differences')
    r('prod', (np.arange(3),), {})
    r('sum', (np.arange(3),), {})
    r('nansum', (np.arange(3),), {})
    r('cumprod', (np.arange(3),), {})
    r('cumsum', (np.arange(3),), {})
    r('diff', (np.arange(3),), {})
    r('ediff1d', (np.arange(3),), {})
    r('gradient', (np.arange(3),), {})
    r('cross', (np.arange(3), np.arange(3)), {})
    r('trapz', (np.arange(3),), {})
    
    hd3('Exponents and logarithms')
    r('exp', (np.arange(3),), {})
    r('expm1', (np.arange(3),), {})
    r('exp2', (np.arange(3),), {})
    r('log', (np.arange(3),), {})
    r('log10', (np.arange(3),), {})
    r('log2', (np.arange(3),), {})
    r('log1p', (np.arange(3),), {})
    r('logaddexp', (np.arange(3), np.arange(3)), {})
    r('logaddexp2', (np.arange(3), np.arange(3)), {})
    
    hd3('Other special functions')
    r('i0', (np.arange(3),), {})
    r('sinc', (np.arange(3),), {})
    
    hd3('Floating point routines')
    r('signbit', (np.arange(3),), {})
    r('copysign', (np.arange(3), np.arange(3)), {})
    r('frexp', (np.arange(3),), {})
    r('ldexp', (np.arange(3), np.arange(3)), {})
    
    hd3('Arithmetic operations')
    r('add', (np.arange(3), np.arange(3)), {})
    r('reciprocal', (np.arange(3),), {})
    r('negative', (np.arange(3),), {})
    r('multiply', (np.arange(3), np.arange(3)), {})
    r('divide', (np.arange(3), np.arange(3)), {})
    r('power', (np.arange(3), np.arange(3)), {})
    r('subtract', (np.arange(3), np.arange(3)), {})
    r('true_divide', (np.arange(3), np.arange(3)), {})
    r('floor_divide', (np.arange(3), np.arange(3)), {})
    r('fmod', (np.arange(3), np.arange(3)), {})
    r('mod', (np.arange(3), np.arange(3)), {})
    r('modf', (np.arange(3),), {})
    r('remainder', (np.arange(3), np.arange(3)), {})
    
    hd3('Handling complex numbers')
    m('angle')
    m('real')
    m('imag')
    m('conj')
    
    hd3('Miscellaneous')
    r('convolve', (np.arange(3), np.arange(3)), {})
    r('clip', (np.arange(3), 0, 2), {})
    r('sqrt', (np.arange(3),), {})
    r('square', (np.arange(3),), {})
    r('absolute', (np.arange(3),), {})
    r('fabs', (np.arange(3),), {})
    r('sign', (np.arange(3),), {})
    r('maximum', (np.arange(3), np.arange(3)), {})
    r('minimum', (np.arange(3), np.arange(3)), {})
    r('fmax', (np.arange(3), np.arange(3)), {})
    r('fmin', (np.arange(3), np.arange(3)), {})
    r('nan_to_num', (np.arange(3),), {})
    r('real_if_close', (np.arange(3),), {})
    r('interp', (2.5, [1,2,3], [3,2,0]), {})
    
    extra_args = {'nplib': numpy.random, 'chlib': ch.random}
    
    hd2('Random sampling (numpy.random)')
    hd3('Simple random data')
    r('rand', (3,), {}, **extra_args)
    r('randn', (3,), {}, **extra_args)
    r('randint', (3,), {}, **extra_args)
    r('random_integers', (3,), {}, **extra_args)
    r('random_sample', (3,), {}, **extra_args)
    r('random', (3,), {}, **extra_args)
    r('ranf', (3,), {}, **extra_args)
    r('sample', (3,), {}, **extra_args)
    r('choice', (np.ones(3),), {}, **extra_args)
    r('bytes', (3,), {}, **extra_args)
    
    hd3('Permutations')
    r('shuffle', (np.ones(3),), {}, **extra_args)
    r('permutation', (3,), {}, **extra_args)
    
    hd3('Distributions (these all pass)')
    r('beta', (.5, .5), {}, **extra_args)
    r('binomial', (.5, .5), {}, **extra_args)
    r('chisquare', (.5,), {}, **extra_args)
    r('dirichlet', ((10, 5, 3), 20,), {}, **extra_args)
    r('exponential', [], {}, **extra_args)
    r('f', [1,48,1000], {}, **extra_args)
    r('gamma', [.5], {}, **extra_args)
    make_row('...AND 28 OTHERS...', 'passed', 'passed', 'lightgreen', 'lightgreen')
    
    
    hd3('Random generator')
    r('seed', [], {}, **extra_args)
    r('get_state', [], {}, **extra_args)
    r('set_state', [np.random.get_state()], {}, **extra_args)
    
    ####################################
    hd2('Statistics')
    hd3('Order statistics')
    r('amin', (np.eye(3),),{})
    r('amax', (np.eye(3),),{})
    r('nanmin', (np.eye(3),),{})
    r('nanmax', (np.eye(3),),{})
    r('ptp', (np.eye(3),),{})
    r('percentile', (np.eye(3),50),{})

    hd3('Averages and variance')
    r('median', (np.eye(3),),{})
    r('average', (np.eye(3),),{})
    r('mean', (np.eye(3),),{})
    r('std', (np.eye(3),),{})
    r('var', (np.eye(3),),{})
    r('nanmean', (np.eye(3),),{})
    r('nanstd', (np.eye(3),),{})
    r('nanvar', (np.eye(3),),{})
    

    hd3('Correlating')
    r('corrcoef', (np.eye(3),),{})
    r('correlate', ([1, 2, 3], [0, 1, 0.5]),{})
    r('cov', (np.eye(3),),{})
    
    hd3('Histograms')
    r('histogram', (np.eye(3),),{})
    r('histogram2d', (np.eye(3).ravel(),np.eye(3).ravel()),{})
    r('histogramdd', (np.eye(3).ravel(),),{})
    r('bincount', (np.asarray(np.eye(3).ravel(), np.uint32),),{})
    r('digitize', (np.array([0.2, 6.4, 3.0, 1.6]), np.array([0.0, 1.0, 2.5, 4.0, 10.0])),{})
    
    ####################################
    hd2('Sorting, searching, and counting')
    
    hd3('Sorting')
    r('sort', (np.array([1,3,1,2.]),), {})
    m('lexsort')
    m('argsort')
    m('msort')
    m('sort_complex')
    m('partition')
    m('argpartition')
    
# sort(a[, axis, kind, order])    Return a sorted copy of an array.
# lexsort(keys[, axis])    Perform an indirect sort using a sequence of keys.
# argsort(a[, axis, kind, order])    Returns the indices that would sort an array.
# ndarray.sort([axis, kind, order])    Sort an array, in-place.
# msort(a)    Return a copy of an array sorted along the first axis.
# sort_complex(a)    Sort a complex array using the real part first, then the imaginary part.
# partition(a, kth[, axis, kind, order])    Return a partitioned copy of an array.
# argpartition(a, kth[, axis, kind, order])    Perform an indirect partition along the given axis using the algorithm specified by the kind keyword.
    
    a5 = np.arange(5)

    hd3('Searching')
    r('argmax', (a5,), {})
    r('nanargmax', (a5,), {})
    r('argmin', (a5,), {})
    r('nanargmin', (a5,), {})
    r('argwhere', (a5,), {})
    r('nonzero', (a5,), {})
    r('flatnonzero', (a5,), {})
    r('where', (a5>1,), {})
    r('searchsorted', (a5,a5), {})
    r('extract', (lambda x : x > 1, a5), {})

# argmax(a[, axis])    Indices of the maximum values along an axis.
# nanargmax(a[, axis])    Return the indices of the maximum values in the specified axis ignoring
# argmin(a[, axis])    Return the indices of the minimum values along an axis.
# nanargmin(a[, axis])    Return the indices of the minimum values in the specified axis ignoring
# argwhere(a)    Find the indices of array elements that are non-zero, grouped by element.
# nonzero(a)    Return the indices of the elements that are non-zero.
# flatnonzero(a)    Return indices that are non-zero in the flattened version of a.
# where(condition, [x, y])    Return elements, either from x or y, depending on condition.
# searchsorted(a, v[, side, sorter])    Find indices where elements should be inserted to maintain order.
# extract(condition, arr)    Return the elements of an array that satisfy some condition.    
    
    hd3('Counting')
    r('count_nonzero', (a5,), {})
    #count_nonzero(a)	Counts the number of non-zero values in the array a.
    
    

# histogram(a[, bins, range, normed, weights, ...])    Compute the histogram of a set of data.
# histogram2d(x, y[, bins, range, normed, weights])    Compute the bi-dimensional histogram of two data samples.
# histogramdd(sample[, bins, range, normed, ...])    Compute the multidimensional histogram of some data.
# bincount(x[, weights, minlength])    Count number of occurrences of each value in array of non-negative ints.
# digitize(x, bins[, right])    Return the indices of the bins to which each value in input array belongs.    

        
    global src
    src = '<html><body><table border=1>' + src + '</table></body></html>'    
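    # join() and split() here are os.path.join and os.path.split, so the HTML report lands next to this script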
    open(join(split(__file__)[0], 'api_compatibility.html'), 'w').write(src)
    
    print 'passed %d, not passed %d' % (num_passed, num_not_passed)

Example 152

Project: pyOCD Source File: flash_test.py
def flash_test(board_id):
    with MbedBoard.chooseBoard(board_id=board_id, frequency=1000000) as board:
        target_type = board.getTargetType()

        test_clock = 10000000
        if target_type == "nrf51":
            # Override clock since 10MHz is too fast
            test_clock = 1000000
        if target_type == "ncs36510":
            # Override clock since 10MHz is too fast
            test_clock = 1000000

        memory_map = board.target.getMemoryMap()
        ram_regions = [region for region in memory_map if region.type == 'ram']
        ram_region = ram_regions[0]

        ram_start = ram_region.start
        ram_size = ram_region.length

        # Grab boot flash and any regions coming immediately after
        rom_region = memory_map.getBootMemory()
        rom_start = rom_region.start
        rom_size = rom_region.length
        for region in memory_map:
            if region.isFlash and (region.start == rom_start + rom_size):
                rom_size += region.length

        target = board.target
        link = board.link
        flash = board.flash

        link.set_clock(test_clock)
        link.set_deferred_transfer(True)

        test_pass_count = 0
        test_count = 0
        result = FlashTestResult()

        def print_progress(progress):
            assert progress >= 0.0
            assert progress <= 1.0
            assert (progress == 0 and print_progress.prev_progress == 1.0) or (progress >= print_progress.prev_progress)

            # Reset state on 0.0
            if progress == 0.0:
                print_progress.prev_progress = 0
                print_progress.backwards_progress = False
                print_progress.done = False

            # Check for backwards progress
            if progress < print_progress.prev_progress:
                print_progress.backwards_progress = True
            print_progress.prev_progress = progress

            # print progress bar
            if not print_progress.done:
                sys.stdout.write('\r')
                i = int(progress * 20.0)
                sys.stdout.write("[%-20s] %3d%%" % ('=' * i, round(progress * 100)))
                sys.stdout.flush()

            # Finish on 1.0
            if progress >= 1.0:
                if not print_progress.done:
                    print_progress.done = True
                    sys.stdout.write("\n")
                    if print_progress.backwards_progress:
                        print("Progress went backwards during flash")
        print_progress.prev_progress = 0

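        # build the full path to the test binary with os.path.join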
        binary_file = os.path.join(parentdir, 'binaries', board.getTestBinary())
        with open(binary_file, "rb") as f:
            data = f.read()
        data = struct.unpack("%iB" % len(data), data)
        unused = rom_size - len(data)

        addr = rom_start
        size = len(data)

        # Turn on extra checks for the next 4 tests
        flash.setFlashAlgoDebug(True)

        print("\r\n\r\n------ Test Basic Page Erase ------")
        info = flash.flashBlock(addr, data, False, False, progress_cb=print_progress)
        data_flashed = target.readBlockMemoryUnaligned8(addr, size)
        if same(data_flashed, data) and info.program_type is FlashBuilder.FLASH_PAGE_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Basic Chip Erase ------")
        info = flash.flashBlock(addr, data, False, True, progress_cb=print_progress)
        data_flashed = target.readBlockMemoryUnaligned8(addr, size)
        if same(data_flashed, data) and info.program_type is FlashBuilder.FLASH_CHIP_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Smart Page Erase ------")
        info = flash.flashBlock(addr, data, True, False, progress_cb=print_progress)
        data_flashed = target.readBlockMemoryUnaligned8(addr, size)
        if same(data_flashed, data) and info.program_type is FlashBuilder.FLASH_PAGE_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Smart Chip Erase ------")
        info = flash.flashBlock(addr, data, True, True, progress_cb=print_progress)
        data_flashed = target.readBlockMemoryUnaligned8(addr, size)
        if same(data_flashed, data) and info.program_type is FlashBuilder.FLASH_CHIP_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
        else:
            print("TEST FAILED")
        test_count += 1

        flash.setFlashAlgoDebug(False)

        print("\r\n\r\n------ Test Basic Page Erase (Entire chip) ------")
        new_data = list(data)
        new_data.extend(unused * [0x77])
        info = flash.flashBlock(addr, new_data, False, False, progress_cb=print_progress)
        if info.program_type == FlashBuilder.FLASH_PAGE_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
            result.page_erase_rate = float(len(new_data)) / float(info.program_time)
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Fast Verify ------")
        info = flash.flashBlock(addr, new_data, progress_cb=print_progress, fast_verify=True)
        if info.program_type == FlashBuilder.FLASH_PAGE_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Offset Write ------")
        addr = rom_start + rom_size / 2
        page_size = flash.getPageInfo(addr).size
        new_data = [0x55] * page_size * 2
        info = flash.flashBlock(addr, new_data, progress_cb=print_progress)
        data_flashed = target.readBlockMemoryUnaligned8(addr, len(new_data))
        if same(data_flashed, new_data) and info.program_type is FlashBuilder.FLASH_PAGE_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Multiple Block Writes ------")
        addr = rom_start + rom_size / 2
        page_size = flash.getPageInfo(addr).size
        more_data = [0x33] * page_size * 2
        addr = (rom_start + rom_size / 2) + 1 #cover multiple pages
        fb = flash.getFlashBuilder()
        fb.addData(rom_start, data)
        fb.addData(addr, more_data)
        fb.program(progress_cb=print_progress)
        data_flashed = target.readBlockMemoryUnaligned8(rom_start, len(data))
        data_flashed_more = target.readBlockMemoryUnaligned8(addr, len(more_data))
        if same(data_flashed, data) and same(data_flashed_more, more_data):
            print("TEST PASSED")
            test_pass_count += 1
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Overlapping Blocks ------")
        test_pass = False
        addr = (rom_start + rom_size / 2) #cover multiple pages
        page_size = flash.getPageInfo(addr).size
        new_data = [0x33] * page_size
        fb = flash.getFlashBuilder()
        fb.addData(addr, new_data)
        try:
            fb.addData(addr + 1, new_data)
        except ValueError as e:
            print("Exception: %s" % e)
            test_pass = True
        if test_pass:
            print("TEST PASSED")
            test_pass_count += 1
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Empty Block Write ------")
        # Freebie if nothing asserts
        fb = flash.getFlashBuilder()
        fb.program()
        print("TEST PASSED")
        test_pass_count += 1
        test_count += 1

        print("\r\n\r\n------ Test Missing Progress Callback ------")
        # Freebie if nothing asserts
        addr = rom_start
        flash.flashBlock(rom_start, data, True)
        print("TEST PASSED")
        test_pass_count += 1
        test_count += 1

        # Only run test if the reset handler can be programmed (rom start at address 0)
        if rom_start == 0:
            print("\r\n\r\n------ Test Non-Thumb reset handler ------")
            non_thumb_data = list(data)
            # Clear bit 0 of 2nd word - reset handler
            non_thumb_data[4] = non_thumb_data[4] & ~1
            flash.flashBlock(rom_start, non_thumb_data)
            flash.flashBlock(rom_start, data)
            print("TEST PASSED")
            test_pass_count += 1
            test_count += 1

        # Note - The decision based tests below are order dependent since they
        # depend on the previous state of the flash

        print("\r\n\r\n------ Test Chip Erase Decision ------")
        new_data = list(data)
        new_data.extend([0xff] * unused) # Pad with 0xFF
        info = flash.flashBlock(addr, new_data, progress_cb=print_progress)
        if info.program_type == FlashBuilder.FLASH_CHIP_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
            result.chip_erase_rate_erased = float(len(new_data)) / float(info.program_time)
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Chip Erase Decision 2 ------")
        new_data = list(data)
        new_data.extend([0x00] * unused) # Pad with 0x00
        info = flash.flashBlock(addr, new_data, progress_cb=print_progress)
        if info.program_type == FlashBuilder.FLASH_CHIP_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
            result.chip_erase_rate = float(len(new_data)) / float(info.program_time)
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Page Erase Decision ------")
        new_data = list(data)
        new_data.extend([0x00] * unused) # Pad with 0x00
        info = flash.flashBlock(addr, new_data, progress_cb=print_progress)
        if info.program_type == FlashBuilder.FLASH_PAGE_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
            result.page_erase_rate_same = float(len(new_data)) / float(info.program_time)
            result.analyze = info.analyze_type
            result.analyze_time = info.analyze_time
            result.analyze_rate = float(len(new_data)) / float(info.analyze_time)
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\n------ Test Page Erase Decision 2 ------")
        new_data = list(data)
        size_same = unused * 5 / 6
        size_differ = unused - size_same
        new_data.extend([0x00] * size_same) # Pad 5/6 with 0x00 and 1/6 with 0xFF
        new_data.extend([0x55] * size_differ)
        info = flash.flashBlock(addr, new_data, progress_cb=print_progress)
        if info.program_type == FlashBuilder.FLASH_PAGE_ERASE:
            print("TEST PASSED")
            test_pass_count += 1
        else:
            print("TEST FAILED")
        test_count += 1

        print("\r\n\r\nTest Summary:")
        print("Pass count %i of %i tests" % (test_pass_count, test_count))
        if test_pass_count == test_count:
            print("FLASH TEST SCRIPT PASSED")
        else:
            print("FLASH TEST SCRIPT FAILED")

        target.reset()

        result.passed = test_count == test_pass_count
        return result

Example 153

Project: ccs-calendarserver Source File: test_index_file.py
    @inlineCallbacks
    def test_index_timespan_per_user(self):
        data = (
            (
                "#1.1 Single per-user non-recurring component",
                "1.1",
                """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.1
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
                "20080601T000000Z", "20080602T000000Z",
                "mailto:[email protected]",
                (
                    (
                        "user01",
                        (('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),),
                    ),
                    (
                        "user02",
                        (('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
                    ),
                ),
            ),
            (
                "#1.2 Two per-user non-recurring component",
                "1.2",
                """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user02
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
                "20080601T000000Z", "20080602T000000Z",
                "mailto:[email protected]",
                (
                    (
                        "user01",
                        (('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),),
                    ),
                    (
                        "user02",
                        (('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
                    ),
                    (
                        "user03",
                        (('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),),
                    ),
                ),
            ),
            (
                "#2.1 Single per-user simple recurring component",
                "2.1",
                """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.1
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
                "20080601T000000Z", "20080603T000000Z",
                "mailto:[email protected]",
                (
                    (
                        "user01",
                        (
                            ('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
                            ('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'T'),
                        ),
                    ),
                    (
                        "user02",
                        (
                            ('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
                            ('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'F'),
                        ),
                    ),
                ),
            ),
            (
                "#2.2 Two per-user simple recurring component",
                "2.2",
                """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user02
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
                "20080601T000000Z", "20080603T000000Z",
                "mailto:[email protected]",
                (
                    (
                        "user01",
                        (
                            ('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
                            ('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'T'),
                        ),
                    ),
                    (
                        "user02",
                        (
                            ('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
                            ('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'F'),
                        ),
                    ),
                    (
                        "user03",
                        (
                            ('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
                            ('N', "2008-06-02 12:00:00", "2008-06-02 13:00:00", 'B', 'F'),
                        ),
                    ),
                ),
            ),
            (
                "#3.1 Single per-user complex recurring component",
                "3.1",
                """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.1
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-1.1
RECURRENCE-ID:20080602T120000Z
DTSTART:20080602T130000Z
DTEND:20080602T140000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.1
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
BEGIN:X-CALENDARSERVER-PERINSTANCE
RECURRENCE-ID:20080602T120000Z
TRANSP:OPAQUE
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
                "20080601T000000Z", "20080604T000000Z",
                "mailto:[email protected]",
                (
                    (
                        "user01",
                        (
                            ('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
                            ('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
                            ('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'T'),
                        ),
                    ),
                    (
                        "user02",
                        (
                            ('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
                            ('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
                            ('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'F'),
                        ),
                    ),
                ),
            ),
            (
                "#3.2 Two per-user complex recurring component",
                "3.2",
                """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-1.2
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
RRULE:FREQ=DAILY;COUNT=10
END:VEVENT
BEGIN:VEVENT
UID:12345-67890-1.2
RECURRENCE-ID:20080602T120000Z
DTSTART:20080602T130000Z
DTEND:20080602T140000Z
DTSTAMP:20080601T120000Z
ORGANIZER;CN="User 01":mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
END:VEVENT
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user01
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
BEGIN:X-CALENDARSERVER-PERINSTANCE
RECURRENCE-ID:20080602T120000Z
TRANSP:OPAQUE
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
BEGIN:X-CALENDARSERVER-PERUSER
UID:12345-67890-1.2
X-CALENDARSERVER-PERUSER-UID:user02
BEGIN:X-CALENDARSERVER-PERINSTANCE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:Test
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:X-CALENDARSERVER-PERINSTANCE
BEGIN:X-CALENDARSERVER-PERINSTANCE
RECURRENCE-ID:20080603T120000Z
TRANSP:TRANSPARENT
END:X-CALENDARSERVER-PERINSTANCE
END:X-CALENDARSERVER-PERUSER
END:VCALENDAR
""",
                "20080601T000000Z", "20080604T000000Z",
                "mailto:[email protected]",
                (
                    (
                        "user01",
                        (
                            ('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'T'),
                            ('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
                            ('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'T'),
                        ),
                    ),
                    (
                        "user02",
                        (
                            ('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
                            ('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
                            ('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'T'),
                        ),
                    ),
                    (
                        "user03",
                        (
                            ('N', "2008-06-01 12:00:00", "2008-06-01 13:00:00", 'B', 'F'),
                            ('N', "2008-06-02 13:00:00", "2008-06-02 14:00:00", 'B', 'F'),
                            ('N', "2008-06-03 12:00:00", "2008-06-03 13:00:00", 'B', 'F'),
                        ),
                    ),
                ),
            ),
        )

        for description, name, calendar_txt, trstart, trend, organizer, peruserinstances in data:
            calendar = Component.fromString(calendar_txt)

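            # write the calendar data into the index directory; the file path is built with os.path.join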
            with open(os.path.join(self.indexDirPath.path, name), "w") as f:
                f.write(calendar_txt)

            self.db.addResource(name, calendar)
            self.assertTrue(self.db.resourceExists(name), msg=description)

            # Create fake filter element to match time-range
            filter = caldavxml.Filter(
                caldavxml.ComponentFilter(
                    caldavxml.ComponentFilter(
                        TimeRange(
                            start=trstart,
                            end=trend,
                        ),
                        name=("VEVENT", "VFREEBUSY", "VAVAILABILITY"),
                    ),
                    name="VCALENDAR",
                )
            )
            filter = Filter(filter)

            for useruid, instances in peruserinstances:
                resources = yield self.db.indexedSearch(filter, useruid=useruid, fbtype=True)
                index_results = set()
                for _ignore_name, _ignore_uid, type, test_organizer, float, start, end, fbtype, transp in resources:
                    self.assertEqual(test_organizer, organizer, msg=description)
                    index_results.add((str(float), str(start), str(end), str(fbtype), str(transp),))

                self.assertEqual(set(instances), index_results, msg="%s, user:%s" % (description, useruid,))

            self.db.deleteResource(name)

Example 154

Project: mayaseed Source File: ms_render_settings.py
def ms_renderSettings_nodeInitializer():
    # define attributes
    # output directory
    output_dir_string = OpenMaya.MFnStringData().create(os.path.join("<ProjectDir>mayaseed", "<SceneName>"))
    output_dir_Attr = OpenMaya.MFnTypedAttribute()
    ms_renderSettings.output_dir = output_dir_Attr.create("output_directory", "out_dir", OpenMaya.MFnData.kString, output_dir_string)
    ms_renderSettings.addAttribute(ms_renderSettings.output_dir)

    # output file
    output_file_string = OpenMaya.MFnStringData().create("<SceneName>.#.appleseed")
    output_file_Attr = OpenMaya.MFnTypedAttribute()
    ms_renderSettings.output_file = output_file_Attr.create("output_file", "out_file", OpenMaya.MFnData.kString, output_file_string)  
    ms_renderSettings.addAttribute(ms_renderSettings.output_file)

    # export maya lights
    export_maya_lights_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.export_maya_lights = export_maya_lights_nAttr.create("export_maya_lights", "export_maya_lights", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.export_maya_lights)

    # convert textures to exr
    convert_textures_to_exr_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.convert_textures_to_exr = convert_textures_to_exr_nAttr.create("convert_textures_to_exr", "convert_tex_to_exr", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.convert_textures_to_exr)

    # convert shading nodes to textures
    convert_shading_nodes_to_textures_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.convert_shading_nodes_to_textures = convert_shading_nodes_to_textures_nAttr.create("convert_shading_nodes_to_textures", "convert_shading_nodes", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.convert_shading_nodes_to_textures)

    # overwrite existing textures
    overwrite_existing_textures_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.overwrite_existing_textures = overwrite_existing_textures_nAttr.create("overwrite_existing_textures", "overwrite_exrs", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.overwrite_existing_textures)

    # overwrite existing geometry
    overwrite_existing_geometry_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.overwrite_existing_geometry = overwrite_existing_geometry_nAttr.create("overwrite_existing_geometry", "overwrite_geo", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.overwrite_existing_geometry)

    # export camera blur
    export_camera_blur_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.export_camera_blur = export_camera_blur_nAttr.create("export_camera_blur", "camera_blur", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.export_camera_blur)

    # export transformation blur
    export_transformation_blur_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.export_transformation_blur = export_transformation_blur_nAttr.create("export_transformation_blur", "transformation_blur", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.export_transformation_blur)

    # export deformation blur
    export_deformation_blur_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.export_deformation_blur = export_deformation_blur_nAttr.create("export_deformation_blur", "deformation_blur", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.export_deformation_blur)

    # motion samples
    motion_samples_AttrInt = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.motion_samples = motion_samples_AttrInt.create("motion_samples", "motion_samples", OpenMaya.MFnNumericData.kInt, 2)
    motion_samples_AttrInt.setHidden(False)
    motion_samples_AttrInt.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.motion_samples)

    # shutter open time
    shutter_open_AttrFloat = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.shutter_open_time = shutter_open_AttrFloat.create("shutter_open_time", "shutter_open_time", OpenMaya.MFnNumericData.kFloat, 0)
    shutter_open_AttrFloat.setHidden(False)
    shutter_open_AttrFloat.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.shutter_open_time)

    # shutter close time
    shutter_close_AttrFloat = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.shutter_close_time = shutter_close_AttrFloat.create("shutter_close_time", "shutter_close_time", OpenMaya.MFnNumericData.kFloat, 1)
    shutter_close_AttrFloat.setHidden(False)
    shutter_close_AttrFloat.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.shutter_close_time)

    # export animation
    export_animation_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.export_animation = export_animation_nAttr.create("export_animation", "export_animation", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.export_animation)

    # start_frame
    start_frame_AttrInt = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.start_frame = start_frame_AttrInt.create("animation_start_frame", "start_frame", OpenMaya.MFnNumericData.kInt, 1)
    start_frame_AttrInt.setHidden(False)
    start_frame_AttrInt.setKeyable(False)
    ms_renderSettings.addAttribute(ms_renderSettings.start_frame)

    # end frame
    end_frame_AttrInt = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.end_frame = end_frame_AttrInt.create("animation_end_frame", "end_frame", OpenMaya.MFnNumericData.kInt, 100)
    end_frame_AttrInt.setHidden(False)
    end_frame_AttrInt.setKeyable(False)
    ms_renderSettings.addAttribute(ms_renderSettings.end_frame)

    # export animated textures
    export_animated_textures_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.export_animated_textures = export_animated_textures_nAttr.create("export_animated_textures", "animated_textures", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.export_animated_textures)

    # environment -----------------------------------------------
    # environment message
    environment_msgAttr = OpenMaya.MFnMessageAttribute()
    ms_renderSettings.environment = environment_msgAttr.create("environment", "env")   
    ms_renderSettings.addAttribute(ms_renderSettings.environment)

    # render_sky
    render_skynAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.render_sky = render_skynAttr.create("render_sky", "render_sky", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.render_sky)

    # scene index of refraction
    scene_ior_AttrFloat = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.scene_ior = scene_ior_AttrFloat.create("scene_index_of_refraction", "scene_ior", OpenMaya.MFnNumericData.kFloat, 1)
    scene_ior_AttrFloat.setHidden(False)
    scene_ior_AttrFloat.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.scene_ior)

    # cameras --------------------------------------------------
    # export all cameras as thin lens bool attribute
    export_all_cameras_as_thin_lens_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.export_all_cameras_as_thin_lens = export_all_cameras_as_thin_lens_nAttr.create("export_all_cameras_as_thin_lens", "export_thinlens", OpenMaya.MFnNumericData.kBoolean)
    ms_renderSettings.addAttribute(ms_renderSettings.export_all_cameras_as_thin_lens)

    # output ---------------------------------------------------
    # camera
    camera_msgAttr = OpenMaya.MFnMessageAttribute()
    ms_renderSettings.camera = camera_msgAttr.create("camera", "cam")
    ms_renderSettings.addAttribute(ms_renderSettings.camera)

    # color space
    color_space_enumAttr = OpenMaya.MFnEnumAttribute()
    ms_renderSettings.color_space = color_space_enumAttr.create("color_space", "col_space")
    color_space_enumAttr.addField("sRGB", 0)
    color_space_enumAttr.addField("Linear RGB", 1)
    color_space_enumAttr.addField("Spectral", 2)
    color_space_enumAttr.addField("ciexyz", 3)
    ms_renderSettings.addAttribute(ms_renderSettings.color_space)

    # resolution width
    width_AttrInt = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.width = width_AttrInt.create("frame_width", "width", OpenMaya.MFnNumericData.kInt, 1280)
    width_AttrInt.setHidden(False)
    width_AttrInt.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.width)

    # resolution height
    height_AttrInt = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.height = height_AttrInt.create("frame_height", "height", OpenMaya.MFnNumericData.kInt, 720)
    height_AttrInt.setHidden(False)
    height_AttrInt.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.height)

    # export straight
    export_straight_alpha_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.export_straight_alpha = export_straight_alpha_nAttr.create("export_straight_alpha", "export_straight", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.export_straight_alpha)

    # config ---------------------------------------------------
    # sampler
    sampler_enumAttr = OpenMaya.MFnEnumAttribute()
    ms_renderSettings.sampler = sampler_enumAttr.create("sampler", "sampler", 1)
    sampler_enumAttr.addField("Adaptive", 0)
    sampler_enumAttr.addField("Uniform", 1)
    ms_renderSettings.addAttribute(ms_renderSettings.sampler)

    # uniform sampler
    # uniform_samples
    uniform_samples_AttrInt = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.uniform_samples = uniform_samples_AttrInt.create("uniform_samples", "uniform_samples", OpenMaya.MFnNumericData.kInt, 64)
    uniform_samples_AttrInt.setHidden(False)
    uniform_samples_AttrInt.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.uniform_samples)

    # decorrelate_pixel
    uniform_decorrelate_pixelsnAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.uniform_decorrelate_pixels = uniform_decorrelate_pixelsnAttr.create("uniform_decorrelate_pixels", "uniform_decorrelate_pixels", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.uniform_decorrelate_pixels)

    # adaptive_sampler
    # adaptive_min_samples
    adaptive_min_samples_AttrInt = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.adaptive_min_samples = adaptive_min_samples_AttrInt.create("adaptive_min_samples", "adaptive_min_samples", OpenMaya.MFnNumericData.kInt, 16)
    adaptive_min_samples_AttrInt.setHidden(False)
    adaptive_min_samples_AttrInt.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.adaptive_min_samples)

    # adaptive_max_samples
    adaptive_max_samples_AttrInt = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.adaptive_max_samples = adaptive_max_samples_AttrInt.create("adaptive_max_samples", "adaptive_max_samples", OpenMaya.MFnNumericData.kInt, 128)
    adaptive_max_samples_AttrInt.setHidden(False)
    adaptive_max_samples_AttrInt.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.adaptive_max_samples)

    # adaptive_quality
    adaptive_quality_AttrFloat = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.adaptive_quality = adaptive_quality_AttrFloat.create("adaptive_quality", "adaptive_quality", OpenMaya.MFnNumericData.kFloat, 3)
    adaptive_quality_AttrFloat.setHidden(False)
    adaptive_quality_AttrFloat.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.adaptive_quality)

    # enable_ibl
    pt_iblnAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.pt_ibl = pt_iblnAttr.create("pt_ibl", "pt_ibl", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.pt_ibl)

    # enable_caustics
    pt_causticsnAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.pt_caustics = pt_causticsnAttr.create("pt_caustics", "pt_caustics", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.pt_caustics)

    # enable_dl
    pt_enable_dlnAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.pt_direct_lighting = pt_enable_dlnAttr.create("pt_direct_lighting", "pt_direct_lighting", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.pt_direct_lighting)

    # next_event_estimation
    pt_next_event_estimationnAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.pt_next_event_estimation = pt_next_event_estimationnAttr.create("pt_next_event_estimation", "pt_next_event_estimation", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.pt_next_event_estimation)

    # pt_max_bounces
    pt_max_bounces_AttrFloat = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.pt_max_bounces = pt_max_bounces_AttrFloat.create("pt_max_bounces", "pt_max_bounces", OpenMaya.MFnNumericData.kInt, 4)
    pt_max_bounces_AttrFloat.setHidden(False)
    pt_max_bounces_AttrFloat.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.pt_max_bounces)

    # pt_light_samples
    pt_light_samples_AttrFloat = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.pt_light_samples = pt_light_samples_AttrFloat.create("pt_light_samples", "pt_light_samples", OpenMaya.MFnNumericData.kFloat, 6)
    pt_light_samples_AttrFloat.setHidden(False)
    pt_light_samples_AttrFloat.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.pt_light_samples)

    # pt_environment_samples
    pt_environment_samples_AttrFloat = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.pt_environment_samples = pt_environment_samples_AttrFloat.create("pt_environment_samples", "pt_environment_samples", OpenMaya.MFnNumericData.kFloat, 1)
    pt_environment_samples_AttrFloat.setHidden(False)
    pt_environment_samples_AttrFloat.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.pt_environment_samples)

    # pt_max_ray_intensity
    pt_max_ray_intensity_AttrFloat = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.pt_max_ray_intensity = pt_max_ray_intensity_AttrFloat.create("pt_max_ray_intensity", "pt_max_ray_intensity", OpenMaya.MFnNumericData.kFloat, 0)
    pt_max_ray_intensity_AttrFloat.setHidden(False)
    pt_max_ray_intensity_AttrFloat.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.pt_max_ray_intensity)

    # enable_importance_sampling
    enable_importance_samplingnAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.enable_importance_sampling = enable_importance_samplingnAttr.create("enable_importance_sampling", "enable_importance_sampling", OpenMaya.MFnNumericData.kBoolean, True)
    ms_renderSettings.addAttribute(ms_renderSettings.enable_importance_sampling)

    # advanced ---------------------------------------------------
    # profile export
    profile_export_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.profile_export = profile_export_nAttr.create("profile_export", "profile_export", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.profile_export)

    # autodetect alpha
    autodetect_alpha_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.autodetect_alpha = autodetect_alpha_nAttr.create("autodetect_alpha", "autodetect_alpha", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.autodetect_alpha)

    # force_linear_texture_interpretation
    force_linear_texture_interpretation_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.force_linear_texture_interpretation = force_linear_texture_interpretation_nAttr.create("force_linear_texture_interpretation", "force_linear_texture_interpretation", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.force_linear_texture_interpretation)

    # force_linear_color_interpretation
    force_linear_color_interpretation_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.force_linear_color_interpretation = force_linear_color_interpretation_nAttr.create("force_linear_color_interpretation", "force_linear_color_interpretation", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.force_linear_color_interpretation)

    # tile_width
    tile_width_AttrInt = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.tile_width = tile_width_AttrInt.create("tile_width", "tile_width", OpenMaya.MFnNumericData.kInt, 64)
    tile_width_AttrInt.setHidden(False)
    tile_width_AttrInt.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.tile_width)

    # tile_height
    tile_height_AttrInt = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.tile_height = tile_height_AttrInt.create("tile_height", "tile_height", OpenMaya.MFnNumericData.kInt, 64)
    tile_height_AttrInt.setHidden(False)
    tile_height_AttrInt.setKeyable(True)
    ms_renderSettings.addAttribute(ms_renderSettings.tile_height)

    # use long object names
    use_long_names_nAttr = OpenMaya.MFnNumericAttribute()
    ms_renderSettings.use_long_names = use_long_names_nAttr.create("use_long_names", "use_long_names", OpenMaya.MFnNumericData.kBoolean, False)
    ms_renderSettings.addAttribute(ms_renderSettings.use_long_names)
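
The attribute setup above repeats the same create/setHidden/setKeyable/addAttribute boilerplate for every setting. A minimal sketch of how that pattern could be factored into a helper; the name add_numeric_attr is hypothetical and not part of the original plug-in, and it assumes the same OpenMaya module and ms_renderSettings node class used above (so it only runs inside Maya):

def add_numeric_attr(node_class, name, data_type, default, keyable=False):
    # Create one numeric attribute, store it on the node class and register it,
    # mirroring the per-attribute blocks above.
    attr_fn = OpenMaya.MFnNumericAttribute()
    attr_obj = attr_fn.create(name, name, data_type, default)
    attr_fn.setHidden(False)
    attr_fn.setKeyable(keyable)
    setattr(node_class, name, attr_obj)
    node_class.addAttribute(attr_obj)
    return attr_obj

# Equivalent to the tile_width / tile_height blocks above:
# add_numeric_attr(ms_renderSettings, 'tile_width', OpenMaya.MFnNumericData.kInt, 64, keyable=True)
# add_numeric_attr(ms_renderSettings, 'tile_height', OpenMaya.MFnNumericData.kInt, 64, keyable=True)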

Example 155

Project: rst2pdf-py3-dev Source File: styles.py
    def __init__(self, flist, font_path=None, style_path=None, def_dpi=300):
        log.info('Using stylesheets: %s' % ','.join(flist))
        # find base path
        if hasattr(sys, 'frozen'):
            self.PATH = abspath(dirname(sys.executable))
        else:
            self.PATH = abspath(dirname(__file__))

        # flist is a list of stylesheet filenames.
        # They will be loaded and merged in order,
        # but the two default stylesheets will always
        # be loaded first.
        flist = [join(self.PATH, 'styles', 'styles.style'),
                 join(self.PATH, 'styles', 'default.style')] + flist

        self.def_dpi = def_dpi
        if font_path is None:
            font_path = []
        font_path += ['.', os.path.join(self.PATH, 'fonts')]
        self.FontSearchPath = [os.path.expanduser(p) for p in font_path]

        if style_path is None:
            style_path = []
        style_path += ['.', os.path.join(self.PATH, 'styles'),
                       '~/.rst2pdf/styles']
        self.StyleSearchPath = [os.path.expanduser(p) for p in style_path]
        self.FontSearchPath = list(set(self.FontSearchPath))
        self.StyleSearchPath = list(set(self.StyleSearchPath))

        log.info('FontPath:%s' % self.FontSearchPath)
        log.info('StylePath:%s' % self.StyleSearchPath)

        findfonts.flist = self.FontSearchPath
        # Page width, height
        self.pw = 0
        self.ph = 0

        # Page size [w,h]
        self.ps = None

        # Margins (top,bottom,left,right,gutter)
        self.tm = 0
        self.bm = 0
        self.lm = 0
        self.rm = 0
        self.gm = 0

        # text width
        self.tw = 0

        # Default emsize, later it will be the fontSize of the base style
        self.emsize = 10

        self.languages = []

        ssdata = self.readSheets(flist)

        # Get pageSetup data from all stylesheets in order:
        self.ps = pagesizes.A4
        self.page = {}
        for data, ssname in ssdata:
            page = data.get('pageSetup', {})
            if page:
                self.page.update(page)
                pgs = page.get('size', None)
                if pgs:  # A standard size
                    pgs = pgs.upper()
                    if pgs in pagesizes.__dict__:
                        self.ps = list(pagesizes.__dict__[pgs])
                        self.psname = pgs
                        if 'width' in self.page:
                            del(self.page['width'])
                        if 'height' in self.page:
                            del(self.page['height'])
                    elif pgs.endswith('-LANDSCAPE'):
                        self.psname = pgs.split('-')[0]
                        self.ps = list(pagesizes.landscape(pagesizes.__dict__[self.psname]))
                        if 'width' in self.page:
                            del(self.page['width'])
                        if 'height' in self.page:
                            del(self.page['height'])
                    else:
                        log.critical('Unknown page size %s in stylesheet %s' %
                            (page['size'], ssname))
                        continue
                else:  # A custom size
                    if 'size' in self.page:
                        del(self.page['size'])
                    # The sizes are expressed in some unit.
                    # For example, 2cm is 2 centimeters, and we need
                    # to do 2*cm (cm comes from reportlab.lib.units)
                    if 'width' in page:
                        self.ps[0] = self.adjustUnits(page['width'])
                    if 'height' in page:
                        self.ps[1] = self.adjustUnits(page['height'])
                self.pw, self.ph = self.ps
                if 'margin-left' in page:
                    self.lm = self.adjustUnits(page['margin-left'])
                if 'margin-right' in page:
                    self.rm = self.adjustUnits(page['margin-right'])
                if 'margin-top' in page:
                    self.tm = self.adjustUnits(page['margin-top'])
                if 'margin-bottom' in page:
                    self.bm = self.adjustUnits(page['margin-bottom'])
                if 'margin-gutter' in page:
                    self.gm = self.adjustUnits(page['margin-gutter'])
                if 'spacing-header' in page:
                    self.ts = self.adjustUnits(page['spacing-header'])
                if 'spacing-footer' in page:
                    self.bs = self.adjustUnits(page['spacing-footer'])
                if 'firstTemplate' in page:
                    self.firstTemplate = page['firstTemplate']

                # tw is the text width.
                # We need it to calculate header-footer height
                # and compress literal blocks.
                self.tw = self.pw - self.lm - self.rm - self.gm

        # Get page templates from all stylesheets
        self.pageTemplates = {}
        for data, ssname in ssdata:
            templates = data.get('pageTemplates', {})
            # templates is a dictionary of pageTemplates
            for key in templates:
                template = templates[key]
                # template is a dict.
                # template['frames'] is a list of frames
                if key in self.pageTemplates:
                    self.pageTemplates[key].update(template)
                else:
                    self.pageTemplates[key] = template

        # Get font aliases from all stylesheets in order
        self.fontsAlias = {}
        for data, ssname in ssdata:
            self.fontsAlias.update(data.get('fontsAlias', {}))

        embedded_fontnames = []
        self.embedded = []
        # Embed all fonts indicated in all stylesheets
        for data, ssname in ssdata:
            embedded = data.get('embeddedFonts', [])

            for font in embedded:
                try:
                    # Just a font name, try to embed it
                    if isinstance(font, str):
                        # See if we can find the font
                        fname, pos = findfonts.guessFont(font)
                        if font in embedded_fontnames:
                            pass
                        else:
                            fontList = findfonts.autoEmbed(font)
                            if fontList:
                                embedded_fontnames.append(font)
                        if not fontList:
                            if (fname, pos) in embedded_fontnames:
                                fontList = None
                            else:
                                fontList = findfonts.autoEmbed(fname)
                        if fontList is not None:
                            self.embedded += fontList
                            # Maybe the font we got is not called
                            # the same as the one we gave
                            # so check that out
                            suff = ["", "-Oblique", "-Bold", "-BoldOblique"]
                            if not fontList[0].startswith(font):
                                # We need to create font aliases, and use them
                                for fname, aliasname in zip(
                                        fontList,
                                        [font + suffix for suffix in suff]):
                                    self.fontsAlias[aliasname] = fname
                        continue

                    # Each "font" is a list of four files, which will be
                    # used for regular / bold / italic / bold+italic
                    # versions of the font.
                    # If your font doesn't have one of them, just repeat
                    # the regular font.

                    # Example, using the Tuffy font from
                    # http://tulrich.com/fonts/
                    # "embeddedFonts" : [
                    #                    ["Tuffy.ttf",
                    #                     "Tuffy_Bold.ttf",
                    #                     "Tuffy_Italic.ttf",
                    #                     "Tuffy_Bold_Italic.ttf"]
                    #                   ],

                    # The fonts will be registered with the file name,
                    # minus the extension.

                    if font[0].lower().endswith('.ttf'):  # A True Type font
                        for variant in font:
                            location = self.findFont(variant)
                            pdfmetrics.registerFont(
                                TTFont(str(variant.split('.')[0]), location))
                            log.info('Registering font: %s from %s' %
                                     (str(variant.split('.')[0]), location))
                            self.embedded.append(str(variant.split('.')[0]))

                        # And map them all together
                        regular, bold, italic, bolditalic = [
                            variant.split('.')[0] for variant in font]
                        addMapping(regular, 0, 0, regular)
                        addMapping(regular, 0, 1, italic)
                        addMapping(regular, 1, 0, bold)
                        addMapping(regular, 1, 1, bolditalic)
                    else:  # A Type 1 font
                        # For type 1 fonts we require
                        # [FontName,regular,italic,bold,bolditalic]
                        # where each variant is a (pfbfile,afmfile) pair.
                        # For example, for the URW palladio from TeX:
                        # ["Palatino",("uplr8a.pfb","uplr8a.afm"),
                        #             ("uplri8a.pfb","uplri8a.afm"),
                        #             ("uplb8a.pfb","uplb8a.afm"),
                        #             ("uplbi8a.pfb","uplbi8a.afm")]
                        regular = pdfmetrics.EmbeddedType1Face(*font[1])
                        italic = pdfmetrics.EmbeddedType1Face(*font[2])
                        bold = pdfmetrics.EmbeddedType1Face(*font[3])
                        bolditalic = pdfmetrics.EmbeddedType1Face(*font[4])

                except Exception as e:
                    try:
                        if isinstance(font, list):
                            fname = font[0]
                        else:
                            fname = font
                        log.error("Error processing font %s: %s",
                                  os.path.splitext(fname)[0], str(e))
                        log.error("Registering %s as Helvetica alias", fname)
                        self.fontsAlias[fname] = 'Helvetica'
                    except Exception as e:
                        log.critical("Error processing font %s: %s",
                                     fname, str(e))
                        continue

        # Go through all styles in all stylesheets and find all fontNames.
        # Then decide what to do with them
        for data, ssname in ssdata:
            for [skey, style] in self.stylepairs(data):
                for key in style:
                    if key == 'fontName' or key.endswith('FontName'):
                        # It's an alias, replace it
                        if style[key] in self.fontsAlias:
                            style[key] = self.fontsAlias[style[key]]
                        # Embedded already, nothing to do
                        if style[key] in self.embedded:
                            continue
                        # Standard font, nothing to do
                        if style[key] in (
                            "Courier",
                            "Courier-Bold",
                            "Courier-BoldOblique",
                            "Courier-Oblique",
                            "Helvetica",
                            "Helvetica-Bold",
                            "Helvetica-BoldOblique",
                            "Helvetica-Oblique",
                            "Symbol",
                            "Times-Bold",
                            "Times-BoldItalic",
                            "Times-Italic",
                            "Times-Roman",
                            "ZapfDingbats"
                        ):
                            continue
                        # Now we need to do something
                        # See if we can find the font
                        fname, pos = findfonts.guessFont(style[key])

                        if style[key] in embedded_fontnames:
                            pass
                        else:
                            fontList = findfonts.autoEmbed(style[key])
                            if fontList:
                                embedded_fontnames.append(style[key])
                        if not fontList:
                            if (fname, pos) in embedded_fontnames:
                                fontList = None
                            else:
                                fontList = findfonts.autoEmbed(fname)
                            if fontList:
                                embedded_fontnames.append((fname, pos))
                        if fontList:
                            self.embedded += fontList
                            # Maybe the font we got is not called
                            # the same as the one we gave so check that out
                            suff = ["", "-Bold", "-Oblique", "-BoldOblique"]
                            if not fontList[0].startswith(style[key]):
                                # We need to create font aliases, and use them
                                basefname = style[key].split('-')[0]
                                for fname, aliasname in zip(
                                    fontList,
                                    [basefname + suffix for suffix in suff]
                                ):
                                    self.fontsAlias[aliasname] = fname
                                style[key] = self.fontsAlias[basefname +
                                                             suff[pos]]
                        else:
                            log.error('Unknown font: "%s",'
                                      "replacing with Helvetica", style[key])
                            style[key] = "Helvetica"

        # log.info('FontList: %s'%self.embedded)
        # log.info('FontAlias: %s'%self.fontsAlias)
        # Get styles from all stylesheets in order
        self.stylesheet = {}
        self.styles = []
        self.linkColor = 'navy'
        # FIXME: linkColor should probably not be a global
        #        style, and tocColor should probably not
        #        be a special case, but for now I'm going
        #        with the flow...
        self.tocColor = None
        for data, ssname in ssdata:
            self.linkColor = data.get('linkColor') or self.linkColor
            self.tocColor = data.get('tocColor') or self.tocColor
            for [skey, style] in self.stylepairs(data):
                sdict = {}
                # FIXME: this is done completely backwards
                for key in style:
                    # Handle color references by name
                    if key == 'color' or key.endswith('Color') and style[key]:
                        style[key] = formatColor(style[key])

                    # Yet another workaround for the unicode bug in
                    # reportlab's toColor
                    elif key == 'commands':
                        style[key] = validateCommands(style[key])
                        # for command in style[key]:
                            # c=command[0].upper()
                            # if c=='ROWBACKGROUNDS':
                                # command[3]=[str(c) for c in command[3]]
                            # elif c in ['BOX','INNERGRID'] or c.startswith('LINE'):
                                # command[4]=str(command[4])

                    # Handle alignment constants
                    elif key == 'alignment':
                        style[key] = dict(TA_LEFT=0,
                                          LEFT=0,
                                          TA_CENTER=1,
                                          CENTER=1,
                                          TA_CENTRE=1,
                                          CENTRE=1,
                                          TA_RIGHT=2,
                                          RIGHT=2,
                                          TA_JUSTIFY=4,
                                          JUSTIFY=4,
                                          DECIMAL=8,)[style[key].upper()]

                    elif key == 'language':
                        if not style[key] in self.languages:
                            self.languages.append(style[key])

                    # Make keys str instead of unicode (required by reportlab)
                    sdict[str(key)] = style[key]
                    sdict['name'] = skey
                # If the style already exists, update it
                if skey in self.stylesheet:
                    self.stylesheet[skey].update(sdict)
                else:  # New style
                    self.stylesheet[skey] = sdict
                    self.styles.append(sdict)

        # If the stylesheet has a style name docutils won't reach
        # make a copy with a sanitized name.
        # This may make name collisions possible but that should be
        # rare (who would have custom_name and custom-name in the
        # same stylesheet? ;-)
        # Issue 339

        styles2 = []
        for s in self.styles:
            if not re.match("^[a-z](-?[a-z0-9]+)*$", s['name']):
                s2 = copy.copy(s)
                s2['name'] = docutils.nodes.make_id(s['name'])
                log.warning(('%s is an invalid docutils class name, adding ' +
                             'alias %s') % (s['name'], s2['name']))
                styles2.append(s2)
        self.styles.extend(styles2)

        # And create the reportlab stylesheet
        self.StyleSheet = StyleSheet1()
        # Patch to make the code compatible with reportlab from SVN 2.4+ and
        # 2.4
        if not hasattr(self.StyleSheet, 'has_key'):
            self.StyleSheet.__class__.has_key = lambda s, k: k in s
        for s in self.styles:
            if 'parent' in s:
                if s['parent'] is None:
                    if s['name'] != 'base':
                        s['parent'] = self.StyleSheet['base']
                    else:
                        del(s['parent'])
                else:
                    s['parent'] = self.StyleSheet[s['parent']]
            else:
                if s['name'] != 'base':
                    s['parent'] = self.StyleSheet['base']

            # If the style has no bulletFontName but it has a fontName, set it
            if ('bulletFontName' not in s) and ('fontName' in s):
                s['bulletFontName'] = s['fontName']

            hasFS = True
            # Adjust fontsize units
            if 'fontSize' not in s:
                s['fontSize'] = s['parent'].fontSize
                s['trueFontSize'] = None
                hasFS = False
            elif 'parent' in s:
                # This means you can set the fontSize to
                # "2cm" or to "150%" which will be calculated
                # relative to the parent style
                s['fontSize'] = self.adjustUnits(s['fontSize'],
                                                 s['parent'].fontSize)
                s['trueFontSize'] = s['fontSize']
            else:
                # If s has no parent, it's base, which has
                # an explicit point size by default and %
                # makes no sense, but guess it as % of 10pt
                s['fontSize'] = self.adjustUnits(s['fontSize'], 10)

            # If the leading is not set, but the size is, set it
            if 'leading' not in s and hasFS:
                s['leading'] = 1.2 * s['fontSize']

            # If the bullet font size is not set, set it as fontSize
            if ('bulletFontSize' not in s) and ('fontSize' in s):
                s['bulletFontSize'] = s['fontSize']

            # If the borderPadding is a list and wordaxe <=0.3.2,
            # convert it to an integer. Workaround for Issue
            if (
                'borderPadding' in s and
                HAS_WORDAXE and
                wordaxe_version <= 'wordaxe 0.3.2' and
                isinstance(s['borderPadding'], list)
            ):
                log.warning(('Using a borderPadding list in style %s with ' +
                             'wordaxe <= 0.3.2. That is  not supported, so ' +
                             'it will probably look wrong') % s['name'])
                s['borderPadding'] = s['borderPadding'][0]

            self.StyleSheet.add(ParagraphStyle(**s))

        self.emsize = self['base'].fontSize
        # Make stdFont the basefont, for Issue 65
        reportlab.rl_config.canvas_basefontname = self['base'].fontName
        # Make stdFont the default font for table cell styles (Issue 65)
        reportlab.platypus.tables.CellStyle.fontname = self['base'].fontName
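
In the constructor above, os.path.join anchors the font and style search paths under self.PATH, os.path.expanduser expands the '~/.rst2pdf/styles' entry, and list(set(...)) de-duplicates the result (at the cost of losing the original ordering). A small standalone sketch of that pattern; build_search_path and the example directories are hypothetical and not part of rst2pdf:

import os

def build_search_path(base_dir, subdir, extra=None):
    # Current directory, a directory shipped with the package, and a
    # per-user directory, plus any caller-supplied entries.
    paths = list(extra or [])
    paths += ['.', os.path.join(base_dir, subdir),
              os.path.join('~', '.rst2pdf', subdir)]
    expanded = [os.path.expanduser(p) for p in paths]
    # dict.fromkeys de-duplicates while preserving order, unlike list(set(...)).
    return list(dict.fromkeys(expanded))

print(build_search_path('/opt/myapp', 'styles', extra=['~/custom-styles']))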

Example 156

Project: Arelle Source File: TableStructure.py
def evaluateTableIndex(modelXbrl, lang=None):
    usgaapRoleDefinitionPattern = re.compile(r"([0-9]+) - (Statement|Disclosure|Schedule|Document) - (.+)")
    ifrsRoleDefinitionPattern = re.compile(r"\[([0-9]+)\] (.+)")
    # build EFM rendering-compatible index
    definitionElrs = dict((modelXbrl.roleTypeDefinition(roleURI, lang), roleType)
                          for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris
                          for roleType in modelXbrl.roleTypes.get(roleURI,()))
    sortedRoleTypes = sorted(definitionElrs.items(), key=lambda item: item[0])
    disclosureSystem = modelXbrl.modelManager.disclosureSystem
    _usgaapStyleELRs = _isJpFsa = _ifrsStyleELRs = False
    if disclosureSystem.validationType == "EFM":
        _usgaapStyleELRs = True
    elif "jp-fsa" in modelXbrl.modelManager.disclosureSystem.names:
        _isJpFsa = True
    else:
        # attempt to determine type
        if any(usgaapRoleDefinitionPattern.match(r[0]) for r in sortedRoleTypes if r[0]):
            _usgaapStyleELRs = True
        elif any(ifrsRoleDefinitionPattern.match(r[0]) for r in sortedRoleTypes if r[0]):
            _ifrsStyleELRs = True
    if _usgaapStyleELRs:
        COVER    = "1Cover"
        STMTS    = "2Financial Statements"
        NOTES    = "3Notes to Financial Statements"
        POLICIES = "4Accounting Policies"
        TABLES   = "5Notes Tables"
        DETAILS  = "6Notes Details"
        UNCATEG  = "7Uncategorized"
        isRR = any(ns.startswith("http://xbrl.sec.gov/rr/") for ns in modelXbrl.namespaceDocs.keys() if ns)
        tableGroup = None
        firstTableLinkroleURI = None
        firstDocumentLinkroleURI = None
        for roleDefinition, roleType in sortedRoleTypes:
            roleType._tableChildren = []
            match = usgaapRoleDefinitionPattern.match(roleDefinition) if roleDefinition else None
            if not match: 
                roleType._tableIndex = (UNCATEG, "", roleType.roleURI)
                continue
            seq, tblType, tblName = match.groups()
            if isRR:
                tableGroup = COVER
            elif not tableGroup:
                tableGroup = ("Paren" in tblName and COVER or tblType == "Statement" and STMTS or
                              "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or COVER)
            elif tableGroup == COVER:
                tableGroup = (tblType == "Statement" and STMTS or "Paren" in tblName and COVER or
                              "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or NOTES)
            elif tableGroup == STMTS:
                tableGroup = ((tblType == "Statement" or "Paren" in tblName) and STMTS or
                              "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or NOTES)
            elif tableGroup == NOTES:
                tableGroup = ("(Polic" in tblName and POLICIES or "(Table" in tblName and TABLES or 
                              "(Detail" in tblName and DETAILS or tblType == "Disclosure" and NOTES or UNCATEG)
            elif tableGroup == POLICIES:
                tableGroup = ("(Table" in tblName and TABLES or "(Detail" in tblName and DETAILS or 
                              ("Paren" in tblName or "(Polic" in tblName) and POLICIES or UNCATEG)
            elif tableGroup == TABLES:
                tableGroup = ("(Detail" in tblName and DETAILS or 
                              ("Paren" in tblName or "(Table" in tblName) and TABLES or UNCATEG)
            elif tableGroup == DETAILS:
                tableGroup = (("Paren" in tblName or "(Detail" in tblName) and DETAILS or UNCATEG)
            else:
                tableGroup = UNCATEG
            if firstTableLinkroleURI is None and tableGroup == COVER:
                firstTableLinkroleURI = roleType.roleURI
            if tblType == "Docuement" and not firstDocuementLinkroleURI:
                firstDocuementLinkroleURI = roleType.roleURI
            roleType._tableIndex = (tableGroup, seq, tblName)

        # flow allocate facts to roles (SEC presentation groups)
        if not modelXbrl.qnameDimensionDefaults: # may not have run validation yet
            from arelle import ValidateXbrlDimensions
            ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
        reportedFacts = set() # facts which were shown in a higher-numbered ELR table
        factsByQname = modelXbrl.factsByQname
        reportingPeriods = set()
        nextEnd = None
        deiFact = {}
        for conceptName in ("DocuementPeriodEndDate", "DocuementType", "CurrentFiscalPeriodEndDate"):
            for concept in modelXbrl.nameConcepts[conceptName]:
                for fact in factsByQname[concept.qname]:
                    deiFact[conceptName] = fact
                    if fact.context is not None:
                        reportingPeriods.add((None, fact.context.endDatetime)) # for instant
                        reportingPeriods.add((fact.context.startDatetime, fact.context.endDatetime)) # for startEnd
                        nextEnd = fact.context.startDatetime
                        duration = (fact.context.endDatetime - fact.context.startDatetime).days + 1
                        break
        if "DocuementType" in deiFact:
            fact = deiFact["DocuementType"]
            if fact.xValid >= VALID and "-Q" in fact.xValue or "": # fact may be invalid
                # need quarterly and yr to date durations
                endDatetime = fact.context.endDatetime
                # if within 2 days of end of month use last day of month
                endDatetimeMonth = endDatetime.month
                if (endDatetime + timedelta(2)).month != endDatetimeMonth:
                    # near end of month
                    endOfMonth = True
                    while endDatetime.month == endDatetimeMonth:
                        endDatetime += timedelta(1) # go forward to next month
                else:
                    endOfMonth = False
                startYr = endDatetime.year
                startMo = endDatetime.month - 3
                if startMo <= 0:
                    startMo += 12
                    startYr -= 1
                startDatetime = datetime(startYr, startMo, endDatetime.day, endDatetime.hour, endDatetime.minute, endDatetime.second)
                if endOfMonth:
                    startDatetime -= timedelta(1)
                    endDatetime -= timedelta(1)
                reportingPeriods.add((startDatetime, endDatetime))
                duration = 91
        # find preceding compatible default context periods
        while (nextEnd is not None):
            thisEnd = nextEnd
            prevMaxStart = thisEnd - timedelta(duration * .9)
            prevMinStart = thisEnd - timedelta(duration * 1.1)
            nextEnd = None
            for cntx in modelXbrl.contexts.values():
                if (cntx.isStartEndPeriod and not cntx.qnameDims and thisEnd == cntx.endDatetime and
                    prevMinStart <= cntx.startDatetime <= prevMaxStart):
                    reportingPeriods.add((None, cntx.endDatetime))
                    reportingPeriods.add((cntx.startDatetime, cntx.endDatetime))
                    nextEnd = cntx.startDatetime
                    break
                elif (cntx.isInstantPeriod and not cntx.qnameDims and thisEnd == cntx.endDatetime):
                    reportingPeriods.add((None, cntx.endDatetime))
        stmtReportingPeriods = set(reportingPeriods)       

        sortedRoleTypes.reverse() # now in descending order
        for i, roleTypes in enumerate(sortedRoleTypes):
            roleDefinition, roleType = roleTypes
            # find defined non-default axes in pre hierarchy for table
            tableFacts = set()
            tableGroup, tableSeq, tableName = roleType._tableIndex
            roleURIdims, priItemQNames = EFMlinkRoleURIstructure(modelXbrl, roleType.roleURI)
            for priItemQName in priItemQNames:
                for fact in factsByQname[priItemQName]:
                    cntx = fact.context
                    # non-explicit dims must be default
                    if (cntx is not None and
                        all(dimQn in modelXbrl.qnameDimensionDefaults
                            for dimQn in (roleURIdims.keys() - cntx.qnameDims.keys())) and
                        all(mdlDim.memberQname in roleURIdims[dimQn]
                            for dimQn, mdlDim in cntx.qnameDims.items()
                            if dimQn in roleURIdims)):
                        # the flow-up part, drop
                        cntxStartDatetime = cntx.startDatetime
                        cntxEndDatetime = cntx.endDatetime
                        if (tableGroup != STMTS or
                            (cntxStartDatetime, cntxEndDatetime) in stmtReportingPeriods and
                             (fact not in reportedFacts or
                              all(dimQn not in cntx.qnameDims # unspecified dims are all defaulted if reported elsewhere
                                  for dimQn in (cntx.qnameDims.keys() - roleURIdims.keys())))):
                            tableFacts.add(fact)
                            reportedFacts.add(fact)
            roleType._tableFacts = tableFacts
            
            # find parent if any
            closestParentType = None
            closestParentMatchLength = 0
            for _parentRoleDefinition, parentRoleType in sortedRoleTypes[i+1:]:
                matchLen = parentNameMatchLen(tableName, parentRoleType)
                if matchLen > closestParentMatchLength:
                    closestParentMatchLength = matchLen
                    closestParentType = parentRoleType
            if closestParentType is not None:
                closestParentType._tableChildren.insert(0, roleType)
                
            # remove lesser-matched children if there was a parent match
            unmatchedChildRoles = set()
            longestChildMatchLen = 0
            numChildren = 0
            for childRoleType in roleType._tableChildren:
                matchLen = parentNameMatchLen(tableName, childRoleType)
                if matchLen < closestParentMatchLength:
                    unmatchedChildRoles.add(childRoleType)
                elif matchLen > longestChildMatchLen:
                    longestChildMatchLen = matchLen
                    numChildren += 1
            if numChildren > 1: 
                # remove children that don't have the full match pattern length to parent
                for childRoleType in roleType._tableChildren:
                    if (childRoleType not in unmatchedChildRoles and 
                        parentNameMatchLen(tableName, childRoleType) < longestChildMatchLen):
                        unmatchedChildRoles.add(childRoleType)

            for unmatchedChildRole in unmatchedChildRoles:
                roleType._tableChildren.remove(unmatchedChildRole)

            for childRoleType in roleType._tableChildren:
                childRoleType._tableParent = roleType
                
            unmatchedChildRoles = None # dereference
        
        global UGT_TOPICS
        if UGT_TOPICS is None:
            try:
                from arelle import FileSource
                fh = FileSource.openFileStream(modelXbrl.modelManager.cntlr, 
                                               os.path.join(modelXbrl.modelManager.cntlr.configDir, "ugt-topics.zip/ugt-topics.json"),
                                               'r', 'utf-8')
                UGT_TOPICS = json.load(fh)
                fh.close()
                for topic in UGT_TOPICS:
                    topic[6] = set(topic[6]) # change concept abstracts list into concept abstracts set
                    topic[7] = set(topic[7]) # change concept text blocks list into concept text blocks set
                    topic[8] = set(topic[8]) # change concept names list into concept names set
            except Exception as ex:
                UGT_TOPICS = None

        if UGT_TOPICS is not None:
            def roleUgtConcepts(roleType):
                roleConcepts = set()
                for rel in modelXbrl.relationshipSet(XbrlConst.parentChild, roleType.roleURI).modelRelationships:
                    if isinstance(rel.toModelObject, ModelConcept):
                        roleConcepts.add(rel.toModelObject.name)
                    if isinstance(rel.fromModelObject, ModelConcept):
                        roleConcepts.add(rel.fromModelObject.name)
                if hasattr(roleType, "_tableChildren"):
                    for _tableChild in roleType._tableChildren:
                        roleConcepts |= roleUgtConcepts(_tableChild)
                return roleConcepts
            topicMatches = {} # topicNum: (best score, roleType)
    
            for roleDefinition, roleType in sortedRoleTypes:
                roleTopicType = 'S' if roleDefinition.startswith('S') else 'D'
                if getattr(roleType, "_tableParent", None) is None:                
                    # rooted tables in reverse order
                    concepts = roleUgtConcepts(roleType)
                    for i, ugtTopic in enumerate(UGT_TOPICS):
                        if ugtTopic[0] == roleTopicType:
                            countAbstracts = len(concepts & ugtTopic[6])
                            countTextBlocks = len(concepts & ugtTopic[7])
                            countLineItems = len(concepts & ugtTopic[8])
                            if countAbstracts or countTextBlocks or countLineItems:
                                _score = (10 * countAbstracts +
                                          1000 * countTextBlocks +
                                          countLineItems / len(concepts))
                                if i not in topicMatches or _score > topicMatches[i][0]:
                                    topicMatches[i] = (_score, roleType)
            for topicNum, scoredRoleType in topicMatches.items():
                _score, roleType = scoredRoleType
                if _score > getattr(roleType, "_tableTopicScore", 0):
                    ugtTopic = UGT_TOPICS[topicNum]
                    roleType._tableTopicScore = _score
                    roleType._tableTopicType = ugtTopic[0]
                    roleType._tableTopicName = ugtTopic[3]
                    roleType._tableTopicCode = ugtTopic[4]
                    # print ("Match score {:.2f} topic {} preGrp {}".format(_score, ugtTopic[3], roleType.definition))
        return (firstTableLinkroleURI or firstDocumentLinkroleURI), None # no restriction on contents linkroles
    elif _isJpFsa:
        # find ELR with only iod:identifierItem subs group concepts
        roleElrs = dict((roleURI, roleType)
                        for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris
                        for roleType in modelXbrl.roleTypes.get(roleURI,()))
        roleIdentifierItems = {}
        for roleURI, roleType in roleElrs.items():
            roleType._tableChildren = []
            relSet = modelXbrl.relationshipSet(XbrlConst.parentChild, roleURI)
            for rootConcept in relSet.rootConcepts:
                if rootConcept.substitutionGroupQname and rootConcept.substitutionGroupQname.localName == "identifierItem":
                    roleIdentifierItems[rootConcept] = roleType
        linkroleUri = None
        for roleURI, roleType in roleElrs.items():
            relSet = modelXbrl.relationshipSet(XbrlConst.parentChild, roleURI)
            def addRoleIdentifiers(fromConcept, parentRoleType, visited):
                for rel in relSet.fromModelObject(fromConcept):
                    _fromConcept = rel.fromModelObject
                    _toConcept = rel.toModelObject
                    if isinstance(_fromConcept, ModelConcept) and isinstance(_toConcept, ModelConcept):
                        _fromSubQn = _fromConcept.substitutionGroupQname
                        _toSubQn = _toConcept.substitutionGroupQname
                        if ((parentRoleType is not None or
                             (_fromSubQn and _fromSubQn.localName == "identifierItem" and _fromConcept in roleIdentifierItems )) and
                            _toSubQn and _toSubQn.localName == "identifierItem" and
                            _toConcept in roleIdentifierItems):
                            if parentRoleType is None:
                                parentRoleType = roleIdentifierItems[_fromConcept]
                            _toRoleType = roleIdentifierItems[_toConcept]
                            if _toConcept not in parentRoleType._tableChildren:
                                parentRoleType._tableChildren.append(_toRoleType)
                            if _toConcept not in visited:
                                visited.add(_toConcept)
                                addRoleIdentifiers(_toConcept, _toRoleType, visited)
                                visited.discard(_toConcept)
                        elif _toConcept not in visited:
                            visited.add(_toConcept)
                            addRoleIdentifiers(_toConcept, parentRoleType, visited)
                            visited.discard(_toConcept)
            for rootConcept in relSet.rootConcepts:
                addRoleIdentifiers(rootConcept, None, set())
                if not linkroleUri and len(roleType._tableChildren) > 0:
                    linkroleUri = roleURI
        return linkroleUri, linkroleUri  # only show linkroleUri in index table   
    elif _ifrsStyleELRs: 
        for roleType in definitionElrs.values():
            roleType._tableChildren = []
        return sortedRoleTypes[0][1], None # first link role in order             
    return None, None
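
The os.path.join call above builds a path that points into a zip archive (the cntlr configDir joined with "ugt-topics.zip/ugt-topics.json") and hands it to Arelle's FileSource.openFileStream, which reads the member out of the archive. A stdlib-only sketch of the same idea using zipfile directly; load_json_from_zip and the example paths are hypothetical, and Arelle's FileSource does considerably more (caching, web retrieval, encoding handling):

import json
import os
import zipfile

def load_json_from_zip(config_dir, zip_name, member):
    # Join the configuration directory and the archive name, then read
    # a single JSON member from inside the archive.
    zip_path = os.path.join(config_dir, zip_name)
    with zipfile.ZipFile(zip_path) as zf:
        with zf.open(member) as fh:
            return json.load(fh)

# topics = load_json_from_zip('/path/to/config', 'ugt-topics.zip', 'ugt-topics.json')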

Example 157

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: setup.py
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration, dot_join
    from numpy.distutils.system_info import get_info

    config = Configuration('core', parent_package, top_path)
    local_dir = config.local_path
    codegen_dir = join(local_dir, 'code_generators')

    if is_released(config):
        warnings.simplefilter('error', MismatchCAPIWarning)

    # Check whether we have a mismatch between the set C API VERSION and the
    # actual C API VERSION
    check_api_version(C_API_VERSION, codegen_dir)

    generate_umath_py = join(codegen_dir, 'generate_umath.py')
    n = dot_join(config.name, 'generate_umath')
    generate_umath = imp.load_module('_'.join(n.split('.')),
                                     open(generate_umath_py, 'U'), generate_umath_py,
                                     ('.py', 'U', 1))

    header_dir = 'include/numpy'  # this is relative to config.path_in_package

    cocache = CallOnceOnly()

    def generate_config_h(ext, build_dir):
        target = join(build_dir, header_dir, 'config.h')
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)

        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)

            # Check math library and C99 math funcs availability
            mathlibs = check_mathlib(config_cmd)
            moredefs.append(('MATHLIB', ','.join(mathlibs)))

            check_math_capabilities(config_cmd, moredefs, mathlibs)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])

            # Signal check
            if is_npy_no_signal():
                moredefs.append('__NPY_PRIVATE_NO_SIGNAL')

            # Windows checks
            if sys.platform == 'win32' or os.name == 'nt':
                win32_checks(moredefs)

            # C99 restrict keyword
            moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))

            # Inline check
            inline = config_cmd.check_inline()

            # Check whether we need our own wide character support
            if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
                PYTHON_HAS_UNICODE_WIDE = True
            else:
                PYTHON_HAS_UNICODE_WIDE = False

            if ENABLE_SEPARATE_COMPILATION:
                moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Get long double representation
            if sys.platform != 'darwin':
                rep = check_long_double_representation(config_cmd)
                if rep in ['INTEL_EXTENDED_12_BYTES_LE',
                           'INTEL_EXTENDED_16_BYTES_LE',
                           'MOTOROLA_EXTENDED_12_BYTES_BE',
                           'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
                           'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
                           'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
                    moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
                else:
                    raise ValueError("Unrecognized long double format: %s" % rep)

            # Py3K check
            if sys.version_info[0] == 3:
                moredefs.append(('NPY_PY3K', 1))

            # Generate the config.h file from moredefs
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # define inline to our keyword, or nothing
            target_f.write('#ifndef __cplusplus\n')
            if inline == 'inline':
                target_f.write('/* #undef inline */\n')
            else:
                target_f.write('#define inline %s\n' % inline)
            target_f.write('#endif\n')

            # add the guard to make sure config.h is never included directly,
            # but always through npy_config.h
            target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")

            target_f.close()
            print('File:', target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        else:
            mathlibs = []
            target_f = open(target)
            for line in target_f:
                s = '#define MATHLIB'
                if line.startswith(s):
                    value = line[len(s):].strip()
                    if value:
                        mathlibs.extend(value.split(','))
            target_f.close()

        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attributes (and none is
        # needed).
        if hasattr(ext, 'libraries'):
            ext.libraries.extend(mathlibs)

        incl_dir = os.path.dirname(target)
        if incl_dir not in config.numpy_include_dirs:
            config.numpy_include_dirs.append(incl_dir)

        return target

    def generate_numpyconfig_h(ext, build_dir):
        """Depends on config.h: generate_config_h has to be called before !"""
        # put private include directory in build_dir on search path
        # allows using code generation in headers
        config.add_include_dirs(join(build_dir, "src", "private"))

        target = join(build_dir, header_dir, '_numpyconfig.h')
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)

            if is_npy_no_signal():
                moredefs.append(('NPY_NO_SIGNAL', 1))

            if is_npy_no_smp():
                moredefs.append(('NPY_NO_SMP', 1))
            else:
                moredefs.append(('NPY_NO_SMP', 0))

            mathlibs = check_mathlib(config_cmd)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])

            if ENABLE_SEPARATE_COMPILATION:
                moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Check whether we can use inttypes (C99) formats
            if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
                moredefs.append(('NPY_USE_C99_FORMATS', 1))

            # visibility check
            hidden_visibility = visibility_define(config_cmd)
            moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))

            # Add the C API/ABI versions
            moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
            moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))

            # Add moredefs to header
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # Define __STDC_FORMAT_MACROS
            target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
            target_f.close()

            # Dump the numpyconfig.h header to stdout
            print('File: %s' % target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        config.add_data_files((header_dir, target))
        return target

    def generate_api_func(module_name):
        def generate_api(ext, build_dir):
            script = join(codegen_dir, module_name + '.py')
            sys.path.insert(0, codegen_dir)
            try:
                m = __import__(module_name)
                log.info('executing %s', script)
                h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
            finally:
                del sys.path[0]
            config.add_data_files((header_dir, h_file),
                                  (header_dir, doc_file))
            return (h_file,)
        return generate_api

    generate_numpy_api = generate_api_func('generate_numpy_api')
    generate_ufunc_api = generate_api_func('generate_ufunc_api')

    config.add_include_dirs(join(local_dir, "src", "private"))
    config.add_include_dirs(join(local_dir, "src"))
    config.add_include_dirs(join(local_dir))

    config.add_data_files('include/numpy/*.h')
    config.add_include_dirs(join('src', 'npymath'))
    config.add_include_dirs(join('src', 'multiarray'))
    config.add_include_dirs(join('src', 'umath'))
    config.add_include_dirs(join('src', 'npysort'))

    config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
    config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
    config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
    config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])

    config.numpy_include_dirs.extend(config.paths('include'))

    deps = [join('src', 'npymath', '_signbit.c'),
            join('include', 'numpy', '*object.h'),
            join(codegen_dir, 'genapi.py'),
            ]

    #######################################################################
    #                            dummy module                             #
    #######################################################################

    # npymath needs the config.h and numpyconfig.h files to be generated, but
    # build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because clib are generated before extensions, we have to
    # explicitly add an extension which has generate_config_h and
    # generate_numpyconfig_h as sources *before* adding npymath.

    config.add_extension('_dummy',
                         sources=[join('src', 'dummymodule.c'),
                                  generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api]
                         )

    #######################################################################
    #                          npymath library                            #
    #######################################################################

    subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])

    def get_mathlib_info(*args):
        # Another ugly hack: the mathlib info is known once build_src is run,
        # but we cannot use add_installed_pkg_config here either, so we only
        # update the substitution dictionary during npymath build
        config_cmd = config.get_config_cmd()

        # Check that the toolchain works, to fail early if it doesn't
        # (avoid late errors with MATHLIB which are confusing if the
        # compiler does not work).
        st = config_cmd.try_link('int main(void) { return 0;}')
        if not st:
            raise RuntimeError("Broken toolchain: cannot link a simple C program")
        mlibs = check_mathlib(config_cmd)

        posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
        msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
        subst_dict["posix_mathlib"] = posix_mlib
        subst_dict["msvc_mathlib"] = msvc_mlib

    npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
                       join('src', 'npymath', 'ieee754.c.src'),
                       join('src', 'npymath', 'npy_math_complex.c.src'),
                       join('src', 'npymath', 'halffloat.c')
                       ]
    config.add_installed_library('npymath',
            sources=npymath_sources + [get_mathlib_info],
            install_dir='lib')
    config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
            subst_dict)
    config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
            subst_dict)

    #######################################################################
    #                         npysort library                             #
    #######################################################################

    # This library is created for the build but it is not installed
    npysort_sources = [join('src', 'npysort', 'quicksort.c.src'),
                       join('src', 'npysort', 'mergesort.c.src'),
                       join('src', 'npysort', 'heapsort.c.src'),
                       join('src', 'private', 'npy_partition.h.src'),
                       join('src', 'npysort', 'selection.c.src'),
                       join('src', 'private', 'npy_binsearch.h.src'),
                       join('src', 'npysort', 'binsearch.c.src'),
                       ]
    config.add_library('npysort',
                       sources=npysort_sources,
                       include_dirs=[])

    #######################################################################
    #                        multiarray module                            #
    #######################################################################

    # Multiarray version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and as such not in the src
    # argument of build_ext command
    def generate_multiarray_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'multiarray')
        sources = [join(local_dir, subpath, 'scalartypes.c.src'),
                   join(local_dir, subpath, 'arraytypes.c.src'),
                   join(local_dir, subpath, 'nditer_templ.c.src'),
                   join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
                   join(local_dir, subpath, 'einsum.c.src'),
                   join(local_dir, 'src', 'private', 'templ_common.h.src')
                   ]

        # numpy.distutils generates .c from .c.src in weird directories; we have
        # to add them there as they depend on the build_dir
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    multiarray_deps = [
            join('src', 'multiarray', 'arrayobject.h'),
            join('src', 'multiarray', 'arraytypes.h'),
            join('src', 'multiarray', 'array_assign.h'),
            join('src', 'multiarray', 'buffer.h'),
            join('src', 'multiarray', 'calculation.h'),
            join('src', 'multiarray', 'cblasfuncs.h'),
            join('src', 'multiarray', 'common.h'),
            join('src', 'multiarray', 'convert_datatype.h'),
            join('src', 'multiarray', 'convert.h'),
            join('src', 'multiarray', 'conversion_utils.h'),
            join('src', 'multiarray', 'ctors.h'),
            join('src', 'multiarray', 'descriptor.h'),
            join('src', 'multiarray', 'getset.h'),
            join('src', 'multiarray', 'hashdescr.h'),
            join('src', 'multiarray', 'iterators.h'),
            join('src', 'multiarray', 'mapping.h'),
            join('src', 'multiarray', 'methods.h'),
            join('src', 'multiarray', 'multiarraymodule.h'),
            join('src', 'multiarray', 'nditer_impl.h'),
            join('src', 'multiarray', 'numpymemoryview.h'),
            join('src', 'multiarray', 'number.h'),
            join('src', 'multiarray', 'numpyos.h'),
            join('src', 'multiarray', 'refcount.h'),
            join('src', 'multiarray', 'scalartypes.h'),
            join('src', 'multiarray', 'sequence.h'),
            join('src', 'multiarray', 'shape.h'),
            join('src', 'multiarray', 'ucsnarrow.h'),
            join('src', 'multiarray', 'usertypes.h'),
            join('src', 'multiarray', 'vdot.h'),
            join('src', 'private', 'npy_config.h'),
            join('src', 'private', 'templ_common.h.src'),
            join('src', 'private', 'lowlevel_strided_loops.h'),
            join('include', 'numpy', 'arrayobject.h'),
            join('include', 'numpy', '_neighborhood_iterator_imp.h'),
            join('include', 'numpy', 'npy_endian.h'),
            join('include', 'numpy', 'arrayscalars.h'),
            join('include', 'numpy', 'noprefix.h'),
            join('include', 'numpy', 'npy_interrupt.h'),
            join('include', 'numpy', 'npy_3kcompat.h'),
            join('include', 'numpy', 'npy_math.h'),
            join('include', 'numpy', 'halffloat.h'),
            join('include', 'numpy', 'npy_common.h'),
            join('include', 'numpy', 'npy_os.h'),
            join('include', 'numpy', 'utils.h'),
            join('include', 'numpy', 'ndarrayobject.h'),
            join('include', 'numpy', 'npy_cpu.h'),
            join('include', 'numpy', 'numpyconfig.h'),
            join('include', 'numpy', 'ndarraytypes.h'),
            join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
            join('include', 'numpy', '_numpyconfig.h.in'),
            # add library sources, as distutils does not consider library
            # dependencies
            ] + npysort_sources + npymath_sources

    multiarray_src = [
            join('src', 'multiarray', 'alloc.c'),
            join('src', 'multiarray', 'arrayobject.c'),
            join('src', 'multiarray', 'arraytypes.c.src'),
            join('src', 'multiarray', 'array_assign.c'),
            join('src', 'multiarray', 'array_assign_scalar.c'),
            join('src', 'multiarray', 'array_assign_array.c'),
            join('src', 'multiarray', 'buffer.c'),
            join('src', 'multiarray', 'calculation.c'),
            join('src', 'multiarray', 'compiled_base.c'),
            join('src', 'multiarray', 'common.c'),
            join('src', 'multiarray', 'convert.c'),
            join('src', 'multiarray', 'convert_datatype.c'),
            join('src', 'multiarray', 'conversion_utils.c'),
            join('src', 'multiarray', 'ctors.c'),
            join('src', 'multiarray', 'datetime.c'),
            join('src', 'multiarray', 'datetime_strings.c'),
            join('src', 'multiarray', 'datetime_busday.c'),
            join('src', 'multiarray', 'datetime_busdaycal.c'),
            join('src', 'multiarray', 'descriptor.c'),
            join('src', 'multiarray', 'dtype_transfer.c'),
            join('src', 'multiarray', 'einsum.c.src'),
            join('src', 'multiarray', 'flagsobject.c'),
            join('src', 'multiarray', 'getset.c'),
            join('src', 'multiarray', 'hashdescr.c'),
            join('src', 'multiarray', 'item_selection.c'),
            join('src', 'multiarray', 'iterators.c'),
            join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
            join('src', 'multiarray', 'mapping.c'),
            join('src', 'multiarray', 'methods.c'),
            join('src', 'multiarray', 'multiarraymodule.c'),
            join('src', 'multiarray', 'nditer_templ.c.src'),
            join('src', 'multiarray', 'nditer_api.c'),
            join('src', 'multiarray', 'nditer_constr.c'),
            join('src', 'multiarray', 'nditer_pywrap.c'),
            join('src', 'multiarray', 'number.c'),
            join('src', 'multiarray', 'numpymemoryview.c'),
            join('src', 'multiarray', 'numpyos.c'),
            join('src', 'multiarray', 'refcount.c'),
            join('src', 'multiarray', 'sequence.c'),
            join('src', 'multiarray', 'shape.c'),
            join('src', 'multiarray', 'scalarapi.c'),
            join('src', 'multiarray', 'scalartypes.c.src'),
            join('src', 'multiarray', 'usertypes.c'),
            join('src', 'multiarray', 'ucsnarrow.c'),
            join('src', 'multiarray', 'vdot.c'),
            join('src', 'private', 'templ_common.h.src'),
            ]

    blas_info = get_info('blas_opt', 0)
    if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
        extra_info = blas_info
        # These files are also in MANIFEST.in so that they are always in
        # the source distribution independently of HAVE_CBLAS.
        multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
                               join('src', 'multiarray', 'python_xerbla.c'),
                               ])
        if uses_accelerate_framework(blas_info):
            multiarray_src.extend(get_sgemv_fix())
    else:
        extra_info = {}

    if not ENABLE_SEPARATE_COMPILATION:
        multiarray_deps.extend(multiarray_src)
        multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
        multiarray_src.append(generate_multiarray_templated_sources)

    config.add_extension('multiarray',
                         sources=multiarray_src +
                                 [generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api,
                                  join(codegen_dir, 'generate_numpy_api.py'),
                                  join('*.py')],
                         depends=deps + multiarray_deps,
                         libraries=['npymath', 'npysort'],
                         extra_info=extra_info)

    #######################################################################
    #                           umath module                              #
    #######################################################################

    # umath version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and is therefore not in the sources
    # argument of the build_ext command
    def generate_umath_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'umath')
        sources = [
            join(local_dir, subpath, 'loops.h.src'),
            join(local_dir, subpath, 'loops.c.src'),
            join(local_dir, subpath, 'scalarmath.c.src'),
            join(local_dir, subpath, 'simd.inc.src')]

        # numpy.distutils generates .c from .c.src in build-specific directories;
        # we have to add those directories to the include path as they depend on build_dir
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    def generate_umath_c(ext, build_dir):
        target = join(build_dir, header_dir, '__umath_generated.c')
        dir = os.path.dirname(target)
        if not os.path.exists(dir):
            os.makedirs(dir)
        script = generate_umath_py
        if newer(script, target):
            f = open(target, 'w')
            f.write(generate_umath.make_code(generate_umath.defdict,
                                             generate_umath.__file__))
            f.close()
        return []

    umath_src = [
            join('src', 'umath', 'umathmodule.c'),
            join('src', 'umath', 'reduction.c'),
            join('src', 'umath', 'funcs.inc.src'),
            join('src', 'umath', 'simd.inc.src'),
            join('src', 'umath', 'loops.h.src'),
            join('src', 'umath', 'loops.c.src'),
            join('src', 'umath', 'ufunc_object.c'),
            join('src', 'umath', 'scalarmath.c.src'),
            join('src', 'umath', 'ufunc_type_resolution.c')]

    umath_deps = [
            generate_umath_py,
            join('src', 'multiarray', 'common.h'),
            join('src', 'private', 'templ_common.h.src'),
            join('src', 'umath', 'simd.inc.src'),
            join(codegen_dir, 'generate_ufunc_api.py'),
            join('src', 'private', 'ufunc_override.h')] + npymath_sources

    if not ENABLE_SEPARATE_COMPILATION:
        umath_deps.extend(umath_src)
        umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
        umath_src.append(generate_umath_templated_sources)
        umath_src.append(join('src', 'umath', 'funcs.inc.src'))
        umath_src.append(join('src', 'umath', 'simd.inc.src'))

    config.add_extension('umath',
                         sources=umath_src +
                                 [generate_config_h,
                                 generate_numpyconfig_h,
                                 generate_umath_c,
                                 generate_ufunc_api],
                         depends=deps + umath_deps,
                         libraries=['npymath'],
                         )

    #######################################################################
    #                        umath_tests module                           #
    #######################################################################

    config.add_extension('umath_tests',
                    sources=[join('src', 'umath', 'umath_tests.c.src')])

    #######################################################################
    #                   custom rational dtype module                      #
    #######################################################################

    config.add_extension('test_rational',
                    sources=[join('src', 'umath', 'test_rational.c.src')])

    #######################################################################
    #                        struct_ufunc_test module                     #
    #######################################################################

    config.add_extension('struct_ufunc_test',
                    sources=[join('src', 'umath', 'struct_ufunc_test.c.src')])

    #######################################################################
    #                     multiarray_tests module                         #
    #######################################################################

    config.add_extension('multiarray_tests',
                    sources=[join('src', 'multiarray', 'multiarray_tests.c.src')])

    #######################################################################
    #                        operand_flag_tests module                    #
    #######################################################################

    config.add_extension('operand_flag_tests',
                    sources=[join('src', 'umath', 'operand_flag_tests.c.src')])

    config.add_data_dir('tests')
    config.add_data_dir('tests/data')

    config.make_svn_version_py()

    return config
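
Throughout the setup script above, source and header lists are built from package-relative pieces with os.path.join, so no path separator is ever hard-coded. A minimal stand-alone sketch of that pattern (only the two file names are taken from the example; everything else is illustrative):

import os
from os.path import join

# relative, platform-portable source paths, built the same way as npysort_sources above
sources = [join('src', 'npysort', name)
           for name in ('quicksort.c.src', 'mergesort.c.src')]

# join inserts the platform's separator, so the same list works on POSIX and Windows
for src in sources:
    print(os.path.normpath(src))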

Example 158

Project: Honeybee Source File: Honeybee_Read Annual Result I.py
def main(illFilesAddress, testPts, testVecs, occFiles, lightingControlGroups, SHDGroupI_Sensors, SHDGroupII_Sensors, DLAIllumThresholds, runInBackground = False):
    
    if sc.sticky.has_key('honeybee_release'):

        try:
            if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
            if sc.sticky['honeybee_release'].isInputMissing(ghenv.Component): return -1
        except:
            warning = "You need a newer version of Honeybee to use this compoent." + \
            " Use updateHoneybee component to update userObjects.\n" + \
            "If you have already updated userObjects drag Honeybee_Honeybee component " + \
            "into canvas and try again."
            w = gh.GH_RuntimeMessageLevel.Warning
            ghenv.Component.AddRuntimeMessage(w, warning)
            return -1
            
        hb_folders = sc.sticky["honeybee_folders"]
        hb_RADPath = hb_folders["RADPath"]
        hb_RADLibPath = hb_folders["RADLibPath"]
        hb_DSPath = hb_folders["DSPath"]
        hb_DSCore = hb_folders["DSCorePath"]
        hb_DSLibPath = hb_folders["DSLibPath"]
    else:
        msg = "You should first let Honeybee to fly first..."
        
        return msg, None
    
    daysimHeaderKeywords = ["project_name", "project_directory", "bin_directory", "tmp_directory", "Template_File",
        "place", "latitude", "longitude", "time_zone", "site_elevation", "time_step",
        "wea_data_short_file", "wea_data_short_file_units", "lower_direct_threshold", "lower_diffuse_threshold",
        "output_units", "sensor_file_unit", "material_file", "geometry_file", 
        "radiance_source_files", "sensor_file", "viewpoint_file", "AdaptiveZoneApplies", "dgp_image_x_size", "dgp_image_y_size",
        "ab", "ad", "as", "ar", "aa", "lr", "st", "sj", "lw", "dj", "ds", "dr", "dp", 
        "occupancy", "minimum_illuminance_level", "daylight_savings_time", "shading", "electric_lighting_system",
        "sensor_file_info", "daylight_autonomy_active_RGB", "electric_lighting", "direct_sunlight_file", "thermal_simulation",
        "user_profile", "PNGScheduleExists" ]
    
    # I will remove this function later and just use the WriteDS class
    
    
    class genDefaultLightingControl(object):
        
        def __init__(self, sensorPts = [], cntrlType = 4, lightingPower = 250, lightingSetpoint = 300, ballastLossFactor = 20, standbyPower = 3, delayTime = 5):
            
            self.sensorPts = sensorPts
            self.lightingControlStr = self.getLightingControlStr(cntrlType, lightingPower, lightingSetpoint, ballastLossFactor, standbyPower, delayTime)
        
        def getLightingControlStr(self, cntrlType, lightingPower = 250, lightingSetpoint = 300, ballastLossFactor = 20, standbyPower = 3, delayTime = 5):
            
            cntrlType += 1
            
            # manual control
            lightingControlDict = {
            1 : 'manualControl',
            2 : 'onlyOffSensor',
            3 : 'onWhenOccupied',
            4 : 'dimming',
            5 : 'onlyOffSensorAndDimming',
            6 : 'onWithDimming'}
            
            lightingStr = `cntrlType` + " " + lightingControlDict[cntrlType] + " " + `lightingPower` + " 1 "
            
            if cntrlType != 1:
                lightingStr += `standbyPower` + " "
            
            if cntrlType > 3:
                lightingStr += `ballastLossFactor` + " " + `lightingSetpoint` + " "
            
            if cntrlType != 1 and cntrlType!=4:
                lightingStr += `delayTime`
            
            lightingStr += "\n"
            
            return lightingStr
    
    
    
    def isSensor(testPt, sensors):
        for pt in sensors:
            if pt==None: return False
            if pt.DistanceTo(testPt) < sc.doc.ModelAbsoluteTolerance:
                # this is a sensor point
                return True
        # not a sensor
        return False
    
    msg = str.Empty
    
    # PREPARATION/CHECKING THE INPUTS #
    
    # number of spaces
    # this component considers each branch as a separate space; it will generate
    # a separate heading file and a separate set of results for each space
    numOfSpaces = testPts.BranchCount
    
    # number of total points
    numOfPts = testPts.DataCount
    
    # set up illuminance levels for the spaces if they are not already set
    if len(DLAIllumThresholds)==0: DLAIllumThresholds = [300] * numOfSpaces
    
    # check for occupancy file
    occupancyFilesFolder = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "DaysimCSVOCC\\")

    if len(occFiles)!=0:
        for fileCount, fileName in enumerate(occFiles):
            if fileName.lower().endswith(".csv"):
                try:
                    if not os.path.isfile(fileName):
                        msg = "Can't find the occupancy file: " + fileName
                        return msg, None
                except:
                    msg = "Occupancy file address is not valid."
                    return msg, None
            else:
                #try:
                # might be an energyplus schedule
                filePath = convertEPScheduleToDSSchedule(fileName, occupancyFilesFolder)
                occFiles[fileCount] = filePath
    else:
        daysimOccFile = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "DaysimCSVOCC\\userDefinedOcc_9to17.csv")
        occFiles = [daysimOccFile] * numOfSpaces
        if not os.path.isfile(daysimOccFile):
            msg = "Can't find the default occupancy file at: " + daysimOccFile + \
                  "\nYou can generate an occupancy file and connect the file address to occupancyFiles_ input."
            return msg, None
        
    # separate daylighting controls for each space
    
    class SHDGroupSensors(object):
        def __init__(self, sensorsList):
            self.intSensors = sensorsList[0]
            self.extSensors = sensorsList[1]
    
    lightingControls = []
    SHDGroupISensors = []
    SHDGroupIISensors = []
    originalIllFiles = []
    testPoints = []
    testVectors = []
    numOfPtsInEachSpace = []
    # collect the data for spaces
    for branchNum in range(numOfSpaces):
        
        ptList = list(testPts.Branch(branchNum))
        
        testPoints.append(ptList)
        
        numOfPtsInEachSpace.append(len(ptList))
        
        try: testVectors.append(list(testVecs.Branch(branchNum)))
        except: testVectors.append([rc.Geometry.Vector3d.ZAxis] * testPts.Branch(branchNum).Count)
        
        try: lightingControls.append(list(lightingControlGroups.Branch(branchNum)))
        except: lightingControls.append([genDefaultLightingControl()])
        try: SHDGroupISensors.append(SHDGroupSensors(SHDGroupI_Sensors.Branch(branchNum)))
        except: SHDGroupISensors.append(None)
        try: SHDGroupIISensors.append(SHDGroupSensors((SHDGroupII_Sensors.Branch(branchNum))))
        except: SHDGroupIISensors.append(None)
    
    # create a place holder for each shading group
    # sort the ill files based on their names
    originalIllFilesSorted = convertIllFileDaraTreeIntoSortedDictionary(illFilesAddress)

    # number of points should be the same in all the illfile lists
    # that's why I just try the first list of the ill files
    numOfPtsInEachFile = []
    for illFile in originalIllFilesSorted[0][0]:
        with open(illFile, "r") as illInf:
            for lineCount, line in enumerate(illInf):
                if not line.startswith("#"):
                    numOfPtsInEachFile.append(len(line.strip().split(" ")) - 4)
                    break
    
    # find the current project directory, which could be different from the old one
    projectDirectory = os.path.dirname(originalIllFilesSorted[0][0][0]) + "\\"
    # print numOfPtsInEachFile
    #print numOfPtsInEachSpace
    
    # make sure the number of points inside the ill file matches the number of points
    # inside the point list
    if sum(numOfPtsInEachFile) != numOfPts:
        msg = "Number of points in ill files: " + `sum(numOfPtsInEachFile)` + \
              " doesn't match the number of points in point files: " + `numOfPts`
        return msg, None
   
    # find the heading files and create multiple ill files for the study
    heaFiles = []
    filePath =  os.path.dirname(originalIllFilesSorted[0][0][0])
    try:
        files = os.listdir(filePath)
    except:
        msg = "Can't find the heading files (*.hea) at " + filePath
        return msg, None
    
    for fileName in files:
        if fileName.EndsWith(".hea"): heaFiles.append(fileName)

    # sort heading files and pt files
    try: heaFiles = sorted(heaFiles, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-1]))
    except: pass
    
    # copy one of the heading files to be modified
    heaFile = heaFiles[0]
    with open(os.path.join(filePath, heaFile), "r") as heainf:
        baseHea = heainf.readlines()
    
    modifiedHeaBase = str.Empty
    keywordsToBeRemoved = ["daylight_autonomy_active_RGB", "electric_lighting", "direct_sunlight_file", "thermal_simulation", "occupancy_profile",
                           "continuous_daylight_autonomy_active_RGB", "UDI_100_active_RGB", "UDI_100_2000_active_RGB", "UDI_2000_active_RGB",
                           "DDS_sensor_file", "DDS_file", "sensor_file_info"]
    
    linesToBePassed = []
    
    for lineCount, line in enumerate(baseHea):
        line = line.strip()
        if not lineCount in linesToBePassed:
            if line.split(" ")[0] == ("sensor_file"):
                modifiedHeaBase += "sensor_file [sensor_file]\n"
            elif line.startswith("occupancy-file"):
                modifiedHeaBase += "occupancy-file [occupancy]\n"
            elif line.startswith("occupancy"):
                modifiedHeaBase += "occupancy 5 [occupancy]\n"
            elif line.startswith("project_name"):
                projectName = line.split("project_name")[-1].strip()
                modifiedHeaBase += "project_name       [project_name]\n"
            elif line.startswith("project_directory"):
                # projectDirectory = line.split("project_directory")[-1].strip()
                modifiedHeaBase += "project_directory   " + projectDirectory + "\n"
            elif line.startswith("tmp_directory"):
                # create a place holder for the new temp file
                modifiedHeaBase += "tmp_directory      " + os.path.join(projectDirectory, "tmp[spaceCount]") + "\\\n"
                
            elif line.startswith("daylight_savings_time"):
                modifiedHeaBase += "daylight_savings_time 1\n"
            elif line.startswith("minimum_illuminance_level"):
                modifiedHeaBase += "minimum_illuminance_level [minimum_illuminance_level]\n"
            elif line.split(" ")[0] == "shading":
                
                # add the place holder for new dc and ill file names
                if line.find(".ill") >= 0: line = line.replace(".ill", "[spaceCount].ill")
                if line.find(".dc") >= 0: line = line.replace(".dc", "[spaceCount].dc")
                
                shadingStr = line + "\n"
                for lineC in range(lineCount + 1, len(baseHea)):
                    line = baseHea[lineC].strip()
                    if lineCount > len(baseHea) or line == str.Empty or line.startswith("=") or line.split(" ")[0] in daysimHeaderKeywords:
                        # good example here that I should have used the while loop instead!
                        break
                    else:
                        linesToBePassed.append(lineC)
                        # add the place holder for new dc and ill file names
                        if line.find(".ill") >= 0:
                            line = line.replace(".ill", "[spaceCount].ill")
                        
                        # I'm not sure if I really need to modify the .dc files
                        # based on the graph on daysim page it should only look
                        # for the ill files and not the dc files
                        if line.find(".dc") >= 0:
                            line = line.replace(".dc", "[spaceCount].dc")
                        
                        linesToBePassed.append(lineC)
                        shadingStr += line + "\n"
                
                modifiedHeaBase += shadingStr
                
                
                #modifiedHeaBase.append("minimum_illuminance_level [minimum_illuminance_level]\n")
            elif line.split(" ")[0] == "electric_lighting_system" or line.split(" ")[0] == "user_profile":
                # remove the lines related to electric lighting system as the new ones should be assigned
                for lineC in range(lineCount + 1, len(baseHea)):
                    line = baseHea[lineC].strip()
                    if lineCount > len(baseHea) or line == str.Empty or line.startswith("=") or line.split(" ")[0] in daysimHeaderKeywords:
                        # good example here that I should have used the while loop instead!
                        break
                    else:
                        linesToBePassed.append(lineC)
                
                
            elif line.split(" ")[0] in keywordsToBeRemoved:
                pass
            else:
                modifiedHeaBase += line + "\n"
    
    # clean the parts that are related to lighting control and schedule
    
    ##replace
    
    # re-write the ill files based on the number of points in each space
    # if the study is only for a single space then all the ill files should be merged
    # considering the structure of .ill files and the fact that the files can be really
    # huge, this part can take long. It is good to consider a new name for these files so
    # that in case the user has already run the study for this folder the script just uses
    # the available files
    
    # generate new files for each space
    # check if the files have already been generated once
    # not a good idea at all - many users don't really change the file name and run several
    # studies with the same name; in that case, results would always stay the same! Oops...
    firstRun = True

    newIllFileNamesDict = {}
    for shdGroupCounter, illFileList in originalIllFilesSorted.items():
        newIllFileNamesDict[shdGroupCounter] = []
        for shadingStateCount in range(len(illFileList)):
            for spaceCount in range(numOfSpaces):
                newIllFileName  = illFileList[shadingStateCount][0].split(".ill")[0] + "_space_" + str(spaceCount) + ".ill"
                newDcFileName  = illFileList[shadingStateCount][0].split(".ill")[0] + "_space_" + str(spaceCount) + ".dc"
                newIllFileNamesDict[shdGroupCounter].append(newIllFileName) #collect ill files to calculate sDA
                #if not (os.path.isfile(newIllFileName) and os.path.isfile(newDcFileName)):
                #   firstRun = True
                #   break
    

    # open all the available ill files and put them in the dictionary
    illFilesDict = {}
    newIllFilesDict = {}
    if firstRun:
        
        for shdGroupCounter, illFileList in originalIllFilesSorted.items():
            
            for shadingStateCount, shadingStateFiles in enumerate(illFileList):
                # create a place holder for new .ill files for each shading group
                newIllFileNamesDict[shdGroupCounter] = []
                
                # open all the files for this shading state into memory
                for counter, illFile in enumerate(illFileList[shadingStateCount]):
                    illfile = open(illFile, "r")
                    illFilesDict[counter] = illfile  # keep each ill file from each cpu separate / I don't know why I have done this
                
                # open new ill files for each space and put them in the same directory
                for spaceCount in range(numOfSpaces):
                    newIllFileName  = illFileList[shadingStateCount][0].split(".ill")[0] + "_space_" + str(spaceCount) + ".ill"
                    newIllFileNamesDict[shdGroupCounter].append(newIllFileName) #collect new ill file names to calculate sDA
                    
                    newIllFile = open(newIllFileName, "w")
                    newIllFilesDict[spaceCount] = newIllFile
                
                # all the files will have the same length of 8760 lines for the hours of the year
                for line in range(8760):
                    # merge the line from all the source file
                    mergedLine = []
                    for illFileKey in illFilesDict.keys():
                        line = illFilesDict[illFileKey].readline()
                        
                        if illFileKey==0:
                            dateInfo = line.strip().split(" ")[:4]
                        mergedLine.extend(line.strip().split(" ")[4:])
                
                
                    # write the values to the target files
                    for illFileKey in newIllFilesDict.keys():
                        line = " ".join(dateInfo + mergedLine[sum(numOfPtsInEachSpace[:illFileKey]):sum(numOfPtsInEachSpace[:illFileKey+1])])
                        newIllFilesDict[illFileKey].write(line + "\n")
                
                # close all the opened files
                for illFileKey in illFilesDict.keys(): illFilesDict[illFileKey].close()
                for illFileKey in newIllFilesDict.keys(): newIllFilesDict[illFileKey].close()
        
        
        # print numOfPtsInEachSpace
        # write the new .dc files for each space
        dcFilesDict = {}
        newDcFilesDict = {}
        
        for shdGroupCounter, illFileList in originalIllFilesSorted.items():
            
            for shadingStateCount, shadingStateFiles in enumerate(illFileList):
                #illFileDict[shaidngGroupCounter]
                lenOfDCFiles = []
                for counter, illFile in enumerate(shadingStateFiles):
                    if illFile.endswith("_up.ill"):
                        dcFile = illFile.replace("_up.ill", ".dc")
                        
                    elif illFile.endswith("_down.ill"):
                        dcFile = illFile.replace("_down.ill", ".dc")
                        
                    else:
                        dcFile = illFile.replace(".ill", ".dc")

                    lenOfDCFile = getFilelength(dcFile) - 6  # Daysim files have 6 header lines
                    lenOfDCFiles.append(lenOfDCFile)
                    dcfile = open(dcFile, "r")
                    dcFilesDict[counter] = dcfile
                
                # open new ill files for each space and put them in the same directory
                for spaceCount in range(numOfSpaces):
                    newDcFileName  = illFileList[shadingStateCount][0].split(".ill")[0] + "_space_" + str(spaceCount) + ".dc"
                    newDcFile = open(newDcFileName, "w")
                    newDcFilesDict[spaceCount] = newDcFile
                
                heading = str.Empty
                for line in dcFilesDict[0]:
                    if line.startswith("#"):
                        #make one instance of heading
                        heading += line
                    else:
                        newDcFilesDict[0].write(heading)
                        newDcFilesDict[0].write(line)
                        break
                
                pointCount = 1
                spaceCount = 0
                for dcFileKey in dcFilesDict.keys():
                    for line in dcFilesDict[dcFileKey]:
                        if not line.startswith("#"):
                            # write the line
                            newDcFilesDict[spaceCount].write(line)
                            pointCount+=1
                            if pointCount == sum(numOfPtsInEachSpace[:spaceCount + 1]):
                                # end of the file, start a new file
                                spaceCount += 1
                                try: newDcFilesDict[spaceCount].write(heading)
                                except: pass
                    
                # close all the opened files
                for dcFileKey in dcFilesDict.keys(): dcFilesDict[dcFileKey].close()
                for dcFileKey in newDcFilesDict.keys(): newDcFilesDict[dcFileKey].close()
        

    heaFileNames = []
    # write point files and heading files
    for spaceCount in range(numOfSpaces):
        tmpFolder = os.path.join(projectDirectory, "tmp_space_" + str(spaceCount))
        if not os.path.isdir(tmpFolder): os.mkdir(tmpFolder)
        subProjectName = projectName + "_space_" + str(spaceCount)
        ptsFileName = subProjectName + ".pts"
        modifiedHea = modifiedHeaBase
        
        with open(os.path.join(filePath, ptsFileName), "w") as ptsf:
            for ptCount, testPoint in enumerate(testPoints[spaceCount]):
                ptNormal = testVectors[spaceCount][ptCount]
                ptStr = '%.4f'%testPoint.X + '\t' + \
                        '%.4f'%testPoint.Y + '\t' + \
                        '%.4f'%testPoint.Z + '\t' + \
                        '%.4f'%ptNormal.X + '\t' + \
                        '%.4f'%ptNormal.Y + '\t' + \
                        '%.4f'%ptNormal.Z + '\n'
                
                ptsf.write(ptStr)
        
        # replace some of the values
        
        # replace sensor file with the new file
        if modifiedHea.find("[sensor_file]") >= 0:
            modifiedHea = modifiedHea.replace("[sensor_file]", ptsFileName)
        else:
            modifiedHea += "sensor_file " + ptsFileName + "\n"
        
        # occupancy file
        try:
            occFileFullPath = occFiles[spaceCount]
        except:
            occFileFullPath = occFiles[0]
        
        
        #copy occupancy file to the folder
        occFileName = os.path.basename(occFileFullPath)
        targetFile = os.path.join(projectDirectory, occFileName)
        
        if not os.path.isdir(targetFile):
            shutil.copy2(occFileFullPath, targetFile)
        
        if modifiedHea.find("[occupancy]") >= 0:
            modifiedHea = modifiedHea.replace("[occupancy]", occFileName)
        else:
            # pass
            modifiedHea += "occupancy-file " + occFileName + "\n"
            modifiedHea += "occupancy 5 " + occFileName + "\n"
        
        modifiedHea = modifiedHea.replace("[project_name]", subProjectName)
        
        # daylight saving
        if modifiedHea.find("daylight_savings_time") >= 0:
            pass
        else:
            modifiedHea += "daylight_savings_time 1\n"
        
        # illuminance level threshold
        try: illumT = DLAIllumThresholds[spaceCount]
        except: illumT = DLAIllumThresholds[0]
        
        if modifiedHea.find("[minimum_illuminance_level]") >= 0:
            modifiedHea = modifiedHea.replace("[minimum_illuminance_level]", str(illumT))
        else:
            modifiedHea += "minimum_illuminance_level " + str(illumT)+ "\n"
        
        
        # replace the file names for advanced shadings
        modifiedHea = modifiedHea.replace("[spaceCount]", "_space_" + str(spaceCount))
        
        # add user information
        modifiedHea += "user_profile 1\n" + \
        "active 100 1 1\n"
        
        try:
            lghtCtrls = lightingControls[spaceCount]
            lightingGroupSensors = []
        except:
            lghtCtrls = []
        
        if len(lghtCtrls)!=0:
            modifiedHea += "\n\nelectric_lighting_system " + str(len(lghtCtrls)) + "\n"
        
        for lightingControl in lghtCtrls:
            lightingGroupSensors.append(lightingControl.sensorPts)
            lightingControlDefinition = lightingControl.lightingControlStr
            modifiedHea += lightingControlDefinition
        
        # write sensor info
        modifiedHea += "\nsensor_file_info "
        
        for pt in testPoints[spaceCount]:
            sensorInfo = []
            
            # test shading group
            for groupCount, shdGroupSensor in enumerate([SHDGroupISensors[spaceCount], SHDGroupIISensors[spaceCount]]):
                if shdGroupSensor!=None:
                    if isSensor(pt, shdGroupSensor.intSensors):
                        sensorInfo.append('BG' + str(groupCount+1))
                    if isSensor(pt, shdGroupSensor.extSensors):
                        sensorInfo.append('BG' + str(groupCount+1) + '_Ext')
            
            # test lighting group
            for groupCount, lightingGroupSensor in enumerate(lightingGroupSensors):
                if lightingGroupSensor!=[] and isSensor(pt, lightingGroupSensor):
                    sensorInfo.append('LG' + str(groupCount+1))
            if len(sensorInfo)==0:
                modifiedHea += "0 "
            elif len(sensorInfo)==1:
                modifiedHea += sensorInfo[0] + " "
            else:
                modifiedHea += ",".join(sensorInfo) + " "
            
        # output files
        modifiedHea += "\n\n############################\n" + \
                       "# Daylighting Result Files #\n" + \
                       "############################\n"
        modifiedHea += "daylight_autonomy_active_RGB " + subProjectName +"_autonomy.DA\n"
        modifiedHea += "continuous_daylight_autonomy_active_RGB " + subProjectName +".CDA\n"
        modifiedHea += "UDI_100_active_RGB " + subProjectName +"_less_than_100.UDI\n"
        modifiedHea += "UDI_100_2000_active_RGB " + subProjectName +"_100_2000.UDI\n"
        modifiedHea += "UDI_2000_active_RGB " + subProjectName + "_more_than_2000.UDI\n"
        modifiedHea += "occupancy_profile " + subProjectName + "_occ_profile.csv\n"
        modifiedHea += "electric_lighting "  + subProjectName + "_electriclighting.htm\n"
        modifiedHea += "direct_sunlight_file "  + subProjectName + ".dir\n"
        modifiedHea += "thermal_simulation " + subProjectName + "_intgain.csv\n"
        #modifiedHea += "DDS_sensor_file "  + subProjectName +".CDA\n".dds\n"
        #modifiedHea += "DDS_file "  + subProjectName +".sen\n"
                           
                           
        heaFileName = subProjectName + ".hea"
        heaFileNames.append(heaFileName)
        with open(os.path.join(filePath, heaFileName), "w") as heaf:
            heaf.write(modifiedHea)
            
    # write batch files
    batchFileNames = []
    pathStr = "SET RAYPATH=.;" + hb_RADLibPath + ";" + hb_DSPath + ";" + hb_DSLibPath + ";\nPATH=" + hb_RADPath + ";" + hb_DSPath + ";" + hb_DSLibPath + ";$PATH\n"
    for heaFileName in heaFileNames:
        batchFileName = heaFileName.replace(".hea", ".bat")
        batchFileNames.append(batchFileName)
        with open(os.path.join(filePath, batchFileName), "w") as batchInf:
            batchFileStr = ":: Daysim Result Calculation - Generated by Honeybee\n\n"
            batchFileStr += pathStr
            # gen glare profile in case there are any dynamic shading systems!
            if len(originalIllFilesSorted.keys())>1:
                batchFileStr += ':: Glare Profile in The Case of Dynamic Shading Calculation\n' + \
                                'gen_directsunlight ' + os.path.join(filePath, heaFileName) + '\n'
            batchFileStr += ':: Generate the result files\n' + \
                            'ds_el_lighting.exe  ' + os.path.join(filePath, heaFileName) + '\n'
            
            batchInf.write(batchFileStr)
            
    # write a batch file and run the study
    ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
    if ncpus == 0: ncpus = 1
    
    # execute the batch files in parallel if there are enough CPUs!
    fileNames = []

    if ncpus >= numOfSpaces:
        for fileName in batchFileNames:
            batchFileName = os.path.join(filePath, fileName)
            fileNames.append(batchFileName)
            if runInBackground:
                p = subprocess.Popen(batchFileName , shell=True)
            else:
                p = subprocess.Popen(r'start cmd /c ' + batchFileName , shell=True)
                
        isTheStudyOver(batchFileNames)
    else:
        for fileName in batchFileNames:
            batchFileName = os.path.join(filePath, fileName)
            if runInBackground:
                p = subprocess.Popen(batchFileName , shell=True)
            else:
                p = subprocess.Popen(r'start cmd /c ' + batchFileName , shell=True)
    
    # calculate sDA    
    
    #sDADict = {}
    
    #if len(newIllFileNamesDict.keys())!=1:
    #    warning = "This version of Honeybee doesn't consider dynamic blinds in sDA calculation!\n"
    #    w = gh.GH_RuntimeMessageLevel.Warning
    #    ghenv.Component.AddRuntimeMessage(w, warning)
    #    
    #for spaceCount, spaceIllFiles in enumerate(newIllFileNamesDict[0]):
    #    totalOccupancyHours = 0
    #    sDADict[spaceCount] = 0
        
    #    try: DLAIllumThreshold = DLAIllumThresholds[spaceCount]
    #    except: DLAIllumThreshold = DLAIllumThresholds[0]
    #    
    #    
    #    # open the file to read the values
    #    with open(spaceIllFiles, "r") as illInf:
    #        
    #        # import occupancy profile
    #        try: occFile = occFiles[spaceCount]
    #        except: occFile = occFiles[0]
    #        with open(occFile, "r") as occInFile:
    #            occupancyLines = occInFile.readlines()
    #            
    #        # each line represnt an hour
    #        for lineCount, line in enumerate(illInf):
    #            higherThanThreshold = 0
    #            # check the occupancy profile
    #            if int(occupancyLines[lineCount + 3].split(",")[-1]) != 0:
    #                totalOccupancyHours += 1
    #                illValues = line.split("  ")[1].strip().split(" ")
    #                
    #                # check number of points that satisfy the minimum illuminance
    #                for sensorCount, illuminance in enumerate(illValues):
    #                    # print float(illuminance), DLAIllumThreshold, float(illuminance) >= DLAIllumThreshold
    #                    if float(illuminance) >= DLAIllumThreshold:
    #                        higherThanThreshold += 1
    #                
    #                if higherThanThreshold/len(illValues) > .5:
    #                    sDADict[spaceCount] += 1
    #        
    #        sDADict[spaceCount] = "%.2f"%((sDADict[spaceCount]/totalOccupancyHours) * 100)
          
    
    # read all the results
    DLALists = []
    underUDLILists = []
    inRangeUDLILists = []
    overUDLILists = []
    CDALists = []
    EPLSchLists = []
    htmLists = []
    
    resultFiles = os.listdir(projectDirectory)
    for fileName in resultFiles:
        if fileName.endswith(".DA"): DLALists.append(os.path.join(filePath,fileName))
        elif fileName.endswith(".CDA"): CDALists.append(os.path.join(filePath,fileName))
        elif fileName.endswith(".htm"): htmLists.append(os.path.join(filePath,fileName))
        elif fileName.endswith("_intgain.csv"): EPLSchLists.append(os.path.join(filePath,fileName))
        elif fileName.endswith("less_than_100.UDI"): underUDLILists.append(os.path.join(filePath,fileName))
        elif fileName.endswith("100_2000.UDI"): inRangeUDLILists.append(os.path.join(filePath,fileName))
        elif fileName.endswith("more_than_2000.UDI"): overUDLILists.append(os.path.join(filePath,fileName))
        
    # sort the lists
    try: CDALists = sorted(CDALists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-1]))
    except: pass
    try: DLALists = sorted(DLALists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-2]))
    except: pass
    try: htmLists = sorted(htmLists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-2]))
    except: pass
    try: EPLSchLists = sorted(EPLSchLists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-2]))
    except: pass    
    try: underUDLILists = sorted(underUDLILists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-4]))
    except: pass
    try: inRangeUDLILists = sorted(inRangeUDLILists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-3]))
    except: pass
    try: overUDLILists = sorted(overUDLILists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-4]))
    except: pass
    
    return None, [DLALists, underUDLILists, inRangeUDLILists, overUDLILists, CDALists, EPLSchLists, htmLists]
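
The component above leans on os.path.join for every occupancy and result path it touches. A minimal sketch of that pattern, with a hypothetical folder standing in for sc.sticky["Honeybee_DefaultFolder"]:

import os
from os.path import join

# hypothetical stand-in for sc.sticky["Honeybee_DefaultFolder"]
defaultFolder = join(os.path.expanduser("~"), "Honeybee_Default")

occupancyFilesFolder = join(defaultFolder, "DaysimCSVOCC")
daysimOccFile = join(occupancyFilesFolder, "userDefinedOcc_9to17.csv")

# check the file before using it, as the component does
if not os.path.isfile(daysimOccFile):
    print("Can't find the default occupancy file at: " + daysimOccFile)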

Example 159

Project: lyman Source File: run_fmri.py
def main(arglist):
    """Main function for workflow setup and execution."""
    args = parse_args(arglist)

    # Get and process specific information
    project = lyman.gather_project_info()
    exp = lyman.gather_experiment_info(args.experiment, args.altmodel, args)

    # Set up the SUBJECTS_DIR for Freesurfer
    os.environ["SUBJECTS_DIR"] = project["data_dir"]

    # Subject is always highest level of parameterization
    subject_list = lyman.determine_subjects(args.subjects)
    subj_source = tools.make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp_base = exp_name
    if args.altmodel is not None:
        exp_name = "-".join([exp_base, args.altmodel])

    # Set roots of output storage
    data_dir = project["data_dir"]
    analysis_dir = op.join(project["analysis_dir"], exp_name)
    working_dir = op.join(project["working_dir"], exp_name)
    nipype.config.set("execution", "crashdump_dir", project["crash_dir"])

    # Create symlinks to the preproc directory for altmodels
    if not op.exists(analysis_dir):
        os.makedirs(analysis_dir)
    if exp_base != exp_name:
        for subj in subject_list:
            subj_dir = op.join(analysis_dir, subj)
            if not op.exists(subj_dir):
                os.makedirs(subj_dir)
            link_dir = op.join(analysis_dir, subj, "preproc")
            if not op.exists(link_dir):
                preproc_dir = op.join("../..", exp_base, subj, "preproc")
                os.symlink(preproc_dir, link_dir)

    # For later processing steps, are we using smoothed inputs?
    smoothing = "unsmoothed" if args.unsmoothed else "smoothed"

    # Also define the regspace variable here
    space = args.regspace

    # ----------------------------------------------------------------------- #
    # Preprocessing Workflow
    # ----------------------------------------------------------------------- #

    # Create workflow in function defined elsewhere in this package
    preproc, preproc_input, preproc_output = wf.create_preprocessing_workflow(
                                                exp_info=exp)

    # Collect raw nifti data
    preproc_templates = dict(timeseries=exp["source_template"])
    if exp["partial_brain"]:
        preproc_templates["whole_brain"] = exp["whole_brain_template"]
    if exp["fieldmap_template"]:
        preproc_templates["fieldmap"] = exp["fieldmap_template"]

    preproc_source = Node(SelectFiles(preproc_templates,
                                      base_directory=project["data_dir"]),
                          "preproc_source")

    # Convenience class to handle some stereotyped connections
    # between run-specific nodes (defined here) and the inputs
    # to the prepackaged workflow returned above
    preproc_inwrap = tools.InputWrapper(preproc, subj_source,
                                        preproc_source, preproc_input)
    preproc_inwrap.connect_inputs()

    # Store workflow outputs in a persistent location
    preproc_sink = Node(DataSink(base_directory=analysis_dir), "preproc_sink")

    # Similar to above, class to handle stereotyped output connections
    preproc_outwrap = tools.OutputWrapper(preproc, subj_source,
                                          preproc_sink, preproc_output)
    preproc_outwrap.set_subject_container()
    preproc_outwrap.set_mapnode_substitutions(exp["n_runs"])
    preproc_outwrap.sink_outputs("preproc")

    # Set the base for the possibly temporary working directory
    preproc.base_dir = working_dir

    # Possibly execute the workflow, depending on the command line
    lyman.run_workflow(preproc, "preproc", args)

    # ----------------------------------------------------------------------- #
    # Timeseries Model
    # ----------------------------------------------------------------------- #

    # Create a modelfitting workflow and specific nodes as above
    model, model_input, model_output = wf.create_timeseries_model_workflow(
        name=smoothing + "_model", exp_info=exp)

    model_base = op.join(analysis_dir, "{subject_id}/preproc/run_*/")
    model_templates = dict(
        timeseries=op.join(model_base, smoothing + "_timeseries.nii.gz"),
        realign_file=op.join(model_base, "realignment_params.csv"),
        nuisance_file=op.join(model_base, "nuisance_variables.csv"),
        artifact_file=op.join(model_base, "artifacts.csv"),
        )

    if exp["design_name"] is not None:
        design_file = exp["design_name"] + ".csv"
        regressor_file = exp["design_name"] + ".csv"
        model_templates["design_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", design_file)
    if exp["regressor_file"] is not None:
        regressor_file = exp["regressor_file"] + ".csv"
        model_templates["regressor_file"] = op.join(data_dir, "{subject_id}",
                                                    "design", regressor_file)

    model_source = Node(SelectFiles(model_templates), "model_source")

    model_inwrap = tools.InputWrapper(model, subj_source,
                                      model_source, model_input)
    model_inwrap.connect_inputs()

    model_sink = Node(DataSink(base_directory=analysis_dir), "model_sink")

    model_outwrap = tools.OutputWrapper(model, subj_source,
                                        model_sink, model_output)
    model_outwrap.set_subject_container()
    model_outwrap.set_mapnode_substitutions(exp["n_runs"])
    model_outwrap.sink_outputs("model." + smoothing)

    # Set temporary output locations
    model.base_dir = working_dir

    # Possibly execute the workflow
    lyman.run_workflow(model, "model", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Registration
    # ----------------------------------------------------------------------- #

    # Is this a model or timeseries registration?
    regtype = "timeseries" if (args.timeseries or args.residual) else "model"

    # Are we registering across experiments?
    cross_exp = args.regexp is not None

    # Retrieve the right workflow function for registration
    # Get the workflow function dynamically based on the space
    warp_method = project["normalization"]
    flow_name = "%s_%s_reg" % (space, regtype)
    reg, reg_input, reg_output = wf.create_reg_workflow(flow_name,
                                                        space,
                                                        regtype,
                                                        warp_method,
                                                        args.residual,
                                                        cross_exp)

    # Define a smoothing info node here. Use an iterable so that running
    # with/without smoothing doesn't clobber working directory files
    # for the other kind of execution
    smooth_source = Node(IdentityInterface(fields=["smoothing"]),
                         iterables=("smoothing", [smoothing]),
                         name="smooth_source")

    # Set up the registration inputs and templates
    reg_templates = dict(
        masks="{subject_id}/preproc/run_*/functional_mask.nii.gz",
        means="{subject_id}/preproc/run_*/mean_func.nii.gz",
                         )

    if regtype == "model":
        # First-level model summary statistic images
        reg_base = "{subject_id}/model/{smoothing}/run_*/"
        reg_templates.update(dict(
            copes=op.join(reg_base, "cope*.nii.gz"),
            varcopes=op.join(reg_base, "varcope*.nii.gz"),
            sumsquares=op.join(reg_base, "ss*.nii.gz"),
                                  ))
    else:
        # Timeseries images
        if args.residual:
            ts_file = op.join("{subject_id}/model/{smoothing}/run_*/",
                              "results/res4d.nii.gz")
        else:
            ts_file = op.join("{subject_id}/preproc/run_*/",
                              "{smoothing}_timeseries.nii.gz")
        reg_templates.update(dict(timeseries=ts_file))
    reg_lists = reg_templates.keys()

    # Native anatomy to group anatomy affine matrix and warpfield
    if space == "mni":
        aff_ext = "mat" if warp_method == "fsl" else "txt"
        reg_templates["warpfield"] = op.join(data_dir, "{subject_id}",
                                             "normalization/warpfield.nii.gz")
        reg_templates["affine"] = op.join(data_dir, "{subject_id}",
                                          "normalization/affine." + aff_ext)
    else:
        if args.regexp is None:
            tkreg_base = analysis_dir
        else:
            tkreg_base = op.join(project["analysis_dir"], args.regexp)
        reg_templates["tkreg_rigid"] = op.join(tkreg_base,
                                               "{subject_id}", "preproc",
                                               "run_1", "func2anat_tkreg.dat")

    # Rigid (6dof) functional-to-anatomical matrices
    rigid_stem = "{subject_id}/preproc/run_*/func2anat_"
    if warp_method == "ants" and space == "mni":
        reg_templates["rigids"] = rigid_stem + "tkreg.dat"
    else:
        reg_templates["rigids"] = rigid_stem + "flirt.mat"

    # Rigid matrix from anatomy to target experiment space
    if args.regexp is not None:
        targ_analysis_dir = op.join(project["analysis_dir"], args.regexp)
        reg_templates["first_rigid"] = op.join(targ_analysis_dir,
                                               "{subject_id}", "preproc",
                                               "run_1", "func2anat_flirt.mat")

    # Define the registration data source node
    reg_source = Node(SelectFiles(reg_templates,
                                  force_lists=reg_lists,
                                  base_directory=analysis_dir),
                      "reg_source")

    # Registration inputnode
    reg_inwrap = tools.InputWrapper(reg, subj_source,
                                    reg_source, reg_input)
    reg_inwrap.connect_inputs()

    # The source node also needs to know about the smoothing on this run
    reg.connect(smooth_source, "smoothing", reg_source, "smoothing")

    # Set up the registration output and datasink
    reg_sink = Node(DataSink(base_directory=analysis_dir), "reg_sink")

    reg_outwrap = tools.OutputWrapper(reg, subj_source,
                                    reg_sink, reg_output)
    reg_outwrap.set_subject_container()
    reg_outwrap.sink_outputs("reg.%s" % space)

    # Reg has some additional substitutions to strip out iterables
    # and rename the timeseries file
    reg_subs = [("_smoothing_", "")]
    reg_outwrap.add_regexp_substitutions(reg_subs)

    # Add dummy substitutions for the contrasts to make sure the DataSink
    # reruns when the design has changed. This accounts for the problem where
    # directory inputs are treated as strings and the contents/timestamps are
    # not hashed, which should be fixed upstream soon.
    contrast_subs = [(c, c) for c in exp["contrast_names"]]
    reg_outwrap.add_regexp_substitutions(contrast_subs)

    reg.base_dir = working_dir

    # Possibly run registration workflow and clean up
    lyman.run_workflow(reg, "reg", args)

    # ----------------------------------------------------------------------- #
    # Across-Run Fixed Effects Model
    # ----------------------------------------------------------------------- #

    # Dynamically get the workflow
    wf_name = space + "_ffx"
    ffx, ffx_input, ffx_output = wf.create_ffx_workflow(wf_name,
                                                        space,
                                                        exp["contrast_names"],
                                                        exp_info=exp)

    ext = "_warp.nii.gz" if space == "mni" else "_xfm.nii.gz"
    ffx_base = op.join("{subject_id}/reg", space, "{smoothing}/run_*")
    ffx_templates = dict(
        copes=op.join(ffx_base, "cope*" + ext),
        varcopes=op.join(ffx_base, "varcope*" + ext),
        masks=op.join(ffx_base, "functional_mask" + ext),
        means=op.join(ffx_base, "mean_func" + ext),
        dofs="{subject_id}/model/{smoothing}/run_*/results/dof",
        ss_files=op.join(ffx_base, "ss*" + ext),
        timeseries="{subject_id}/preproc/run_*/{smoothing}_timeseries.nii.gz",
                         )
    ffx_lists = ffx_templates.keys()

    # Space-conditional inputs
    if space == "mni":
        bg = op.join(data_dir, "{subject_id}/normalization/brain_warp.nii.gz")
        reg = op.join(os.environ["FREESURFER_HOME"],
                      "average/mni152.register.dat")
    else:
        reg_dir = "{subject_id}/reg/epi/{smoothing}/run_1"
        bg = op.join(reg_dir, "mean_func_xfm.nii.gz")
        reg = op.join(reg_dir, "func2anat_tkreg.dat")
    ffx_templates["anatomy"] = bg
    ffx_templates["reg_file"] = reg

    # Define the fixed effects data source node
    ffx_source = Node(SelectFiles(ffx_templates,
                                  force_lists=ffx_lists,
                                  base_directory=analysis_dir),
                      "ffx_source")

    # Fixed effects inputnode
    ffx_inwrap = tools.InputWrapper(ffx, subj_source,
                                    ffx_source, ffx_input)
    ffx_inwrap.connect_inputs()

    # Connect the smoothing information
    ffx.connect(smooth_source, "smoothing", ffx_source, "smoothing")

    # Fixed effects output and datasink
    ffx_sink = Node(DataSink(base_directory=analysis_dir), "ffx_sink")

    ffx_outwrap = tools.OutputWrapper(ffx, subj_source,
                                      ffx_sink, ffx_output)
    ffx_outwrap.set_subject_container()
    ffx_outwrap.sink_outputs("ffx.%s" % space)

    # Fixed effects has some additional substitutions to strip out iterables
    ffx_outwrap.add_regexp_substitutions([
        ("_smoothing_", ""), ("flamestats", "")
                                          ])

    ffx.base_dir = working_dir

    # Possibly run fixed effects workflow
    lyman.run_workflow(ffx, "ffx", args)

    # -------- #
    # Clean-up
    # -------- #

    if project["rm_working_dir"]:
        shutil.rmtree(project["working_dir"])
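
The workflow above builds its SelectFiles templates by chaining op.join (os.path
imported as op), so the subject, space, and smoothing placeholders live in a
single base template. A minimal standalone sketch of that pattern, using
illustrative values rather than the original workflow objects:

# Sketch of the op.join templating pattern; "mni" and the filenames are
# assumed values for illustration only.
import os.path as op

space = "mni"
ext = "_warp.nii.gz" if space == "mni" else "_xfm.nii.gz"
base = op.join("{subject_id}/reg", space, "{smoothing}/run_*")
templates = dict(
    copes=op.join(base, "cope*" + ext),
    varcopes=op.join(base, "varcope*" + ext),
)
# On POSIX this prints:
#   {subject_id}/reg/mni/{smoothing}/run_*/cope*_warp.nii.gz
print(templates["copes"])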

Example 160

Project: yournextrepresentative Source File: conf.py
def get_settings(conf_file_leafname, election_app=None, tests=False):
    conf = get_conf(conf_file_leafname)

    debug = bool(int(conf.get('STAGING')))

    # Get the requested ELECTION_APP:
    if election_app is None:
        election_app = conf['ELECTION_APP']
    election_app_fully_qualified = 'elections.' + election_app
    election_settings_module = election_app_fully_qualified + '.settings'
    elections_module = importlib.import_module(election_settings_module)

    language_code = conf.get('LANGUAGE_CODE', 'en-gb')

    # Internationalization
    # https://docs.djangoproject.com/en/1.6/topics/i18n/
    locale_paths = [
        join(BASE_DIR, 'locale')
    ]
    # The code below sets LANGUAGES to only those we have translations
    # for, so at the time of writing that will be:
    #   [('en', 'English'), ('es-ar', 'Argentinian Spanish')]
    # whereas the default setting is a long list of languages which
    # includes:
    #   ('es', 'Spanish').
    # If someone's browser sends 'Accept-Language: es', that means that it
    # will be found in this list, but since there are no translations for 'es'
    # it'll fall back to LANGUAGE_CODE.  However, if there is no 'es' in
    # LANGUAGES, then Django will attempt to do a best match, so if
    # Accept-Language is 'es' then it will use the 'es-ar' translation.  We think
    # this is generally desirable (e.g. so someone can see YourNextMP in Spanish
    # if their browser asks for Spanish).
    languages = [
        l for l in LANGUAGES
        if exists(join(locale_paths[0], to_locale(l[0])))
    ]
    languages.append(('cy-gb', 'Welsh'))
    languages.append(('es-cr', 'Costa Rican Spanish'))

    # We need this to make the language switcher work: Django doesn't have any
    # language information for these locales, so if you try to switch to them
    # it cannot and falls back to the existing/default language.
    EXTRA_LANG_INFO = {
        'cy-gb': {
            'bidi': False,
            'code': 'cy-gb',
            'name': 'Welsh',
            'name_local': u'Cymraeg',
        },
        'es-cr': {
            'bidi': False,
            'code': 'es-cr',
            'name': 'Spanish',
            'name_local': u'español de Costa Rica',
        },
    }

    locale.LANG_INFO.update(EXTRA_LANG_INFO)

    # The language selection has been slightly complicated now that we
    # have two es- languages: es-ar and es-cr.  Chrome doesn't offer
    # Costa Rican Spanish as one of its language choices, so the best
    # you can do is choose 'Spanish - español'. (This might well be
    # the case in other browsers too.)  Since 'es-ar' comes first in
    # 'languages' after the preceding code, this means that someone
    # viewing the Costa Rica site with Chrome's preferred language set
    # to Spanish (i.e. with 'es' first in Accept-Language) will get
    # the Argentinian Spanish translations instead of Costa Rican
    # Spanish.  To get around this, look for the default language code
    # for the site, and if that's present, move it to the front of
    # 'languages'.  This should be generally helpful behaviour: the
    # default language code of the site should take precedence over
    # another language that happens to match based on the generic part
    # of the language code.
    language_code_index = next(
        (i for i, l in enumerate(languages) if l[0] == language_code),
        None
    )
    if language_code_index is not None:
        languages.insert(0, languages.pop(language_code_index))

    # Make sure the MEDIA_ROOT directory actually exists:
    media_root = conf.get('MEDIA_ROOT') or join(BASE_DIR, 'media')
    # Make sure that the MEDIA_ROOT and subdirectory for archived CSV
    # files exist:
    mkdir_p(join(media_root, 'csv-archives'))

    # Database
    # https://docs.djangoproject.com/en/1.6/ref/settings/#databases
    if conf.get('DATABASE_SYSTEM') == 'postgresql':
        databases = {
            'default': {
                'ENGINE':   'django.db.backends.postgresql_psycopg2',
                'NAME':     conf.get('YNMP_DB_NAME'),
                'USER':     conf.get('YNMP_DB_USER'),
                'PASSWORD': conf.get('YNMP_DB_PASS'),
                'HOST':     conf.get('YNMP_DB_HOST'),
                'PORT':     conf.get('YNMP_DB_PORT'),
            }
        }
    else:
        databases = {
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': join(BASE_DIR, 'db.sqlite3'),
            }
        }

    # Setup caches depending on DEBUG:
    if debug:
        cache = {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}
        cache_thumbnails = {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}
    else:
        cache = {
            'TIMEOUT': None, # cache keys never expire; we invalidate them
            'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
            'LOCATION': '127.0.0.1:11211',
            'KEY_PREFIX': databases['default']['NAME'],
        }
        cache_thumbnails = {
            'TIMEOUT': 60 * 60 * 24 * 2, # expire after two days
            'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
            'LOCATION': '127.0.0.1:11211',
            'KEY_PREFIX': databases['default']['NAME'] + "-thumbnails",
        }

    # Create a dictionary with these settings and other simpler ones:
    result = {
        'BASE_DIR': BASE_DIR,
        'ALLOWED_HOSTS': conf.get('ALLOWED_HOSTS'),
        'DEBUG': debug,
        'RUNNING_TESTS': tests,

        # Email addresses that error emails are sent to when DEBUG = False
        'ADMINS': conf['ADMINS'],

        # SECURITY WARNING: keep the secret key used in production secret!
        'SECRET_KEY': conf['SECRET_KEY'],

        'TEMPLATE_DEBUG': True,
        'TEMPLATE_DIRS': (
            join(BASE_DIR, 'mysite', 'templates'),
        ),
        'TEMPLATE_CONTEXT_PROCESSORS': TEMPLATE_CONTEXT_PROCESSORS + (
            # Required by allauth template tags
            "django.core.context_processors.request",
            # allauth specific context processors
            "allauth.account.context_processors.account",
            "allauth.socialaccount.context_processors.socialaccount",
            "django.contrib.messages.context_processors.messages",
            "mysite.context_processors.add_settings",
            "mysite.context_processors.election_date",
            "mysite.context_processors.add_group_permissions",
            "mysite.context_processors.add_notification_data",
            "mysite.context_processors.locale",
            "mysite.context_processors.add_site",
        ),

        'ELECTION_APP': election_app,
        'ELECTION_APP_FULLY_QUALIFIED': election_app_fully_qualified,

        # The Django applications in use:
        'INSTALLED_APPS': (
            'django.contrib.admin',
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.humanize',
            'django.contrib.sessions',
            'django.contrib.messages',
            'django.contrib.staticfiles',
            'django.contrib.sites',
            'django_nose',
            'django_extensions',
            'pipeline',
            'statici18n',
            'sorl.thumbnail',
            'rest_framework',
            'rest_framework.authtoken',
            'images',
            'haystack',
            'elections',
            'popolo',
            election_app_fully_qualified,
            'candidates',
            'tasks',
            'alerts',
            'cached_counts',
            'moderation_queue',
            'auth_helpers',
            'debug_toolbar',
            'template_timings_panel',
            'official_docuements',
            'results',
            'notifications',
            'allauth',
            'allauth.account',
            'allauth.socialaccount',
            'allauth.socialaccount.providers.google',
            'allauth.socialaccount.providers.facebook',
            'allauth.socialaccount.providers.twitter',
            'corsheaders',
            'crispy_forms',
            'usersettings',
        ),

        'SITE_ID': 1,

        'USERSETTINGS_MODEL': 'candidates.SiteSettings',

        'MIDDLEWARE_CLASSES': (
            'debug_toolbar.middleware.DebugToolbarMiddleware',
            'corsheaders.middleware.CorsMiddleware',
            'django.contrib.sessions.middleware.SessionMiddleware',
            'django.middleware.locale.LocaleMiddleware',
            'django.middleware.common.CommonMiddleware',
            'django.middleware.csrf.CsrfViewMiddleware',
            'django.contrib.auth.middleware.AuthenticationMiddleware',
            'candidates.middleware.CopyrightAssignmentMiddleware',
            'candidates.middleware.DisallowedUpdateMiddleware',
            'django.contrib.messages.middleware.MessageMiddleware',
            'django.middleware.clickjacking.XFrameOptionsMiddleware',
            'usersettings.middleware.CurrentUserSettingsMiddleware',
        ),

        # django-allauth settings:
        'AUTHENTICATION_BACKENDS': (
            # Needed to login by username in Django admin, regardless of `allauth`
            "django.contrib.auth.backends.ModelBackend",
            # `allauth` specific authentication methods, such as login by e-mail
            "allauth.account.auth_backends.AuthenticationBackend",
        ),
        'SOCIALACCOUNT_PROVIDERS': {
            'google': {'SCOPE': ['https://www.googleapis.com/auth/userinfo.profile'],
                       'AUTH_PARAMS': {'access_type': 'online'}},
            'facebook': {'SCOPE': ['email',]},
        },
        'LOGIN_REDIRECT_URL': '/',
        'ACCOUNT_EMAIL_VERIFICATION': 'mandatory',
        'ACCOUNT_EMAIL_REQUIRED': True,
        'ACCOUNT_USERNAME_REQUIRED': True,
        'SOCIALACCOUNT_AUTO_SIGNUP': True,

        # use our own adapter that checks if user signup has been disabled
        'ACCOUNT_ADAPTER': 'mysite.account_adapter.CheckIfAllowedNewUsersAccountAdapter',

        'ROOT_URLCONF': 'mysite.urls',
        'WSGI_APPLICATION': 'mysite.wsgi.application',

        # Django Debug Toolbar settings:
        'DEBUG_TOOLBAR_PATCH_SETTINGS': False,
        'DEBUG_TOOLBAR_PANELS': [
            'debug_toolbar.panels.versions.VersionsPanel',
            'debug_toolbar.panels.timer.TimerPanel',
            'debug_toolbar.panels.settings.SettingsPanel',
            'debug_toolbar.panels.headers.HeadersPanel',
            'debug_toolbar.panels.request.RequestPanel',
            'debug_toolbar.panels.sql.SQLPanel',
            'debug_toolbar.panels.staticfiles.StaticFilesPanel',
            'debug_toolbar.panels.templates.TemplatesPanel',
            'debug_toolbar.panels.cache.CachePanel',
            'debug_toolbar.panels.signals.SignalsPanel',
            'debug_toolbar.panels.logging.LoggingPanel',
            'debug_toolbar.panels.redirects.RedirectsPanel',
            'template_timings_panel.panels.TemplateTimings.TemplateTimings',
        ],
        'INTERNAL_IPS': ['127.0.0.1'],

        # Language settings (calculated above):
        'LOCALE_PATHS': locale_paths,
        'LANGUAGES': languages,
        'LANGUAGE_CODE': language_code,
        'TIME_ZONE': conf.get('TIME_ZONE', 'Europe/London'),
        'USE_I18N': True,
        'USE_L10N': True,
        'USE_TZ': True,

        # The media and static file settings:
        'MEDIA_ROOT': media_root,
        'MEDIA_URL': '/media/',

        # Settings for staticfiles and Django pipeline:
        'STATIC_URL': '/static/',
        'STATIC_ROOT': join(BASE_DIR, 'static'),
        'STATICI18N_ROOT': join(BASE_DIR, 'mysite', 'static'),
        'STATICFILES_DIRS': (
            join(BASE_DIR, 'mysite', 'static'),
        ),
        'STATICFILES_FINDERS': (
            'django.contrib.staticfiles.finders.FileSystemFinder',
            'django.contrib.staticfiles.finders.AppDirectoriesFinder',
            'pipeline.finders.PipelineFinder',
        ),
        'PIPELINE': {
            'STYLESHEETS': {
                'image-review': {
                    'source_filenames': (
                        'moderation_queue/css/jquery.Jcrop.css',
                        'moderation_queue/css/crop.scss',
                    ),
                    'output_filename': 'css/image-review.css',
                },
                'official_docuements': {
                    'source_filenames': (
                        'official_docuements/css/official_docuements.scss',
                    ),
                    'output_filename': 'css/official_docuements.css',
                },
                'all': {
                    'source_filenames': (
                        'candidates/style.scss',
                        'cached_counts/style.scss',
                        'select2/select2.css',
                        'jquery/jquery-ui.css',
                        'jquery/jquery-ui.structure.css',
                        'jquery/jquery-ui.theme.css',
                        'moderation_queue/css/photo-upload.scss',
                    ),
                    'output_filename': 'css/all.css',
                }
            },
            'JAVASCRIPT': {
                'image-review': {
                    'source_filenames': (
                        'moderation_queue/js/jquery.color.js',
                        'moderation_queue/js/jquery.Jcrop.js',
                        'moderation_queue/js/crop.js',
                    ),
                    'output_filename': 'js/image-review.js',
                },
                'all': {
                    'source_filenames': (
                        'js/vendor/custom.modernizr.js',
                        'jquery/jquery-1.11.1.js',
                        'jquery/jquery-ui.js',
                        'foundation/js/foundation/foundation.js',
                        'foundation/js/foundation/foundation.equalizer.js',
                        'foundation/js/foundation/foundation.dropdown.js',
                        'foundation/js/foundation/foundation.tooltip.js',
                        'foundation/js/foundation/foundation.offcanvas.js',
                        'foundation/js/foundation/foundation.accordion.js',
                        'foundation/js/foundation/foundation.joyride.js',
                        'foundation/js/foundation/foundation.alert.js',
                        'foundation/js/foundation/foundation.topbar.js',
                        'foundation/js/foundation/foundation.reveal.js',
                        'foundation/js/foundation/foundation.slider.js',
                        'foundation/js/foundation/foundation.magellan.js',
                        'foundation/js/foundation/foundation.clearing.js',
                        'foundation/js/foundation/foundation.orbit.js',
                        'foundation/js/foundation/foundation.interchange.js',
                        'foundation/js/foundation/foundation.abide.js',
                        'foundation/js/foundation/foundation.tab.js',
                        'select2/select2.js',
                        'js/constituency.js',
                        'js/person_form.js',
                        'js/home_geolocation_form.js',
                        'js/versions.js',
                        'js/language-switcher.js',
                    ),
                    'output_filename': 'js/all.js'
                }
            },

            'COMPILERS': (
                'pipeline.compilers.sass.SASSCompiler',
            ),
            'SASS_BINARY': 'sassc',
            'CSS_COMPRESSOR': 'pipeline.compressors.yui.YUICompressor',
            'JS_COMPRESSOR': 'pipeline.compressors.yui.YUICompressor',
            # On some platforms this might be called "yuicompressor", so it may be
            # necessary to symlink it into your PATH as "yui-compressor".
            'YUI_BINARY': '/usr/bin/env yui-compressor',
        },


        'TEST_RUNNER': 'django_nose.NoseTestSuiteRunner',

        'SOURCE_HINTS': _(
            u"Please don't quote third-party candidate sites \u2014 "
            u"we prefer URLs of news stories or official candidate pages."
        ),

        # By default, cache successful results from MapIt for a day
        'MAPIT_CACHE_SECONDS': 86400,
        'DATABASES': databases,
        'CACHES': {
            'default': cache,
            'thumbnails': cache_thumbnails,
        },

        # sorl-thumbnail settings:
        'THUMBNAIL_CACHE': 'thumbnails',
        'THUMBNAIL_DEBUG': debug,

        # Django Rest Framework settings:
        'REST_FRAMEWORK': {
            'DEFAULT_PERMISSION_CLASSES': ('candidates.api_permissions.ReadOnly',),
            'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
            'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
            'PAGE_SIZE': 10,
        },

        # allow attaching extra data to notifications:
        'NOTIFICATIONS_USE_JSONFIELD': True,

        'HAYSTACK_SIGNAL_PROCESSOR': 'haystack.signals.RealtimeSignalProcessor',

        'HAYSTACK_CONNECTIONS': {
            'default': {
                'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
                'URL': 'http://127.0.0.1:9200/',
                'INDEX_NAME': '{0}_{1}'.format(conf.get('YNMP_DB_NAME'), conf.get('YNMP_DB_HOST')),
            },
        },

        # CORS config
        'CORS_ORIGIN_ALLOW_ALL': True,
        'CORS_URLS_REGEX': r'^/api/.*$',
        'CORS_ALLOW_METHODS': (
            'GET',
            'OPTIONS',
        ),
    }

    if tests:
        result['NOSE_ARGS'] = [
            '--nocapture',
            '--with-yanc',
            # There are problems with OpenCV on Travis, so don't even try to
            # import moderation_queue/faces.py
            '--ignore-files=faces',
        ]
        if election_app == 'example':
            result['NOSE_ARGS'].append('--with-doctest')
    else:
        # If we're not testing, use PipelineCachedStorage
        result['STATICFILES_STORAGE'] = \
            'pipeline.storage.PipelineCachedStorage'
    if conf.get('NGINX_SSL'):
        result['SECURE_PROXY_SSL_HEADER'] = ('HTTP_X_FORWARDED_PROTO', 'https')
        result['ACCOUNT_DEFAULT_HTTP_PROTOCOL'] = 'https'
    for optional_election_app_setting, default in (
            ('AREAS_TO_ALWAYS_RETURN', []),
    ):
        try:
            result[optional_election_app_setting] = \
                getattr(elections_module, optional_election_app_setting)
        except AttributeError:
            result[optional_election_app_setting] = default
    # Make sure there's a trailing slash at the end of base MapIt URL:
    result['MAPIT_BASE_URL'] = \
        re.sub(r'/*$', '/', elections_module.MAPIT_BASE_URL)

    return result
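
The settings function above anchors every filesystem path to BASE_DIR with
join and makes sure the MEDIA_ROOT subdirectory for CSV archives exists before
Django starts. A minimal standalone sketch of that pattern; BASE_DIR and the
directory names here are illustrative, and os.makedirs(..., exist_ok=True)
(Python 3) stands in for the project's mkdir_p() helper:

# Sketch of the join(BASE_DIR, ...) settings pattern (illustrative names).
import os
from os.path import join

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

STATIC_ROOT = join(BASE_DIR, 'static')
TEMPLATE_DIRS = (join(BASE_DIR, 'mysite', 'templates'),)

media_root = join(BASE_DIR, 'media')
# Equivalent of mkdir_p(join(media_root, 'csv-archives')) on Python 3:
os.makedirs(join(media_root, 'csv-archives'), exist_ok=True)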

Example 161

Project: mysql-utilities Source File: binlogmove_parameters.py
Function: run
    def run(self):
        cmd_base = "mysqlbinlogmove.py"
        master_con = self.build_connection_string(self.server1).strip(' ')
        slave1_con = self.build_connection_string(self.server2).strip(' ')
        slave2_con = self.build_connection_string(self.server3).strip(' ')

        test_num = 1
        comment = "Test case {0} - help option.".format(test_num)
        cmd = "{0} --help".format(cmd_base)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        master_src = self.server1.select_variable('datadir')
        if self.debug:
            print("\nServer {0}:{1} source directory (datadir): "
                  "{2}".format(self.server1.host, self.server1.port,
                               master_src))
        master_index = os.path.join(master_src, 'master-bin.index')
        if self.debug:
            print("\nServer {0}:{1} bin index file: "
                  "{2}".format(self.server1.host, self.server1.port,
                               master_index))
        master_basename = os.path.join(master_src, 'master-bin')
        if self.debug:
            print("\nServer {0}:{1} bin basename: "
                  "{2}".format(self.server1.host, self.server1.port,
                               master_basename))

        test_num += 1
        comment = ("Test case {0} - warning using --bin-log-index with relay "
                   "type.").format(test_num)
        cmd = ("{0} --server={1} --bin-log-index={2} --log-type=relay "
               "--skip-flush-binlogs .").format(cmd_base, master_con,
                                                master_index)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - warning using --bin-log-basename with "
                   "relay type.").format(test_num)
        cmd = ("{0} --server={1} --bin-log-basename={2} --log-type=relay "
               "--skip-flush-binlogs .").format(cmd_base, master_con,
                                                master_basename)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - warning using --relay-log-index with bin "
                   "type.").format(test_num)
        cmd = ("{0} --server={1} --relay-log-index={2} --log-type=bin "
               "--skip-flush-binlogs .").format(cmd_base, master_con,
                                                master_index)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - warning using --relay-log-basename with "
                   "bin type.").format(test_num)
        cmd = ("{0} --server={1} --relay-log-basename={2} --log-type=bin "
               "--skip-flush-binlogs .").format(cmd_base, master_con,
                                                master_basename)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0} - warning using --skip-flush-binlogs "
                   "without --server.").format(test_num)
        cmd = ("{0} --binlog-dir={1} --skip-flush-binlogs "
               ".").format(cmd_base, self.slave1_dir)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Disable automatic relay log purging on slaves.
        for srv in [self.server2, self.server3]:
            srv.exec_query('SET GLOBAL relay_log_purge = 0')

        # Generate multiple binary log files.
        if self.debug:
            print("\nCreate multiple binary logs on all servers "
                  "(FLUSH LOCAL LOGS)...")
        for srv in [self.server1, self.server2, self.server3]:
            for _ in range(5):
                srv.exec_query('FLUSH LOCAL LOGS')

        # Get parameters info for slave 1 (source, index files, and basenames).
        slave1_src = self.server2.select_variable('datadir')
        if self.debug:
            print("\nServer {0}:{1} source directory (datadir): "
                  "{2}".format(self.server2.host, self.server2.port,
                               slave1_src))
        slave1_bin_index = os.path.join(slave1_src, 'slave1-bin.index')
        if self.debug:
            print("\nServer {0}:{1} bin index file: "
                  "{2}".format(self.server2.host, self.server2.port,
                               slave1_bin_index))
        slave1_bin_basename = os.path.join(slave1_src, 'slave1-bin')
        if self.debug:
            print("\nServer {0}:{1} bin basename: "
                  "{2}".format(self.server2.host, self.server2.port,
                               slave1_bin_basename))
        slave1_relay_index = os.path.join(slave1_src, 'slave1-relay-bin.index')
        if self.debug:
            print("\nServer {0}:{1} relay index file: "
                  "{2}".format(self.server2.host, self.server2.port,
                               slave1_relay_index))
        slave1_relay_basename = os.path.join(slave1_src, 'slave1-relay-bin')
        if self.debug:
            print("\nServer {0}:{1} relay basename: "
                  "{2}".format(self.server2.host, self.server2.port,
                               slave1_relay_basename))

        # Move only binlog files.
        test_num += 1
        comment = ("Test case {0}a - move (only) binlogs from slave "
                   ".").format(test_num)
        cmd = ("{0} --server={1} --bin-log-index={2} --bin-log-basename={3} "
               "--log-type=bin {4}").format(cmd_base, slave1_con,
                                            slave1_bin_index,
                                            slave1_bin_basename,
                                            self.slave1_dir)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.results.append("Test case {0}b - check moved files and changes "
                            "in index file:\n".format(test_num))
        self.check_moved_binlogs(self.slave1_dir, slave1_src,
                                 'slave1-bin.index')

        self.results.append("Test case {0}c - SHOW BINARY LOGS (flush "
                            "performed):\n".format(test_num))
        result_set = self.server2.exec_query('SHOW BINARY LOGS')
        for row in result_set:
            self.results.append("file: {0}, size: {1};\n".format(row[0],
                                                                 row[1]))

        comment = "Test case {0}d - move files back.".format(test_num)
        cmd = ("{0} --binlog-dir={1} --bin-log-index={2} "
               "--bin-log-basename={3} --log-type=bin "
               "{4}").format(cmd_base, self.slave1_dir, slave1_bin_index,
                             slave1_bin_basename, slave1_src)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Move only relay log files.
        test_num += 1
        comment = ("Test case {0}a - move (only) relay logs from slave "
                   ".").format(test_num)
        cmd = ("{0} --server={1} --relay-log-index={2} "
               "--relay-log-basename={3} --log-type=relay "
               "{4}").format(cmd_base, slave1_con, slave1_relay_index,
                             slave1_relay_basename, self.slave1_dir)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.results.append("Test case {0}b - check moved files and changes "
                            "in index file:\n".format(test_num))
        self.check_moved_binlogs(self.slave1_dir, slave1_src,
                                 'slave1-relay-bin.index')

        comment = "Test case {0}c - move files back.".format(test_num)
        cmd = ("{0} --binlog-dir={1} --relay-log-index={2} "
               "--relay-log-basename={3} --log-type=relay "
               "{4}").format(cmd_base, self.slave1_dir, slave1_relay_index,
                             slave1_relay_basename, slave1_src)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Get parameters info for slave 2 (source, index files, and basenames).
        slave2_src = self.server3.select_variable('datadir')
        if self.debug:
            print("\nServer {0}:{1} source directory (datadir): "
                  "{2}".format(self.server3.host, self.server3.port,
                               slave2_src))
        slave2_bin_index = os.path.join(slave2_src, 'slave2-bin.index')
        if self.debug:
            print("\nServer {0}:{1} bin index file: "
                  "{2}".format(self.server3.host, self.server3.port,
                               slave2_bin_index))
        slave2_bin_basename = os.path.join(slave2_src, 'slave2-bin')
        if self.debug:
            print("\nServer {0}:{1} bin basename: "
                  "{2}".format(self.server3.host, self.server3.port,
                               slave2_bin_basename))
        slave2_relay_index = os.path.join(slave2_src, 'slave2-relay-bin.index')
        if self.debug:
            print("\nServer {0}:{1} relay index file: "
                  "{2}".format(self.server3.host, self.server3.port,
                               slave2_relay_index))
        slave2_relay_basename = os.path.join(slave2_src, 'slave2-relay-bin')
        if self.debug:
            print("\nServer {0}:{1} relay basename: "
                  "{2}".format(self.server3.host, self.server3.port,
                               slave2_relay_basename))

        # Workaround for Windows to prevent the next SHOW BINARY LOGS from
        # displaying unexpected/wrong information after relocating the binary
        # log files.
        if os.name != 'posix':
            self.server3.exec_query('SHOW BINARY LOGS')

        # Move all binary log (bin and relay) files.
        test_num += 1
        comment = ("Test case {0}a - move (all) bin and relay logs from slave "
                   " (with --skip-flush-binlogs).").format(test_num)
        cmd = ("{0} --server={1} --bin-log-index={2} --bin-log-basename={3} "
               "--relay-log-index={4} --relay-log-basename={5} --log-type=all "
               "--skip-flush-binlogs {6}").format(cmd_base, slave2_con,
                                                  slave2_bin_index,
                                                  slave2_bin_basename,
                                                  slave2_relay_index,
                                                  slave2_relay_basename,
                                                  self.slave2_dir)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.results.append("Test case {0}b - check moved files and changes "
                            "in index file:\n".format(test_num))
        self.check_moved_binlogs(self.slave2_dir, slave2_src,
                                 'slave2-bin.index', 'slave2-relay-bin',
                                 'slave2-relay-bin.index')

        self.results.append("Test case {0}c - SHOW BINARY LOGS (flush "
                            "skipped):\n".format(test_num))
        result_set = self.server3.exec_query('SHOW BINARY LOGS')
        for row in result_set:
            self.results.append("file: {0}, size: {1};\n".format(row[0],
                                                                 row[1]))

        comment = "Test case {0}d - move files back.".format(test_num)
        cmd = ("{0} --binlog-dir={1} --bin-log-index={2} "
               "--bin-log-basename={3} --relay-log-index={4} "
               "--relay-log-basename={5} --log-type=all "
               "{6}").format(cmd_base, self.slave2_dir, slave2_bin_index,
                             slave2_bin_basename, slave2_relay_index,
                             slave2_relay_basename, slave2_src)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Move binary log files matching specified sequence numbers.
        test_num += 1
        comment = ("Test case {0}a - move binary logs from slave matching "
                   "specific sequence numbers.").format(test_num)
        cmd = ("{0} --server={1} --bin-log-index={2} --bin-log-basename={3} "
               "--relay-log-index={4} --relay-log-basename={5} --log-type=all "
               "--sequence=2,4-7,11,13 {6}").format(cmd_base, slave1_con,
                                                    slave1_bin_index,
                                                    slave1_bin_basename,
                                                    slave1_relay_index,
                                                    slave1_relay_basename,
                                                    self.slave1_dir)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.results.append("Test case {0}b - check moved files and changes "
                            "in index file:\n".format(test_num))
        self.check_moved_binlogs(self.slave1_dir, slave1_src,
                                 'slave1-bin.index', 'slave1-relay-bin',
                                 'slave1-relay-bin.index')

        comment = "Test case {0}c - move files back.".format(test_num)
        cmd = ("{0} --binlog-dir={1} --bin-log-index={2} "
               "--bin-log-basename={3} --relay-log-index={4} "
               "--relay-log-basename={5} --log-type=all "
               "--sequence=2,4-7,11,13 {6}").format(cmd_base, self.slave1_dir,
                                                    slave1_bin_index,
                                                    slave1_bin_basename,
                                                    slave1_relay_index,
                                                    slave1_relay_basename,
                                                    slave1_src)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Hack modified date/time for a few binary log files.
        files_to_hack_date = ['slave2-bin.000002', 'slave2-relay-bin.000005',
                              'slave2-relay-bin.000010']
        # Hacked date/time: 2 days and 1 second before current.
        hacked_time = time.time() - (86400 * 2) - 1
        for f_name in files_to_hack_date:
            file_path = os.path.join(slave2_src, f_name)
            os.utime(file_path, (hacked_time, hacked_time))

        # Move older binary log files (prior to a specific date).
        test_num += 1
        comment = ("Test case {0} - move binary logs from slave modified "
                   "3 days ago (no files to move).").format(test_num)
        cmd = ("{0} --server={1} --bin-log-index={2} --bin-log-basename={3} "
               "--relay-log-index={4} --relay-log-basename={5} --log-type=all "
               "--modified-before=3 {6}").format(cmd_base, slave2_con,
                                                 slave2_bin_index,
                                                 slave2_bin_basename,
                                                 slave2_relay_index,
                                                 slave2_relay_basename,
                                                 self.slave2_dir)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0}a - move binary logs from slave modified "
                   "2 days ago.").format(test_num)
        cmd = ("{0} --server={1} --bin-log-index={2} --bin-log-basename={3} "
               "--relay-log-index={4} --relay-log-basename={5} --log-type=all "
               "--modified-before=2 {6}").format(cmd_base, slave2_con,
                                                 slave2_bin_index,
                                                 slave2_bin_basename,
                                                 slave2_relay_index,
                                                 slave2_relay_basename,
                                                 self.slave2_dir)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.results.append("Test case {0}b - check moved files and changes "
                            "in index file:\n".format(test_num))
        self.check_moved_binlogs(self.slave2_dir, slave2_src,
                                 'slave2-bin.index', 'slave2-relay-bin',
                                 'slave2-relay-bin.index')

        comment = "Test case {0}c - move files back.".format(test_num)
        cmd = ("{0} --binlog-dir={1} --bin-log-index={2} "
               "--bin-log-basename={3} --relay-log-index={4} "
               "--relay-log-basename={5} --log-type=all "
               "--modified-before=2 {6}").format(cmd_base, self.slave2_dir,
                                                 slave2_bin_index,
                                                 slave2_bin_basename,
                                                 slave2_relay_index,
                                                 slave2_relay_basename,
                                                 slave2_src)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        test_num += 1
        comment = ("Test case {0}a - move binary logs from slave modified "
                   "before yesterday.").format(test_num)
        yesterday_time = time.localtime(time.time() - 86400)
        yesterday = time.strftime('%Y-%m-%d', yesterday_time)
        cmd = ("{0} --server={1} --bin-log-index={2} --bin-log-basename={3} "
               "--relay-log-index={4} --relay-log-basename={5} --log-type=all "
               "--modified-before={6} {7}").format(cmd_base, slave2_con,
                                                   slave2_bin_index,
                                                   slave2_bin_basename,
                                                   slave2_relay_index,
                                                   slave2_relay_basename,
                                                   yesterday, self.slave2_dir)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.results.append("Test case {0}b - check moved files and changes "
                            "in index file:\n".format(test_num))
        self.check_moved_binlogs(self.slave2_dir, slave2_src,
                                 'slave2-bin.index', 'slave2-relay-bin',
                                 'slave2-relay-bin.index')

        comment = "Test case {0}c - move files back.".format(test_num)
        cmd = ("{0} --binlog-dir={1} --bin-log-index={2} "
               "--bin-log-basename={3} --relay-log-index={4} "
               "--relay-log-basename={5} --log-type=all "
               "--modified-before={6} {7}").format(cmd_base, self.slave2_dir,
                                                   slave2_bin_index,
                                                   slave2_bin_basename,
                                                   slave2_relay_index,
                                                   slave2_relay_basename,
                                                   yesterday, slave2_src)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Mask non-deterministic data.
        self.replace_substring_portion(", size: ", ";", ", size: ???;")
        # Remove version information.
        self.remove_result_and_lines_after("MySQL Utilities mysqlbinlogmove"
                                           " version", 1)
        # Warning messages for older MySQL versions (variables not available).
        self.remove_result("# WARNING: Variable 'relay_log_basename' is not "
                           "available for server ")
        self.remove_result("# WARNING: Variable 'log_bin_basename' is not "
                           "available for server ")
        self.remove_result("# WARNING: The bin basename is not required for "
                           "server versions >= 5.6.2 (value ignored).")
        self.remove_result("# WARNING: The bin index is not required for "
                           "server versions >= 5.6.4 (value ignored).")
        self.remove_result("# WARNING: The relay basename is not required for "
                           "server versions >= 5.6.2 (value ignored).")

        return True
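
The test above composes binary-log index files and basenames by joining the
server's datadir with fixed filenames, then back-dates a few files with
os.utime so the --modified-before cases have something to match. A minimal
sketch of that path handling; the datadir and filenames are assumed values:

# Sketch of the datadir / os.path.join / os.utime handling (illustrative paths).
import os
import time

datadir = "/var/lib/mysql"
bin_index = os.path.join(datadir, "master-bin.index")
bin_basename = os.path.join(datadir, "master-bin")

# Back-date a log file by two days and one second, as the test does before
# exercising --modified-before:
hacked_time = time.time() - (86400 * 2) - 1
target = os.path.join(datadir, "master-bin.000002")
if os.path.exists(target):
    os.utime(target, (hacked_time, hacked_time))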

Example 162

Project: mysql-utilities Source File: audit_log_grep.py
Function: run
    def run(self):
        self.res_fname = "result.txt"

        cmd_base = "mysqlauditgrep.py {0}"

        num_test = 1
        comment = "Test case {0} - Show the help".format(num_test)
        cmd_opts = "--help "
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Get the current (malformed) audit log file used by the server
        data_dir = self.server1.show_server_variable('datadir')[0][1]
        audit_log = self.server1.show_server_variable('audit_log_file')[0][1]
        audit_log_name = os.path.join(data_dir, audit_log)

        num_test += 1
        comment = "Test case {0} - Show audit log statistics".format(num_test)
        cmd_opts = "--file-stats {0} --format=VERTICAL".format(audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        num_test += 1
        comment = "Test case {0} - No search criteria defined".format(num_test)
        cmd_opts = "{0} ".format(audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Audit log for tests (old format).
        audit_log_name = os.path.normpath(
            "./std_data/audit.log.13488316109086370")

        # Audit log for tests (NEW format).
        new_audit_log_name = os.path.normpath(
            "./std_data/audit.log.13951424704434196.xml")

        num_test += 1
        comment = ("Test case {0} - Show all records in the RAW "
                   "format".format(num_test))
        cmd_opts = "{0} --format=RAW".format(audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Show all records in the RAW "
                   "format".format(num_test))
        cmd_opts = "{0} --format=RAW".format(new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        users = "tester"

        num_test += 1
        comment = ("Test case {0} - Search entries of specific "
                   "users".format(num_test))
        cmd_opts = "--users={0} {1} ".format(users, audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Search entries of specific "
                   "users".format(num_test))
        cmd_opts = "--users={0} {1} ".format(users, new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        num_test += 1
        comment = ("Test case {0} - No entry found for specified "
                   "users".format(num_test))
        cmd_opts = "--users=xpto,,fake, {0} ".format(audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - No entry found for specified "
                   "users".format(num_test))
        cmd_opts = "--users=xpto,,fake, {0} ".format(new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        startdate = "2012-09-27T13:33:47"
        enddate = "2012-09-28"

        num_test += 1
        comment = ("Test case {0} - Search entries for a specific datetime "
                   "range".format(num_test))
        cmd_opts = ("--start-date={0} --end-date={1} "
                    "{2}".format(startdate, enddate, audit_log_name))
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        startdate = "2014-03-18T11:34:30"
        enddate = "2014-03-25"

        comment = ("Test case {0} (NEW) - Search entries for a specific "
                   "datetime range".format(num_test))
        cmd_opts = ("--start-date={0} --end-date={1} "
                    "{2}".format(startdate, enddate, new_audit_log_name))
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        num_test += 1
        comment = ("Test case {0} - No entry found for specified datetime "
                   "range".format(num_test))
        cmd_opts = ("--start-date=2012-01-01 --end-date=2012-01-01T23:59:59 "
                    "{0}".format(audit_log_name))
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - No entry found for specified "
                   "datetime range".format(num_test))
        cmd_opts = ("--start-date=2012-01-01 --end-date=2012-01-01T23:59:59 "
                    "{0}".format(new_audit_log_name))
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        pattern = '"% = ___"'

        num_test += 1
        comment = ("Test case {0} - Search entries matching SQL LIKE "
                   "pattern ".format(num_test))
        cmd_opts = "--pattern={0} {1}".format(pattern, audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Search entries matching SQL LIKE "
                   "pattern ".format(num_test))
        cmd_opts = "--pattern={0} {1}".format(pattern, new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        pattern = '".* = ..."'

        num_test += 1
        comment = ("Test case {0} - Search entries matching REGEXP "
                   "pattern ".format(num_test))
        cmd_opts = "--pattern={0} --regexp {1}".format(pattern, audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Search entries matching REGEXP "
                   "pattern ".format(num_test))
        cmd_opts = "--pattern={0} --regexp {1}".format(pattern,
                                                       new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        num_test += 1
        comment = ("Test case {0} - No entry found matching specified "
                   "pattern ".format(num_test))
        cmd_opts = '--pattern="%% = ___" --regexp {0}'.format(audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - No entry found matching specified "
                   "pattern ".format(num_test))
        cmd_opts = '--pattern="%% = ___" --regexp {0}'.format(
            new_audit_log_name
        )
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        query_types = "show,SET"

        num_test += 1
        comment = ("Test case {0} - Search entries of specific query "
                   "types".format(num_test))
        cmd_opts = "--query-type={0} {1}".format(query_types, audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Search entries of specific query "
                   "types".format(num_test))
        cmd_opts = "--query-type={0} {1}".format(query_types,
                                                 new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        num_test += 1
        comment = ("Test case {0} - No entry found for specified query "
                   "types".format(num_test))
        cmd_opts = "--query-type=GRANT,REVOKE {0}".format(audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - No entry found for specified query "
                   "types".format(num_test))
        cmd_opts = "--query-type=GRANT,REVOKE {0}".format(new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        event_types = "Ping,cONNECT"

        num_test += 1
        comment = ("Test case {0} - Search entries of specific event "
                   "types".format(num_test))
        cmd_opts = "--event-type={0} {1}".format(event_types, audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Search entries of specific event "
                   "types".format(num_test))
        cmd_opts = "--event-type={0} {1}".format(event_types,
                                                 new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        num_test += 1
        comment = ("Test case {0} - No entry found for specified event "
                   "types".format(num_test))
        cmd_opts = ('--event-type="Binlog Dump,NoAudit" '
                    '{0}'.format(audit_log_name))
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - No entry found for specified event "
                   "types".format(num_test))
        cmd_opts = ('--event-type="Binlog Dump,NoAudit" '
                    '{0}'.format(new_audit_log_name))
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        status = "1100-1199,1046"
        num_test += 1
        comment = ("Test case {0} - Search entries with specific "
                   "status".format(num_test))
        cmd_opts = "--status={0} {1}".format(status, audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Search entries with specific "
                   "status".format(num_test))
        cmd_opts = "--status={0} {1}".format(status, new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        status = ",100,500-750,50,25,999,,8000-9000,10-30,,"
        num_test += 1
        comment = ("Test case {0} - No entry found for specific "
                   "status".format(num_test))
        cmd_opts = "--status={0} {1}".format(status, audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - No entry found for specific "
                   "status".format(num_test))
        cmd_opts = "--status={0} {1}".format(status, new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        users = "tester"
        startdate = "2012-10-10"
        enddate = "0"
        pattern = '".*<>.*"'
        query_types = "SELECT"
        event_types = "query"
        status = "1-9999"

        num_test += 1
        comment = "Test case {0} - Apply all search criteria".format(num_test)
        cmd_opts = ("--users={0} --start-date={1} --end-date={2} "
                    "--pattern={3} --regexp --query-type={4} --event-type={5} "
                    "--status={6} {7}".format(users, startdate, enddate,
                                              pattern, query_types,
                                              event_types, status,
                                              audit_log_name))
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        startdate = "2013-03-25"
        comment = ("Test case {0} (NEW) - Apply all search criteria"
                   "").format(num_test)
        cmd_opts = ("--users={0} --start-date={1} --end-date={2} "
                    "--pattern={3} --regexp --query-type={4} --event-type={5} "
                    "--status={6} {7}".format(users, startdate, enddate,
                                              pattern, query_types,
                                              event_types, status,
                                              new_audit_log_name))
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        num_test += 1
        comment = ("Test case {0} - Test query-type false "
                   "positives".format(num_test))
        cmd_opts = "--query-type={0} {1}".format(query_types, audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Test query-type false "
                   "positives".format(num_test))
        cmd_opts = "--query-type={0} {1}".format(query_types,
                                                 new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        query_types = "COMMIT,SET,PREPARE"
        num_test += 1
        comment = ("Test case {0} - Test query-type false "
                   "positives (particular cases)".format(num_test))
        cmd_opts = "--query-type={0} {1}".format(query_types, audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Test query-type false "
                   "positives (particular cases)".format(num_test))
        cmd_opts = "--query-type={0} {1}".format(query_types,
                                                 new_audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        num_test += 1
        comment = ("Test case {0} - Search entries of "
                   "multi-line log".format(num_test))
        audit_log_name = os.path.normpath("./std_data/multi.log")
        cmd_opts = "--format=csv --query=CREATE {0}".format(audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Search entries of "
                   "multi-line log".format(num_test))
        audit_log_name = os.path.normpath("./std_data/multi_sqltext.log.xml")
        cmd_opts = "--format=csv --query=CREATE {0}".format(audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        num_test += 1
        comment = ("Test case {0} - Search entries of "
                   "single line log".format(num_test))
        audit_log_name = os.path.normpath("./std_data/single.log")
        cmd_opts = "--format=csv --query=CREATE {0}".format(audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        comment = ("Test case {0} (NEW) - Search entries of "
                   "single line log".format(num_test))
        audit_log_name = os.path.normpath("./std_data/single_sqltext.log.xml")
        cmd_opts = "--format=csv --query=CREATE {0}".format(audit_log_name)
        cmd = cmd_base.format(cmd_opts)
        res = self.run_test_case(0, cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        self.do_replacements()

        return True
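
The test cases above pass their log-file arguments through os.path.normpath on relative paths such as "./std_data/multi.log". A minimal sketch, with a hypothetical helper name and illustrative paths that are not from the test, of how os.path.join pairs with the os.path.normpath calls used here to build the same kind of clean relative path:

import os

# Hypothetical helper (not part of the test above): build a path under a
# test-data directory and let normpath strip the leading "./".
def std_data_path(filename):
    return os.path.normpath(os.path.join(".", "std_data", filename))

print(std_data_path("multi.log"))    # std_data/multi.log (std_data\multi.log on Windows)
print(std_data_path("single.log"))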

Example 163

Project: medicare-demo Source File: regrtest.py
def main(tests=None, testdir=None, verbose=0, quiet=False, generate=False,
         exclude=False, single=False, randomize=False, fromfile=None,
         findleaks=False, use_resources=None, trace=False, coverdir='coverage',
         runleaks=False, huntrleaks=False, verbose2=False, expected=False,
         memo=None, junit_xml=None):
    """Execute a test suite.

    This also parses command-line options and modifies its behavior
    accordingly.

    tests -- a list of strings containing test names (optional)
    testdir -- the directory in which to look for tests (optional)

    Users other than the Python test suite will certainly want to
    specify testdir; if it's omitted, the directory containing the
    Python test suite is searched for.

    If the tests argument is omitted, the tests listed on the
    command-line will be used.  If that's empty, too, then all *.py
    files beginning with test_ will be used.

    The other default arguments (verbose, quiet, generate, exclude, single,
    randomize, findleaks, use_resources, trace and coverdir) allow programmers
    calling main() directly to set the values that would normally be set by
    flags on the command line.
    """

    test_support.record_original_stdout(sys.stdout)
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hvgqxsrf:lu:t:TD:NLR:wM:em:j:',
                                   ['help', 'verbose', 'quiet', 'generate',
                                    'exclude', 'single', 'random', 'fromfile',
                                    'findleaks', 'use=', 'threshold=', 'trace',
                                    'coverdir=', 'nocoverdir', 'runleaks',
                                    'huntrleaks=', 'verbose2', 'memlimit=',
                                    'expected', 'memo'
                                    ])
    except getopt.error, msg:
        usage(2, msg)

    # Defaults
    allran = True
    if use_resources is None:
        use_resources = []
    for o, a in opts:
        if o in ('-h', '--help'):
            usage(0)
        elif o in ('-v', '--verbose'):
            verbose += 1
        elif o in ('-w', '--verbose2'):
            verbose2 = True
        elif o in ('-q', '--quiet'):
            quiet = True;
            verbose = 0
        elif o in ('-g', '--generate'):
            generate = True
        elif o in ('-x', '--exclude'):
            exclude = True
            allran = False
        elif o in ('-e', '--expected'):
            expected = True
            allran = False
        elif o in ('-s', '--single'):
            single = True
        elif o in ('-r', '--randomize'):
            randomize = True
        elif o in ('-f', '--fromfile'):
            fromfile = a
        elif o in ('-l', '--findleaks'):
            findleaks = True
        elif o in ('-L', '--runleaks'):
            runleaks = True
        elif o in ('-m', '--memo'):
            memo = a
        elif o in ('-j', '--junit-xml'):
            junit_xml = a
        elif o in ('-t', '--threshold'):
            import gc
            gc.set_threshold(int(a))
        elif o in ('-T', '--coverage'):
            trace = True
        elif o in ('-D', '--coverdir'):
            coverdir = os.path.join(os.getcwd(), a)
        elif o in ('-N', '--nocoverdir'):
            coverdir = None
        elif o in ('-R', '--huntrleaks'):
            huntrleaks = a.split(':')
            if len(huntrleaks) != 3:
                print a, huntrleaks
                usage(2, '-R takes three colon-separated arguments')
            if len(huntrleaks[0]) == 0:
                huntrleaks[0] = 5
            else:
                huntrleaks[0] = int(huntrleaks[0])
            if len(huntrleaks[1]) == 0:
                huntrleaks[1] = 4
            else:
                huntrleaks[1] = int(huntrleaks[1])
            if len(huntrleaks[2]) == 0:
                huntrleaks[2] = "reflog.txt"
        elif o in ('-M', '--memlimit'):
            test_support.set_memlimit(a)
        elif o in ('-u', '--use'):
            u = [x.lower() for x in a.split(',')]
            for r in u:
                if r == 'all':
                    use_resources[:] = RESOURCE_NAMES
                    continue
                remove = False
                if r[0] == '-':
                    remove = True
                    r = r[1:]
                if r not in RESOURCE_NAMES:
                    usage(1, 'Invalid -u/--use option: ' + a)
                if remove:
                    if r in use_resources:
                        use_resources.remove(r)
                elif r not in use_resources:
                    use_resources.append(r)
    if generate and verbose:
        usage(2, "-g and -v don't go together!")
    if single and fromfile:
        usage(2, "-s and -f don't go together!")

    good = []
    bad = []
    skipped = []
    resource_denieds = []

    if findleaks:
        try:
            if test_support.is_jython:
                raise ImportError()
            import gc
        except ImportError:
            print 'No GC available, disabling findleaks.'
            findleaks = False
        else:
            # Uncomment the line below to report garbage that is not
            # freeable by reference counting alone.  By default only
            # garbage that is not collectable by the GC is reported.
            #gc.set_debug(gc.DEBUG_SAVEALL)
            found_garbage = []

    if single:
        from tempfile import gettempdir
        filename = os.path.join(gettempdir(), 'pynexttest')
        try:
            fp = open(filename, 'r')
            next = fp.read().strip()
            tests = [next]
            fp.close()
        except IOError:
            pass

    if fromfile:
        tests = []
        fp = open(fromfile)
        for line in fp:
            guts = line.split() # assuming no test has whitespace in its name
            if guts and not guts[0].startswith('#'):
                tests.extend(guts)
        fp.close()

    # Strip .py extensions.
    if args:
        args = map(removepy, args)
        allran = False
    if tests:
        tests = map(removepy, tests)

    stdtests = STDTESTS[:]
    nottests = NOTTESTS[:]
    if exclude:
        for arg in args:
            if arg in stdtests:
                stdtests.remove(arg)
        nottests[:0] = args
        args = []
    tests = tests or args or findtests(testdir, stdtests, nottests)
    if single:
        tests = tests[:1]
    if randomize:
        random.shuffle(tests)
    if trace:
        import trace
        tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
                             trace=False, count=True)
    test_support.verbose = verbose      # Tell tests to be moderately quiet
    test_support.use_resources = use_resources
    test_support.junit_xml_dir = junit_xml
    save_modules = sys.modules.keys()
    skips = _ExpectedSkips()
    failures = _ExpectedFailures()
    for test in tests:
        if expected and (test in skips or test in failures):
            continue
        if not quiet:
            print test
            sys.stdout.flush()
        if trace:
            # If we're tracing code coverage, then we don't exit with status
            # if on a false return value from main.
            tracer.runctx('runtest(test, generate, verbose, quiet, testdir)',
                          globals=globals(), locals=vars())
        else:
            try:
                ok = runtest(test, generate, verbose, quiet, testdir,
                             huntrleaks, junit_xml)
            except KeyboardInterrupt:
                # print a newline separate from the ^C
                print
                break
            except:
                raise
            if ok > 0:
                good.append(test)
            elif ok == 0:
                bad.append(test)
            else:
                skipped.append(test)
                if ok == -2:
                    resource_denieds.append(test)
        if findleaks:
            gc.collect()
            if gc.garbage:
                print "Warning: test created", len(gc.garbage),
                print "uncollectable object(s)."
                # move the uncollectable objects somewhere so we don't see
                # them again
                found_garbage.extend(gc.garbage)
                del gc.garbage[:]
        # Unload the newly imported modules (best effort finalization)
        for module in sys.modules.keys():
            if module not in save_modules and module.startswith("test."):
                test_support.unload(module)
                module = module[5:]
                if hasattr(_test, module):
                    delattr(_test, module)

    # The lists won't be sorted if running with -r
    good.sort()
    bad.sort()
    skipped.sort()

    if good and not quiet:
        if not bad and not skipped and len(good) > 1:
            print "All",
        print count(len(good), "test"), "OK."
        if verbose:
            print "CAUTION:  stdout isn't compared in verbose mode:"
            print "a test that passes in verbose mode may fail without it."
    surprises = 0
    if skipped and not quiet:
        print count(len(skipped), "test"), "skipped:"
        surprises += countsurprises(skips, skipped, 'skip', 'ran', allran, resource_denieds)
    if bad:
        print count(len(bad), "test"), "failed:"
        surprises += countsurprises(failures, bad, 'fail', 'passed', allran, resource_denieds)

    if verbose2 and bad:
        print "Re-running failed tests in verbose mode"
        for test in bad:
            print "Re-running test %r in verbose mode" % test
            sys.stdout.flush()
            try:
                test_support.verbose = 1
                ok = runtest(test, generate, 1, quiet, testdir,
                             huntrleaks)
            except KeyboardInterrupt:
                # print a newline separate from the ^C
                print
                break
            except:
                raise

    if single:
        alltests = findtests(testdir, stdtests, nottests)
        for i in range(len(alltests)):
            if tests[0] == alltests[i]:
                if i == len(alltests) - 1:
                    os.unlink(filename)
                else:
                    fp = open(filename, 'w')
                    fp.write(alltests[i+1] + '\n')
                    fp.close()
                break
        else:
            os.unlink(filename)

    if trace:
        r = tracer.results()
        r.write_results(show_missing=True, summary=True, coverdir=coverdir)

    if runleaks:
        os.system("leaks %d" % os.getpid())

    if memo:
        savememo(memo,good,bad,skipped)

    sys.exit(surprises > 0)
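
Both os.path.join calls in main() above anchor a user-supplied name to a known base directory: os.getcwd() for the --coverdir option and tempfile.gettempdir() for the single-test state file. A minimal sketch of that pattern, using illustrative values rather than real command-line input:

import os
import tempfile

# Resolve a relative directory name against the current working directory,
# the way the --coverdir argument is handled above.
coverdir = os.path.join(os.getcwd(), "coverage")

# Keep a small bookkeeping file in the platform temporary directory,
# the way the -s/--single state file is placed above.
state_file = os.path.join(tempfile.gettempdir(), "pynexttest")

print(coverdir)
print(state_file)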

Example 164

Project: manuskript Source File: version_1.py
def saveProject(zip=None):
    """
    Saves the project. If zip is False, the project is saved as a multitude of plain-text files for the most part
    and some XML or zip? for settings and stuff.
    If zip is True, everything is saved as a single zipped file. Easier to carry around, but does not allow
    collaborative work, versioning, or third-party editing.
    @param zip: if True, saves as a single file. If False, saves as plain-text. If None, tries to determine based on
    settings.
    @return: Nothing
    """
    if zip is None:
        zip = settings.saveToZip

    log("\n\nSaving to:", "zip" if zip else "folder")

    # List of files to be written
    files = []
    # List of files to be removed
    removes = []
    # List of files to be moved
    moves = []

    mw = mainWindow()

    # File format version
    files.append(("MANUSKRIPT", "1"))

    # General infos (book and author)
    # Saved in plain text, in infos.txt

    path = "infos.txt"
    content = ""
    for name, col in [
            ("Title", 0),
            ("Subtitle", 1),
            ("Serie", 2),
            ("Volume", 3),
            ("Genre", 4),
            ("License", 5),
            ("Author", 6),
            ("Email", 7),
            ]:
        item = mw.mdlFlatData.item(0, col)
        if item:
            val = item.text().strip()
        else:
            val = ""

        if val:
            content += "{name}:{spaces}{value}\n".format(
                name=name,
                spaces=" " * (15 - len(name)),
                value=val
            )
    files.append((path, content))

    ####################################################################################################################
    # Summary
    # In plain text, in summary.txt

    path = "summary.txt"
    content = ""
    for name, col in [
            ("Situation", 0),
            ("Sentence", 1),
            ("Paragraph", 2),
            ("Page", 3),
            ("Full", 4),
            ]:
        item = mw.mdlFlatData.item(1, col)
        if item:
            val = item.text().strip()
        else:
            val = ""

        if val:
            content += formatMetaData(name, val, 12)

    files.append((path, content))

    ####################################################################################################################
    # Label & Status
    # In plain text

    for mdl, path in [
        (mw.mdlStatus, "status.txt"),
        (mw.mdlLabels, "labels.txt")
    ]:

        content = ""

        # We skip the first row, which is empty and transparent
        for i in range(1, mdl.rowCount()):
            color = ""
            if mdl.data(mdl.index(i, 0), Qt.DecorationRole) is not None:
                color = iconColor(mdl.data(mdl.index(i, 0), Qt.DecorationRole)).name(QColor.HexRgb)
                color = color if color != "#ff000000" else "#00000000"

            text = mdl.data(mdl.index(i, 0))

            if text:
                content += "{name}{color}\n".format(
                    name=text,
                    color="" if color == "" else ":" + " " * (20 - len(text)) + color
                )

        files.append((path, content))

    ####################################################################################################################
    # Characters
    # In a character folder

    path = os.path.join("characters", "{name}.txt")
    mdl = mw.mdlCharacter

    # Review characters
    for c in mdl.characters:

        # Generates file's content
        content = ""
        for m in characterMap:
            val = mdl.data(c.index(m.value)).strip()
            if val:
                content += formatMetaData(characterMap[m], val, 20)

        # Character's color:
        content += formatMetaData("Color", c.color().name(QColor.HexRgb), 20)

        # Character's infos
        for info in c.infos:
            content += formatMetaData(info.description, info.value, 20)

        # generate file's path
        cpath = path.format(name="{ID}-{slugName}".format(
            ID=c.ID(),
            slugName=slugify(c.name())
        ))

        # Has the character been renamed?
        if c.lastPath and cpath != c.lastPath:
            moves.append((c.lastPath, cpath))

        # Update character's path
        c.lastPath = cpath

        files.append((cpath, content))

    ####################################################################################################################
    # Texts
    # In an outline folder

    mdl = mw.mdlOutline

    # Go through the tree
    f, m, r = exportOutlineItem(mdl.rootItem)
    files += f
    moves += m
    removes += r

    # Writes revisions (if asked for)
    if settings.revisions["keep"]:
        files.append(("revisions.xml", mdl.saveToXML()))

    ####################################################################################################################
    # World
    # Either in an XML file, or in lots of plain texts?
    # More probably text, since there might be writing done in third-party.

    path = "world.opml"
    mdl = mw.mdlWorld

    root = ET.Element("opml")
    root.attrib["version"] = "1.0"
    body = ET.SubElement(root, "body")
    addWorldItem(body, mdl)
    content = ET.tostring(root, encoding="UTF-8", xml_declaration=True, pretty_print=True)
    files.append((path, content))

    ####################################################################################################################
    # Plots (mw.mdlPlots)
    # Either in XML or lots of plain texts?
    # More probably XML since there is not really a lot of writing to do (third-party)

    path = "plots.xml"
    mdl = mw.mdlPlots

    root = ET.Element("root")
    addPlotItem(root, mdl)
    content = ET.tostring(root, encoding="UTF-8", xml_declaration=True, pretty_print=True)
    files.append((path, content))

    ####################################################################################################################
    # Settings
    # Saved in readable text (json) for easier versioning. But they mustn't be shared, it seems.
    # Maybe include them only if zipped?
    # Well, for now, we keep them here...

    files.append(("settings.txt", settings.save(protocol=0)))

    project = mw.currentProject

    ####################################################################################################################
    # Save to zip

    if zip:
        # project = os.path.join(
        #     os.path.dirname(project),
        #     "_" + os.path.basename(project)
        # )

        zf = zipfile.ZipFile(project, mode="w")

        for filename, content in files:
            zf.writestr(filename, content, compress_type=compression)

        zf.close()

    ####################################################################################################################
    # Save to plain text

    else:

        global cache

        # Project path
        dir = os.path.dirname(project)

        # Folder containing file: name of the project file (without .msk extension)
        folder = os.path.splitext(os.path.basename(project))[0]

        # Debug
        log("\nSaving to folder", folder)

        # If cache is empty (meaning we haven't loaded from disk), we wipe folder, just to be sure.
        if not cache:
            if os.path.exists(os.path.join(dir, folder)):
                shutil.rmtree(os.path.join(dir, folder))

        # Moving files that have been renamed
        for old, new in moves:

            # Get full path
            oldPath = os.path.join(dir, folder, old)
            newPath = os.path.join(dir, folder, new)

            # Move the old file to the new place
            try:
                os.replace(oldPath, newPath)
                log("* Renaming/moving {} to {}".format(old, new))
            except FileNotFoundError:
                # Maybe parent folder has been renamed
                pass

            # Update cache
            cache2 = {}
            for f in cache:
                f2 = f.replace(old, new)
                if f2 != f:
                    log("  * Updating cache:", f, f2)
                cache2[f2] = cache[f]
            cache = cache2

        # Writing files
        for path, content in files:
            filename = os.path.join(dir, folder, path)
            os.makedirs(os.path.dirname(filename), exist_ok=True)

            # Check if content is in cache, and write if necessary
            if path not in cache or cache[path] != content:
                log("* Writing file {} ({})".format(path, "not in cache" if path not in cache else "different"))
                # mode = "w" + ("b" if type(content) == bytes else "")
                if type(content) == bytes:
                    with open(filename, "wb") as f:
                        f.write(content)
                else:
                    with open(filename, "w", encoding='utf8') as f:
                        f.write(content)

                cache[path] = content

        # Removing phantoms
        for path in [p for p in cache if p not in [p for p, c in files]]:
            filename = os.path.join(dir, folder, path)
            log("* Removing", path)

            if os.path.isdir(filename):
                shutil.rmtree(filename)

            else:  # elif os.path.exists(filename)
                os.remove(filename)

            # Clear cache
            cache.pop(path, 0)

        # Removing empty directories
        for root, dirs, files in os.walk(os.path.join(dir, folder, "outline")):
            for dir in dirs:
                newDir = os.path.join(root, dir)
                try:
                    os.removedirs(newDir)
                    log("* Removing empty directory:", newDir)
                except:
                    # Directory not empty, we don't remove.
                    pass

        # Write the project file's content
        with open(project, "w", encoding='utf8') as f:
            f.write("1")  # Format number
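
In the plain-text branch above, every on-disk path is assembled from the project file's directory, the project folder name, and the item's relative path, with parent directories created on demand. A minimal sketch of that layout, using made-up file and character names rather than anything from the project:

import os

project = "/tmp/demo/MyBook.msk"                          # hypothetical project file
dir = os.path.dirname(project)                            # /tmp/demo
folder = os.path.splitext(os.path.basename(project))[0]   # MyBook

# Every saved item lives under <dir>/<folder>/<relative path>.
filename = os.path.join(dir, folder, "characters", "1-hero.txt")
os.makedirs(os.path.dirname(filename), exist_ok=True)     # create parent folders as needed

with open(filename, "w", encoding="utf8") as f:
    f.write("Name: Hero\n")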

Example 165

Project: pyon Source File: message_object_generator.py
    def generate(self, opts):
        service_yaml_data = self.get_yaml_data()
        messageobject_output_text = "# Message Objects. Don't edit, it is auto generated file.\n\nimport interface.objects\nfrom pyon.core.object import IonMessageObjectBase\n"
        if not service_yaml_data:
            print "message_model_generator: Error!!! the datastore (or the YAML file) is empty."
            exit()

        # Now process the service definition yaml files to
        # generate message classes for input and return messages.
        # Do this on a per file basis to simplify figuring out
        # when we've reached the end of a service's ops.
        for yaml_text in service_yaml_data:
            index = 0
            lines = yaml_text.split('\n')

            # Find service name
            while index < len(lines):
                if lines[index].startswith('name:'):
                    break
                index += 1

            if index >= len(lines):
                continue

            current_service_name = lines[index].split(':')[1].strip()
            index += 1
            # Find op definitions
            while index < len(lines):
                if lines[index].startswith('methods:'):
                    break
                index += 1
            index += 1

            if index >= len(lines):
                continue


            current_class_decorators = ''

            # Find method/op name
            while index < len(lines):
                # Find method decorators
                if lines[index].startswith('  #@') and lines[index][4].isalpha():
                    dec = lines[index].strip()[2:].split("=")
                    key = dec[0]
                    value = dec[1] if len(dec) == 2 else ""
                    # Add it to the decorator list
                    if not current_class_decorators:
                        current_class_decorators = '"' + key + '":"' + value + '"'
                    else:
                        current_class_decorators = current_class_decorators + ', "' + key + '":"' + value + '"'

                if lines[index].startswith('  ') and lines[index][2].isalpha():
                    break
                index += 1

            if index >= len(lines):
                continue

            while index < len(lines):

                if len(lines[index]) == 0 or lines[index].isspace():
                    index += 1
                    continue

                if lines[index].startswith('  #@') and lines[index][4].isalpha():
                    dec = lines[index].strip()[2:].split("=")
                    key = dec[0]
                    value = dec[1] if len(dec) == 2 else ""
                    # Add it to the decorator list
                    if not current_class_decorators:
                        current_class_decorators = '"' + key + '":"' + value + '"'
                    else:
                        current_class_decorators = current_class_decorators + ', "' + key + '":"' + value + '"'


                if not (lines[index].startswith('  ') and lines[index][2].isalpha()):
                    index += 1
                    continue

                args = []
                init_lines = []
                current_op_name = lines[index].strip(' :')
                current_class_name = current_service_name + "_" + current_op_name + "_in"
                messageobject_output_text += '\nclass ' + current_class_name + "(IonMessageObjectBase):\n"
                messageobject_output_text += "    _svc_name = '" + current_service_name + "'\n"
                messageobject_output_text += "    _op_name = '" + current_op_name + "'\n"
                messageobject_output_text += "    _class_info = {'name': '" + current_class_name + "', 'decorators': {" + current_class_decorators + "} }\n\n"
                index += 1

                # Find in
                while index < len(lines):
                    if lines[index].startswith('    resource_type:'):
                        messageobject_output_text += "    _resource_type = '" + lines[index].split('    resource_type:')[1].strip() + "'\n"
                    if lines[index].startswith('    resource_id:'):
                        messageobject_output_text += "    _resource_id = '" + lines[index].split('    resource_id:')[1].strip() + "'\n"
                    if lines[index].startswith('    operation_type:'):
                        messageobject_output_text += "    _operation_type = '" + lines[index].split('    operation_type:')[1].strip() + "'\n"
                    if lines[index].startswith('    in:'):
                        break
                    index += 1
                index += 1

                messageobject_output_text += '\n    def __init__(self'
                current_class_schema = "\n    _schema = {"
                decorators = ''
                description = ''

                while index < len(lines) and not lines[index].startswith('    out:'):

                    if lines[index].isspace():
                        index += 1
                        continue

                    line = lines[index].replace('    ', '', 1)

                    # Find decorators and comments
                    if line.startswith('  #'):
                        # Check for decorators
                        if len(line) > 4 and line.startswith('  #@'):
                            dec = line.strip()[2:].split("=")
                            key = dec[0]
                            value = dec[1] if len(dec) == 2 else ""
                            # Add it to the decorator list
                            if not decorators:
                                decorators = '"' + key + '":"' + value + '"'
                            else:
                                decorators = decorators + ', "' + key + '":"' + value + '"'
                        else:
                            init_lines.append('  ' + line + '\n')
                            if not description:
                                description = line.strip()[1:]
                            else:
                                description = description + ' ' + line.strip()[1:]

                        index += 1
                        continue

                    elif line.startswith('  '):
                        field = line.split(":", 1)[0].strip()
                        try:
                            value = line.split(":", 1)[1].strip()
                            if '#' in value:
                                dsc = value.split('#', 1)[1].strip()[1:]
                                value = value.split('#')[0].strip()
                                # Get inline comment
                                if not description:
                                    description = dsc
                                else:
                                    description = description + ' ' + dsc
                        except KeyError:
                            # Ignore key error because value is nested
                            index += 1
                            continue

                        if len(value) == 0:
                            value = "None"
                            value_type = "str"
                            default = "None"
                        elif value.startswith('!'):
                            value = value.strip("!")
                            if value in enums_by_name:
                                value_type = 'int'
                                # Get default enum value
                                enum_def = enums_by_name[value]
                                value = default = "interface.objects." + value + "." + enum_def["default"]
                            else:
                                value_type = value
                                value = default = "None"
                        # Hacks, find a better way in the future
                        elif "'" in value or '"' in value:
                            value_type = "str"
                            default = value
                        # Hack
                        else:
                            try:
                                eval_value = ast.literal_eval(value)
                                value_type = type(eval_value).__name__
                            except ValueError:
                                value_type = "str"
                                value = "'" + value + "'"
                            except SyntaxError:
                                value_type = "str"
                                value = "'" + value + "'"
                            if value_type in ['dict', 'list', 'tuple']:
                                default = value = "None"
                            else:
                                default = value
                        args.append(", ")
                        args.append(field + "=" + value)
                        init_lines.append('        self.' + field + " = " + field + "\n")
                        current_class_schema += "\n                '" + field + "': {'type': '" + value_type + "', 'default': " + default + ", 'decorators': {" + decorators + "}" + ", 'description': '" + re.escape(description) + "' },"
                    index += 1
                    decorators = ''
                    description = ''

                if len(args) > 0:
                    for arg in args:
                        messageobject_output_text += arg
                    messageobject_output_text += "):\n"
                    for init_line in init_lines:
                        messageobject_output_text += init_line
                else:
                    messageobject_output_text += "):\n"
                    messageobject_output_text += '        pass\n'
                messageobject_output_text += current_class_schema + "\n              }\n"


                if index < len(lines) and lines[index].startswith('    out:'):
                    args = []
                    init_lines = []
                    current_class_decorators = ''

                    current_class_name = current_service_name + "_" + current_op_name + "_out"
                    messageobject_output_text += '\nclass ' + current_class_name + "(IonMessageObjectBase):\n"
                    messageobject_output_text += "    _svc_name = '" + current_service_name + "'\n"
                    messageobject_output_text += "    _op_name = '" + current_op_name + "'\n"
                    messageobject_output_text += "    _class_info = {'name': '" + current_class_name + "', 'decorators': {" + current_class_decorators + "} }\n\n"

                    messageobject_output_text += '    def __init__(self'
                    current_class_schema = "\n    _schema = {"
                    index += 1
                    while index < len(lines):

                        line = lines[index]

                        if line.isspace() or len(line) == 0:
                            index += 1
                            continue

                        # Ignore
                        if not line.startswith('  '):
                            index += 1
                            continue

                        if lines[index].startswith('  #@') and lines[index][4].isalpha():
                            dec = lines[index].strip()[2:].split("=")
                            key = dec[0]
                            value = dec[1] if len(dec) == 2 else ""
                            # Add it to the decorator list
                            if not current_class_decorators:
                                current_class_decorators = '"' + key + '":"' + value + '"'
                            else:
                                current_class_decorators = current_class_decorators + ', "' + key + '":"' + value + '"'

                        # Found next op
                        if line.startswith('  ') and line[2].isalpha():
                            break

                        if line.startswith('    throws:'):
                            index += 1
                            while index < len(lines):
                                if not lines[index].startswith('    '):
                                    break
                                index += 1
                            break

                        line = line.replace('    ', '', 1)

                        # Add comments and decorators
                        if line.startswith('  #'):
                            # Check for decorators
                            if len(line) > 4 and line.startswith('  #@'):
                                dec = line.strip()[2:].split("=")
                                key = dec[0]
                                value = dec[1] if len(dec) == 2 else ""
                                # Add it to the decorator list
                                if not decorators:
                                    decorators = '"' + key + '":"' + value + '"'
                                else:
                                    decorators = decorators + ', "' + key + '":"' + value + '"'
                            else:
                                init_lines.append('  ' + line + '\n')
                                if not description:
                                    description = line.strip()[1:]
                                else:
                                    description = description + ' ' + line.strip()[1:]
                            index += 1
                            continue

                        field = line.split(":", 1)[0].strip()
                        try:
                            value = line.split(":", 1)[1].strip()
                            if '#' in value:
                                dsc = value.split('#', 1)[1].strip()
                                value = value.split('#')[0].strip()
                                # Get inline comment
                                if not description:
                                    description = dsc
                                else:
                                    description = description + ' ' + dsc
                        except KeyError:
                            # Ignore key error because value is nested
                            index += 1
                            continue

                        if len(value) == 0:
                            value = "None"
                            value_type = "str"
                            default = "None"
                        elif value.startswith('!'):
                            value = value.strip("!")
                            if value in enums_by_name:
                                value_type = 'int'
                                # Get default enum value
                                enum_def = enums_by_name[value]
                                value = default = "interface.objects." + value + "." + enum_def["default"]
                            else:
                                value_type = value
                                value = default = "None"
                        # Hacks, find a better way in the future
                        elif "'" in value or '"' in value:
                            value_type = "str"
                            default = value
                        # Hack
                        else:
                            try:
                                eval_value = ast.literal_eval(value)
                                value_type = type(eval_value).__name__
                            except ValueError:
                                value_type = "str"
                                value = "'" + value + "'"
                            except SyntaxError:
                                value_type = "str"
                                value = "'" + value + "'"
                            if value_type in ['dict', 'list', 'tuple']:
                                default = value = "None"
                            else:
                                default = value
                        args.append(", ")
                        args.append(field + "=" + value)
                        init_lines.append('        self.' + field + " = " + field + "\n")
                        current_class_schema += "\n                '" + field + "': {'type': '" + value_type + "', 'default': " + default + ", 'decorators': {" + decorators + "}" + ", 'description': '" + re.escape(description) + "' },"
                        index += 1
                        decorators = ''

                    if len(args) > 0:
                        for arg in args:
                            messageobject_output_text += arg
                        messageobject_output_text += "):\n"
                        for init_line in init_lines:
                            messageobject_output_text += init_line
                    else:
                        messageobject_output_text += "):\n"
                        messageobject_output_text += '        pass\n'
                    messageobject_output_text += current_class_schema + "\n              }\n"

        datadir = 'interface'
        messagemodelfile = os.path.join(datadir, 'messages.py')
        if not opts.dryrun:
            try:
                os.unlink(messagemodelfile)
            except:
                pass
            print " Writing message interfaces to '" + messagemodelfile + "'"
            with open(messagemodelfile, 'w') as f:
                f.write(messageobject_output_text)
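
The only path handling in this generator is joining the fixed output directory with the generated module name and replacing any stale copy before writing. A minimal sketch of that write-out step; the os.makedirs call is an addition so the sketch runs standalone, and the file contents are placeholders:

import os

datadir = "interface"
messagemodelfile = os.path.join(datadir, "messages.py")

os.makedirs(datadir, exist_ok=True)   # added here so the sketch runs standalone
try:
    os.unlink(messagemodelfile)       # drop any previously generated file
except OSError:
    pass
with open(messagemodelfile, "w") as f:
    f.write("# Message Objects. Don't edit; this file is auto-generated.\n")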

Example 166

Project: keystonemiddleware Source File: client_fixtures.py
    def setUp(self):
        super(Examples, self).setUp()

        # The data for several tests are signed using openssl and are stored in
        # files in the signing subdirectory.  In order to keep the values
        # consistent between the tests and the signed documents, we read them
        # in for use in the tests.
        with open(os.path.join(CMSDIR, 'auth_token_scoped.json')) as f:
            self.TOKEN_SCOPED_DATA = cms.cms_to_token(f.read())

        with open(os.path.join(CMSDIR, 'auth_token_scoped.pem')) as f:
            self.SIGNED_TOKEN_SCOPED = cms.cms_to_token(f.read())
        self.SIGNED_TOKEN_SCOPED_HASH = _hash_signed_token_safe(
            self.SIGNED_TOKEN_SCOPED)
        self.SIGNED_TOKEN_SCOPED_HASH_SHA256 = _hash_signed_token_safe(
            self.SIGNED_TOKEN_SCOPED, mode='sha256')
        with open(os.path.join(CMSDIR, 'auth_token_unscoped.pem')) as f:
            self.SIGNED_TOKEN_UNSCOPED = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_scoped.pem')) as f:
            self.SIGNED_v3_TOKEN_SCOPED = cms.cms_to_token(f.read())
        self.SIGNED_v3_TOKEN_SCOPED_HASH = _hash_signed_token_safe(
            self.SIGNED_v3_TOKEN_SCOPED)
        self.SIGNED_v3_TOKEN_SCOPED_HASH_SHA256 = _hash_signed_token_safe(
            self.SIGNED_v3_TOKEN_SCOPED, mode='sha256')
        with open(os.path.join(CMSDIR, 'auth_token_revoked.pem')) as f:
            self.REVOKED_TOKEN = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_scoped_expired.pem')) as f:
            self.SIGNED_TOKEN_SCOPED_EXPIRED = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_revoked.pem')) as f:
            self.REVOKED_v3_TOKEN = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_scoped.pkiz')) as f:
            self.SIGNED_TOKEN_SCOPED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_unscoped.pkiz')) as f:
            self.SIGNED_TOKEN_UNSCOPED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_scoped.pkiz')) as f:
            self.SIGNED_v3_TOKEN_SCOPED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_revoked.pkiz')) as f:
            self.REVOKED_TOKEN_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR,
                               'auth_token_scoped_expired.pkiz')) as f:
            self.SIGNED_TOKEN_SCOPED_EXPIRED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_revoked.pkiz')) as f:
            self.REVOKED_v3_TOKEN_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'revocation_list.json')) as f:
            self.REVOCATION_LIST = jsonutils.loads(f.read())
        with open(os.path.join(CMSDIR, 'revocation_list.pem')) as f:
            self.SIGNED_REVOCATION_LIST = jsonutils.dumps({'signed': f.read()})

        self.SIGNING_CERT_FILE = os.path.join(CERTDIR, 'signing_cert.pem')
        with open(self.SIGNING_CERT_FILE) as f:
            self.SIGNING_CERT = f.read()

        self.KERBEROS_BIND = 'USER@REALM'
        self.SERVICE_KERBEROS_BIND = 'SERVICE_USER@SERVICE_REALM'

        self.SIGNING_KEY_FILE = os.path.join(KEYDIR, 'signing_key.pem')
        with open(self.SIGNING_KEY_FILE) as f:
            self.SIGNING_KEY = f.read()

        self.SIGNING_CA_FILE = os.path.join(CERTDIR, 'cacert.pem')
        with open(self.SIGNING_CA_FILE) as f:
            self.SIGNING_CA = f.read()

        self.UUID_TOKEN_DEFAULT = "ec6c0710ec2f471498484c1b53ab4f9d"
        self.UUID_TOKEN_NO_SERVICE_CATALOG = '8286720fbe4941e69fa8241723bb02df'
        self.UUID_TOKEN_UNSCOPED = '731f903721c14827be7b2dc912af7776'
        self.UUID_TOKEN_BIND = '3fc54048ad64405c98225ce0897af7c5'
        self.UUID_TOKEN_UNKNOWN_BIND = '8885fdf4d42e4fb9879e6379fa1eaf48'
        self.VALID_DIABLO_TOKEN = 'b0cf19b55dbb4f20a6ee18e6c6cf1726'
        self.v3_UUID_TOKEN_DEFAULT = '5603457654b346fdbb93437bfe76f2f1'
        self.v3_UUID_TOKEN_UNSCOPED = 'd34835fdaec447e695a0a024d84f8d79'
        self.v3_UUID_TOKEN_DOMAIN_SCOPED = 'e8a7b63aaa4449f38f0c5c05c3581792'
        self.v3_UUID_TOKEN_BIND = '2f61f73e1c854cbb9534c487f9bd63c2'
        self.v3_UUID_TOKEN_UNKNOWN_BIND = '7ed9781b62cd4880b8d8c6788ab1d1e2'

        self.UUID_SERVICE_TOKEN_DEFAULT = 'fe4c0710ec2f492748596c1b53ab124'
        self.UUID_SERVICE_TOKEN_BIND = '5e43439613d34a13a7e03b2762bd08ab'
        self.v3_UUID_SERVICE_TOKEN_DEFAULT = 'g431071bbc2f492748596c1b53cb229'
        self.v3_UUID_SERVICE_TOKEN_BIND = 'be705e4426d0449a89e35ae21c380a05'
        self.v3_NOT_IS_ADMIN_PROJECT = uuid.uuid4().hex

        revoked_token = self.REVOKED_TOKEN
        if isinstance(revoked_token, six.text_type):
            revoked_token = revoked_token.encode('utf-8')
        self.REVOKED_TOKEN_HASH = utils.hash_signed_token(revoked_token)
        self.REVOKED_TOKEN_HASH_SHA256 = utils.hash_signed_token(revoked_token,
                                                                 mode='sha256')
        self.REVOKED_TOKEN_LIST = (
            {'revoked': [{'id': self.REVOKED_TOKEN_HASH,
                          'expires': timeutils.utcnow()}]})
        self.REVOKED_TOKEN_LIST_JSON = jsonutils.dumps(self.REVOKED_TOKEN_LIST)

        revoked_v3_token = self.REVOKED_v3_TOKEN
        if isinstance(revoked_v3_token, six.text_type):
            revoked_v3_token = revoked_v3_token.encode('utf-8')
        self.REVOKED_v3_TOKEN_HASH = utils.hash_signed_token(revoked_v3_token)
        hash = utils.hash_signed_token(revoked_v3_token, mode='sha256')
        self.REVOKED_v3_TOKEN_HASH_SHA256 = hash
        self.REVOKED_v3_TOKEN_LIST = (
            {'revoked': [{'id': self.REVOKED_v3_TOKEN_HASH,
                          'expires': timeutils.utcnow()}]})
        self.REVOKED_v3_TOKEN_LIST_JSON = jsonutils.dumps(
            self.REVOKED_v3_TOKEN_LIST)

        revoked_token_pkiz = self.REVOKED_TOKEN_PKIZ
        if isinstance(revoked_token_pkiz, six.text_type):
            revoked_token_pkiz = revoked_token_pkiz.encode('utf-8')
        self.REVOKED_TOKEN_PKIZ_HASH = utils.hash_signed_token(
            revoked_token_pkiz)
        revoked_v3_token_pkiz = self.REVOKED_v3_TOKEN_PKIZ
        if isinstance(revoked_v3_token_pkiz, six.text_type):
            revoked_v3_token_pkiz = revoked_v3_token_pkiz.encode('utf-8')
        self.REVOKED_v3_PKIZ_TOKEN_HASH = utils.hash_signed_token(
            revoked_v3_token_pkiz)

        self.REVOKED_TOKEN_PKIZ_LIST = (
            {'revoked': [{'id': self.REVOKED_TOKEN_PKIZ_HASH,
                          'expires': timeutils.utcnow()},
                         {'id': self.REVOKED_v3_PKIZ_TOKEN_HASH,
                          'expires': timeutils.utcnow()},
                         ]})
        self.REVOKED_TOKEN_PKIZ_LIST_JSON = jsonutils.dumps(
            self.REVOKED_TOKEN_PKIZ_LIST)

        self.SIGNED_TOKEN_SCOPED_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_SCOPED)
        self.SIGNED_TOKEN_UNSCOPED_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_UNSCOPED)
        self.SIGNED_v3_TOKEN_SCOPED_KEY = cms.cms_hash_token(
            self.SIGNED_v3_TOKEN_SCOPED)

        self.SIGNED_TOKEN_SCOPED_PKIZ_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_SCOPED_PKIZ)
        self.SIGNED_TOKEN_UNSCOPED_PKIZ_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_UNSCOPED_PKIZ)
        self.SIGNED_v3_TOKEN_SCOPED_PKIZ_KEY = cms.cms_hash_token(
            self.SIGNED_v3_TOKEN_SCOPED_PKIZ)

        self.INVALID_SIGNED_TOKEN = (
            "MIIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
            "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
            "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
            "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
            "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
            "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
            "0000000000000000000000000000000000000000000000000000000000000000"
            "1111111111111111111111111111111111111111111111111111111111111111"
            "2222222222222222222222222222222222222222222222222222222222222222"
            "3333333333333333333333333333333333333333333333333333333333333333"
            "4444444444444444444444444444444444444444444444444444444444444444"
            "5555555555555555555555555555555555555555555555555555555555555555"
            "6666666666666666666666666666666666666666666666666666666666666666"
            "7777777777777777777777777777777777777777777777777777777777777777"
            "8888888888888888888888888888888888888888888888888888888888888888"
            "9999999999999999999999999999999999999999999999999999999999999999"
            "0000000000000000000000000000000000000000000000000000000000000000")

        self.INVALID_SIGNED_PKIZ_TOKEN = (
            "PKIZ_AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
            "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
            "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
            "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
            "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
            "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
            "0000000000000000000000000000000000000000000000000000000000000000"
            "1111111111111111111111111111111111111111111111111111111111111111"
            "2222222222222222222222222222222222222222222222222222222222222222"
            "3333333333333333333333333333333333333333333333333333333333333333"
            "4444444444444444444444444444444444444444444444444444444444444444"
            "5555555555555555555555555555555555555555555555555555555555555555"
            "6666666666666666666666666666666666666666666666666666666666666666"
            "7777777777777777777777777777777777777777777777777777777777777777"
            "8888888888888888888888888888888888888888888888888888888888888888"
            "9999999999999999999999999999999999999999999999999999999999999999"
            "0000000000000000000000000000000000000000000000000000000000000000")

        # JSON responses keyed by token ID
        self.TOKEN_RESPONSES = {}

        # basic values
        PROJECT_ID = 'tenant_id1'
        PROJECT_NAME = 'tenant_name1'
        USER_ID = 'user_id1'
        USER_NAME = 'user_name1'
        DOMAIN_ID = 'domain_id1'
        DOMAIN_NAME = 'domain_name1'
        ROLE_NAME1 = 'role1'
        ROLE_NAME2 = 'role2'

        SERVICE_PROJECT_ID = 'service_project_id1'
        SERVICE_PROJECT_NAME = 'service_project_name1'
        SERVICE_USER_ID = 'service_user_id1'
        SERVICE_USER_NAME = 'service_user_name1'
        SERVICE_DOMAIN_ID = 'service_domain_id1'
        SERVICE_DOMAIN_NAME = 'service_domain_name1'
        SERVICE_ROLE_NAME1 = 'service_role1'
        SERVICE_ROLE_NAME2 = 'service_role2'

        self.SERVICE_TYPE = 'identity'
        self.UNVERSIONED_SERVICE_URL = 'http://keystone.server:5000/'
        self.SERVICE_URL = self.UNVERSIONED_SERVICE_URL + 'v2.0'

        # Old Tokens

        self.TOKEN_RESPONSES[self.VALID_DIABLO_TOKEN] = {
            'access': {
                'token': {
                    'id': self.VALID_DIABLO_TOKEN,
                    'expires': '2020-01-01T00:00:10.000123Z',
                    'tenantId': PROJECT_ID,
                },
                'user': {
                    'id': USER_ID,
                    'name': USER_NAME,
                    'roles': [
                        {'name': ROLE_NAME1},
                        {'name': ROLE_NAME2},
                    ],
                },
            },
        }

        # Generated V2 Tokens

        token = fixture.V2Token(token_id=self.UUID_TOKEN_DEFAULT,
                                tenant_id=PROJECT_ID,
                                tenant_name=PROJECT_NAME,
                                user_id=USER_ID,
                                user_name=USER_NAME)
        token.add_role(name=ROLE_NAME1)
        token.add_role(name=ROLE_NAME2)
        svc = token.add_service(self.SERVICE_TYPE)
        svc.add_endpoint(public=self.SERVICE_URL)
        self.TOKEN_RESPONSES[self.UUID_TOKEN_DEFAULT] = token

        token = fixture.V2Token(token_id=self.UUID_TOKEN_UNSCOPED,
                                user_id=USER_ID,
                                user_name=USER_NAME)
        self.TOKEN_RESPONSES[self.UUID_TOKEN_UNSCOPED] = token

        token = fixture.V2Token(token_id='valid-token',
                                tenant_id=PROJECT_ID,
                                tenant_name=PROJECT_NAME,
                                user_id=USER_ID,
                                user_name=USER_NAME)
        token.add_role(ROLE_NAME1)
        token.add_role(ROLE_NAME2)
        self.TOKEN_RESPONSES[self.UUID_TOKEN_NO_SERVICE_CATALOG] = token

        token = fixture.V2Token(token_id=self.SIGNED_TOKEN_SCOPED_KEY,
                                tenant_id=PROJECT_ID,
                                tenant_name=PROJECT_NAME,
                                user_id=USER_ID,
                                user_name=USER_NAME)
        token.add_role(ROLE_NAME1)
        token.add_role(ROLE_NAME2)
        self.TOKEN_RESPONSES[self.SIGNED_TOKEN_SCOPED_KEY] = token

        token = fixture.V2Token(token_id=self.SIGNED_TOKEN_UNSCOPED_KEY,
                                user_id=USER_ID,
                                user_name=USER_NAME)
        self.TOKEN_RESPONSES[self.SIGNED_TOKEN_UNSCOPED_KEY] = token

        token = fixture.V2Token(token_id=self.UUID_TOKEN_BIND,
                                tenant_id=PROJECT_ID,
                                tenant_name=PROJECT_NAME,
                                user_id=USER_ID,
                                user_name=USER_NAME)
        token.add_role(ROLE_NAME1)
        token.add_role(ROLE_NAME2)
        token['access']['token']['bind'] = {'kerberos': self.KERBEROS_BIND}
        self.TOKEN_RESPONSES[self.UUID_TOKEN_BIND] = token

        token = fixture.V2Token(token_id=self.UUID_SERVICE_TOKEN_BIND,
                                tenant_id=SERVICE_PROJECT_ID,
                                tenant_name=SERVICE_PROJECT_NAME,
                                user_id=SERVICE_USER_ID,
                                user_name=SERVICE_USER_NAME)
        token.add_role(SERVICE_ROLE_NAME1)
        token.add_role(SERVICE_ROLE_NAME2)
        token['access']['token']['bind'] = {
            'kerberos': self.SERVICE_KERBEROS_BIND}
        self.TOKEN_RESPONSES[self.UUID_SERVICE_TOKEN_BIND] = token

        token = fixture.V2Token(token_id=self.UUID_TOKEN_UNKNOWN_BIND,
                                tenant_id=PROJECT_ID,
                                tenant_name=PROJECT_NAME,
                                user_id=USER_ID,
                                user_name=USER_NAME)
        token.add_role(ROLE_NAME1)
        token.add_role(ROLE_NAME2)
        token['access']['token']['bind'] = {'FOO': 'BAR'}
        self.TOKEN_RESPONSES[self.UUID_TOKEN_UNKNOWN_BIND] = token

        token = fixture.V2Token(token_id=self.UUID_SERVICE_TOKEN_DEFAULT,
                                tenant_id=SERVICE_PROJECT_ID,
                                tenant_name=SERVICE_PROJECT_NAME,
                                user_id=SERVICE_USER_ID,
                                user_name=SERVICE_USER_NAME)
        token.add_role(name=SERVICE_ROLE_NAME1)
        token.add_role(name=SERVICE_ROLE_NAME2)
        svc = token.add_service(self.SERVICE_TYPE)
        svc.add_endpoint(public=self.SERVICE_URL)
        self.TOKEN_RESPONSES[self.UUID_SERVICE_TOKEN_DEFAULT] = token

        # Generated V3 Tokens

        token = fixture.V3Token(user_id=USER_ID,
                                user_name=USER_NAME,
                                user_domain_id=DOMAIN_ID,
                                user_domain_name=DOMAIN_NAME,
                                project_id=PROJECT_ID,
                                project_name=PROJECT_NAME,
                                project_domain_id=DOMAIN_ID,
                                project_domain_name=DOMAIN_NAME)
        token.add_role(id=ROLE_NAME1, name=ROLE_NAME1)
        token.add_role(id=ROLE_NAME2, name=ROLE_NAME2)
        svc = token.add_service(self.SERVICE_TYPE)
        svc.add_endpoint('public', self.SERVICE_URL)
        self.TOKEN_RESPONSES[self.v3_UUID_TOKEN_DEFAULT] = token

        token = fixture.V3Token(user_id=USER_ID,
                                user_name=USER_NAME,
                                user_domain_id=DOMAIN_ID,
                                user_domain_name=DOMAIN_NAME)
        self.TOKEN_RESPONSES[self.v3_UUID_TOKEN_UNSCOPED] = token

        token = fixture.V3Token(user_id=USER_ID,
                                user_name=USER_NAME,
                                user_domain_id=DOMAIN_ID,
                                user_domain_name=DOMAIN_NAME,
                                domain_id=DOMAIN_ID,
                                domain_name=DOMAIN_NAME)
        token.add_role(id=ROLE_NAME1, name=ROLE_NAME1)
        token.add_role(id=ROLE_NAME2, name=ROLE_NAME2)
        svc = token.add_service(self.SERVICE_TYPE)
        svc.add_endpoint('public', self.SERVICE_URL)
        self.TOKEN_RESPONSES[self.v3_UUID_TOKEN_DOMAIN_SCOPED] = token

        token = fixture.V3Token(user_id=USER_ID,
                                user_name=USER_NAME,
                                user_domain_id=DOMAIN_ID,
                                user_domain_name=DOMAIN_NAME,
                                project_id=PROJECT_ID,
                                project_name=PROJECT_NAME,
                                project_domain_id=DOMAIN_ID,
                                project_domain_name=DOMAIN_NAME)
        token.add_role(name=ROLE_NAME1)
        token.add_role(name=ROLE_NAME2)
        svc = token.add_service(self.SERVICE_TYPE)
        svc.add_endpoint('public', self.SERVICE_URL)
        self.TOKEN_RESPONSES[self.SIGNED_v3_TOKEN_SCOPED_KEY] = token

        token = fixture.V3Token(user_id=USER_ID,
                                user_name=USER_NAME,
                                user_domain_id=DOMAIN_ID,
                                user_domain_name=DOMAIN_NAME,
                                project_id=PROJECT_ID,
                                project_name=PROJECT_NAME,
                                project_domain_id=DOMAIN_ID,
                                project_domain_name=DOMAIN_NAME)
        token.add_role(name=ROLE_NAME1)
        token.add_role(name=ROLE_NAME2)
        svc = token.add_service(self.SERVICE_TYPE)
        svc.add_endpoint('public', self.SERVICE_URL)
        token['token']['bind'] = {'kerberos': self.KERBEROS_BIND}
        self.TOKEN_RESPONSES[self.v3_UUID_TOKEN_BIND] = token

        token = fixture.V3Token(user_id=SERVICE_USER_ID,
                                user_name=SERVICE_USER_NAME,
                                user_domain_id=SERVICE_DOMAIN_ID,
                                user_domain_name=SERVICE_DOMAIN_NAME,
                                project_id=SERVICE_PROJECT_ID,
                                project_name=SERVICE_PROJECT_NAME,
                                project_domain_id=SERVICE_DOMAIN_ID,
                                project_domain_name=SERVICE_DOMAIN_NAME)
        token.add_role(name=SERVICE_ROLE_NAME1)
        token.add_role(name=SERVICE_ROLE_NAME2)
        svc = token.add_service(self.SERVICE_TYPE)
        svc.add_endpoint('public', self.SERVICE_URL)
        token['token']['bind'] = {'kerberos': self.SERVICE_KERBEROS_BIND}
        self.TOKEN_RESPONSES[self.v3_UUID_SERVICE_TOKEN_BIND] = token

        token = fixture.V3Token(user_id=USER_ID,
                                user_name=USER_NAME,
                                user_domain_id=DOMAIN_ID,
                                user_domain_name=DOMAIN_NAME,
                                project_id=PROJECT_ID,
                                project_name=PROJECT_NAME,
                                project_domain_id=DOMAIN_ID,
                                project_domain_name=DOMAIN_NAME)
        token.add_role(name=ROLE_NAME1)
        token.add_role(name=ROLE_NAME2)
        svc = token.add_service(self.SERVICE_TYPE)
        svc.add_endpoint('public', self.SERVICE_URL)
        token['token']['bind'] = {'FOO': 'BAR'}
        self.TOKEN_RESPONSES[self.v3_UUID_TOKEN_UNKNOWN_BIND] = token

        token = fixture.V3Token(user_id=SERVICE_USER_ID,
                                user_name=SERVICE_USER_NAME,
                                user_domain_id=SERVICE_DOMAIN_ID,
                                user_domain_name=SERVICE_DOMAIN_NAME,
                                project_id=SERVICE_PROJECT_ID,
                                project_name=SERVICE_PROJECT_NAME,
                                project_domain_id=SERVICE_DOMAIN_ID,
                                project_domain_name=SERVICE_DOMAIN_NAME)
        token.add_role(id=SERVICE_ROLE_NAME1,
                       name=SERVICE_ROLE_NAME1)
        token.add_role(id=SERVICE_ROLE_NAME2,
                       name=SERVICE_ROLE_NAME2)
        svc = token.add_service(self.SERVICE_TYPE)
        svc.add_endpoint('public', self.SERVICE_URL)
        self.TOKEN_RESPONSES[self.v3_UUID_SERVICE_TOKEN_DEFAULT] = token

        token = fixture.V3Token(user_id=USER_ID,
                                user_name=USER_NAME,
                                user_domain_id=DOMAIN_ID,
                                user_domain_name=DOMAIN_NAME,
                                project_id=PROJECT_ID,
                                project_name=PROJECT_NAME,
                                project_domain_id=DOMAIN_ID,
                                project_domain_name=DOMAIN_NAME,
                                is_admin_project=False)
        token.add_role(name=ROLE_NAME1)
        token.add_role(name=ROLE_NAME2)
        svc = token.add_service(self.SERVICE_TYPE)
        svc.add_endpoint('public', self.SERVICE_URL)
        self.TOKEN_RESPONSES[self.v3_NOT_IS_ADMIN_PROJECT] = token

        # PKIZ tokens generally link to above tokens

        self.TOKEN_RESPONSES[self.SIGNED_TOKEN_SCOPED_PKIZ_KEY] = (
            self.TOKEN_RESPONSES[self.SIGNED_TOKEN_SCOPED_KEY])
        self.TOKEN_RESPONSES[self.SIGNED_TOKEN_UNSCOPED_PKIZ_KEY] = (
            self.TOKEN_RESPONSES[self.SIGNED_TOKEN_UNSCOPED_KEY])
        self.TOKEN_RESPONSES[self.SIGNED_v3_TOKEN_SCOPED_PKIZ_KEY] = (
            self.TOKEN_RESPONSES[self.SIGNED_v3_TOKEN_SCOPED_KEY])

        self.JSON_TOKEN_RESPONSES = dict([(k, jsonutils.dumps(v)) for k, v in
                                          six.iteritems(self.TOKEN_RESPONSES)])
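
The fixture setup above ends by pre-serializing every token body to JSON, keyed by its token ID, so tests can hand back raw response payloads directly. A minimal sketch of that final step, assuming jsonutils.dumps behaves like the standard json.dumps; the dictionary below is a hypothetical stand-in for the fixture objects built above:

import json

# Hypothetical stand-ins for the V2/V3 fixture objects built above.
token_responses = {
    'uuid-token-default': {'access': {'token': {'id': 'uuid-token-default'}}},
    'uuid-token-unscoped': {'access': {'token': {'id': 'uuid-token-unscoped'}}},
}

# Equivalent of the six.iteritems() construction: serialize each body once,
# keeping the mapping keyed by token ID.
json_token_responses = {token_id: json.dumps(body)
                        for token_id, body in token_responses.items()}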

Example 167

Project: python-keystoneclient Source File: client_fixtures.py
    def setUp(self):
        super(Examples, self).setUp()

        # The data for several tests are signed using openssl and are stored in
        # files in the signing subdirectory.  In order to keep the values
        # consistent between the tests and the signed documents, we read them
        # in for use in the tests.
        with open(os.path.join(CMSDIR, 'auth_token_scoped.json')) as f:
            self.TOKEN_SCOPED_DATA = cms.cms_to_token(f.read())

        with open(os.path.join(CMSDIR, 'auth_token_scoped.pem')) as f:
            self.SIGNED_TOKEN_SCOPED = cms.cms_to_token(f.read())
        self.SIGNED_TOKEN_SCOPED_HASH = _hash_signed_token_safe(
            self.SIGNED_TOKEN_SCOPED)
        self.SIGNED_TOKEN_SCOPED_HASH_SHA256 = _hash_signed_token_safe(
            self.SIGNED_TOKEN_SCOPED, mode='sha256')
        with open(os.path.join(CMSDIR, 'auth_token_unscoped.pem')) as f:
            self.SIGNED_TOKEN_UNSCOPED = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_scoped.pem')) as f:
            self.SIGNED_v3_TOKEN_SCOPED = cms.cms_to_token(f.read())
        self.SIGNED_v3_TOKEN_SCOPED_HASH = _hash_signed_token_safe(
            self.SIGNED_v3_TOKEN_SCOPED)
        self.SIGNED_v3_TOKEN_SCOPED_HASH_SHA256 = _hash_signed_token_safe(
            self.SIGNED_v3_TOKEN_SCOPED, mode='sha256')
        with open(os.path.join(CMSDIR, 'auth_token_revoked.pem')) as f:
            self.REVOKED_TOKEN = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_scoped_expired.pem')) as f:
            self.SIGNED_TOKEN_SCOPED_EXPIRED = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_revoked.pem')) as f:
            self.REVOKED_v3_TOKEN = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_scoped.pkiz')) as f:
            self.SIGNED_TOKEN_SCOPED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_unscoped.pkiz')) as f:
            self.SIGNED_TOKEN_UNSCOPED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_scoped.pkiz')) as f:
            self.SIGNED_v3_TOKEN_SCOPED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_revoked.pkiz')) as f:
            self.REVOKED_TOKEN_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR,
                               'auth_token_scoped_expired.pkiz')) as f:
            self.SIGNED_TOKEN_SCOPED_EXPIRED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_revoked.pkiz')) as f:
            self.REVOKED_v3_TOKEN_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'revocation_list.json')) as f:
            self.REVOCATION_LIST = jsonutils.loads(f.read())
        with open(os.path.join(CMSDIR, 'revocation_list.pem')) as f:
            self.SIGNED_REVOCATION_LIST = jsonutils.dumps({'signed': f.read()})

        self.SIGNING_CERT_FILE = os.path.join(CERTDIR, 'signing_cert.pem')
        with open(self.SIGNING_CERT_FILE) as f:
            self.SIGNING_CERT = f.read()

        self.KERBEROS_BIND = 'USER@REALM'

        self.SIGNING_KEY_FILE = os.path.join(KEYDIR, 'signing_key.pem')
        with open(self.SIGNING_KEY_FILE) as f:
            self.SIGNING_KEY = f.read()

        self.SIGNING_CA_FILE = os.path.join(CERTDIR, 'cacert.pem')
        with open(self.SIGNING_CA_FILE) as f:
            self.SIGNING_CA = f.read()

        self.UUID_TOKEN_DEFAULT = "ec6c0710ec2f471498484c1b53ab4f9d"
        self.UUID_TOKEN_NO_SERVICE_CATALOG = '8286720fbe4941e69fa8241723bb02df'
        self.UUID_TOKEN_UNSCOPED = '731f903721c14827be7b2dc912af7776'
        self.UUID_TOKEN_BIND = '3fc54048ad64405c98225ce0897af7c5'
        self.UUID_TOKEN_UNKNOWN_BIND = '8885fdf4d42e4fb9879e6379fa1eaf48'
        self.VALID_DIABLO_TOKEN = 'b0cf19b55dbb4f20a6ee18e6c6cf1726'
        self.v3_UUID_TOKEN_DEFAULT = '5603457654b346fdbb93437bfe76f2f1'
        self.v3_UUID_TOKEN_UNSCOPED = 'd34835fdaec447e695a0a024d84f8d79'
        self.v3_UUID_TOKEN_DOMAIN_SCOPED = 'e8a7b63aaa4449f38f0c5c05c3581792'
        self.v3_UUID_TOKEN_BIND = '2f61f73e1c854cbb9534c487f9bd63c2'
        self.v3_UUID_TOKEN_UNKNOWN_BIND = '7ed9781b62cd4880b8d8c6788ab1d1e2'

        revoked_token = self.REVOKED_TOKEN
        if isinstance(revoked_token, six.text_type):
            revoked_token = revoked_token.encode('utf-8')
        self.REVOKED_TOKEN_HASH = utils.hash_signed_token(revoked_token)
        self.REVOKED_TOKEN_HASH_SHA256 = utils.hash_signed_token(revoked_token,
                                                                 mode='sha256')
        self.REVOKED_TOKEN_LIST = (
            {'revoked': [{'id': self.REVOKED_TOKEN_HASH,
                          'expires': timeutils.utcnow()}]})
        self.REVOKED_TOKEN_LIST_JSON = jsonutils.dumps(self.REVOKED_TOKEN_LIST)

        revoked_v3_token = self.REVOKED_v3_TOKEN
        if isinstance(revoked_v3_token, six.text_type):
            revoked_v3_token = revoked_v3_token.encode('utf-8')
        self.REVOKED_v3_TOKEN_HASH = utils.hash_signed_token(revoked_v3_token)
        hash = utils.hash_signed_token(revoked_v3_token, mode='sha256')
        self.REVOKED_v3_TOKEN_HASH_SHA256 = hash
        self.REVOKED_v3_TOKEN_LIST = (
            {'revoked': [{'id': self.REVOKED_v3_TOKEN_HASH,
                          'expires': timeutils.utcnow()}]})
        self.REVOKED_v3_TOKEN_LIST_JSON = jsonutils.dumps(
            self.REVOKED_v3_TOKEN_LIST)

        revoked_token_pkiz = self.REVOKED_TOKEN_PKIZ
        if isinstance(revoked_token_pkiz, six.text_type):
            revoked_token_pkiz = revoked_token_pkiz.encode('utf-8')
        self.REVOKED_TOKEN_PKIZ_HASH = utils.hash_signed_token(
            revoked_token_pkiz)
        revoked_v3_token_pkiz = self.REVOKED_v3_TOKEN_PKIZ
        if isinstance(revoked_v3_token_pkiz, six.text_type):
            revoked_v3_token_pkiz = revoked_v3_token_pkiz.encode('utf-8')
        self.REVOKED_v3_PKIZ_TOKEN_HASH = utils.hash_signed_token(
            revoked_v3_token_pkiz)

        self.REVOKED_TOKEN_PKIZ_LIST = (
            {'revoked': [{'id': self.REVOKED_TOKEN_PKIZ_HASH,
                          'expires': timeutils.utcnow()},
                         {'id': self.REVOKED_v3_PKIZ_TOKEN_HASH,
                          'expires': timeutils.utcnow()},
                         ]})
        self.REVOKED_TOKEN_PKIZ_LIST_JSON = jsonutils.dumps(
            self.REVOKED_TOKEN_PKIZ_LIST)

        self.SIGNED_TOKEN_SCOPED_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_SCOPED)
        self.SIGNED_TOKEN_UNSCOPED_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_UNSCOPED)
        self.SIGNED_v3_TOKEN_SCOPED_KEY = cms.cms_hash_token(
            self.SIGNED_v3_TOKEN_SCOPED)

        self.SIGNED_TOKEN_SCOPED_PKIZ_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_SCOPED_PKIZ)
        self.SIGNED_TOKEN_UNSCOPED_PKIZ_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_UNSCOPED_PKIZ)
        self.SIGNED_v3_TOKEN_SCOPED_PKIZ_KEY = cms.cms_hash_token(
            self.SIGNED_v3_TOKEN_SCOPED_PKIZ)

        self.INVALID_SIGNED_TOKEN = (
            "MIIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
            "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
            "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
            "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
            "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
            "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
            "0000000000000000000000000000000000000000000000000000000000000000"
            "1111111111111111111111111111111111111111111111111111111111111111"
            "2222222222222222222222222222222222222222222222222222222222222222"
            "3333333333333333333333333333333333333333333333333333333333333333"
            "4444444444444444444444444444444444444444444444444444444444444444"
            "5555555555555555555555555555555555555555555555555555555555555555"
            "6666666666666666666666666666666666666666666666666666666666666666"
            "7777777777777777777777777777777777777777777777777777777777777777"
            "8888888888888888888888888888888888888888888888888888888888888888"
            "9999999999999999999999999999999999999999999999999999999999999999"
            "0000000000000000000000000000000000000000000000000000000000000000")

        self.INVALID_SIGNED_PKIZ_TOKEN = (
            "PKIZ_AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
            "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
            "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
            "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
            "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
            "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
            "0000000000000000000000000000000000000000000000000000000000000000"
            "1111111111111111111111111111111111111111111111111111111111111111"
            "2222222222222222222222222222222222222222222222222222222222222222"
            "3333333333333333333333333333333333333333333333333333333333333333"
            "4444444444444444444444444444444444444444444444444444444444444444"
            "5555555555555555555555555555555555555555555555555555555555555555"
            "6666666666666666666666666666666666666666666666666666666666666666"
            "7777777777777777777777777777777777777777777777777777777777777777"
            "8888888888888888888888888888888888888888888888888888888888888888"
            "9999999999999999999999999999999999999999999999999999999999999999"
            "0000000000000000000000000000000000000000000000000000000000000000")

        # JSON responses keyed by token ID
        self.TOKEN_RESPONSES = {
            self.UUID_TOKEN_DEFAULT: {
                'access': {
                    'token': {
                        'id': self.UUID_TOKEN_DEFAULT,
                        'expires': '2020-01-01T00:00:10.000123Z',
                        'tenant': {
                            'id': 'tenant_id1',
                            'name': 'tenant_name1',
                        },
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {'name': 'role1'},
                            {'name': 'role2'},
                        ],
                    },
                    'serviceCatalog': {}
                },
            },
            self.VALID_DIABLO_TOKEN: {
                'access': {
                    'token': {
                        'id': self.VALID_DIABLO_TOKEN,
                        'expires': '2020-01-01T00:00:10.000123Z',
                        'tenantId': 'tenant_id1',
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {'name': 'role1'},
                            {'name': 'role2'},
                        ],
                    },
                },
            },
            self.UUID_TOKEN_UNSCOPED: {
                'access': {
                    'token': {
                        'id': self.UUID_TOKEN_UNSCOPED,
                        'expires': '2020-01-01T00:00:10.000123Z',
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {'name': 'role1'},
                            {'name': 'role2'},
                        ],
                    },
                },
            },
            self.UUID_TOKEN_NO_SERVICE_CATALOG: {
                'access': {
                    'token': {
                        'id': 'valid-token',
                        'expires': '2020-01-01T00:00:10.000123Z',
                        'tenant': {
                            'id': 'tenant_id1',
                            'name': 'tenant_name1',
                        },
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {'name': 'role1'},
                            {'name': 'role2'},
                        ],
                    }
                },
            },
            self.UUID_TOKEN_BIND: {
                'access': {
                    'token': {
                        'bind': {'kerberos': self.KERBEROS_BIND},
                        'id': self.UUID_TOKEN_BIND,
                        'expires': '2020-01-01T00:00:10.000123Z',
                        'tenant': {
                            'id': 'tenant_id1',
                            'name': 'tenant_name1',
                        },
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {'name': 'role1'},
                            {'name': 'role2'},
                        ],
                    },
                    'serviceCatalog': {}
                },
            },
            self.UUID_TOKEN_UNKNOWN_BIND: {
                'access': {
                    'token': {
                        'bind': {'FOO': 'BAR'},
                        'id': self.UUID_TOKEN_UNKNOWN_BIND,
                        'expires': '2020-01-01T00:00:10.000123Z',
                        'tenant': {
                            'id': 'tenant_id1',
                            'name': 'tenant_name1',
                        },
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {'name': 'role1'},
                            {'name': 'role2'},
                        ],
                    },
                    'serviceCatalog': {}
                },
            },
            self.v3_UUID_TOKEN_DEFAULT: {
                'token': {
                    'expires_at': '2020-01-01T00:00:10.000123Z',
                    'methods': ['password'],
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'project': {
                        'id': 'tenant_id1',
                        'name': 'tenant_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'roles': [
                        {'name': 'role1', 'id': 'Role1'},
                        {'name': 'role2', 'id': 'Role2'},
                    ],
                    'catalog': {}
                }
            },
            self.v3_UUID_TOKEN_UNSCOPED: {
                'token': {
                    'expires_at': '2020-01-01T00:00:10.000123Z',
                    'methods': ['password'],
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    }
                }
            },
            self.v3_UUID_TOKEN_DOMAIN_SCOPED: {
                'token': {
                    'expires_at': '2020-01-01T00:00:10.000123Z',
                    'methods': ['password'],
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'domain': {
                        'id': 'domain_id1',
                        'name': 'domain_name1',
                    },
                    'roles': [
                        {'name': 'role1', 'id': 'Role1'},
                        {'name': 'role2', 'id': 'Role2'},
                    ],
                    'catalog': {}
                }
            },
            self.SIGNED_TOKEN_SCOPED_KEY: {
                'access': {
                    'token': {
                        'id': self.SIGNED_TOKEN_SCOPED_KEY,
                        'expires': '2020-01-01T00:00:10.000123Z',
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'tenantId': 'tenant_id1',
                        'tenantName': 'tenant_name1',
                        'roles': [
                            {'name': 'role1'},
                            {'name': 'role2'},
                        ],
                    },
                },
            },
            self.SIGNED_TOKEN_UNSCOPED_KEY: {
                'access': {
                    'token': {
                        'id': self.SIGNED_TOKEN_UNSCOPED_KEY,
                        'expires': '2020-01-01T00:00:10.000123Z',
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {'name': 'role1'},
                            {'name': 'role2'},
                        ],
                    },
                },
            },
            self.SIGNED_v3_TOKEN_SCOPED_KEY: {
                'token': {
                    'expires_at': '2020-01-01T00:00:10.000123Z',
                    'methods': ['password'],
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'project': {
                        'id': 'tenant_id1',
                        'name': 'tenant_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'roles': [
                        {'name': 'role1'},
                        {'name': 'role2'}
                    ],
                    'catalog': {}
                }
            },
            self.v3_UUID_TOKEN_BIND: {
                'token': {
                    'bind': {'kerberos': self.KERBEROS_BIND},
                    'methods': ['password'],
                    'expires_at': '2020-01-01T00:00:10.000123Z',
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'project': {
                        'id': 'tenant_id1',
                        'name': 'tenant_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'roles': [
                        {'name': 'role1', 'id': 'Role1'},
                        {'name': 'role2', 'id': 'Role2'},
                    ],
                    'catalog': {}
                }
            },
            self.v3_UUID_TOKEN_UNKNOWN_BIND: {
                'token': {
                    'bind': {'FOO': 'BAR'},
                    'expires_at': '2020-01-01T00:00:10.000123Z',
                    'methods': ['password'],
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'project': {
                        'id': 'tenant_id1',
                        'name': 'tenant_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'roles': [
                        {'name': 'role1', 'id': 'Role1'},
                        {'name': 'role2', 'id': 'Role2'},
                    ],
                    'catalog': {}
                }
            },
        }
        self.TOKEN_RESPONSES[self.SIGNED_TOKEN_SCOPED_PKIZ_KEY] = (
            self.TOKEN_RESPONSES[self.SIGNED_TOKEN_SCOPED_KEY])
        self.TOKEN_RESPONSES[self.SIGNED_TOKEN_UNSCOPED_PKIZ_KEY] = (
            self.TOKEN_RESPONSES[self.SIGNED_TOKEN_UNSCOPED_KEY])
        self.TOKEN_RESPONSES[self.SIGNED_v3_TOKEN_SCOPED_PKIZ_KEY] = (
            self.TOKEN_RESPONSES[self.SIGNED_v3_TOKEN_SCOPED_KEY])

        self.JSON_TOKEN_RESPONSES = dict([(k, jsonutils.dumps(v)) for k, v in
                                          six.iteritems(self.TOKEN_RESPONSES)])
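
Example 167 builds every path to its signed fixture data with os.path.join, combining the CMSDIR, CERTDIR and KEYDIR constants with individual file names rather than concatenating strings. A minimal sketch of that pattern, using a hypothetical directory layout in place of the real test tree:

import os

# Hypothetical layout standing in for the CMSDIR/CERTDIR constants above.
EXAMPLES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'examples', 'pki')
CMSDIR = os.path.join(EXAMPLES_DIR, 'cms')
CERTDIR = os.path.join(EXAMPLES_DIR, 'certs')

def load_fixture(directory, filename):
    """Read one fixture file, joining the directory and file name portably."""
    with open(os.path.join(directory, filename)) as f:
        return f.read()

# e.g. load_fixture(CMSDIR, 'auth_token_scoped.pem')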

Example 168

Project: DLRN Source File: shell.py
def main():
    parser = argparse.ArgumentParser()
    # Some of the non-positional arguments are required, so change the text
    # saying "optional arguments" to just "arguments":
    parser._optionals.title = 'arguments'

    parser.add_argument('--config-file',
                        help="Config file (required).",
                        required=True)
    parser.add_argument('--info-repo',
                        help="use a local rdoinfo repo instead of "
                             "fetching the default one using rdopkg. Only"
                             "applies when pkginfo_driver is rdoinfo in"
                             "projects.ini")
    parser.add_argument('--build-env', action='append',
                        help="Variables for the build environment.")
    parser.add_argument('--local', action="store_true",
                        help="Use local git repos if possible.")
    parser.add_argument('--head-only', action="store_true",
                        help="Build from the most recent Git commit only.")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--project-name', action='append',
                       help="Build a specific project name only."
                            "Use multiple times to build more than one "
                            "project in a run.")
    group.add_argument('--package-name', action='append',
                       help="Build a specific package name only."
                            "Use multiple times to build more than one "
                            "package in a run.")
    parser.add_argument('--dev', action="store_true",
                        help="Don't reset packaging git repo, force build "
                             "and add public master repo for dependencies "
                             "(dev mode).")
    parser.add_argument('--log-commands', action="store_true",
                        help="Log the commands run by dlrn.")
    parser.add_argument('--use-public', action="store_true",
                        help="Use the public master repo for dependencies "
                             "when doing install verification.")
    parser.add_argument('--order', action="store_true",
                        help="Compute the build order according to the spec "
                             "files instead of the dates of the commits. "
                             "Implies --sequential.")
    parser.add_argument('--sequential', action="store_true",
                        help="Run all actions sequentially, regardless of the"
                             " number of workers specified in projects.ini.")
    parser.add_argument('--status', action="store_true",
                        help="Get the status of packages.")
    parser.add_argument('--recheck', action="store_true",
                        help="Force a rebuild for a particular package. "
                        "Imply --package-name")
    parser.add_argument('--version',
                        action='version',
                        version=version.version_info.version_string())
    parser.add_argument('--run',
                        help="Run a program instead of trying to build. "
                             "Imply --head-only")
    parser.add_argument('--stop', action="store_true",
                        help="Stop on error.")
    parser.add_argument('--verbose-mock', action="store_true",
                        help="Show verbose mock output during build.")

    options, args = parser.parse_known_args(sys.argv[1:])

    global verbose_mock
    verbose_mock = options.verbose_mock

    cp = configparser.RawConfigParser(default_options)
    cp.read(options.config_file)

    if options.log_commands is True:
        logging.getLogger("sh.command").setLevel(logging.INFO)
    if options.order is True:
        options.sequential = True

    session = getSession('sqlite:///commits.sqlite')
    config_options = ConfigOptions(cp)
    pkginfo_driver = config_options.pkginfo_driver
    global pkginfo
    pkginfo = import_object(pkginfo_driver, cfg_options=config_options)
    packages = pkginfo.getpackages(local_info_repo=options.info_repo,
                                   tags=config_options.tags,
                                   dev_mode=options.dev)

    if options.project_name:
        pkg_names = [p['name'] for p in packages
                     if p['project'] in options.project_name]
    elif options.package_name:
        pkg_names = options.package_name
    else:
        pkg_names = None

    if options.status is True:
        if not pkg_names:
            pkg_names = [p['name'] for p in packages]
        for name in pkg_names:
            commit = getLastProcessedCommit(session, name, 'invalid status')
            if commit:
                print(name, commit.status)
            else:
                print(name, 'NO_BUILD')
        sys.exit(0)

    if pkg_names:
        pkg_name = pkg_names[0]
    else:
        pkg_name = None

    if options.recheck is True:
        if not pkg_name:
            logger.error('Please use --package-name or --project-name '
                         'with --recheck.')
            sys.exit(1)
        commit = getLastProcessedCommit(session, pkg_name)
        if commit:
            if commit.status == 'SUCCESS':
                logger.error("Trying to recheck an already successful commit,"
                             " ignoring.")
                sys.exit(1)
            elif commit.status == 'RETRY':
                # In this case, we are going to retry anyway, so
                # do nothing and exit
                logger.warning("Trying to recheck a commit in RETRY state,"
                               " ignoring.")
                sys.exit(0)
            else:
                # We could set the status to RETRY here, but if we have gone
                # beyond max_retries it wouldn't work as expected. Thus, our
                # only chance is to remove the commit
                session.delete(commit)
                session.commit()
                sys.exit(0)
        else:
                logger.error("There are no existing commits for package %s"
                             % pkg_name)
                sys.exit(1)
    # when we run a program instead of building we don't care about
    # the commits, we just want to run once per package
    if options.run:
        options.head_only = True
    # Build a list of commits we need to process
    toprocess = []
    for package in packages:
        project = package["name"]
        since = "-1"
        commit = getLastProcessedCommit(session, project)
        if commit:
            # If we have switched source branches, we want to behave
            # as if no previous commits had been built, and only build
            # the last one
            if commit.commit_branch == getsourcebranch(package):
                # This will return all commits since the last handled commit
                # including the last handled commit, remove it later if needed.
                since = "--after=%d" % (commit.dt_commit)
            else:
                # The last processed commit belongs to a different branch. Just
                # in case, let's check if we built a previous commit from the
                # current branch
                commit = getLastBuiltCommit(session, project,
                                            getsourcebranch(package))
                if commit:
                    logger.info("Last commit belongs to another branch, but"
                                " we're ok with that")
                    since = "--after=%d" % (commit.dt_commit)

        if not pkg_name or package["name"] in pkg_names:
            project_toprocess = pkginfo.getinfo(project=project,
                                                package=package,
                                                since=since,
                                                local=options.local,
                                                dev_mode=options.dev)
            # If since == -1, then we only want to trigger a build for the
            # most recent change
            if since == "-1" or options.head_only:
                del project_toprocess[:-1]

            # The first entry in the list of commits is a commit we have
            # already processed, we want to process it again only if in dev
            # mode or distro hash has changed, we can't simply check against
            # the last commit in the db, as multiple commits can have the same
            # commit date
            for commit_toprocess in project_toprocess:
                if ((options.dev is True) or
                    options.run or
                    (not session.query(Commit).filter(
                        Commit.commit_hash == commit_toprocess.commit_hash,
                        Commit.distro_hash == commit_toprocess.distro_hash,
                        Commit.status != "RETRY")
                        .all())):
                    toprocess.append(commit_toprocess)

    # if requested do a sort according to build and install
    # dependencies
    if options.order is True and not pkg_name:
        # collect info from all spec files
        logger.info("Reading rpm spec files")
        projects = sorted([p['name'] for p in packages])

        speclist = []
        bootstraplist = []
        for project_name in projects:
            # Preprocess spec if needed
            pkginfo.preprocess(package_name=project_name)

            specpath = os.path.join(pkginfo.distgit_dir(project_name),
                                    project_name + '.spec')
            speclist.append(sh.rpmspec('-D', 'repo_bootstrap 1',
                                       '-P', specpath))

            # Check if repo_bootstrap is defined in the package.
            # If so, we'll need to rebuild after the whole bootstrap exercise
            rawspec = open(specpath).read(-1)
            if 'repo_bootstrap' in rawspec:
                bootstraplist.append(project_name)

        logger.debug("Packages to rebuild: %s" % bootstraplist)

        specs = RpmSpecCollection([RpmSpecFile(spec)
                                  for spec in speclist])
        # compute order according to BuildRequires
        logger.info("Computing build order")
        orders = specs.compute_order()
        # hack because the package name is not consistent with the directory
        # name and the spec file name
        if 'python-networking_arista' in orders:
            orders.insert(orders.index('python-networking_arista'),
                          'python-networking-arista')

        # sort the commits according to the score of their project and
        # then use the timestamp of the commits as a secondary key
        def my_cmp(a, b):
            if a.project_name == b.project_name:
                return cmp(a.dt_commit, b.dt_commit)
            return cmp(orders.index(a.project_name),
                       orders.index(b.project_name))
        toprocess.sort(cmp=my_cmp)
    else:
        # sort according to the timestamp of the commits
        toprocess.sort()

    exit_code = 0
    if options.sequential is True:
        for commit in toprocess:
            status = build_worker(packages, commit, run_cmd=options.run,
                                  build_env=options.build_env,
                                  dev_mode=options.dev,
                                  use_public=options.use_public,
                                  order=options.order, sequential=True)
            exception = status[3]
            if exception is not None:
                logger.error("Received exception %s" % exception)
            else:
                if not options.run:
                    post_build(status, packages, session)
            exit_value = process_build_result(status, packages, session,
                                              dev_mode=options.dev,
                                              run_cmd=options.run,
                                              stop=options.stop,
                                              build_env=options.build_env,
                                              head_only=options.head_only)
            if exit_value != 0:
                exit_code = exit_value
            if options.stop and exit_code != 0:
                return exit_code
    else:
        # Setup multiprocessing pool
        pool = multiprocessing.Pool(config_options.workers)
        # Use functools.partial to iterate on the commits to process,
        # while keeping a few options fixed
        build_worker_wrapper = partial(build_worker, packages,
                                       run_cmd=options.run,
                                       build_env=options.build_env,
                                       dev_mode=options.dev,
                                       use_public=options.use_public,
                                       order=options.order, sequential=False)
        iterator = pool.imap(build_worker_wrapper, toprocess)

        while True:
            try:
                status = iterator.next()
                exception = status[3]
                if exception is not None:
                    logger.info("Received exception %s" % exception)
                else:
                    # Create repo, build versions.csv file.
                    # This needs to be sequential
                    if not options.run:
                        post_build(status, packages, session)
                exit_value = process_build_result(status, packages,
                                                  session,
                                                  dev_mode=options.dev,
                                                  run_cmd=options.run,
                                                  stop=options.stop,
                                                  build_env=options.build_env,
                                                  head_only=options.head_only)
                if exit_value != 0:
                    exit_code = exit_value
                if options.stop and exit_code != 0:
                    return exit_code
            except StopIteration:
                break

    # If we were bootstrapping, set the packages that required it to RETRY
    if options.order is True and not pkg_name:
        for bpackage in bootstraplist:
            commit = getLastProcessedCommit(session, bpackage)
            commit.status = 'RETRY'
            session.add(commit)
            session.commit()
    genreports(packages, options.head_only)
    return exit_code
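
In Example 168, os.path.join appears once, in the ordering pass: each package's RPM spec path is derived by joining its distgit checkout directory with "<project>.spec". A minimal sketch of that construction (the directory and project name below are hypothetical):

import os

def spec_path(distgit_dir, project_name):
    """Mirror the specpath construction above: join the distgit checkout
    directory with the project's .spec file name."""
    return os.path.join(distgit_dir, project_name + '.spec')

# e.g. spec_path('/path/to/distgit/example-project', 'example-project')
#      -> '/path/to/distgit/example-project/example-project.spec'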

Example 169

Project: mysql-stats-to-graphite Source File: ss_get_mysql_stats.py
def ss_get_mysql_stats(options):
    # Process connection options and connect to MySQL.
    global cache_dir, poll_time, chk_options
        
    # Connect to MySQL.
    host = options.host
    user = options.user
    passwd = options.password
    port = options.port
    heartbeat = options.heartbeat
    
    db = MySQLdb.connect(host=host, port=port, user=user, passwd=passwd, cursorclass=MySQLdb.cursors.DictCursor)
    cursor = db.cursor()
    
    sanitized_host = host.replace(':', '').replace('/', '_')
    sanitized_host = sanitized_host + '_' + str(port)
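    # Build the per-host cache file path under cache_dir with os.path.join,
    # keeping separator handling portable instead of concatenating strings.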
    cache_file = os.path.join(cache_dir, '%s-mysql_graphite_stats.txt' % (sanitized_host))
    log_debug('Cache file is %s' % (cache_file))
    
    # First, check the cache.
    fp = None
    if not options.nocache:
        with open(cache_file, 'a+') as fp:
            try:
                fcntl.flock(fp, fcntl.LOCK_SH) # LOCK_SH
                try:
                    lines = open(cache_file).readlines()
                except Exception:
                    lines = []
                if os.path.getsize(cache_file) > 0 and \
                        os.path.getctime(cache_file) + (poll_time/2) > int(time.time()) and \
                        len(lines) > 0:
                    # The cache file is good to use
                    log_debug('Using the cache file')
                    return lines[0]
                else:
                    log_debug('The cache file seems too small or stale')
                    try:
                        # Escalate the lock to exclusive, so we can write to it.
                        fcntl.flock(fp, fcntl.LOCK_EX) # LOCK_EX
                        try:
                            lines = open(cache_file).readlines()
                        except Exception:
                            lines = []
                        if os.path.getsize(cache_file) > 0 and \
                                os.path.getctime(cache_file) + (poll_time/2) > int(time.time()) and \
                                len(lines) > 0:
                            log_debug("Using the cache file")
                            return lines[0]
                        fp.truncate(0)
                    except Exception:
                        pass
            except IOError:
                log_debug("Couldn't lock the cache file, ignoring it.")
                fp = None
    else:
        log_debug("Couldn't open cache file")
        fp = None
        
    # Set up variables
    status = { # Holds the result of SHOW STATUS, SHOW INNODB STATUS, etc
        # Define some indexes so they don't cause errors with += operations
        'relay_log_space'           : None,
        'binary_log_space'          : None,
        'current_transactions'      : 0,
        'locked_transactions'       : 0,
        'active_transactions'       : 0,
        'innodb_locked_tables'      : 0,
        'innodb_tables_in_use'      : 0,
        'innodb_lock_structs'       : 0,
        'innodb_lock_wait_secs'     : 0,
        'innodb_sem_waits'          : 0,
        'innodb_sem_wait_time_ms'   : 0,
        # Values for the 'state' column from SHOW PROCESSLIST (converted to
        # lowercase, with spaces replaced by underscores)
        'State_closing_tables'      : None,
        'State_copying_to_tmp_table': None,
        'State_end'                 : None,
        'State_freeing_items'       : None,
        'State_init'                : None,
        'State_login'               : None,
        'State_preparing'           : None,
        'State_reading_from_net'    : None,
        'State_sending_data'        : None,
        'State_sorting_result'      : None,
        'State_statistics'          : None,
        'State_updating'            : None,
        'State_writing_to_net'      : None,
        'State_none'                : None,
        'State_other'               : None, # Everything not listed above
    }
    
    # Get SHOW STATUS
    cursor.execute("SHOW /*!50002 GLOBAL */ STATUS")
    result = cursor.fetchall()
    for row in result:
        row = dict_change_key_case(row, case='lower')
        status[row.get('variable_name')] = row.get('value')

    # Get SHOW VARIABLES
    cursor.execute('SHOW VARIABLES')
    result = cursor.fetchall()
    for row in result:
        row = dict_change_key_case(row, case='lower')
        status[row.get('variable_name')] = row.get('value')
     
    # Get SHOW SLAVE STATUS 
    if chk_options.get('slave') and not options.no_replication_client:
        cursor.execute('SHOW SLAVE STATUS')
        result = cursor.fetchall()
        slave_status_row_gotten = 0
        for row in result:
            slave_status_row_gotten += 1
            # Must lowercase keys because different MySQL versions have different
            # lettercase.
            row = dict_change_key_case(row, case='lower')
            status['relay_log_space'] = row.get('relay_log_space')
            status['slave_lag'] = row.get('seconds_behind_master')
            
            if len(heartbeat) > 0:
                cursor.execute(
                    'SELECT MAX(GREATEST(0, UNIX_TIMESTAMP() - UNIX_TIMESTAMP(ts) - 1)) AS delay FROM %s' % (heartbeat)
                )
                result2 = cursor.fetchall()
                slave_delay_rows_gotten = 0
                for row2 in result2:
                    slave_delay_rows_gotten += 1
                    if type(row2) == dict and 'delay' in row2.keys():
                        status['slave_lag'] = row2.get('delay')
                    else:
                        log_debug("Couldn't get slave lag from %s" % (heartbeat))
                        
                if slave_delay_rows_gotten == 0:
                    log_debug('Got nothing from heartbeat query')
            
            # Scale slave_running and slave_stopped relative to the slave lag.
            status['slave_running'] = status.get('slave_lag') if row.get('slave_sql_running') == 'Yes' else 0
            status['slave_stopped'] = 0 if row.get('slave_sql_running') == 'Yes' else status.get('slave_lag')
        
        if slave_status_row_gotten == 0:
            log_debug('Got nothing from SHOW SLAVE STATUS')
    
    # Get SHOW MASTER STATUS
    if chk_options.get('master') and not options.no_super and status.get('log_bin') == 'ON':
        binlogs = []
        cursor.execute('SHOW MASTER LOGS')
        result = cursor.fetchall()
        for row in result:
            row = dict_change_key_case(row, case='lower')
            # Older versions of MySQL may not have the File_size column in the
            # results of the command.  Zero-size files indicate the user is
            # deleting binlogs manually from disk (bad user! bad!).
            if 'file_size' in row.keys() and row.get('file_size') > 0:
                binlogs.append(row.get('file_size'))
                
        if len(binlogs) > 0:
            status['binary_log_space'] = sum(binlogs)

    # Get SHOW PROCESSLIST and aggregate it by state
    if chk_options.get('procs'):
        cursor.execute('SHOW PROCESSLIST')
        result = cursor.fetchall()
        for row in result:
            state = row.get('state')
            if state is None:
                state = 'NULL'
            if state == '':
                state = 'none'
                
            # MySQL 5.5 replaces the 'Locked' state with a variety of "Waiting for
            # X lock" types of statuses.  Wrap these all back into "Locked" because
            # we don't really care about the type of locking it is.
            state = re.sub('^(Table lock|Waiting for .*lock)$', 'Locked', state)
            state = state.replace(' ', '_')
            if 'State_%s' % (state) in status.keys():
                increment(status, 'State_%s' % (state), 1)
            else:
                increment(status, 'State_other', 1)

    # Get SHOW INNODB STATUS and extract the desired metrics from it
    if chk_options.get('innodb') and status.get('have_innodb') == 'YES':
        cursor.execute('SHOW /*!50000 ENGINE*/ INNODB STATUS')
        result = cursor.fetchall()
        istatus_text = result[0].get('Status')
        istatus_vals = get_innodb_array(istatus_text)
        
        if chk_options.get('get_qrt') and status.get('have_response_time_distribution') == 'YES':
            log_debug('Getting query time histogram')
            i = 0
            cursor.execute(
                '''
                SELECT `count`, total * 1000000 AS total
                FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME
                WHERE `time` <> 'TOO LONG'
                '''
            )
            result = cursor.fetchall()
            for row in result:
                if i > 13:
                    # It's possible that the number of rows returned isn't 14.
                    # Don't add extra status counters.
                    break
                count_key = 'Query_time_count_%02d' % (i)
                total_key = 'Query_time_total_%02d' % (i)
                status[count_key] = row['count']
                status[total_key] = row['total']
                i += 1
            # It's also possible that the number of rows returned is too few.
            # Don't leave any status counters unassigned; it will break graphs.
            while i <= 13:
                count_key = 'Query_time_count_%02d' % (i)
                total_key = 'Query_time_total_%02d' % (i)
                status[count_key] = 0
                status[total_key] = 0
                i += 1
        else:
            log_debug('Not getting time histogram because it is not enabled')        
            
        # Override values from InnoDB parsing with values from SHOW STATUS,
        # because InnoDB status might not have everything and the SHOW STATUS is
        # to be preferred where possible.
        
        overrides = {
            'Innodb_buffer_pool_pages_data'  : 'database_pages',
            'Innodb_buffer_pool_pages_dirty' : 'modified_pages',
            'Innodb_buffer_pool_pages_free'  : 'free_pages',
            'Innodb_buffer_pool_pages_total' : 'pool_size',
            'Innodb_data_fsyncs'             : 'file_fsyncs',
            'Innodb_data_pending_reads'      : 'pending_normal_aio_reads',
            'Innodb_data_pending_writes'     : 'pending_normal_aio_writes',
            'Innodb_os_log_pending_fsyncs'   : 'pending_log_flushes',
            'Innodb_pages_created'           : 'pages_created',
            'Innodb_pages_read'              : 'pages_read',
            'Innodb_pages_written'           : 'pages_written',
            'Innodb_rows_deleted'            : 'rows_deleted',
            'Innodb_rows_inserted'           : 'rows_inserted',
            'Innodb_rows_read'               : 'rows_read',
            'Innodb_rows_updated'            : 'rows_updated',
        }
        
        # If the SHOW STATUS value exists, override...
        for k,v in overrides.items():
            if k in status.keys():
                log_debug('Override %s' % (k))
                istatus_vals[v] = status[k]
                
        # Now copy the values into $status.
        for k in istatus_vals.keys():
            status[k] = istatus_vals[k]
            
    # Make table_open_cache backwards-compatible (issue 63).
    if 'table_open_cache' in status.keys():
        status['table_cache'] = status.get('table_open_cache')
        
    # Compute how much of the key buffer is used and unflushed (issue 127).
    status['Key_buf_bytes_used'] = big_sub(status.get('key_buffer_size'), big_multiply(status.get('Key_blocks_unused'), status.get('key_cache_block_size')))
    status['Key_buf_bytes_unflushed'] = big_multiply(status.get('Key_blocks_not_flushed'), status.get('key_cache_block_size'))
    
    if 'unflushed_log' in status.keys() and status.get('unflushed_log'):
        # TODO: I'm not sure what the deal is here; need to debug this.  But the
        # unflushed log bytes spikes a lot sometimes and it's impossible for it to
        # be more than the log buffer.
        log_debug('Unflushed log: %s' % (status.get('unflushed_log')))
        status['unflushed_log'] = max(status.get('unflushed_log'), status.get('innodb_log_buffer_size'))
        
    keys = [
        'Key_read_requests',
        'Key_reads',
        'Key_write_requests',
        'Key_writes',
        'history_list',
        'innodb_transactions',
        'read_views',
        'current_transactions',
        'locked_transactions',
        'active_transactions',
        'pool_size',
        'free_pages',
        'database_pages',
        'modified_pages',
        'pages_read',
        'pages_created',
        'pages_written',
        'file_fsyncs',
        'file_reads',
        'file_writes',
        'log_writes',
        'pending_aio_log_ios',
        'pending_aio_sync_ios',
        'pending_buf_pool_flushes',
        'pending_chkp_writes',
        'pending_ibuf_aio_reads',
        'pending_log_flushes',
        'pending_log_writes',
        'pending_normal_aio_reads',
        'pending_normal_aio_writes',
        'ibuf_inserts',
        'ibuf_merged',
        'ibuf_merges',
        'spin_waits',
        'spin_rounds',
        'os_waits',
        'rows_inserted',
        'rows_updated',
        'rows_deleted',
        'rows_read',
        'Table_locks_waited',
        'Table_locks_immediate',
        'Slow_queries',
        'Open_files',
        'Open_tables',
        'Opened_tables',
        'innodb_open_files',
        'open_files_limit',
        'table_cache',
        'Aborted_clients',
        'Aborted_connects',
        'Max_used_connections',
        'Slow_launch_threads',
        'Threads_cached',
        'Threads_connected',
        'Threads_created',
        'Threads_running',
        'max_connections',
        'thread_cache_size',
        'Connections',
        'slave_running',
        'slave_stopped',
        'Slave_retried_transactions',
        'slave_lag',
        'Slave_open_temp_tables',
        'Qcache_free_blocks',
        'Qcache_free_memory',
        'Qcache_hits',
        'Qcache_inserts',
        'Qcache_lowmem_prunes',
        'Qcache_not_cached',
        'Qcache_queries_in_cache',
        'Qcache_total_blocks',
        'query_cache_size',
        'Questions',
        'Com_update',
        'Com_insert',
        'Com_select',
        'Com_delete',
        'Com_replace',
        'Com_load',
        'Com_update_multi',
        'Com_insert_select',
        'Com_delete_multi',
        'Com_replace_select',
        'Select_full_join',
        'Select_full_range_join',
        'Select_range',
        'Select_range_check',
        'Select_scan',
        'Sort_merge_passes',
        'Sort_range',
        'Sort_rows',
        'Sort_scan',
        'Created_tmp_tables',
        'Created_tmp_disk_tables',
        'Created_tmp_files',
        'Bytes_sent',
        'Bytes_received',
        'innodb_log_buffer_size',
        'unflushed_log',
        'log_bytes_flushed',
        'log_bytes_written',
        'relay_log_space',
        'binlog_cache_size',
        'Binlog_cache_disk_use',
        'Binlog_cache_use',
        'binary_log_space',
        'innodb_locked_tables',
        'innodb_lock_structs',
        'State_closing_tables',
        'State_copying_to_tmp_table',
        'State_end',
        'State_freeing_items',
        'State_init',
        'State_locked',
        'State_login',
        'State_preparing',
        'State_reading_from_net',
        'State_sending_data',
        'State_sorting_result',
        'State_statistics',
        'State_updating',
        'State_writing_to_net',
        'State_none',
        'State_other',
        'Handler_commit',
        'Handler_delete',
        'Handler_discover',
        'Handler_prepare',
        'Handler_read_first',
        'Handler_read_key',
        'Handler_read_next',
        'Handler_read_prev',
        'Handler_read_rnd',
        'Handler_read_rnd_next',
        'Handler_rollback',
        'Handler_savepoint',
        'Handler_savepoint_rollback',
        'Handler_update',
        'Handler_write',
        'innodb_tables_in_use',
        'innodb_lock_wait_secs',
        'hash_index_cells_total',
        'hash_index_cells_used',
        'total_mem_alloc',
        'additional_pool_alloc',
        'uncheckpointed_bytes',
        'ibuf_used_cells',
        'ibuf_free_cells',
        'ibuf_cell_count',
        'adaptive_hash_memory',
        'page_hash_memory',
        'dictionary_cache_memory',
        'file_system_memory',
        'lock_system_memory',
        'recovery_system_memory',
        'thread_hash_memory',
        'innodb_sem_waits',
        'innodb_sem_wait_time_ms',
        'Key_buf_bytes_unflushed',
        'Key_buf_bytes_used',
        'key_buffer_size',
        'Innodb_row_lock_time',
        'Innodb_row_lock_waits',
        'Query_time_count_00',
        'Query_time_count_01',
        'Query_time_count_02',
        'Query_time_count_03',
        'Query_time_count_04',
        'Query_time_count_05',
        'Query_time_count_06',
        'Query_time_count_07',
        'Query_time_count_08',
        'Query_time_count_09',
        'Query_time_count_10',
        'Query_time_count_11',
        'Query_time_count_12',
        'Query_time_count_13',
        'Query_time_total_00',
        'Query_time_total_01',
        'Query_time_total_02',
        'Query_time_total_03',
        'Query_time_total_04',
        'Query_time_total_05',
        'Query_time_total_06',
        'Query_time_total_07',
        'Query_time_total_08',
        'Query_time_total_09',
        'Query_time_total_10',
        'Query_time_total_11',
        'Query_time_total_12',
        'Query_time_total_13',
    ] 
    
    # Return the output.
    output = []
    for k in keys:
        # If the value isn't defined, return -1 which is lower than (most graphs')
        # minimum value of 0, so it'll be regarded as a missing value.
        val = status.get(k)
        if val is None:
            val = -1
        output.append('%s:%s' % (k, val))
        
    result = ' '.join(output)
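    # Presumably 'fp' is the cache-file handle opened earlier in this script; when
    # it exists, rewrite the cache file (whose path is in 'cache_file') with the
    # fresh result so a subsequent run can reuse it.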
    if fp is not None:
        with open(cache_file, 'w+') as fp:
            fp.write('%s\n' % result)
    db.close()
    return result
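The function above returns a single space-separated string of name:value pairs, with -1 standing in for any metric that could not be collected. Below is a minimal sketch, not part of the project above, of how a consumer might split that string back into a dictionary; the parse_stats helper and the sample input are hypothetical.

def parse_stats(result):
    # Split the space-separated "name:value" output back into a dict, converting
    # numeric strings and keeping -1 as the "missing metric" sentinel.
    stats = {}
    for pair in result.split():
        name, _, value = pair.partition(':')
        try:
            stats[name] = float(value) if '.' in value else int(value)
        except ValueError:
            stats[name] = value
    return stats

if __name__ == '__main__':
    sample = 'Key_reads:42 Threads_connected:7 slave_lag:-1'
    stats = parse_stats(sample)
    print(stats['Threads_connected'])  # 7
    print(stats['slave_lag'])          # -1 means the metric was unavailable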