re.search

Here are examples of the Python API re.search, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

166 Examples
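
Before the project excerpts, a minimal standalone sketch of the call being demonstrated (the sample string and pattern below are invented for illustration):

import re

line = "histogram: trans_read (11, 25, 32)"

# re.search scans the whole string and returns a Match object, or None if
# nothing matches, so the result can be used directly in a truth test.
m = re.search(r'trans_read \(([0-9,\s]+)\)', line)
if m:
    values = [int(v) for v in m.group(1).split(",")]
    print(values)                                    # [11, 25, 32]

# An optional flags argument such as re.IGNORECASE relaxes the match.
print(bool(re.search(r'TRANS_READ', line, re.IGNORECASE)))   # True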

Example 51

Project: aerospike-admin Source File: logreader.py
    def grepDiff(
            self,
            grep_str,
            file,
            start_tm="head",
            duration="",
            slice_tm="10",
            global_start_tm="",
            limit="", is_casesensitive=True):
        latencyPattern1 = r'%s (\d+)'
        latencyPattern2 = r'%s \(([0-9,\s]+)\)'
        latencyPattern3 = r'(\d+)\((\d+)\) %s'
        latencyPattern4 = r'%s \((\d+)'
        result = {"value": {}, "diff": {}}

        lines = self.grep([grep_str], None, None, file, False, is_casesensitive).strip().split('\n')
        if not lines or lines == ['']:
            return global_start_tm, result
        line = lines.pop(0)
        try:
            tail_line = lines[-1]
        except Exception:
            tail_line = line
        tail_tm = self.parse_dt(tail_line)
        if global_start_tm:
            start_tm = global_start_tm
        else:
            if start_tm == "head":
                start_tm = self.parse_dt(line)
            else:
                start_tm = self.parse_init_dt(start_tm, tail_tm)
                if start_tm > tail_tm:
                    # print "Wrong start time"
                    return global_start_tm, result

        while(self.parse_dt(line) < start_tm):
            try:
                line = lines.pop(0)
            except Exception:
                # print "Wrong start time"
                return global_start_tm, result

        if duration:
            duration_tm = self.parse_timedelta(duration)
            end_tm = start_tm + duration_tm
        else:
            end_tm = tail_tm + self.parse_timedelta("10")

        slice_size = self.parse_timedelta(slice_tm)

        if is_casesensitive:
            m1 = re.search(latencyPattern1 % (grep_str), line)
            m2 = re.search(latencyPattern2 % (grep_str), line)
            m3 = re.search(latencyPattern3 % (grep_str), line)
            m4 = re.search(latencyPattern4 % (grep_str), line)
        else:
            m1 = re.search(latencyPattern1 % (grep_str), line, re.IGNORECASE)
            m2 = re.search(latencyPattern2 % (grep_str), line, re.IGNORECASE)
            m3 = re.search(latencyPattern3 % (grep_str), line, re.IGNORECASE)
            m4 = re.search(latencyPattern4 % (grep_str), line, re.IGNORECASE)
        while(not m1 and not m2 and not m3 and not m4):
            try:
                line = lines.pop(0)
                if self.parse_dt(line) >= end_tm:
                    return global_start_tm, result
            except Exception:
                return global_start_tm, result
            if is_casesensitive:
                m1 = re.search(latencyPattern1 % (grep_str), line)
                m2 = re.search(latencyPattern2 % (grep_str), line)
                m3 = re.search(latencyPattern3 % (grep_str), line)
                m4 = re.search(latencyPattern4 % (grep_str), line)
            else:
                m1 = re.search(latencyPattern1 % (grep_str), line, re.IGNORECASE)
                m2 = re.search(latencyPattern2 % (grep_str), line, re.IGNORECASE)
                m3 = re.search(latencyPattern3 % (grep_str), line, re.IGNORECASE)
                m4 = re.search(latencyPattern4 % (grep_str), line, re.IGNORECASE)

        value = {}
        diff = {}

        slice_start = start_tm
        slice_end = slice_start + slice_size
        while(self.parse_dt(line) >= slice_end):
            #value[slice_start.strftime(DT_FMT)] = []
            #diff[slice_start.strftime(DT_FMT)] = []
            slice_start = slice_end
            slice_end = slice_start + slice_size

        if slice_end > end_tm:
            slice_end = end_tm

        pattern = ""
        prev = []
        slice_val = []
        pattern_type = 0

        # print str(m1) + " : " + str(m2) + " " + str(m3) + " " +str(m4)
        if m1:
            pattern = latencyPattern1 % (grep_str)
            slice_val.append(int(m1.group(1)))
        elif m2:
            pattern = latencyPattern2 % (grep_str)
            slice_val = map(lambda x: int(x), m2.group(1).split(","))
            pattern_type = 1
        elif m3:
            pattern = latencyPattern3 % (grep_str)
            slice_val = map(lambda x: int(x), list(m3.groups()))
            pattern_type = 2
        elif m4:
            pattern = latencyPattern4 % (grep_str)
            slice_val.append(int(m4.group(1)))
            pattern_type = 3
        else:
            print "no match"
            return global_start_tm, result

        for line in lines:
            # print line
            if self.parse_dt(line) >= end_tm:
                under_limit = True
                if limit:
                    under_limit = False
                if prev:
                    if limit:
                        temp = ([b - a for b, a in zip(slice_val, prev)])
                        if any(i >= limit for i in temp):
                            diff[slice_start.strftime(DT_FMT)] = (
                                [b for b in temp])
                            under_limit = True
                        temp = []
                    else:
                        diff[slice_start.strftime(DT_FMT)] = (
                            [b - a for b, a in zip(slice_val, prev)])
                else:
                    if not limit or any(i >= limit for i in slice_val):
                        diff[slice_start.strftime(DT_FMT)] = (
                            [b for b in slice_val])
                        under_limit = True

                if under_limit:
                    value[slice_start.strftime(DT_FMT)] = (
                        [b for b in slice_val])
                slice_val = []
                break

            if self.parse_dt(line) >= slice_end:
                under_limit = True
                if limit:
                    under_limit = False
                if prev:
                    if limit:
                        temp = ([b - a for b, a in zip(slice_val, prev)])
                        if any(i >= limit for i in temp):
                            diff[slice_start.strftime(DT_FMT)] = (
                                [b for b in temp])
                            under_limit = True
                        temp = []
                    else:
                        diff[slice_start.strftime(DT_FMT)] = (
                            [b - a for b, a in zip(slice_val, prev)])
                else:
                    if not limit or any(i >= limit for i in slice_val):
                        diff[slice_start.strftime(DT_FMT)] = (
                            [b for b in slice_val])
                        under_limit = True

                if under_limit:
                    value[slice_start.strftime(DT_FMT)] = (
                        [b for b in slice_val])
                prev = slice_val
                slice_start = slice_end
                slice_end = slice_start + slice_size
                slice_val = []
                if slice_end > end_tm:
                    slice_end = end_tm

            if is_casesensitive:
                m = re.search(pattern, line)
            else:
                m = re.search(pattern, line, re.IGNORECASE)

            if m:
                if pattern_type == 2:
                    current = map(lambda x: int(x), list(m.groups()))
                else:
                    current = map(lambda x: int(x), m.group(1).split(","))
                if slice_val:
                    slice_val = ([b + a for b, a in zip(current, slice_val)])
                else:
                    slice_val = ([b for b in current])

        if slice_val:
            under_limit = True
            if limit:
                under_limit = False
            if prev:
                if limit:
                    temp = ([b - a for b, a in zip(slice_val, prev)])
                    if any(i >= limit for i in temp):
                        diff[slice_start.strftime(DT_FMT)] = (
                            [b for b in temp])
                        under_limit = True
                    temp = []
                else:
                    diff[slice_start.strftime(DT_FMT)] = (
                        [b - a for b, a in zip(slice_val, prev)])
            else:
                if not limit or any(i >= limit for i in slice_val):
                    diff[slice_start.strftime(DT_FMT)] = (
                        [b for b in slice_val])
                    under_limit = True

            if under_limit:
                value[slice_start.strftime(DT_FMT)] = ([b for b in slice_val])

        result["value"] = value
        result["diff"] = diff

        return start_tm, result
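
Example 51 interpolates the grep string into four candidate layouts (latencyPattern1 to latencyPattern4) and retries each re.search with re.IGNORECASE when the search is case-insensitive. A condensed sketch of that idea follows; the helper name and sample line are invented, and re.escape is added here so a literal grep string cannot be misread as regex syntax (the excerpt above passes grep_str through unescaped):

import re

def first_latency_match(grep_str, line, is_casesensitive=True):
    # The four layouts a latency line may use, mirroring latencyPattern1..4.
    patterns = [
        r'%s (\d+)',
        r'%s \(([0-9,\s]+)\)',
        r'(\d+)\((\d+)\) %s',
        r'%s \((\d+)',
    ]
    flags = 0 if is_casesensitive else re.IGNORECASE
    for idx, pat in enumerate(patterns, start=1):
        m = re.search(pat % re.escape(grep_str), line, flags)
        if m:
            return idx, m
    return None, None

idx, m = first_latency_match("trans_read", "... TRANS_READ (11, 25, 32) ...",
                             is_casesensitive=False)
print(idx, m.groups())   # 2 ('11, 25, 32',)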

Example 52

Project: pgcli Source File: tabulate.py
Function: tabulate
def tabulate(tabular_data, headers=[], tablefmt="simple",
             dcmlfmt="d", floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------

    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, an iterable of dictionaries,
    a two-dimensional NumPy array, NumPy record array, or a Pandas'
    dataframe.


    Table headers
    -------------

    To print nice column headers, supply the second argument (`headers`):

      - `headers` can be an explicit list of column headers
      - if `headers="firstrow"`, then the first row of data is used
      - if `headers="keys"`, then dictionary keys or column indices are used

    Otherwise a headerless table is produced.

    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19


    Column alignment
    ----------------

    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).


    Table formats
    -------------

    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.

    `None` values are replaced with a `missingval` string:

    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----

    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
     'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
    currently supported formats.

    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451

    "simple" format is like Pandoc simple_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------

    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+

    "fancy_grid" draws a grid using box-drawing characters:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
    │ strings   │   numbers │
    ╞═══════════╪═══════════╡
    │ spam      │   41.9999 │
    ├───────────┼───────────┤
    │ eggs      │  451      │
    ╘═══════════╧═══════════╛

    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |

    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |


    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |

    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========

    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}

    "html" produces HTML markup:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="html"))
    <table>
    <tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
    <tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
    <tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
    </table>

    "latex" produces a tabular environment of LaTeX docuement markup:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\hline
    \\end{tabular}

    "latex_booktabs" produces a tabular environment of LaTeX docuement markup
    using the booktabs.sty package:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
    \\begin{tabular}{lr}
    \\toprule
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\bottomrule
    \\end{tabular}

    Also returns a tuple of the raw rows pulled from tabular_data
    """
    if tabular_data is None:
        tabular_data = []
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    _text_type_encode = lambda x: _text_type(utf8tounicode(x))
    plain_text = '\n'.join(['\t'.join(map(_text_type_encode, headers))] + \
                            ['\t'.join(map(_text_type_encode, row)) for row in list_of_lists])
    has_invisible = (re.search(_invisible_codes, plain_text) or
      re.search(_invisible_codes, missingval))
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = wcswidth

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, dcmlfmt, floatfmt, missingval) for v in c]
             for c,ct in zip(cols, coltypes)]

    # align columns
    aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        t_cols = cols or [['']] * len(headers)
        t_aligns = aligns or [stralign] * len(headers)
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, t_aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns), rows
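
In Example 52, re.search is used as a containment test: _invisible_codes is a module-level regex for ANSI escape sequences, and a non-None result switches tabulate to a width function that ignores those sequences. A standalone sketch of the same check follows; the regex and helper below are illustrative stand-ins, not tabulate's exact definitions:

import re

# ANSI colour codes take up characters but no visible columns.
INVISIBLE_CODES = re.compile(r'\x1b\[\d*m')    # illustrative pattern

def visible_width(text):
    # re.search returns a truthy Match as soon as one escape sequence appears
    # anywhere in the string, so the strip is only paid for when needed.
    if re.search(INVISIBLE_CODES, text):
        text = INVISIBLE_CODES.sub('', text)
    return len(text)

print(visible_width("\x1b[31mred\x1b[0m"))     # 3
print(visible_width("plain"))                  # 5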

Example 53

Project: reprozip Source File: functional.py
@in_temp_dir
def functional_tests(raise_warnings, interactive, run_vagrant, run_docker):
    rpz_python = [os.environ.get('REPROZIP_PYTHON', sys.executable)]
    rpuz_python = [os.environ.get('REPROUNZIP_PYTHON', sys.executable)]

    # Can't match on the SignalWarning category here because of a Python bug
    # http://bugs.python.org/issue22543
    if raise_warnings:
        rpz_python.extend(['-W', 'error:signal'])
        rpuz_python.extend(['-W', 'error:signal'])

    if 'COVER' in os.environ:
        rpz_python.extend(['-m'] + os.environ['COVER'].split(' '))
        rpuz_python.extend(['-m'] + os.environ['COVER'].split(' '))

    reprozip_main = tests.parent / 'reprozip/reprozip/main.py'
    reprounzip_main = tests.parent / 'reprounzip/reprounzip/main.py'

    verbose = ['-v'] * 3
    rpz = rpz_python + [reprozip_main.absolute().path] + verbose
    rpuz = rpuz_python + [reprounzip_main.absolute().path] + verbose

    print("Command lines are:\n%r\n%r" % (rpz, rpuz))

    # ########################################
    # testrun /bin/echo
    #

    output = check_output(rpz + ['testrun', '/bin/echo', 'outputhere'])
    assert any(b' 1 | /bin/echo outputhere ' in l
               for l in output.splitlines())

    output = check_output(rpz + ['testrun', '-a', '/fake/path/echo',
                                 '/bin/echo', 'outputhere'])
    assert any(b' 1 | (/bin/echo) /fake/path/echo outputhere ' in l
               for l in output.splitlines())

    # ########################################
    # testrun multiple commands
    #

    check_call(rpz + ['testrun', 'bash', '-c',
                      'cat ../../../../../etc/passwd;'
                      'cd /var/lib;'
                      'cat ../../etc/group'])
    check_call(rpz + ['trace', '--overwrite',
                      'bash', '-c', 'cat /etc/passwd;echo'])
    check_call(rpz + ['trace', '--continue',
                      'sh', '-c', 'cat /etc/group;/usr/bin/id'])
    check_call(rpz + ['pack'])
    check_call(rpuz + ['graph', 'graph.dot'])
    check_call(rpuz + ['graph', 'graph2.dot', 'experiment.rpz'])

    sudo = ['sudo', '-E']  # -E to keep REPROZIP_USAGE_STATS

    # ########################################
    # 'simple' program: trace, pack, info, unpack
    #

    def check_simple(args, stream, infile=1):
        output = check_output(args, stream).splitlines()
        try:
            first = output.index(b"Read 6 bytes")
        except ValueError:
            stderr.write("output = %r\n" % output)
            raise
        if infile == 1:
            assert output[first + 1] == b"a = 29, b = 13"
            assert output[first + 2] == b"result = 42"
        else:  # infile == 2
            assert output[first + 1] == b"a = 25, b = 11"
            assert output[first + 2] == b"result = 36"

    # Build
    build('simple', ['simple.c'])
    # Trace
    check_call(rpz + ['trace', '--overwrite', '-d', 'rpz-simple',
                      './simple',
                      (tests / 'simple_input.txt').path,
                      'simple_output.txt'])
    orig_output_location = Path('simple_output.txt').absolute()
    assert orig_output_location.is_file()
    with orig_output_location.open(encoding='utf-8') as fp:
        assert fp.read().strip() == '42'
    orig_output_location.remove()
    # Read config
    with Path('rpz-simple/config.yml').open(encoding='utf-8') as fp:
        conf = yaml.safe_load(fp)
    other_files = set(Path(f).absolute() for f in conf['other_files'])
    expected = [Path('simple'), (tests / 'simple_input.txt')]
    assert other_files.issuperset([f.resolve() for f in expected])
    # Check input and output files
    inputs_outputs = conf['inputs_outputs']
    # Exactly one input: "arg1", "...simple_input.txt"
    # Output: 'arg2', "...simple_output.txt"
    # There might be more output files: the C coverage files
    found = 0
    for fdict in inputs_outputs:
        if Path(fdict['path']).name == b'simple_input.txt':
            assert fdict['name'] == 'arg1'
            assert fdict['read_by_runs'] == [0]
            assert not fdict.get('written_by_runs')
            found |= 0x01
        elif Path(fdict['path']).name == b'simple_output.txt':
            assert fdict['name'] == 'arg2'
            assert not fdict.get('read_by_runs')
            assert fdict['written_by_runs'] == [0]
            found |= 0x02
        else:
            # No other inputs
            assert not fdict.get('read_by_runs')
    assert found == 0x03
    # Pack
    check_call(rpz + ['pack', '-d', 'rpz-simple', 'simple.rpz'])
    Path('simple').rename('simple.orig')
    # Info
    check_call(rpuz + ['info', 'simple.rpz'])
    # Show files
    check_call(rpuz + ['showfiles', 'simple.rpz'])
    # Lists packages
    check_call(rpuz + ['installpkgs', '--summary', 'simple.rpz'])
    # Unpack directory
    check_call(rpuz + ['directory', 'setup', 'simple.rpz', 'simpledir'])
    # Run directory
    check_simple(rpuz + ['directory', 'run', 'simpledir'], 'err')
    output_in_dir = join_root(Path('simpledir/root'), orig_output_location)
    with output_in_dir.open(encoding='utf-8') as fp:
        assert fp.read().strip() == '42'
    # Delete with wrong command (should fail)
    p = subprocess.Popen(rpuz + ['chroot', 'destroy', 'simpledir'],
                         stderr=subprocess.PIPE)
    out, err = p.communicate()
    assert p.poll() != 0
    err = err.splitlines()
    assert b"Wrong unpacker used" in err[0]
    assert err[1].startswith(b"usage: ")
    # Delete directory
    check_call(rpuz + ['directory', 'destroy', 'simpledir'])
    # Unpack chroot
    check_call(sudo + rpuz + ['chroot', 'setup', '--bind-magic-dirs',
                              'simple.rpz', 'simplechroot'])
    try:
        output_in_chroot = join_root(Path('simplechroot/root'),
                                     orig_output_location)
        # Run chroot
        check_simple(sudo + rpuz + ['chroot', 'run', 'simplechroot'], 'err')
        with output_in_chroot.open(encoding='utf-8') as fp:
            assert fp.read().strip() == '42'
        # Get output file
        check_call(sudo + rpuz + ['chroot', 'download', 'simplechroot',
                                  'arg2:output1.txt'])
        with Path('output1.txt').open(encoding='utf-8') as fp:
            assert fp.read().strip() == '42'
        # Get random file
        check_call(sudo + rpuz + ['chroot', 'download', 'simplechroot',
                                  '%s:binc.bin' % (Path.cwd() / 'simple')])
        assert same_files('simple.orig', 'binc.bin')
        # Replace input file
        check_call(sudo + rpuz + ['chroot', 'upload', 'simplechroot',
                                  '%s:arg1' % (tests / 'simple_input2.txt')])
        check_call(sudo + rpuz + ['chroot', 'upload', 'simplechroot'])
        # Run again
        check_simple(sudo + rpuz + ['chroot', 'run', 'simplechroot'], 'err', 2)
        with output_in_chroot.open(encoding='utf-8') as fp:
            assert fp.read().strip() == '36'
        # Reset input file
        check_call(sudo + rpuz + ['chroot', 'upload', 'simplechroot', ':arg1'])
        # Run again
        check_simple(sudo + rpuz + ['chroot', 'run', 'simplechroot'], 'err')
        with output_in_chroot.open(encoding='utf-8') as fp:
            assert fp.read().strip() == '42'
        # Replace input file via path
        check_call(sudo + rpuz + ['chroot', 'upload', 'simplechroot',
                                  '%s:%s' % (tests / 'simple_input2.txt',
                                             tests / 'simple_input.txt')])
        check_call(sudo + rpuz + ['chroot', 'upload', 'simplechroot'])
        # Run again
        check_simple(sudo + rpuz + ['chroot', 'run', 'simplechroot'], 'err', 2)
        # Delete with wrong command (should fail)
        p = subprocess.Popen(rpuz + ['directory', 'destroy', 'simplechroot'],
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        assert p.poll() != 0
        err = err.splitlines()
        assert b"Wrong unpacker used" in err[0]
        assert err[1].startswith(b"usage:")
    finally:
        # Delete chroot
        check_call(sudo + rpuz + ['chroot', 'destroy', 'simplechroot'])

    # Use reprounzip-vistrails with chroot
    check_call(sudo + rpuz + ['chroot', 'setup', '--bind-magic-dirs',
                              'simple.rpz', 'simplechroot_vt'])
    try:
        output_in_chroot = join_root(Path('simplechroot_vt/root'),
                                     orig_output_location)
        # Run using reprounzip-vistrails
        check_simple(
            sudo + rpuz_python +
            ['-m', 'reprounzip.plugins.vistrails', '1',
             'chroot', 'simplechroot_vt', '0',
             '--input-file', 'arg1:%s' % (tests / 'simple_input2.txt'),
             '--output-file', 'arg2:output_vt.txt'],
            'err', 2)
        with output_in_chroot.open(encoding='utf-8') as fp:
            assert fp.read().strip() == '36'
    finally:
        # Delete chroot
        check_call(sudo + rpuz + ['chroot', 'destroy', 'simplechroot_vt'])

    if not (tests / 'vagrant').exists():
        check_call(['sudo', 'sh', '-c',
                    'mkdir %(d)s; chmod 777 %(d)s' % {'d': tests / 'vagrant'}])

    # Unpack Vagrant-chroot
    check_call(rpuz + ['vagrant', 'setup/create', '--memory', '512',
                       '--use-chroot', 'simple.rpz',
                       (tests / 'vagrant/simplevagrantchroot').path])
    print("\nVagrant project set up in simplevagrantchroot")
    try:
        if run_vagrant:
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrantchroot').path],
                         'out')
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               'arg2:voutput1.txt'])
            with Path('voutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Get random file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               '%s:binvc.bin' % (Path.cwd() / 'simple')])
            assert same_files('simple.orig', 'binvc.bin')
            # Replace input file
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               '%s:arg1' % (tests / 'simple_input2.txt')])
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrantchroot').path])
            # Run again
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrantchroot').path],
                         'out', 2)
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               'arg2:voutput2.txt'])
            with Path('voutput2.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '36'
            # Reset input file
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               ':arg1'])
            # Run again
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrantchroot').path],
                         'out')
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               'arg2:voutput1.txt'])
            with Path('voutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Replace input file via path
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               '%s:%s' % (tests / 'simple_input2.txt',
                                          tests / 'simple_input.txt')])
            # Run again
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrantchroot').path],
                         'out', 2)
            # Destroy
            check_call(rpuz + ['vagrant', 'destroy',
                               (tests / 'vagrant/simplevagrantchroot').path])
        elif interactive:
            print("Test and press enter")
            sys.stdin.readline()
    finally:
        if (tests / 'vagrant/simplevagrantchroot').exists():
            (tests / 'vagrant/simplevagrantchroot').rmtree()
    # Unpack Vagrant without chroot
    check_call(rpuz + ['vagrant', 'setup/create', '--dont-use-chroot',
                       'simple.rpz',
                       (tests / 'vagrant/simplevagrant').path])
    print("\nVagrant project set up in simplevagrant")
    try:
        if run_vagrant:
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrant').path],
                         'out')
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrant').path,
                               'arg2:woutput1.txt'])
            with Path('woutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Get random file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrant').path,
                               '%s:binvs.bin' % (Path.cwd() / 'simple')])
            assert same_files('simple.orig', 'binvs.bin')
            # Replace input file
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrant').path,
                               '%s:arg1' % (tests / 'simple_input2.txt')])
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrant').path])
            # Run again
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrant').path],
                         'out', 2)
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrant').path,
                               'arg2:woutput2.txt'])
            with Path('woutput2.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '36'
            # Reset input file
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrant').path,
                               ':arg1'])
            # Run again
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrant').path],
                         'out')
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrant').path,
                               'arg2:voutput1.txt'])
            with Path('voutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Destroy
            check_call(rpuz + ['vagrant', 'destroy',
                               (tests / 'vagrant/simplevagrant').path])
        elif interactive:
            print("Test and press enter")
            sys.stdin.readline()
    finally:
        if (tests / 'vagrant/simplevagrant').exists():
            (tests / 'vagrant/simplevagrant').rmtree()

    # Unpack Docker
    check_call(rpuz + ['docker', 'setup/create', 'simple.rpz', 'simpledocker'])
    print("\nDocker project set up in simpledocker")
    try:
        if run_docker:
            check_call(rpuz + ['docker', 'setup/build', 'simpledocker'])
            check_simple(rpuz + ['docker', 'run', 'simpledocker'], 'out')
            # Get output file
            check_call(rpuz + ['docker', 'download', 'simpledocker',
                               'arg2:doutput1.txt'])
            with Path('doutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Get random file
            check_call(rpuz + ['docker', 'download', 'simpledocker',
                               '%s:bind.bin' % (Path.cwd() / 'simple')])
            assert same_files('simple.orig', 'bind.bin')
            # Replace input file
            check_call(rpuz + ['docker', 'upload', 'simpledocker',
                               '%s:arg1' % (tests / 'simple_input2.txt')])
            check_call(rpuz + ['docker', 'upload', 'simpledocker'])
            check_call(rpuz + ['showfiles', 'simpledocker'])
            # Run again
            check_simple(rpuz + ['docker', 'run', 'simpledocker'], 'out', 2)
            # Get output file
            check_call(rpuz + ['docker', 'download', 'simpledocker',
                               'arg2:doutput2.txt'])
            with Path('doutput2.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '36'
            # Reset input file
            check_call(rpuz + ['docker', 'upload', 'simpledocker',
                               ':arg1'])
            # Run again
            check_simple(rpuz + ['docker', 'run', 'simpledocker'], 'out')
            # Get output file
            check_call(rpuz + ['docker', 'download', 'simpledocker',
                               'arg2:doutput1.txt'])
            with Path('doutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Replace input file via path
            check_call(rpuz + ['docker', 'upload', 'simpledocker',
                               '%s:%s' % (tests / 'simple_input2.txt',
                                          tests / 'simple_input.txt')])
            # Run again
            check_simple(rpuz + ['docker', 'run', 'simpledocker'], 'out', 2)
            # Destroy
            check_call(rpuz + ['docker', 'destroy', 'simpledocker'])
        elif interactive:
            print("Test and press enter")
            sys.stdin.readline()
    finally:
        if Path('simpledocker').exists():
            Path('simpledocker').rmtree()

    # ########################################
    # 'threads' program: testrun
    #

    # Build
    build('threads', ['threads.c'], ['-lpthread'])
    # Trace
    output = check_output(rpz + ['testrun', './threads'], 'err')
    assert any(b'successfully exec\'d /bin/./echo' in l
               for l in output.splitlines())

    # ########################################
    # 'threads2' program: testrun
    #

    # Build
    build('threads2', ['threads2.c'], ['-lpthread'])
    # Trace
    output = check_output(rpz + ['testrun', './threads2'], 'err')
    assert any(b'successfully exec\'d /bin/echo' in l
               for l in output.splitlines())

    # ########################################
    # 'segv' program: testrun
    #

    # Build
    build('segv', ['segv.c'])
    # Trace
    check_call(rpz + ['testrun', './segv'])

    # ########################################
    # 'exec_echo' program: trace, pack, run --cmdline
    #

    # Build
    build('exec_echo', ['exec_echo.c'])
    # Trace
    check_call(rpz + ['trace', '--overwrite',
                      './exec_echo', 'originalexecechooutput'])
    # Pack
    check_call(rpz + ['pack', 'exec_echo.rpz'])
    # Unpack chroot
    check_call(sudo + rpuz + ['chroot', 'setup',
                              'exec_echo.rpz', 'echochroot'])
    try:
        # Run original command-line
        output = check_output(sudo + rpuz + ['chroot', 'run',
                                             'echochroot'])
        assert output == b'originalexecechooutput\n'
        # Prints out command-line
        output = check_output(sudo + rpuz + ['chroot', 'run',
                                             'echochroot', '--cmdline'])
        assert any(b'./exec_echo originalexecechooutput' == s.strip()
                   for s in output.split(b'\n'))
        # Run with different command-line
        output = check_output(sudo + rpuz + [
            'chroot', 'run', 'echochroot',
            '--cmdline', './exec_echo', 'changedexecechooutput'])
        assert output == b'changedexecechooutput\n'
    finally:
        check_call(sudo + rpuz + ['chroot', 'destroy', 'echochroot'])

    # ########################################
    # 'exec_echo' program: testrun
    # This is built with -m32 so that we transition:
    #   python (x64) -> exec_echo (i386) -> echo (x64)
    #

    if sys.maxsize > 2 ** 32:
        # Build
        build('exec_echo32', ['exec_echo.c'], ['-m32'])
        # Trace
        check_call(rpz + ['testrun', './exec_echo32 42'])
    else:
        print("Can't try exec_echo transitions: not running on 64bits")

    # ########################################
    # Tracing non-existing program
    #

    check_call(rpz + ['testrun', './doesntexist'])

    # ########################################
    # 'connect' program: testrun
    #

    # Build
    build('connect', ['connect.c'])
    # Trace
    err = check_output(rpz + ['testrun', './connect'], 'err')
    err = err.split(b'\n')
    assert not any(b'program exited with non-zero code' in l for l in err)
    assert any(re.search(br'process connected to [0-9.]+:80', l)
               for l in err)

    # ########################################
    # 'vfork' program: testrun
    #

    # Build
    build('vfork', ['vfork.c'])
    # Trace
    err = check_output(rpz + ['testrun', './vfork'], 'err')
    err = err.split(b'\n')
    assert not any(b'program exited with non-zero code' in l for l in err)

    # ########################################
    # 'rename' program: trace
    #

    # Build
    build('rename', ['rename.c'])
    # Trace
    check_call(rpz + ['trace', '--overwrite', '-d', 'rename-trace',
                      './rename'])
    with Path('rename-trace/config.yml').open(encoding='utf-8') as fp:
        config = yaml.safe_load(fp)
    # Check that written files were logged
    database = Path.cwd() / 'rename-trace/trace.sqlite3'
    if PY3:
        # On PY3, connect() only accepts unicode
        conn = sqlite3.connect(str(database))
    else:
        conn = sqlite3.connect(database.path)
    conn.row_factory = sqlite3.Row
    rows = conn.execute(
        '''
        SELECT name FROM opened_files
        ''')
    files = set(Path(r[0]) for r in rows)
    for n in ('dir1/file', 'dir2/file', 'dir2/brokensymlink', 'dir2/symlink'):
        if (Path.cwd() / n) not in files:
            raise AssertionError("Missing file: %s" % (Path.cwd() / n))
    conn.close()
    # Check that created files won't be packed
    for f in config.get('other_files'):
        if 'dir2' in Path(f).parent.components:
            raise AssertionError("Created file shouldn't be packed: %s" %
                                 Path(f))

    # ########################################
    # Test shebang corner-cases
    #

    Path('a').symlink('b')
    with Path('b').open('w') as fp:
        fp.write('#!%s 0\nsome content\n' % (Path.cwd() / 'c'))
    Path('b').chmod(0o744)
    Path('c').symlink('d')
    with Path('d').open('w') as fp:
        fp.write('#!e')
    Path('d').chmod(0o744)
    with Path('e').open('w') as fp:
        fp.write('#!/bin/echo')
    Path('e').chmod(0o744)

    # Trace
    out = check_output(rpz + ['trace', '--overwrite', '-d', 'shebang-trace',
                              '--dont-identify-packages', './a', '1', '2'])
    out = out.splitlines()[0]
    assert out == ('e %s 0 ./a 1 2' % (Path.cwd() / 'c')).encode('ascii')

    # Check config
    with Path('shebang-trace/config.yml').open(encoding='utf-8') as fp:
        config = yaml.safe_load(fp)
    other_files = set(Path(f) for f in config['other_files']
                      if f.startswith('%s/' % Path.cwd()))

    # Check database
    database = Path.cwd() / 'shebang-trace/trace.sqlite3'
    if PY3:
        # On PY3, connect() only accepts unicode
        conn = sqlite3.connect(str(database))
    else:
        conn = sqlite3.connect(database.path)
    conn.row_factory = sqlite3.Row
    rows = conn.execute(
        '''
        SELECT name FROM opened_files
        ''')
    opened = [Path(r[0]) for r in rows
              if r[0].startswith('%s/' % Path.cwd())]
    rows = conn.execute(
        '''
        SELECT name, argv FROM executed_files
        ''')
    executed = [(Path(r[0]), r[1]) for r in rows
                if Path(r[0]).lies_under(Path.cwd())]

    print("other_files: %r" % sorted(other_files))
    print("opened: %r" % opened)
    print("executed: %r" % executed)

    assert other_files == set(Path.cwd() / p
                              for p in ('a', 'b', 'c', 'd', 'e'))
    assert opened == [Path.cwd() / 'c', Path.cwd() / 'e']
    assert executed == [(Path.cwd() / 'a', './a\x001\x002\x00')]

    # ########################################
    # Test old packages
    #

    old_packages = [
        ('simple-0.4.0.rpz',
         'https://drive.google.com/uc?export=download&id=0B3ucPz7GSthBVG4xZW1V'
         'eDhXNTQ'),
        ('simple-0.6.0.rpz',
         'https://drive.google.com/uc?export=download&id=0B3ucPz7GSthBbl9SUjhr'
         'cUdtbGs'),
        ('simple-0.7.1.rpz',
         'https://drive.google.com/uc?export=download&id=0B3ucPz7GSthBRGp2Vm5V'
         'QVpWOGs'),
    ]
    for name, url in old_packages:
        print("Testing old package %s" % name)
        f = Path(name)
        if not f.exists():
            download_file(url, f)
        # Info
        check_call(rpuz + ['info', name])
        # Show files
        check_call(rpuz + ['showfiles', name])
        # Lists packages
        check_call(rpuz + ['installpkgs', '--summary', name])
        # Unpack directory
        check_call(rpuz + ['directory', 'setup', name, 'simpledir'])
        # Run directory
        check_simple(rpuz + ['directory', 'run', 'simpledir'], 'err')
        output_in_dir = Path('simpledir/root/tmp')
        output_in_dir = output_in_dir.listdir('reprozip_*')[0]
        output_in_dir = output_in_dir / 'simple_output.txt'
        with output_in_dir.open(encoding='utf-8') as fp:
            assert fp.read().strip() == '42'
        # Delete with wrong command (should fail)
        p = subprocess.Popen(rpuz + ['chroot', 'destroy', 'simpledir'],
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        assert p.poll() != 0
        err = err.splitlines()
        assert b"Wrong unpacker used" in err[0]
        assert err[1].startswith(b"usage: ")
        # Delete directory
        check_call(rpuz + ['directory', 'destroy', 'simpledir'])

    # ########################################
    # Copies back coverage report
    #

    coverage = Path('.coverage')
    if coverage.exists():
        coverage.copyfile(tests.parent / '.coverage.runpy')
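
The single re.search call in Example 53 (the 'connect' check) matches a bytes pattern against lines of captured stderr; since check_output returns bytes, the pattern must be a bytes literal as well, or re raises a TypeError. A minimal sketch of that assertion style, with the log lines invented for illustration:

import re

stderr_lines = [
    b"[reprozip] tracing ./connect",
    b"[reprozip] process connected to 93.184.216.34:80",
]

# br'...' gives a bytes pattern, matching the bytes lines element for element.
assert any(re.search(br'process connected to [0-9.]+:80', line)
           for line in stderr_lines)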

Example 54

Project: django-nonrel Source File: makemessages.py
def make_messages(locale=None, domain='django', verbosity='1', all=False,
        extensions=None, symlinks=False, ignore_patterns=[], no_wrap=False,
        no_obsolete=False):
    """
    Uses the locale directory from the Django SVN tree or an application/
    project to process all
    """
    # Need to ensure that the i18n framework is enabled
    from django.conf import settings
    if settings.configured:
        settings.USE_I18N = True
    else:
        settings.configure(USE_I18N = True)

    from django.utils.translation import templatize

    invoked_for_django = False
    if os.path.isdir(os.path.join('conf', 'locale')):
        localedir = os.path.abspath(os.path.join('conf', 'locale'))
        invoked_for_django = True
        # Ignoring all contrib apps
        ignore_patterns += ['contrib/*']
    elif os.path.isdir('locale'):
        localedir = os.path.abspath('locale')
    else:
        raise CommandError("This script should be run from the Django SVN tree or your project or app tree. If you did indeed run it from the SVN checkout or your project or application, maybe you are just missing the conf/locale (in the django tree) or locale (for project and application) directory? It is not created automatically, you have to create it by hand if you want to enable i18n for your project or application.")

    if domain not in ('django', 'djangojs'):
        raise CommandError("currently makemessages only supports domains 'django' and 'djangojs'")

    if (locale is None and not all) or domain is None:
        message = "Type '%s help %s' for usage information." % (os.path.basename(sys.argv[0]), sys.argv[1])
        raise CommandError(message)

    # We require gettext version 0.15 or newer.
    output = _popen('xgettext --version')[0]
    match = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)', output)
    if match:
        xversion = (int(match.group('major')), int(match.group('minor')))
        if xversion < (0, 15):
            raise CommandError("Django internationalization requires GNU gettext 0.15 or newer. You are using version %s, please upgrade your gettext toolset." % match.group())

    languages = []
    if locale is not None:
        languages.append(locale)
    elif all:
        locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % localedir))
        languages = [os.path.basename(l) for l in locale_dirs]

    wrap = no_wrap and '--no-wrap' or ''

    for locale in languages:
        if verbosity > 0:
            print "processing language", locale
        basedir = os.path.join(localedir, locale, 'LC_MESSAGES')
        if not os.path.isdir(basedir):
            os.makedirs(basedir)

        pofile = os.path.join(basedir, '%s.po' % domain)
        potfile = os.path.join(basedir, '%s.pot' % domain)

        if os.path.exists(potfile):
            os.unlink(potfile)

        for dirpath, file in find_files(".", ignore_patterns, verbosity, symlinks=symlinks):
            file_base, file_ext = os.path.splitext(file)
            if domain == 'djangojs' and file_ext in extensions:
                if verbosity > 1:
                    sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
                src = open(os.path.join(dirpath, file), "rU").read()
                src = pythonize_re.sub('\n#', src)
                thefile = '%s.py' % file
                f = open(os.path.join(dirpath, thefile), "w")
                try:
                    f.write(src)
                finally:
                    f.close()
                cmd = (
                    'xgettext -d %s -L Perl %s --keyword=gettext_noop '
                    '--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
                    '--keyword=pgettext:1c,2 --keyword=npgettext:1c,2,3 '
                    '--from-code UTF-8 --add-comments=Translators -o - "%s"' % (
                        domain, wrap, os.path.join(dirpath, thefile)
                    )
                )
                msgs, errors = _popen(cmd)
                if errors:
                    os.unlink(os.path.join(dirpath, thefile))
                    if os.path.exists(potfile):
                        os.unlink(potfile)
                    raise CommandError(
                        "errors happened while running xgettext on %s\n%s" %
                        (file, errors))
                if msgs:
                    old = '#: ' + os.path.join(dirpath, thefile)[2:]
                    new = '#: ' + os.path.join(dirpath, file)[2:]
                    msgs = msgs.replace(old, new)
                    if os.path.exists(potfile):
                        # Strip the header
                        msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
                    else:
                        msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
                    f = open(potfile, 'ab')
                    try:
                        f.write(msgs)
                    finally:
                        f.close()
                os.unlink(os.path.join(dirpath, thefile))
            elif domain == 'django' and (file_ext == '.py' or file_ext in extensions):
                thefile = file
                orig_file = os.path.join(dirpath, file)
                if file_ext in extensions:
                    src = open(orig_file, "rU").read()
                    thefile = '%s.py' % file
                    f = open(os.path.join(dirpath, thefile), "w")
                    try:
                        f.write(templatize(src, orig_file[2:]))
                    finally:
                        f.close()
                if verbosity > 1:
                    sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
                cmd = (
                    'xgettext -d %s -L Python %s --keyword=gettext_noop '
                    '--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
                    '--keyword=ugettext_noop --keyword=ugettext_lazy '
                    '--keyword=ungettext_lazy:1,2 --keyword=pgettext:1c,2 '
                    '--keyword=npgettext:1c,2,3 --keyword=pgettext_lazy:1c,2 '
                    '--keyword=npgettext_lazy:1c,2,3 --from-code UTF-8 '
                    '--add-comments=Translators -o - "%s"' % (
                        domain, wrap, os.path.join(dirpath, thefile))
                )
                msgs, errors = _popen(cmd)
                if errors:
                    if thefile != file:
                        os.unlink(os.path.join(dirpath, thefile))
                    if os.path.exists(potfile):
                        os.unlink(potfile)
                    raise CommandError(
                        "errors happened while running xgettext on %s\n%s" %
                        (file, errors))
                if msgs:
                    if thefile != file:
                        old = '#: ' + os.path.join(dirpath, thefile)[2:]
                        new = '#: ' + orig_file[2:]
                        msgs = msgs.replace(old, new)
                    if os.path.exists(potfile):
                        # Strip the header
                        msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
                    else:
                        msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
                    f = open(potfile, 'ab')
                    try:
                        f.write(msgs)
                    finally:
                        f.close()
                if thefile != file:
                    os.unlink(os.path.join(dirpath, thefile))

        if os.path.exists(potfile):
            msgs, errors = _popen('msguniq %s --to-code=utf-8 "%s"' %
                                  (wrap, potfile))
            if errors:
                os.unlink(potfile)
                raise CommandError(
                    "errors happened while running msguniq\n%s" % errors)
            if os.path.exists(pofile):
                f = open(potfile, 'w')
                try:
                    f.write(msgs)
                finally:
                    f.close()
                msgs, errors = _popen('msgmerge %s -q "%s" "%s"' %
                                      (wrap, pofile, potfile))
                if errors:
                    os.unlink(potfile)
                    raise CommandError(
                        "errors happened while running msgmerge\n%s" % errors)
            elif not invoked_for_django:
                msgs = copy_plural_forms(msgs, locale, domain, verbosity)
            msgs = msgs.replace(
                "#. #-#-#-#-#  %s.pot (PACKAGE VERSION)  #-#-#-#-#\n" % domain, "")
            f = open(pofile, 'wb')
            try:
                f.write(msgs)
            finally:
                f.close()
            os.unlink(potfile)
            if no_obsolete:
                msgs, errors = _popen('msgattrib %s -o "%s" --no-obsolete "%s"' %
                                      (wrap, pofile, pofile))
                if errors:
                    raise CommandError(
                        "errors happened while running msgattrib\n%s" % errors)

Example 55

Project: RentCrawer Source File: RentCrawler.py
    def run(self):
        try:
            print "Crawler is running now."
            # create database
            conn = sqlite3.connect(self.config.db_file)
            conn.text_factory = str
            cursor = conn.cursor()
            cursor.execute(
                'CREATE TABLE IF NOT EXISTS rent(id INTEGER PRIMARY KEY, title TEXT, url TEXT UNIQUE,itemtime timestamp, crawtime timestamp ,author TEXT, source TEXT,keyword TEXT,note TEXT)')
            cursor.close()
            start_time = RentCrawlerUtils.getTimeFromStr(self.config.start_time)
            print "searching data after date ", start_time

            cursor = conn.cursor()

            search_list = list(self.config.key_search_word_list)
            custom_black_list=list(self.config.custom_black_list)

            # New SMTH
            if self.config.newsmth_enable:
                newsmth_main_url = 'http://www.newsmth.net'
                newsmth_regex = r'<table class="board-list tiz"(?:\s|\S)*</td></tr></table>'
                #must do like this
                for keyword in search_list:
                    print '>>>>>>>>>>Search newsmth %s ...' % keyword
                    url = 'http://www.newsmth.net/nForum/s/article?ajax&au&b=HouseRent&t1=' + keyword
                    r = requests.get(url, headers=self.newsmth_headers)
                    if r.status_code == 200:
                        # print r.text
                        match = re.search(newsmth_regex, r.text)
                        if match:
                            try:
                                text = match.group(0)
                                soup = BeautifulSoup(text)
                                for tr in soup.find_all('tr')[1:]:
                                    title_element = tr.find_all(attrs={'class': 'title_9'})[0]
                                    title_text = title_element.text

                                    # skip titles that match the blacklists
                                    if RentCrawlerUtils.isInBalckList(custom_black_list, title_text):
                                        continue
                                    if RentCrawlerUtils.isInBalckList(self.smth_black_list, title_text):
                                        continue
                                    time_text = tr.find_all(attrs={'class': 'title_10'})[0].text  # e.g. 13:47:32 or 2015-05-12

                                    # skip items posted before the configured start date
                                    if RentCrawlerUtils.getTimeFromStr(time_text) < start_time:
                                        continue
                                    link_text = newsmth_main_url + title_element.find_all('a')[0].get('href').replace(
                                        '/nForum/article/', '/nForum/#!article/')
                                    author_text = tr.find_all(attrs={'class': 'title_12'})[0].find_all('a')[0].text
                                    try:
                                        cursor.execute(
                                            'INSERT INTO rent(id,title,url,itemtime,crawtime,author,source,keyword,note) VALUES(NULL,?,?,?,?,?,?,?,?)',
                                            [title_text, link_text, RentCrawlerUtils.getTimeFromStr(time_text),
                                             datetime.datetime.now(), author_text, keyword,
                                             'newsmth', ''])
                                        print 'add new data:', title_text, time_text, author_text, link_text, keyword
                                        #/nForum/article/HouseRent/225839 /nForum/#!article/HouseRent/225839
                                    except sqlite3.Error, e:
                                        print 'data exists:', title_text, link_text, e
                            except Exception, e:
                                print "error match table", e
                        else:
                            print "no data"
                    else:
                        print 'request url error %s -status code: %s:' % (url, r.status_code)
            else:
                print 'newsmth not enabled'
            # end newsmth

            #Douban: Beijing Rent,Beijing Rent Douban
            if self.config.douban_enable:
                print 'douban'
                douban_url = ['http://www.douban.com/group/search?group=35417&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=26926&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=262626&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=252218&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=279962&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=257523&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=232413&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=135042&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=252091&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=10479&cat=1013&sort=time&q=',
                              'http://www.douban.com/group/search?group=221207&cat=1013&sort=time&q=']
                douban_url_name = (u'Douban-北京租房', u'Douban-北京租房豆瓣', u'Douban-北京无中介租房',
                                   u'Douban-北京租房专家', u'Douban-北京租房(非中介)', u'Douban-北京租房房东联盟(中介勿扰) ',
                                   u'Douban-北京租房(密探)', u'Douban-北漂爱合租(租房)', u'Douban-豆瓣♥北京♥租房',
                                   u'Douban-吃喝玩乐在北京', u'Douban-北京CBD租房')

                for i in range(len(list(douban_url))):
                    print 'start i->',i
                    for j in range(len(search_list)):
                        keyword = search_list[j]
                        print 'start i->j %s->%s %s' %(i,j,keyword)
                        print '>>>>>>>>>>Search %s  %s ...' % (douban_url_name[i].encode('utf-8'), keyword)
                        url_link = douban_url[i] + keyword
                        r = requests.get(url_link, headers=self.douban_headers)
                        if r.status_code == 200:
                            try:
                                if i==0:
                                    self.douban_headers['Cookie']=r.cookies
                                soup = BeautifulSoup(r.text)
                                table = soup.find_all(attrs={'class': 'olt'})[0]
                                for tr in table.find_all('tr'):
                                    td = tr.find_all('td')

                                    title_element = td[0].find_all('a')[0]
                                    title_text = title_element.get('title')
                                    # skip titles that match the blacklists
                                    if RentCrawlerUtils.isInBalckList(custom_black_list, title_text):
                                        continue
                                    if RentCrawlerUtils.isInBalckList(self.douban_black_list, title_text):
                                        continue
                                    time_text = td[1].get('title')

                                    # skip items posted before the configured start date
                                    if RentCrawlerUtils.getTimeFromStr(time_text) < start_time:
                                        continue
                                    link_text = title_element.get('href')

                                    reply_count = td[2].find_all('span')[0].text
                                    try:
                                        cursor.execute(
                                            'INSERT INTO rent(id,title,url,itemtime,crawtime,author,source,keyword,note) VALUES(NULL,?,?,?,?,?,?,?,?)',
                                            [title_text, link_text, RentCrawlerUtils.getTimeFromStr(time_text),
                                             datetime.datetime.now(), '', keyword,
                                             douban_url_name[i], reply_count])
                                        print 'add new data:', title_text, time_text, reply_count, link_text, keyword
                                    except sqlite3.Error, e:
                                        print 'data exists:', title_text, link_text, e
                            except Exception, e:
                                print "error match table", e
                        else:
                            print 'request url error %s -status code: %s:' % (url_link, r.status_code)
                        time.sleep(self.config.douban_sleep_time)
                        #print 'end i->',i
            else:
                print 'douban not enabled'
            #end douban

            cursor.close()

            cursor = conn.cursor()
            cursor.execute('SELECT * FROM rent ORDER BY itemtime DESC ,crawtime DESC')
            values = cursor.fetchall()

            #export to html file
            file = open(self.config.result_file, 'w')
            with file:
                file.writelines('<html><head>')
                file.writelines('<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>')
                file.writelines('<title>Rent Crawer Result</title></head><body>')
                file.writelines('<table rules=all>')
                file.writelines('<h1>' + prog_info + '</h1>')
                file.writelines(
                    '<tr><td>索引Index</td><td>标题Title</td><td>链接Link</td><td>发帖时间Page Time</td><td>抓取时间Crawl Time</td><td>作者Author</td><td>关键字Keyword</td><td>来源Source</td></tr>')
                for row in values:
                    file.write('<tr>')
                    for member in row:
                        file.write('<td>')
                        member = str(member)
                        if 'http' in member:
                            file.write('<a href="' + member + '" target="_blank">' + member + '</a>')
                        else:
                            file.write(member)
                        file.write('</td>')
                    file.writelines('</tr>')
                file.writelines('</table>')
                file.writelines('</body></html>')
            cursor.close()
        except Exception, e:
            print "Error:", e.message
        finally:
            conn.commit()
            conn.close()
            print "Search Finish,Please open result.html to view result"

Example 56

Project: xmldataset Source File: __init__.py
    def _expand_profile(self, profile_input):
        """Expands the supplied profile to a python data structure"""

        # ------------------------------------------------------------------------------
        #   Declare the Indentation History as starting at 0
        # ------------------------------------------------------------------------------
        indentation_history = [0]

        # ------------------------------------------------------------------------------
        #   Profile holders
        # ------------------------------------------------------------------------------
        complex_profile = {}
        complex_profile_history = [complex_profile]
        current_profile_position = complex_profile_history[-1]

        # ------------------------------------------------------------------------------
        #   Capture available tokens from the profile input using a carriage return
        #   as a separator
        # ------------------------------------------------------------------------------
        tokens = profile_input.split('\n')

        # ------------------------------------------------------------------------------
        #   Process each token
        # ------------------------------------------------------------------------------
        for token in tokens:

            # ------------------------------------------------------------------------------
            #   Declare holders for the length_indentation and token_data
            # ------------------------------------------------------------------------------
            length_indentation = None
            token_data = None

            # ------------------------------------------------------------------------------
            #   Attempt to match token with indentation
            # ------------------------------------------------------------------------------
            match = re.search('(\s+)(.*)', token) # pylint: disable=W1401

            # ------------------------------------------------------------------------------
            #   If a match is found capture the indentation and the token_data
            # ------------------------------------------------------------------------------
            if match:
                length_indentation = len(match.group(1))
                token_data = match.group(2).strip()
            # ------------------------------------------------------------------------------
            #   Otherwise mark the indentation as 0 and remove any carriage returns from
            #   the token_data
            # ------------------------------------------------------------------------------
            else:
                length_indentation = 0
                token_data = token.strip()

            # ------------------------------------------------------------------------------
            #   If token data is available
            # ------------------------------------------------------------------------------
            if token_data:

                # ------------------------------------------------------------------------------
                #   Store the previous_indentation information
                # ------------------------------------------------------------------------------
                previous_indentation = indentation_history[-1]

                # ------------------------------------------------------------------------------
                #    If the indentation has increased, store the indentation in the history
                # ------------------------------------------------------------------------------
                if length_indentation > previous_indentation:
                    indentation_history.append(length_indentation)

                # ------------------------------------------------------------------------------
                #    Otherwise if the indentation has decreased
                # ------------------------------------------------------------------------------
                elif previous_indentation > length_indentation:

                    # ------------------------------------------------------------------------------
                    #    Step back through the indentation
                    # ------------------------------------------------------------------------------
                    while previous_indentation > length_indentation:
                        indentation_history.pop()
                        previous_indentation = indentation_history[-1]

                        # ------------------------------------------------------------------------------
                        #    ... And the associated history
                        # ------------------------------------------------------------------------------
                        complex_profile_history.pop()
                        current_profile_position = complex_profile_history[-1]

                # ------------------------------------------------------------------------------
                #    If the token contains a value ( signified by an = sign )
                # ------------------------------------------------------------------------------
                if '=' in token_data:

                    # ------------------------------------------------------------------------------
                    #    Capture the key and the record_holder
                    # ------------------------------------------------------------------------------
                    key, record_holder = token_data.split('=')

                    # ------------------------------------------------------------------------------
                    #    Remove leading and ending space using strip() for the key and record_holder
                    # ------------------------------------------------------------------------------
                    key = key.strip()
                    record_holder = record_holder.strip()

                    # ------------------------------------------------------------------------------
                    #    Check for an Ignore marker
                    # ------------------------------------------------------------------------------
                    if record_holder == '__IGNORE__':

                        if key not in current_profile_position:
                            current_profile_position[key] = {}

                        current_profile_position[key]['__IGNORE__'] = 1

                    # ------------------------------------------------------------------------------
                    #    Otherwise continue
                    # ------------------------------------------------------------------------------
                    else:

                        # ------------------------------------------------------------------------------
                        #    Split unprocessed records
                        # ------------------------------------------------------------------------------
                        records_unprocessed = record_holder.split(' ')

                        # ------------------------------------------------------------------------------
                        #    Capture New Dataset markers where available
                        # ------------------------------------------------------------------------------
                        if key == '__NEW_DATASET__':
                            current_profile_position['__NEW_DATASET__'] = records_unprocessed

                        # ------------------------------------------------------------------------------
                        #    Otherwise Capture External Values where available
                        # ------------------------------------------------------------------------------
                        elif key == '__EXTERNAL_VALUE__':
                            current_profile_position['__EXTERNAL_VALUE__'] = records_unprocessed

                        # ------------------------------------------------------------------------------
                        #    Otherwise Capture New External Values where available
                        # ------------------------------------------------------------------------------
                        elif key == '__NEW_EXTERNAL_VALUE_HOLDER__':
                            current_profile_position['__NEW_EXTERNAL_VALUE_HOLDER__'] = records_unprocessed

                        # ------------------------------------------------------------------------------
                        #    Otherwise Capture Processing markers where available
                        # ------------------------------------------------------------------------------
                        elif key == '__DATASET_PROCESSING__':
                            current_profile_position['__DATASET_PROCESSING__'] = records_unprocessed

                        # ------------------------------------------------------------------------------
                        #    Otherwise Capture Always Follow markers where available
                        # ------------------------------------------------------------------------------
                        elif key == '__ALWAYS_FOLLOW__':
                            current_profile_position['__ALWAYS_FOLLOW__'] = records_unprocessed

                        # ------------------------------------------------------------------------------
                        #    Otherwise ....
                        # ------------------------------------------------------------------------------
                        else:

                            # ------------------------------------------------------------------------------
                            #    Add an order processing sequence to the profile, create where necessary
                            #    or append
                            # ------------------------------------------------------------------------------
                            if '__order__' not in current_profile_position:
                                current_profile_position['__order__'] = [key]
                            else:
                                current_profile_position['__order__'].append(key)

                            # ------------------------------------------------------------------------------
                            #    Process records_unprocessed
                            # ------------------------------------------------------------------------------
                            for record_unprocessed in records_unprocessed:

                                # ------------------------------------------------------------------------------
                                #    Add the key
                                # ------------------------------------------------------------------------------
                                if key not in current_profile_position:
                                    current_profile_position[key] = {}

                                # ------------------------------------------------------------------------------
                                #    Create the record holder or append a new record
                                # ------------------------------------------------------------------------------
                                if '__record__' not in current_profile_position[key]:
                                    current_profile_position[key]['__record__'] = [{}]
                                else:
                                    current_profile_position[key]['__record__'].append({})

                                # ------------------------------------------------------------------------------
                                #    Process each record
                                # ------------------------------------------------------------------------------
                                for record in record_unprocessed.split(','):
                                    record_key, record_value = record.split(':')
                                    current_profile_position[key]['__record__'][-1][record_key] = record_value

                # ------------------------------------------------------------------------------
                #    If the token does not contain an = sign
                # ------------------------------------------------------------------------------
                else:

                    # ------------------------------------------------------------------------------
                    #    Create a named dictionary
                    # ------------------------------------------------------------------------------
                    holder = {}

                    # ------------------------------------------------------------------------------
                    #    Store the named dictionary as the current token
                    # ------------------------------------------------------------------------------
                    current_profile_position[token_data] = holder

                    # ------------------------------------------------------------------------------
                    #    Add an order processing sequence
                    # ------------------------------------------------------------------------------
                    if '__order__' not in current_profile_position:
                        current_profile_position['__order__'] = [token_data]
                    else:
                        current_profile_position['__order__'].append(token_data)

                    # ------------------------------------------------------------------------------
                    #    Update the current position to the named dict
                    # ------------------------------------------------------------------------------
                    current_profile_position = holder

                    # ------------------------------------------------------------------------------
                    #    Add to the current history
                    # ------------------------------------------------------------------------------
                    complex_profile_history.append(holder)

        # ------------------------------------------------------------------------------
        #    Return the complex profile
        # ------------------------------------------------------------------------------
        return complex_profile
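
The only re.search here splits each profile line into its leading whitespace (group 1) and the remaining text (group 2), which drives the indentation-based nesting above. A small standalone illustration of that split on made-up tokens:

import re

for token in ['parent', '    child = value', '        grandchild = a:b']:
    match = re.search(r'(\s+)(.*)', token)
    if match:
        indentation = len(match.group(1))
        data = match.group(2).strip()
    else:
        indentation = 0
        data = token.strip()
    print(indentation, data)
# 0 parent
# 4 child = value
# 8 grandchild = a:b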

Example 57

Project: stonix Source File: NetworkTuning.py
    def fixSolaris1(self):
        sfc = {"ndd -set /dev/tcp tcp_rev_src_routes": "0",
               "ndd -set /dev/tcp tcp_conn_req_max_q0": "4096",
               "ndd -set /dev/tcp tcp_conn_req_max_q": "1024",
               "ndd -set /dev/tcp tcp_extra_priv_ports_add": "6112",
               "ndd -set /dev/arp arp_cleanup_interval": "60000",
               "ndd -set /dev/ip ip_forward_src_routed": "0",
               "ndd -set /dev/ip ip6_forward_src_routed": "0",
               "ndd -set /dev/ip ip_forward_directed_broadcasts": "0",
               "ndd -set /dev/ip ip_respond_to_timestamp": "0",
               "ndd -set /dev/ip ip_respond_to_timestamp_broadcast": "0",
               "ndd -set /dev/ip ip_respond_to_address_mask_broadcast": "0",
               "ndd -set /dev/ip ip_respond_to_echo_broadcast": "0",
               "ndd -set /dev/ip ip_ire_arp_interval": "60000",
               "ndd -set /dev/ip ip_ignore_redirect": "1",
               "ndd -set /dev/ip ip6_ignore_redirect": "1",
               "ndd -set /dev/ip ip_strict_dst_multihoming": "1",
               "ndd -set /dev/ip ip6_strict_dst_multihoming": "1",
               "ndd -set /dev/ip ip_send_redirects": "0"}
        path = "/etc/init.d/S70ndd-nettune"
        tmppath = "/etc/init.d/S70ndd-nettune.tmp"
        success = True
        # file exists...
        if os.path.exists(path):
            contents = readFile(path, self.logger)
            # but it's blank do the following:
            if not contents:
                tempstring = ""
                for key in sfc:
                    tempstring += key + " " + sfc[key] + "\n"
            # it's not blank do the following
            else:
                if not checkPerms(path, [0, 3, 484], self.logger):
                    self.iditerator += 1
                    myid = iterate(self.iditerator, self.rulenumber)
                    if not setPerms(path, [0, 3, 484], self.logger,
                                    self.statechglogger, myid):
                        self.detailedresults = "Couldn\'t'set the permissions \
                                                                   on: " + path
                        self.logger.log(LogPriority.DEBUG,
                                        self.detailedresults)
                        success = False
                tempstring = ""
                for key in sfc:
                    found = False
                    newcontents = []
                    keysplit = key.split()
                    for line in contents:
                        linesplit = line.split()
                        if re.search(keysplit[3] + "?", line):
                            if len(linesplit) != 5:
                                continue
                            else:
                                if linesplit[0].strip() != "ndd":
                                    continue
                                elif linesplit[1].strip() != "-set":
                                    continue
                                elif linesplit[2].strip() != \
                                        keysplit[2].strip():
                                    continue
                                elif linesplit[4].strip() != sfc[key]:
                                    continue
                                else:
                                    found = True
                                    newcontents.append(line)
                        else:
                            newcontents.append(line)
                    if not found:
                        newcontents.append(key + " " + sfc[key] + "\n")
                    contents = newcontents
                for line in contents:
                    tempstring += line
            self.iditerator += 1
            myid = iterate(self.iditerator, self.rulenumber)
            if writeFile(tmppath, tempstring, self.logger):
                event = {"eventtype": "conf",
                         "filepath": path}
                self.statechglogger.recordchgevent(myid, event)
                self.statechglogger.recordfilechange(path, tmppath, myid)
                os.rename(tmppath, path)
                os.chown(path, 0, 3)
                os.chmod(path, 484)
                resetsecon(path)
            else:
                success = False
        # file doesn't exist, just write all directives to file
        else:
            try:
                self.iditerator += 1
                myid = iterate(self.iditerator, self.rulenumber)
                file(path, "w+")
                event = {"eventtype": "creation",
                         "filepath": path,
                         "id": myid}
                self.statechglogger.recordchgevent(myid, event)
                tempstring = ""
                for key in sfc:
                    tempstring += key + " " + sfc[key] + "\n"
                if writeFile(path, tempstring, self.logger):
                    if not checkPerms(path, [0, 3, 484], self.logger):
                        os.chown(path, 0, 3)
                        os.chmod(path, 484)
                else:
                    success = False
            except IOError:
                self.detailedresults = "unable to open the specified file"
                self.detailedresults += traceback.format_exc()
                self.logger.log(LogPriority.DEBUG, self.detailedresults)
                success = False

        sympath = "/etc/init.d/S70ndd-nettune"
        path = "/etc/rc1.d/K70ndd-nettune"
        cmd = ["ln", "-s", sympath, path]

        # symbolic link doesn't exist
        if not os.path.exists(path):
            if not self.cmdhelper.executeCommand(cmd):
                self.detailedresults = "unable to create link between "
                self.detailedresults += sympath + " and " + path
                self.logger.log(LogPriority.DEBUG, self.detailedresults)
                success = False
        # file exists but isn't a link
        elif not os.path.islink(path):
            os.remove("/etc/rc1.d/K70ndd-nettune")
            if not self.cmdhelper.executeCommand(cmd):
                self.detailedresults = "unable to create link between "
                self.detailedresults += sympath + " and " + path
                self.logger.log(LogPriority.DEBUG, self.detailedresults)
                success = False
        # check to see if link exists but doesn't point to the right file
        else:
            cmdls = ["ls", "-l", path]
            if not self.cmdhelper.executeCommand(cmdls):
                self.detailedresults = "unable to run command: ls -l " + path
                self.logger.log(LogPriority.DEBUG, self.detailedresults)
                success = False
            else:
                output = self.cmdhelper.getOutput()
                error = self.cmdhelper.getError()
                if output:
                    if output[-2] == "->" and output[-1] != sympath:
                        os.remove(path)
                        if not self.cmdhelper.executeCommand(cmd):
                            self.detailedresults = "unable to create link"
                            self.logger.log(LogPriority.DEBUG,
                                            self.detailedresults)
                            success = False
                elif error:
                    self.logger.log(LogPriority.DEBUG, error)
                    success = False
        # symbolic link doesn't exist
        path = "/etc/rc2.d/S70ndd-nettune"
        cmd = ["ln", "-s", sympath, path]
        if not os.path.exists(path):
            if not self.cmdhelper.executeCommand(cmd):
                self.detailedresults = "unable to create link between "
                self.detailedresults += sympath + " and " + path + "\n"
                self.logger.log(LogPriority.DEBUG, self.detailedresults)
                success = False
        # file exists but isn't a link
        elif not os.path.islink(path):
            os.remove(path)
            if not self.cmdhelper.executeCommand(cmd):
                self.detailedresults = "unable to create link between "
                self.detailedresults += sympath + " and " + path + "\n"
                self.logger.log(LogPriority.DEBUG, self.detailedresults)
                success = False
        # check to see if link exists but doesn't point to the right file
        else:
            cmd = ["ls", "-l", path]
            if not self.cmdhelper.executeCommand(cmd):
                self.detailedresults = "unable to run command: ls -l " + path
                self.logger.log(LogPriority.DEBUG, self.detailedresults)
                success = False
            else:
                output = self.cmdhelper.getOutput()
                error = self.cmdhelper.getError()
                if output:
                    if output[-2] == "->" and output[-1] != sympath:
                        os.remove(path)
                        if not self.cmdhelper.executeCommand(cmd):
                            self.detailedresults = "unable to create link"
                            self.logger.log(LogPriority.DEBUG,
                                            self.detailedresults)
                            success = False
                elif error:
                    self.logger.log(LogPriority.DEBUG, error)
                    success = False
        # symbolic link doesn't exist
        path = "/etc/rcS.d/K70ndd-nettune"
        cmd = ["ln", "-s", sympath, path]
        if not os.path.exists(path):
            if not self.cmdhelper.executeCommand(cmd):
                self.detailedresults = "unable to create link between "
                self.detailedresults += sympath + " and " + path
                self.logger.log(LogPriority.DEBUG, self.detailedresults)
                success = False
        # file exists but isn't a link
        elif not os.path.islink(path):
            os.remove(path)
            if not self.cmdhelper.executeCommand(cmd):
                self.detailedresults = "unable to create link between "
                self.detailedresults += sympath + " and " + path
                self.logger.log(LogPriority.DEBUG, self.detailedresults)
                success = False
        # check to see if link exists but doesn't point to the right file
        else:
            cmd = ["ls", "-l", path]
            if not self.cmdhelper.executeCommand(cmd):
                self.detailedresults = "unable to run command: ls -l " + path
                self.logger.log(LogPriority.DEBUG, self.detailedresults)
                success = False
            else:
                output = self.cmdhelper.getOutput()
                error = self.cmdhelper.getError()
                if output:
                    if output[-2] == "->" and output[-1] != sympath:
                        if not self.cmdhelper.executeCommand(cmd):
                            self.detailedresults = "unable to create link"
                            self.logger.log(LogPriority.DEBUG,
                                            self.detailedresults)
                            success = False
                elif error:
                    self.logger.log(LogPriority.DEBUG, [error])
                    success = False
        return success
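
The re.search in this rule only checks whether the parameter name from the expected directive appears in an existing line (the appended "?" merely makes the name's final character optional); the surrounding elif chain then validates the remaining tokens. A minimal sketch of that presence check on one sample directive:

import re

key = "ndd -set /dev/ip ip_ignore_redirect"
keysplit = key.split()   # ['ndd', '-set', '/dev/ip', 'ip_ignore_redirect']
line = "ndd -set /dev/ip ip_ignore_redirect 1\n"

# keysplit[3] + "?" just makes the last character optional; it is enough
# here to detect that the directive is already present in the line.
if re.search(keysplit[3] + "?", line):
    linesplit = line.split()
    print(linesplit[2], linesplit[4])   # /dev/ip 1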

Example 58

Project: youtube-dl Source File: rtmp.py
Function: real_download
    def real_download(self, filename, info_dict):
        def run_rtmpdump(args):
            start = time.time()
            resume_percent = None
            resume_downloaded_data_len = None
            proc = subprocess.Popen(args, stderr=subprocess.PIPE)
            cursor_in_new_line = True
            proc_stderr_closed = False
            while not proc_stderr_closed:
                # read line from stderr
                line = ''
                while True:
                    char = proc.stderr.read(1)
                    if not char:
                        proc_stderr_closed = True
                        break
                    if char in [b'\r', b'\n']:
                        break
                    line += char.decode('ascii', 'replace')
                if not line:
                    # proc_stderr_closed is True
                    continue
                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                if mobj:
                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                    percent = float(mobj.group(2))
                    if not resume_percent:
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
                    time_now = time.time()
                    eta = self.calc_eta(start, time_now, 100 - resume_percent, percent - resume_percent)
                    speed = self.calc_speed(start, time_now, downloaded_data_len - resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        data_len = int(downloaded_data_len * 100 / percent)
                    self._hook_progress({
                        'status': 'downloading',
                        'downloaded_bytes': downloaded_data_len,
                        'total_bytes_estimate': data_len,
                        'tmpfilename': tmpfilename,
                        'filename': filename,
                        'eta': eta,
                        'elapsed': time_now - start,
                        'speed': speed,
                    })
                    cursor_in_new_line = False
                else:
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                    if mobj:
                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
                        self._hook_progress({
                            'downloaded_bytes': downloaded_data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'status': 'downloading',
                            'elapsed': time_now - start,
                            'speed': speed,
                        })
                        cursor_in_new_line = False
                    elif self.params.get('verbose', False):
                        if not cursor_in_new_line:
                            self.to_screen('')
                        cursor_in_new_line = True
                        self.to_screen('[rtmpdump] ' + line)
            proc.wait()
            if not cursor_in_new_line:
                self.to_screen('')
            return proc.returncode

        url = info_dict['url']
        player_url = info_dict.get('player_url')
        page_url = info_dict.get('page_url')
        app = info_dict.get('app')
        play_path = info_dict.get('play_path')
        tc_url = info_dict.get('tc_url')
        flash_version = info_dict.get('flash_version')
        live = info_dict.get('rtmp_live', False)
        conn = info_dict.get('rtmp_conn')
        protocol = info_dict.get('rtmp_protocol')
        real_time = info_dict.get('rtmp_real_time', False)
        no_resume = info_dict.get('no_resume', False)
        continue_dl = self.params.get('continuedl', True)

        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        test = self.params.get('test', False)

        # Check for rtmpdump first
        if not check_executable('rtmpdump', ['-h']):
            self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = [
            'rtmpdump', '--verbose', '-r', url,
            '-o', tmpfilename]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if app is not None:
            basic_args += ['--app', app]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            basic_args += ['--tcUrl', tc_url]
        if test:
            basic_args += ['--stop', '1']
        if flash_version is not None:
            basic_args += ['--flashVer', flash_version]
        if live:
            basic_args += ['--live']
        if isinstance(conn, list):
            for entry in conn:
                basic_args += ['--conn', entry]
        elif isinstance(conn, compat_str):
            basic_args += ['--conn', conn]
        if protocol is not None:
            basic_args += ['--protocol', protocol]
        if real_time:
            basic_args += ['--realtime']

        args = basic_args
        if not no_resume and continue_dl and not live:
            args += ['--resume']
        if not live and continue_dl:
            args += ['--skip', '1']

        args = [encodeArgument(a) for a in args]

        self._debug_cmd(args, exe='rtmpdump')

        RD_SUCCESS = 0
        RD_FAILED = 1
        RD_INCOMPLETE = 2
        RD_NO_CONNECT = 3

        retval = run_rtmpdump(args)

        if retval == RD_NO_CONNECT:
            self.report_error('[rtmpdump] Could not connect to RTMP server.')
            return False

        while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] %s bytes' % prevsize)
            time.sleep(5.0)  # This seems to be needed
            args = basic_args + ['--resume']
            if retval == RD_FAILED:
                args += ['--skip', '1']
            args = [encodeArgument(a) for a in args]
            retval = run_rtmpdump(args)
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == RD_FAILED:
                break
            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024:
                self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = RD_SUCCESS
                break
        if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr('\n')
            self.report_error('rtmpdump exited with code %d' % retval)
            return False
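
Both re.search calls in run_rtmpdump parse rtmpdump's stderr progress output: the first pattern captures downloaded kilobytes and a percentage, the second (used for live streams) drops the percentage. A standalone sketch against a sample progress line (the line itself is made up):

import re

line = '3549.665 kB / 32.13 sec (8.2%)'

mobj = re.search(
    r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)',
    line)
if mobj:
    downloaded_data_len = int(float(mobj.group(1)) * 1024)
    percent = float(mobj.group(2))
    print(downloaded_data_len, percent)   # 3634856 8.2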

Example 59

Project: gitian-builder Source File: gitian_updater.py
def run():
    full_prog = sys.argv[0]

    prog = os.path.basename(full_prog)

    parser = argparse.ArgumentParser(description='Download and verify a gitian package')
    parser.add_argument('-u', '--url', metavar='URL', type=str, nargs='+', required=False,
                       help='one or more URLs where the package can be found')
    parser.add_argument('-c', '--config', metavar='CONF', type=str, required=not have_injected_config,
                       help='a configuration file')
    parser.add_argument('-d', '--dest', metavar='DEST', type=str, required=False,
                       help='the destination directory for unpacking')
    parser.add_argument('-q', '--quiet', action='append_const', const=1, default=[], help='be quiet')
    parser.add_argument('-f', '--force', action='store_true', help='force downgrades and such')
    parser.add_argument('-n', '--dryrun', action='store_true', help='do not actually copy to destination')
    parser.add_argument('-m', '--customize', metavar='OUTPUT', type=str, help='generate a customized version of the script with the given config')
    parser.add_argument('-w', '--wait', type=float, metavar='HOURS', help='observe a waiting period or use zero for no waiting')
    parser.add_argument('-g', '--gpg', metavar='GPG', type=str, help='path to GnuPG')
    parser.add_argument('-p', '--post', metavar='COMMAND', type=str, help='Run after a successful install')

    args = parser.parse_args()

    quiet = len(args.quiet)

    if args.config:
        f = file(args.config, 'r')
        if args.customize:
            s = file(full_prog, 'r')
            script = s.read()
            s.close()
            config = f.read()
            script = script.replace(inject_config_string, config)
            s = file(args.customize, 'w')
            s.write(script)
            s.close()
            os.chmod(args.customize, 0750)
            sys.exit(0)

        config = yaml.safe_load(f)
        f.close()
    else:
        config = yaml.safe_load(injected_config)

    dest_path = args.dest

    if not dest_path:
        parser.error('argument -d/--dest is required unless -m is specified')

    if args.wait is not None:
        config['waiting_period'] = args.wait


    gpg_path = args.gpg

    if not gpg_path:
        gpg_path = 'gpg'

    rsses = []

    if args.url:
        urls = [{ 'url' : url, 'version_url' : None} for url in args.url]
    else:
        urls = config.get('urls')
        if not urls:
            parser.error('argument -u/--url is required since config does not specify it')
        if config.has_key('rss'):
            rsses = config['rss']

    # TODO: rss, atom, etc.

    old_manifest = None

    if path.exists(dest_path):
        files = os.listdir(dest_path)
        if path.dirname(full_prog) == dest_path:
            files.remove(prog)

        if not files.count('.gitian-manifest') and len(files) > 0:
            print>>sys.stderr, "destination already exists, no .gitian-manifest and directory not empty. Please empty destination."
            sys.exit(1)
        f = file(os.path.join(dest_path,'.gitian-manifest'), 'r')
        old_manifest = yaml.load(f, OrderedDictYAMLLoader)
        f.close()

    if config.get('waiting_period', 0) > 0:
        waiting_file = path.join(dest_path, '.gitian-waiting')
        if path.exists(waiting_file):
            f = file(waiting_file, 'r')
            waiting = yaml.load(f)
            f.close()
            wait_start = waiting['time']
            out_manifest = waiting['out_manifest']
            waiting_path = waiting['waiting_path']
            wait_time = wait_start + config['waiting_period'] * 3600 - time.time()
            if wait_time > 0:
                print>>sys.stderr, "Waiting another %.2f hours before applying update in %s"%(wait_time / 3600, waiting_path)
                sys.exit(100)
            os.remove(waiting_file)
            if args.dryrun:
                print>>sys.stderr, "Dry run, not copying"
            else:
                copy_to_destination(path.join(waiting_path, 'unpack'), dest_path, out_manifest, old_manifest)
                if args.post:
                    os.system(args.post)
                if quiet == 0:
                    print>>sys.stderr, "Copied from waiting area to destination"
            shutil.rmtree(waiting_path)
            sys.exit(0)

    temp_dir = tempfile.mkdtemp('', prog)

    atexit.register(remove_temp, temp_dir)

    package_file = path.join(temp_dir, 'package')

    downloaded = False
    checked = False

    if rsses:
        import libxml2
        for rss in rsses:
            try:
                feed = libxml2.parseDoc(urllib2.urlopen(rss['url']).read())
                url = None
                release = None

                # Find the first matching node
                for node in feed.xpathEval(rss['xpath']):
                    m = re.search(rss['pattern'], str(node))
                    if m:
                        if len(m.groups()) > 0:
                            release = m.group(1)
                        url = str(node)
                        break

                # Make sure it's a new release
                if old_manifest and release == old_manifest['release'] and not args.force:
                    checked = True
                else:
                    try:
                        download(url, package_file)
                        downloaded = True
                        break
                    except:
                        print>>sys.stderr, "could not download from %s, trying next rss"%(url)
                        pass
            except:
                print>>sys.stderr, "could read not from rss %s"%(rss)
                pass

    if not downloaded:
        for url in urls:
            try:
                release = None
                if url['version_url']:
                    f = urllib2.urlopen(url['version_url'])
                    release = f.read(100).strip()
                    f.close()
                if old_manifest and release == old_manifest['release'] and not args.force:
                    checked = True
                else:
                    download(url['url'], package_file)
                    downloaded = True
            except:
                print>>sys.stderr, "could not download from %s, trying next url"%(url)
                raise

    if not downloaded:
        if checked:
            if quiet == 0:
                print>>sys.stderr, "same release, not downloading"
        else:
            print>>sys.stderr, "out of places to try downloading from, try later"
        sys.exit(2)

    unpack_dir = path.join(temp_dir, 'unpack')
    files = extract(unpack_dir, package_file)

    import_keys(gpg_path, temp_dir, config)

    (success, assertions, out_manifest) = get_assertions(gpg_path, temp_dir, unpack_dir, files)

    if old_manifest:
        check_name_and_version(out_manifest, old_manifest)

    if not success and quiet <= 1:
        print>>sys.stderr, "There were errors getting assertions"

    total_weight = check_assertions(config, assertions)
    if total_weight is None:
        print>>sys.stderr, "There were errors checking assertions, build is untrusted, aborting"
        sys.exit(5)

    if quiet == 0:
        print>>sys.stderr, "Successful with signature weight %d"%(total_weight)

    if config.get('waiting_period', 0) > 0 and path.exists(dest_path):
        waiting_path = tempfile.mkdtemp('', prog)
        shutil.copytree(unpack_dir, path.join(waiting_path, 'unpack'))
        f = file(path.join(dest_path, '.gitian-waiting'), 'w')
        yaml.dump({'time': time.time(), 'out_manifest': out_manifest, 'waiting_path': waiting_path}, f)
        f.close()
        if quiet == 0:
            print>>sys.stderr, "Started waiting period"
    else:
        if args.dryrun:
            print>>sys.stderr, "Dry run, not copying"
        else:
            copy_to_destination(unpack_dir, dest_path, out_manifest, old_manifest)


    if args.post:
        os.system(args.post)
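
Here the pattern passed to re.search comes from the user's configuration (rss['pattern']); the updater treats the first capture group, if any, as the release identifier and the node text itself as the download URL. A sketch with hypothetical stand-ins for rss['pattern'] and str(node):

import re

# Hypothetical stand-ins for rss['pattern'] and str(node).
pattern = r'myproject-([0-9.]+)\.tar\.gz'
node_text = 'https://example.com/releases/myproject-1.4.2.tar.gz'

m = re.search(pattern, node_text)
if m:
    release = m.group(1) if m.groups() else None
    url = node_text
    print(release, url)   # 1.4.2 https://example.com/releases/myproject-1.4.2.tar.gz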

Example 60

Project: ev3dev-lang-python Source File: helper.py
Function: do_get
    def do_GET(self):
        """
        Returns True if the requested URL is supported
        """

        if RobotWebHandler.do_GET(self):
            return True

        global motor_max_speed
        global medium_motor_max_speed
        global max_move_xy_seq
        global joystick_engaged

        if medium_motor_max_speed is None:
            motor_max_speed = self.robot.left_motor.max_speed

            if hasattr(self.robot, 'medium_motor'):
                medium_motor_max_speed = self.robot.medium_motor.max_speed
            else:
                medium_motor_max_speed = 0

        '''
        Sometimes we get AJAX requests out of order like this:
        2016-09-06 02:29:35,846 DEBUG: seq 65: (x, y): 0, 44 -> speed 462 462
        2016-09-06 02:29:35,910 DEBUG: seq 66: (x, y): 0, 45 -> speed 473 473
        2016-09-06 02:29:35,979 DEBUG: seq 67: (x, y): 0, 46 -> speed 483 483
        2016-09-06 02:29:36,033 DEBUG: seq 69: (x, y): -1, 48 -> speed 491 504
        2016-09-06 02:29:36,086 DEBUG: seq 68: (x, y): -1, 47 -> speed 480 494
        2016-09-06 02:29:36,137 DEBUG: seq 70: (x, y): -1, 49 -> speed 501 515
        2016-09-06 02:29:36,192 DEBUG: seq 73: (x, y): -2, 51 -> speed 509 536
        2016-09-06 02:29:36,564 DEBUG: seq 74: (x, y): -3, 51 -> speed 496 536
        2016-09-06 02:29:36,649  INFO: seq 75: CLIENT LOG: touchend
        2016-09-06 02:29:36,701 DEBUG: seq 71: (x, y): -1, 50 -> speed 512 525
        2016-09-06 02:29:36,760 DEBUG: seq 76: move stop
        2016-09-06 02:29:36,814 DEBUG: seq 72: (x, y): -1, 51 -> speed 522 536

        This can be bad because the last command sent sequentially was #76 ("move stop"),
        but we received seq #72 after that, so we started moving again and never stopped.

        A quick fix is to have the client send us an AJAX request to let us know
        when the joystick has been engaged so that we can ignore any move-xy events
        that we get out of order and show up after "move stop" but before the
        next "joystick-engaged"

        We can also ignore any move-xy requests that show up late by tracking the
        max seq for any move-xy we service.
        '''
        # dwalton - fix this

        path = self.path.split('/')
        seq = int(path[1])
        action = path[2]

        # desktop interface
        if action == 'move-start':
            direction = path[3]
            speed_percentage = path[4]
            log.debug("seq %d: move %s" % (seq, direction))

            left_speed = int(int(speed_percentage) * motor_max_speed)/100.0
            right_speed = int(int(speed_percentage) * motor_max_speed)/100.0

            if direction == 'forward':
                self.robot.left_motor.run_forever(speed_sp=left_speed)
                self.robot.right_motor.run_forever(speed_sp=right_speed)

            elif direction == 'backward':
                self.robot.left_motor.run_forever(speed_sp=left_speed * -1)
                self.robot.right_motor.run_forever(speed_sp=right_speed * -1)

            elif direction == 'left':
                self.robot.left_motor.run_forever(speed_sp=left_speed * -1)
                self.robot.right_motor.run_forever(speed_sp=right_speed)

            elif direction == 'right':
                self.robot.left_motor.run_forever(speed_sp=left_speed)
                self.robot.right_motor.run_forever(speed_sp=right_speed * -1)

        # desktop & mobile interface
        elif action == 'move-stop':
            log.debug("seq %d: move stop" % seq)
            self.robot.left_motor.stop()
            self.robot.right_motor.stop()
            joystick_engaged = False

        # medium motor
        elif action == 'motor-stop':
            motor = path[3]
            log.debug("seq %d: motor-stop %s" % (seq, motor))

            if motor == 'medium':
                if hasattr(self.robot, 'medium_motor'):
                    self.robot.medium_motor.stop()
            else:
                raise Exception("motor %s not supported yet" % motor)

        elif action == 'motor-start':
            motor = path[3]
            direction = path[4]
            speed_percentage = path[5]
            log.debug("seq %d: start motor %s, direction %s, speed_percentage %s" % (seq, motor, direction, speed_percentage))

            if motor == 'medium':
                if hasattr(self.robot, 'medium_motor'):
                    if direction == 'clockwise':
                        medium_speed = int(int(speed_percentage) * medium_motor_max_speed)/100.0
                        self.robot.medium_motor.run_forever(speed_sp=medium_speed)

                    elif direction == 'counter-clockwise':
                        medium_speed = int(int(speed_percentage) * medium_motor_max_speed)/100.0
                        self.robot.medium_motor.run_forever(speed_sp=medium_speed * -1)
                else:
                    log.info("we do not have a medium_motor")
            else:
                raise Exception("motor %s not supported yet" % motor)

        # mobile interface
        elif action == 'move-xy':
            x = int(path[3])
            y = int(path[4])

            if joystick_engaged:
                if seq > max_move_xy_seq:
                    (left_speed, right_speed) = xy_to_speed(x, y, motor_max_speed)
                    log.debug("seq %d: (x, y) %4d, %4d -> speed %d %d" % (seq, x, y, left_speed, right_speed))
                    max_move_xy_seq = seq

                    if left_speed == 0:
                        self.robot.left_motor.stop()
                    else:
                        self.robot.left_motor.run_forever(speed_sp=left_speed)

                    if right_speed == 0:
                        self.robot.right_motor.stop()
                    else:
                        self.robot.right_motor.run_forever(speed_sp=right_speed)
                else:
                    log.debug("seq %d: (x, y) %4d, %4d (ignore, max seq %d)" %
                              (seq, x, y, max_move_xy_seq))
            else:
                log.debug("seq %d: (x, y) %4d, %4d (ignore, joystick idle)" %
                          (seq, x, y))

        elif action == 'joystick-engaged':
            joystick_engaged = True

        elif action == 'log':
            msg = ''.join(path[3:])
            re_msg = re.search(r'^(.*)\?', msg)

            if re_msg:
                msg = re_msg.group(1)

            log.debug("seq %d: CLIENT LOG: %s" % (seq, msg))

        else:
            log.warning("Unsupported URL %s" % self.path)

        # It is good practice to send a response, but move-xy events arrive
        # in bursts and we need to be as fast as possible, so be bad and skip
        # the reply for them (sending one takes ~20ms).
        if action != 'move-xy':
            self.send_response(204)

        return True
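
A note on the re.search call in the 'log' branch above: it keeps only the text before the trailing '?' in the client-supplied message before logging it. A minimal standalone sketch of that idea (the helper name and sample messages are ours, for illustration):

import re

def strip_query_suffix(msg):
    # Mirror the pattern above: keep only the text before the trailing '?'.
    m = re.search(r'^(.*)\?', msg)
    return m.group(1) if m else msg

print(strip_query_suffix('battery low?'))      # -> battery low
print(strip_query_suffix('no suffix here'))    # -> no suffix here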

Example 61

Project: cgat Source File: CSV2DB.py
def run(infile, options, report_step=10000):

    options.tablename = quoteTableName(
        options.tablename, backend=options.backend)

    if options.map:
        m = {}
        for x in options.map:
            f, t = x.split(":")
            m[f] = t
        options.map = m
    else:
        options.map = {}

    existing_tables = set()

    quick_import_separator = "\t"

    if options.database_backend == "postgres":
        import psycopg2
        raise NotImplementedError("needs refactoring for commandline options")
        dbhandle = psycopg2.connect(options.psql_connection)
        error = psycopg2.Error
        options.null = "NULL"
        options.string_value = "'%s'"
        options.text = "TEXT"
        options.index = "TEXT"
        if options.insert_quick:
            raise ValueError("quick import not implemented.")

    elif options.database_backend == "mysql":
        import MySQLdb
        dbhandle = MySQLdb.connect(host=options.database_host,
                                   user=options.database_username,
                                   passwd=options.database_password,
                                   port=options.database_port,
                                   db=options.database_name)
        error = Exception
        options.null = "NULL"
        options.string_value = "%s"
        options.text = "TEXT"
        options.index = "VARCHAR(40)"
        if options.insert_quick:
            raise ValueError("quick import not implemented.")

    elif options.backend == "sqlite":
        import sqlite3
        dbhandle = sqlite3.connect(options.database_name)
        try:
            os.chmod(options.database_name, 0o664)
        except OSError as msg:
            E.warn("could not change permissions of database: %s" % msg)

        # Avoid the following error:
        # sqlite3.ProgrammingError: You must not use 8-bit bytestrings
        # unless you use a text_factory that can interpret 8-bit
        # bytestrings (like text_factory = str). It is highly
        # recommended that you instead just switch your application
        # to Unicode strings
        # Note: might be better to make csv2db unicode aware.
        dbhandle.text_factory = str

        error = sqlite3.OperationalError
        options.insert_many = True  # False
        options.null = None  # "NULL"
        options.text = "TEXT"
        options.index = "TEXT"
        options.string_value = "%s"  # "'%s'"

        statement = "SELECT name FROM sqlite_master WHERE type='table'"
        cc = executewait(dbhandle, statement, error, options.retry)
        existing_tables = set([x[0] for x in cc])
        cc.close()

        # use , as separator
        quick_import_statement = \
            "sqlite3 %s '.import %%s %s'" % \
            (options.database_name, options.tablename)

        quick_import_separator = "|"

    if options.header is not None:
        options.header = [x.strip() for x in options.header.split(",")]

    if options.utf:
        reader = CSV.UnicodeDictReader(infile,
                                       dialect=options.dialect,
                                       fieldnames=options.header)
    else:
        reader = csv.DictReader(CSV.CommentStripper(infile),
                                dialect=options.dialect,
                                fieldnames=options.header)

    if options.replace_header:
        try:
            next(reader)
        except StopIteration:
            pass

    E.info("reading %i columns to guess column types" % options.guess_size)

    rows = []
    for row in reader:
        if None in row:
            raise ValueError(
                "undefined columns in input file at row: %s" % row)

        try:
            rows.append(IOTools.convertDictionary(row, map=options.map))
        except TypeError as msg:
            E.warn(
                "incomplete line? Type error in conversion: "
                "'%s' with data: %s" % (msg, str(row)))
        except ValueError as msg:
            E.warn(
                "incomplete line? Type error in conversion: "
                "'%s' with data: %s" % (msg, str(row)))

        if len(rows) >= options.guess_size:
            break

    E.info("read %i rows for type guessing" % len(rows))
    E.info("creating table")

    if len(rows) == 0:
        if options.allow_empty:
            if not reader.fieldnames:
                E.warn("no data - no table created")
            else:
                # create empty table and exit
                take, map_column2type, ignored = createTable(
                    dbhandle,
                    error,
                    options.tablename,
                    options,
                    retry=options.retry,
                    headers=reader.fieldnames,
                    ignore_empty=options.ignore_empty,
                    ignore_columns=options.ignore_columns,
                    rename_columns=options.rename_columns,
                    lowercase=options.lowercase,
                    ignore_duplicates=options.ignore_duplicates,
                    indices=options.indices,
                    first_column=options.first_column,
                    existing_tables=existing_tables,
                    append=options.append)
                E.info("empty table created")
            return
        else:
            raise ValueError("empty table")
    else:
        take, map_column2type, ignored = createTable(
            dbhandle,
            error,
            options.tablename,
            options,
            rows=rows,
            retry=options.retry,
            headers=reader.fieldnames,
            ignore_empty=options.ignore_empty,
            ignore_columns=options.ignore_columns,
            rename_columns=options.rename_columns,
            lowercase=options.lowercase,
            ignore_duplicates=options.ignore_duplicates,
            indices=options.indices,
            first_column=options.first_column,
            existing_tables=existing_tables,
            append=options.append)

    def row_iter(rows, reader):
        for row in rows:
            yield quoteRow(row, take, map_column2type,
                           options.missing_values,
                           null=options.null,
                           string_value=options.string_value)
        for data in reader:
            yield quoteRow(IOTools.convertDictionary(data, map=options.map),
                           take,
                           map_column2type,
                           options.missing_values,
                           null=options.null,
                           string_value=options.string_value)

    ninput = 0

    E.info("inserting data")

    if options.insert_quick:
        E.info("using quick insert")

        outfile, filename = tempfile.mkstemp()

        E.debug("dumping data into %s" % filename)

        for d in row_iter(rows, reader):

            ninput += 1
            os.write(outfile, quick_import_separator.join(
                [str(d[x]) for x in take]) + "\n")

            if ninput % report_step == 0:
                E.info("iteration %i\n" % ninput)

        os.close(outfile)

        statement = quick_import_statement % filename
        E.debug(statement)

        # infinite loop possible
        while 1:

            retcode = E.run(statement, cwd=os.getcwd(), close_fds=True)

            if retcode != 0:
                E.warn("import error using statement: %s" % statement)

                if not options.retry:
                    raise ValueError(
                        "import error using statement: %s" % statement)

                time.sleep(5)
                continue

            break

        os.remove(filename)

        # The quick import cannot insert NULL values directly. The only
        # solution is to update all columns afterwards.
        for column in take:
            executewait(dbhandle,
                        "UPDATE %s SET %s = NULL WHERE %s = 'None'" % (
                            options.tablename, column, column),
                        error,
                        options.retry)

    elif options.insert_many:
        data = []
        for d in row_iter(rows, reader):
            ninput += 1

            data.append([d[x] for x in take])

            if ninput % report_step == 0:
                E.info("iteration %i" % ninput)

        statement = "INSERT INTO %s VALUES (%s)" % (
            options.tablename, ",".join("?" * len(take)))

        E.info("inserting %i rows" % len(data))
        E.debug("multiple insert:\n# %s" % statement)

        while 1:
            try:
                dbhandle.executemany(statement, data)
            except error as msg:
                E.warn("import failed: msg=%s, statement=\n  %s" %
                       (msg, statement))
                # TODO: check for database locked msg
                if not options.retry:
                    raise error(msg)
                if not re.search("locked", str(msg)):
                    raise error(msg)
                time.sleep(5)
                continue
            break

    else:
        # insert line by line (could not figure out how to do bulk loading with
        # subprocess and COPY FROM STDIN)
        statement = "INSERT INTO %s VALUES (%%(%s)s)" % (options.tablename,
                                                         ')s, %('.join(take))
        # output data used for guessing:
        for d in row_iter(rows, reader):

            ninput += 1
            E.debug("single insert:\n# %s" % (statement % d))
            cc = executewait(dbhandle, statement, error,
                             retry=options.retry,
                             args=d)
            cc.close()

            if ninput % report_step == 0:
                E.info("iteration %i" % ninput)

    E.info("building indices")
    nindex = 0
    for index in options.indices:

        nindex += 1
        try:
            statement = "CREATE INDEX %s_index%i ON %s (%s)" % (
                options.tablename, nindex, options.tablename, index)
            cc = executewait(dbhandle, statement, error, options.retry)
            cc.close()
            E.info("added index on column %s" % (index))
        except error as msg:
            E.info("adding index on column %s failed: %s" % (index, msg))

    statement = "SELECT COUNT(*) FROM %s" % (options.tablename)
    cc = executewait(dbhandle, statement, error, options.retry)
    result = cc.fetchone()
    cc.close()

    noutput = result[0]

    E.info("ninput=%i, noutput=%i, nskipped_columns=%i" %
           (ninput, noutput, len(ignored)))

    dbhandle.commit()
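
The retry logic above uses re.search only to decide whether an insert failure is the transient SQLite "database is locked" error; any other error is re-raised immediately. A minimal sketch of that pattern in isolation (the do_insert callable and helper name are ours, for illustration):

import re
import sqlite3
import time

def insert_with_retry(do_insert, retries=5, wait=5):
    # Retry only when the error message mentions "locked"; re-raise otherwise.
    for _ in range(retries):
        try:
            return do_insert()
        except sqlite3.OperationalError as msg:
            if not re.search("locked", str(msg)):
                raise
            time.sleep(wait)
    raise sqlite3.OperationalError("database still locked after %d retries" % retries)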

Example 62

Project: wikiteam Source File: wikiteam.py
def getParameters(params=[]):
    """ Import parameters into variable """
    
    if not params:
        params = sys.argv
    
    config = {}
    parser = argparse.ArgumentParser(description='Tools for downloading and preserving wikis.')

    # General params
    parser.add_argument(
        '-v', '--version', action='version', version=getVersion())
    parser.add_argument(
        '--cookies', metavar="cookies.txt", help="Path to a cookies.txt file.")
    parser.add_argument(
        '--delay',
        metavar=5,
        default=0,
        type=float,
        help="Adds a delay (in seconds).")
    parser.add_argument(
        '--retries',
        metavar=5,
        default=5,
        help="Maximum number of retries.")
    parser.add_argument('--path', help='Path to store wiki dump at.')
    parser.add_argument(
        '--resume',
        action='store_true',
        help='Resumes previous incomplete dump (requires --path).')
    parser.add_argument('--force', action='store_true', help='')
    parser.add_argument(
        '--user', help='Username if authentication is required.')
    parser.add_argument(
        '--pass',
        dest='password',
        help='Password if authentication is required.')

    # URL params
    # This script should work with any general URL, finding out
    # API, index.php or whatever by itself when necessary
    groupWiki = parser.add_argument_group()
    groupWiki.add_argument(
        'wiki',
        default='',
        nargs='?',
        help="URL to wiki (e.g. http://wiki.domain.org).")
    # URL params for MediaWiki
    groupWiki.add_argument(
        '--mwapi',
        help="URL to MediaWiki API (e.g. http://wiki.domain.org/w/api.php).")
    groupWiki.add_argument(
        '--mwindex',
        help="URL to MediaWiki index.php (e.g. http://wiki.domain.org/w/index.php).")

    # Download params
    groupDownload = parser.add_argument_group(
        'Data to download',
        'What info to download from the wiki')
    groupDownload.add_argument(
        '--pages',
        action='store_true',
        help="Generates a dump of pages (--pages --curonly for current revisions only).")
    groupDownload.add_argument('--curonly', action='store_true',
                               help='Store only the current version of pages.')
    groupDownload.add_argument(
        '--images', action='store_true', help="Generates an image dump.")
    groupDownload.add_argument(
        '--namespaces',
        metavar="1,2,3",
        help='Comma-separated value of namespaces to include (all by default).')
    groupDownload.add_argument(
        '--exnamespaces',
        metavar="1,2,3",
        help='Comma-separated value of namespaces to exclude.')

    # Meta info params
    groupMeta = parser.add_argument_group(
        'Meta info',
        'What meta info to retrieve from the wiki')
    groupMeta.add_argument(
        '--get-api',
        action='store_true',
        help="Returns wiki API when available.")
    groupMeta.add_argument(
        '--get-index',
        action='store_true',
        help="Returns wiki Index.php when available.")
    groupMeta.add_argument(
        '--get-page-titles',
        action='store_true',
        help="Returns wiki page titles.")
    groupMeta.add_argument(
        '--get-image-names',
        action='store_true',
        help="Returns wiki image names.")
    groupMeta.add_argument(
        '--get-namespaces',
        action='store_true',
        help="Returns wiki namespaces.")
    groupMeta.add_argument(
        '--get-wiki-engine',
        action='store_true',
        help="Returns wiki engine.")

    args = parser.parse_args()
    #sys.stderr.write(args)
    
    # Not wiki? Exit
    if not args.wiki:
        sys.stderr.write('ERROR: Provide a URL to a wiki\n')
        parser.print_help()
        sys.exit(1)
    
    # Don't mix download params and meta info params
    if (args.pages or args.images) and \
       (args.get_api or args.get_index or args.get_page_titles or args.get_image_names or args.get_namespaces or args.get_wiki_engine):
        sys.stderr.write('ERROR: Don\'t mix download params and meta info params\n')
        parser.print_help()
        sys.exit(1)

    # No download params and no meta info params? Exit
    if (not args.pages and not args.images) and \
       (not args.get_api and not args.get_index and not args.get_page_titles and not args.get_image_names and not args.get_namespaces and not args.get_wiki_engine):
        sys.stderr.write('ERROR: Use at least one download param or meta info param\n')
        parser.print_help()
        sys.exit(1)

    # Load cookies
    cj = cookielib.MozillaCookieJar()
    if args.cookies:
        cj.load(args.cookies)
        sys.stderr.write('Using cookies from %s\n' % args.cookies)

    # check user and pass (one requires both)
    if (args.user and not args.password) or (args.password and not args.user):
        sys.stderr.write('ERROR: Both --user and --pass are required for authentication.\n')
        parser.print_help()
        sys.exit(1)
    
    session = None
    if args.user and args.password:
        import requests
        session = requests.Session()
        session.cookies = cj
        session.headers.update({'User-Agent': getUserAgent()})
        session.auth = (args.user, args.password)
        #session.mount(args.mw_api.split('/api.php')[0], HTTPAdapter(max_retries=max_ret)) Mediawiki-centric, be careful

    # check URLs
    for url in [args.mwapi, args.mwindex, args.wiki]:
        if url and (not url.startswith('http://') and not url.startswith('https://')):
            sys.stderr.write(url)
            sys.stderr.write('ERROR: URLs must start with http:// or https://\n')
            parser.print_help()
            sys.exit(1)
    
    # Meta info params
    metainfo = '' # only one allowed, so we don't mix output
    if args.get_api:
        metainfo = 'get_api'
    elif args.get_index:
        metainfo = 'get_index'
    elif args.get_page_titles:
        metainfo = 'get_page_titles'
    elif args.get_image_names:
        metainfo = 'get_image_names'
    elif args.get_namespaces:
        metainfo = 'get_namespaces'
    elif args.get_wiki_engine:
        metainfo = 'get_wiki_engine'

    namespaces = ['all']
    exnamespaces = []
    # Process namespace inclusions
    if args.namespaces:
        # FIXME: why allow '-'? And does '--namespaces= all' (with a space) work?
        if re.search(
                r'[^\d, \-]',
                args.namespaces) and args.namespaces.lower() != 'all':
            sys.stderr.write("Invalid namespace values.\nValid format is integer(s) separated by commas\n")
            sys.exit()
        else:
            ns = re.sub(' ', '', args.namespaces)
            if ns.lower() == 'all':
                namespaces = ['all']
            else:
                namespaces = [int(i) for i in ns.split(',')]

    # Process namespace exclusions
    if args.exnamespaces:
        if re.search(r'[^\d, \-]', args.exnamespaces):
            sys.stderr.write("Invalid namespace values.\nValid format is integer(s) separated by commas\n")
            sys.exit(1)
        else:
            ns = re.sub(' ', '', args.exnamespaces)
            if ns.lower() == 'all':
                sys.stderr.write('You cannot exclude all namespaces.\n')
                sys.exit(1)
            else:
                exnamespaces = [int(i) for i in ns.split(',')]

    # --curonly requires --pages
    if args.curonly and not args.pages:
        sys.stderr.write("--curonly requires --pages\n")
        parser.print_help()
        sys.exit(1)
    
    config = {
        'cookies': args.cookies or '', 
        'curonly': args.curonly, 
        'date': datetime.datetime.now().strftime('%Y%m%d'), 
        'delay': args.delay, 
        'exnamespaces': exnamespaces, 
        'images': args.images, 
        'logs': False, 
        'metainfo': metainfo, 
        'namespaces': namespaces, 
        'pages': args.pages, 
        'path': args.path and os.path.normpath(args.path) or '', 
        'retries': int(args.retries), 
        'wiki': args.wiki, 
        'wikicanonical': '', 
        'wikiengine': getWikiEngine(args.wiki), 
        'other': {
            'configfilename': 'config.txt', 
            'filenamelimit': 100,  # do not change
            'force': args.force, 
            'resume': args.resume, 
            'session': session, 
        }
    }
    
    # Get special variables ready (API for MediaWiki, etc.)
    if config['wikiengine'] == 'mediawiki':
        import mediawiki
        config['mwexport'] = 'Special:Export'
        if not args.mwapi:
            config['mwapi'] = mediawiki.mwGetAPI(config=config)
            if not config['mwapi']:
                sys.stderr.write('ERROR: Provide a URL to API\n')
                sys.exit(1)
            else:
                data={
                    'action': 'query',
                    'meta': 'siteinfo',
                    'siprop': 'namespaces',
                    'format': 'json'}
                r = getURL(config['mwapi'], data=data)
                config['mwexport'] = getJSON(r)['query']['namespaces']['-1']['*'] \
                    + ':Export'
        if not args.mwindex:
            config['mwindex'] = mediawiki.mwGetIndex(config=config)
            if not config['mwindex']:
                sys.stderr.write('ERROR: Provide a URL to Index.php\n')
                sys.exit(1)
    elif config['wikiengine'] == 'wikispaces':
        import wikispaces
        # use wikicanonical for base url for Wikispaces?
    
    # calculating path, if not defined by user with --path=
    if not config['path']:
        config['path'] = './%s-%s-wikidump' % (domain2prefix(config=config), config['date'])

    return config
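
Both namespace checks above rely on the same character-class test: re.search(r'[^\d, \-]', value) flags any character that is not a digit, comma, space, or hyphen before the string is split into integers. A minimal standalone sketch of that validation (the function name is ours, for illustration):

import re

def parse_namespaces(value):
    # Accept 'all', or a comma-separated list of integers (spaces allowed).
    if value.lower() == 'all':
        return ['all']
    if re.search(r'[^\d, \-]', value):
        raise ValueError("Invalid namespace values: %s" % value)
    return [int(i) for i in re.sub(' ', '', value).split(',')]

print(parse_namespaces('0, 1,14'))   # -> [0, 1, 14]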

Example 63

Project: snowy Source File: startmigration.py
    def handle(self, app=None, name="", added_model_list=None, added_field_list=None, initial=False, freeze_list=None, auto=False, **options):
        
        # Any supposed lists that are None become empty lists
        added_model_list = added_model_list or []
        added_field_list = added_field_list or []
        
        # Make sure options are compatible
        if initial and (added_model_list or added_field_list or auto):
            print "You cannot use --initial and other options together"
            return
        if auto and (added_model_list or added_field_list or initial):
            print "You cannot use --auto and other options together"
            return
        
        # specify the default name 'initial' if a name wasn't specified and we're
        # doing a migration for an entire app
        if not name and initial:
            name = 'initial'
        
        # if not name, there's an error
        if not name:
            print "You must name this migration"
            return
        
        if not app:
            print "Please provide an app in which to create the migration."
            return
        
        # Make sure the app is short form
        app = app.split(".")[-1]
        
        # See if the app exists
        app_models_module = models.get_app(app)
        if not app_models_module:
            print "App '%s' doesn't seem to exist, isn't in INSTALLED_APPS, or has no models." % app
            return
        
        # If they've set SOUTH_AUTO_FREEZE_APP = True (or not set it - defaults to True)
        if not hasattr(settings, 'SOUTH_AUTO_FREEZE_APP') or settings.SOUTH_AUTO_FREEZE_APP:
            if freeze_list and app not in freeze_list:
                freeze_list += [app]
            else:
                freeze_list = [app]
        
        # Make the migrations directory if it's not there
        app_module_path = app_models_module.__name__.split('.')[0:-1]
        try:
            app_module = __import__('.'.join(app_module_path), {}, {}, [''])
        except ImportError:
            print "Couldn't find path to App '%s'." % app
            return
            
        migrations_dir = os.path.join(
            os.path.dirname(app_module.__file__),
            "migrations",
        )
        
        # Make sure there's a migrations directory and __init__.py
        if not os.path.isdir(migrations_dir):
            print "Creating migrations directory at '%s'..." % migrations_dir
            os.mkdir(migrations_dir)
        init_path = os.path.join(migrations_dir, "__init__.py")
        if not os.path.isfile(init_path):
            # Touch the init py file
            print "Creating __init__.py in '%s'..." % migrations_dir
            open(init_path, "w").close()
        
        # See what filename is next in line. We assume they use numbers.
        migrations = migration.get_migration_names(migration.get_app(app))
        highest_number = 0
        for migration_name in migrations:
            try:
                number = int(migration_name.split("_")[0])
                highest_number = max(highest_number, number)
            except ValueError:
                pass
        
        # Make the new filename
        new_filename = "%04i%s_%s.py" % (
            highest_number + 1,
            "".join([random.choice(string.letters.lower()) for i in range(0)]), # Possible random stuff insertion
            name,
        )
        
        # Find the source file encoding, using PEP 0263's method
        encoding = None
        first_two_lines = inspect.getsourcelines(app_models_module)[0][:2]
        for line in first_two_lines:
            if re.search(r"coding[:=]\s*([-\w.]+)", line):
                encoding = line
        
        # Initialise forwards, backwards and models to blank things
        forwards = ""
        backwards = ""
        frozen_models = {} # Frozen models, used by the Fake ORM
        stub_models = {} # Frozen models, but only enough for relation ends (old mock models)
        complete_apps = set() # Apps that are completely frozen - useable for diffing.
        
        # Sets of actions
        added_models = set()
        deleted_models = [] # Special: contains instances _not_ string keys
        added_fields = set()
        deleted_fields = [] # Similar to deleted_models
        changed_fields = [] # (mkey, fname, old_def, new_def)
        added_uniques = set() # (mkey, field_names)
        deleted_uniques = set() # (mkey, field_names)
        
        # --initial means 'add all models in this app'.
        if initial:
            for model in models.get_models(app_models_module):
                added_models.add("%s.%s" % (app, model._meta.object_name))
        
        # Added models might be 'model' or 'app.model'.
        for modelname in added_model_list:
            if "." in modelname:
                added_models.add(modelname)
            else:
                added_models.add("%s.%s" % (app, modelname))
        
        # Fields need translating from "model.field" to (app.model, field)
        for fielddef in added_field_list:
            try:
                modelname, fieldname = fielddef.split(".", 1)
            except ValueError:
                print "The field specification '%s' is not in modelname.fieldname format." % fielddef
            else:
                added_fields.add(("%s.%s" % (app, modelname), fieldname))
        
        # Add anything frozen (I almost called the dict Iceland...)
        if freeze_list:
            for item in freeze_list:
                if "." in item:
                    # It's a specific model
                    app_name, model_name = item.split(".", 1)
                    model = models.get_model(app_name, model_name)
                    if model is None:
                        print "Cannot find the model '%s' to freeze it." % item
                        return
                    frozen_models[model] = None
                else:
                    # Get everything in an app!
                    frozen_models.update(dict([(x, None) for x in models.get_models(models.get_app(item))]))
                    complete_apps.add(item.split(".")[-1])
            # For every model in the freeze list, add in dependency stubs
            for model in frozen_models:
                stub_models.update(model_dependencies(model))
        
        
        ### Automatic Detection ###
        if auto:
            # Get the last migration for this app
            last_models = None
            app_module = migration.get_app(app)
            if app_module is None:
                print "You cannot use automatic detection on the first migration of an app. Try --initial instead."
            else:
                migrations = list(migration.get_migration_classes(app_module))
                if not migrations:
                    print "You cannot use automatic detection on the first migration of an app. Try --initial instead."
                else:
                    if hasattr(migrations[-1], "complete_apps") and \
                       app in migrations[-1].complete_apps:
                        last_models = migrations[-1].models
                        last_orm = migrations[-1].orm
                    else:
                        print "You cannot use automatic detection, since the previous migration does not have this whole app frozen.\nEither make migrations using '--freeze %s' or set 'SOUTH_AUTO_FREEZE_APP = True' in your settings.py." % app
            
            # Right, did we manage to get the last set of models?
            if last_models is None:
                return
            
            # Good! Get new things.
            new = dict([
                (model_key(model), prep_for_freeze(model))
                for model in models.get_models(app_models_module)
            ])
            # And filter other apps out of the old
            old = dict([
                (key, fields)
                for key, fields in last_models.items()
                if key.split(".", 1)[0] == app
            ])
            am, dm, cm, af, df, cf = models_diff(old, new)
            
            # For models that were there before and after, do a meta diff
            was_meta_change = False
            for mkey in cm:
                au, du = meta_diff(old[mkey].get("Meta", {}), new[mkey].get("Meta", {}))
                for entry in au:
                    added_uniques.add((mkey, entry))
                    was_meta_change = True
                for entry in du:
                    deleted_uniques.add((mkey, entry))
                    was_meta_change = True
            
            if not (am or dm or af or df or cf or was_meta_change):
                print "Nothing seems to have changed."
                return
            
            # Add items to the todo lists
            added_models.update(am)
            added_fields.update(af)
            changed_fields.extend(cf)
            
            # Deleted models are from the past, and so we use instances instead.
            for mkey in dm:
                model = last_orm[mkey]
                fields = last_models[mkey]
                if "Meta" in fields:
                    del fields['Meta']
                deleted_models.append((model, fields, last_models))
            
            # For deleted fields, we tag the instance on the end too
            for mkey, fname in df:
                deleted_fields.append((
                    mkey,
                    fname,
                    last_orm[mkey]._meta.get_field_by_name(fname)[0],
                    last_models[mkey][fname],
                    last_models,
                ))
        
        
        ### Added model ###
        for mkey in added_models:
            
            print " + Added model '%s'" % (mkey,)
            
            model = model_unkey(mkey)
            
            # Add the model's dependencies to the stubs
            stub_models.update(model_dependencies(model))
            # Get the field definitions
            fields = modelsparser.get_model_fields(model)
            # Turn the (class, args, kwargs) format into a string
            fields = triples_to_defs(app, model, fields)
            # Make the code
            forwards += CREATE_TABLE_SNIPPET % (
                model._meta.object_name,
                model._meta.db_table,
                "\n            ".join(["('%s', %s)," % (fname, fdef) for fname, fdef in fields.items()]),
                model._meta.app_label,
                model._meta.object_name,
            )
            # And the backwards code
            backwards += DELETE_TABLE_SNIPPET % (
                model._meta.object_name, 
                model._meta.db_table
            )
            # Now add M2M fields to be done
            for field in model._meta.local_many_to_many:
                added_fields.add((mkey, field.attname))
            # And unique_togethers to be added
            for ut in model._meta.unique_together:
                added_uniques.add((mkey, tuple(ut)))
        
        
        ### Added fields ###
        for mkey, field_name in added_fields:
            
            print " + Added field '%s.%s'" % (mkey, field_name)
            
            # Get the model
            model = model_unkey(mkey)
            # Get the field
            try:
                field = model._meta.get_field(field_name)
            except FieldDoesNotExist:
                print "Model '%s' doesn't have a field '%s'" % (mkey, field_name)
                return
            
            # ManyToMany fields need special attention.
            if isinstance(field, models.ManyToManyField):
                if not field.rel.through: # Bug #120
                    # Add a stub model for each side
                    stub_models[model] = None
                    stub_models[field.rel.to] = None
                    # And a field defn, that's actually a table creation
                    forwards += CREATE_M2MFIELD_SNIPPET % (
                        model._meta.object_name,
                        field.name,
                        field.m2m_db_table(),
                        field.m2m_column_name()[:-3], # strip off the '_id' at the end
                        model._meta.object_name,
                        field.m2m_reverse_name()[:-3], # strip off the '_id' at the end
                        field.rel.to._meta.object_name
                        )
                    backwards += DELETE_M2MFIELD_SNIPPET % (
                        model._meta.object_name,
                        field.name,
                        field.m2m_db_table()
                    )
                continue
            
            # GenericRelations need ignoring
            if isinstance(field, GenericRelation):
                continue
            
            # Add any dependencies
            stub_models.update(field_dependencies(field))
            
            # Work out the definition
            triple = remove_useless_attributes(
                modelsparser.get_model_fields(model)[field_name])
            
            field_definition = make_field_constructor(app, field, triple)
            
            forwards += CREATE_FIELD_SNIPPET % (
                model._meta.object_name,
                field.name,
                model._meta.db_table,
                field.name,
                field_definition,
            )
            backwards += DELETE_FIELD_SNIPPET % (
                model._meta.object_name,
                field.name,
                model._meta.db_table,
                field.column,
            )
        
        
        ### Deleted fields ###
        for mkey, field_name, field, triple, last_models in deleted_fields:
            
            print " - Deleted field '%s.%s'" % (mkey, field_name)
            
            # Get the model
            model = model_unkey(mkey)
            
            # ManyToMany fields need special attention.
            if isinstance(field, models.ManyToManyField):
                # Add a stub model for each side, if they're not already there
                # (if we just added old versions, we might override new ones)
                if model not in stub_models:
                    stub_models[model] = last_models
                if field.rel.to not in last_models:
                    stub_models[field.rel.to] = last_models
                # And a field defn, that's actually a table deletion
                forwards += DELETE_M2MFIELD_SNIPPET % (
                    model._meta.object_name,
                    field.name,
                    field.m2m_db_table()
                )
                backwards += CREATE_M2MFIELD_SNIPPET % (
                    model._meta.object_name,
                    field.name,
                    field.m2m_db_table(),
                    field.m2m_column_name()[:-3], # strip off the '_id' at the end
                    model._meta.object_name,
                    field.m2m_reverse_name()[:-3], # strip off the '_id' at the end
                    field.rel.to._meta.object_name
                    )
                continue
            
            # Add any dependencies
            deps = field_dependencies(field, last_models)
            deps.update(stub_models)
            stub_models = deps
            
            # Work out the definition
            triple = remove_useless_attributes(triple)
            field_definition = make_field_constructor(app, field, triple)
            
            forwards += DELETE_FIELD_SNIPPET % (
                model._meta.object_name,
                field.name,
                model._meta.db_table,
                field.column,
            )
            backwards += CREATE_FIELD_SNIPPET % (
                model._meta.object_name,
                field.name,
                model._meta.db_table,
                field.name,
                field_definition,
            )
        
        
        ### Deleted model ###
        for model, fields, last_models in deleted_models:
            
            print " - Deleted model '%s.%s'" % (model._meta.app_label,model._meta.object_name)
            
            # Add the model's dependencies to the stubs
            deps = model_dependencies(model, last_models)
            deps.update(stub_models)
            stub_models = deps
            
            # Turn the (class, args, kwargs) format into a string
            fields = triples_to_defs(app, model, fields)
            
            # Make the code
            forwards += DELETE_TABLE_SNIPPET % (
                model._meta.object_name, 
                model._meta.db_table
            )
            # And the backwards code
            backwards += CREATE_TABLE_SNIPPET % (
                model._meta.object_name,
                model._meta.db_table,
                "\n            ".join(["('%s', %s)," % (fname, fdef) for fname, fdef in fields.items()]),
                model._meta.app_label,
                model._meta.object_name,
            )
        
        
        ### Changed fields ###
        for mkey, field_name, old_triple, new_triple in changed_fields:
            
            model = model_unkey(mkey)
            old_def = triples_to_defs(app, model, {
                field_name: old_triple,
            })[field_name]
            new_def = triples_to_defs(app, model, {
                field_name: new_triple,
            })[field_name]
            
            # We need to create the field, to see if it needs _id, or if it's an M2M
            field = model._meta.get_field_by_name(field_name)[0]
            
            if hasattr(field, "m2m_db_table"):
                # See if anything has ACTUALLY changed
                if old_triple[1] != new_triple[1]:
                    print " ! Detected change to the target model of M2M field '%s.%s'. South can't handle this; leaving this change out." % (mkey, field_name)
                continue
            
            print " ~ Changed field '%s.%s'." % (mkey, field_name)
            
            forwards += CHANGE_FIELD_SNIPPET % (
                model._meta.object_name,
                field_name,
                model._meta.db_table,
                field.get_attname(),
                new_def,
            )
            
            backwards += CHANGE_FIELD_SNIPPET % (
                model._meta.object_name,
                field_name,
                model._meta.db_table,
                field.get_attname(),
                old_def,
            )
        
        
        ### Added unique_togethers ###
        for mkey, ut in added_uniques:
            
            model = model_unkey(mkey)
            print " + Added unique_together for [%s] on %s." % (", ".join(ut), model._meta.object_name)
            
            cols = [get_field_column(model, f) for f in ut]
            
            forwards += CREATE_UNIQUE_SNIPPET % (
                ", ".join(ut),
                model._meta.object_name,
                model._meta.db_table,
                cols,
            )
            
            backwards += DELETE_UNIQUE_SNIPPET % (
                ", ".join(ut),
                model._meta.object_name,
                model._meta.db_table,
                cols,
            )
        
        
        ### Deleted unique_togethers ###
        for mkey, ut in deleted_uniques:
            
            model = model_unkey(mkey)
            print " - Deleted unique_together for [%s] on %s." % (", ".join(ut), model._meta.object_name)
            
            forwards += DELETE_UNIQUE_SNIPPET % (
                ", ".join(ut),
                model._meta.object_name,
                model._meta.db_table,
                ut,
            )
            
            backwards += CREATE_UNIQUE_SNIPPET % (
                ", ".join(ut),
                model._meta.object_name,
                model._meta.db_table,
                ut,
            )
        
        
        # Default values for forwards/backwards
        if (not forwards) and (not backwards):
            forwards = '"Write your forwards migration here"'
            backwards = '"Write your backwards migration here"'
        
        all_models = {}
        
        # Fill out frozen model definitions
        for model, last_models in frozen_models.items():
            all_models[model_key(model)] = prep_for_freeze(model, last_models)
        
        # Fill out stub model definitions
        for model, last_models in stub_models.items():
            key = model_key(model)
            if key in all_models:
                continue # We'd rather use full models than stubs.
            all_models[key] = prep_for_stub(model, last_models)
        
        # Do some model cleanup, and warnings
        for modelname, model in all_models.items():
            for fieldname, fielddef in model.items():
                # Remove empty-after-cleaning Metas.
                if fieldname == "Meta" and not fielddef:
                    del model['Meta']
                # Warn about undefined fields
                elif fielddef is None:
                    print "WARNING: Cannot get definition for '%s' on '%s'. Please edit the migration manually." % (
                        fieldname,
                        modelname,
                    )
                    model[fieldname] = FIELD_NEEDS_DEF_SNIPPET
        
        # Write the migration file
        fp = open(os.path.join(migrations_dir, new_filename), "w")
        fp.write(MIGRATION_SNIPPET % (
            encoding or "", '.'.join(app_module_path), 
            forwards, 
            backwards, 
            pprint_frozen_models(all_models),
            complete_apps and "complete_apps = [%s]" % (", ".join(map(repr, complete_apps))) or ""
        ))
        fp.close()
        print "Created %s." % new_filename
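
The encoding detection above is a direct use of PEP 263: re.search looks for a coding declaration in the first two source lines, and the matching line is later copied verbatim to the top of the generated migration. A minimal sketch of just the detection step (the helper name is ours, for illustration):

import re

def find_encoding_line(source_lines):
    # PEP 263: a coding declaration must appear in one of the first two lines.
    for line in source_lines[:2]:
        if re.search(r"coding[:=]\s*([-\w.]+)", line):
            return line
    return None

print(find_encoding_line(["# -*- coding: utf-8 -*-\n", "import os\n"]))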

Example 64

Project: fontlab-scripts Source File: convertToTTF.py
Function: process_fonts
def processFonts(fontsList):
    totalFonts = len(fontsList)

    print "%d fonts found:\n%s\n" % (totalFonts, '\n'.join(fontsList))

    setType1openPrefs()
    setTTgeneratePrefs()
    setTTautohintPrefs()

    fontIndex = 1
    for pfaPath in fontsList:

        # Make temporary encoding file from GOADB file. This step needs to
        # be done per font, because the directory tree selected may contain
        # more than one family, or because the glyph set of a given family
        # may not be the same for both Roman/Upright and Italic/Sloped.
        encPath = None
        goadbPath = None

        # The GOADB can be located in the same folder or up to two
        # levels above in the directory tree
        sameLevel = os.path.join(os.path.dirname(pfaPath), kGOADBfileName)
        oneUp = os.path.join(
            os.path.dirname(os.path.dirname(pfaPath)), kGOADBfileName)
        twoUp = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(pfaPath))), kGOADBfileName)

        if os.path.exists(sameLevel):
            goadbPath = sameLevel
        elif os.path.exists(oneUp):
            goadbPath = oneUp
        elif os.path.exists(twoUp):
            goadbPath = twoUp

        if goadbPath:
            encPath = makeTempEncFileFromGOADB(goadbPath)
        else:
            print "Could not find %s file." % kGOADBfileName
            print "Skipping %s" % pfaPath
            print

        if not encPath:
            continue

        # Checking if a derivedchars file exists.
        # If not, the dvInput step is skipped.
        makeDV = False

        for file in os.listdir(os.path.split(pfaPath)[0]):
            if re.search(r'derivedchars(.+?)?$', file) and dvModuleFound:
                makeDV = True

        fontIsTXT = False
        fontIsUFO = False

        if kFontTXT in pfaPath:
            fontIsTXT = True
            pfaPath = convertTXTfontToPFA(pfaPath)

        elif kFontUFO in pfaPath or (pfaPath[-4:].lower() in [".ufo"]):
            # Support more than just files named "font.ufo"
            fontIsUFO = True
            pfaPath = convertUFOfontToPFA(pfaPath)

        fl.Open(pfaPath)
        print "\nProcessing %s ... (%d/%d)" % (
            fl.font.font_name, fontIndex, totalFonts)
        fontIndex += 1

        fontZonesWereReplaced = replaceFontZonesByFamilyZones()
        baselineZonesWereRemoved = removeBottomZonesAboveBaseline()

        # NOTE: After making changes to the PostScript alignment zones, the TT
        # equivalents have to be updated as well, but I couldn't find a way
        # to do it via scripting (because TTH.top_zones and TTH.bottom_zones
        # are read-only, and despite that functionality being available in
        # the UI, there's no native function to update TT zones from T1 zones).
        # So the solution is to generate a new T1 font and open it back.
        pfaPathTemp = pfaPath.replace('.pfa', '_TEMP_.pfa')
        infPathTemp = pfaPathTemp.replace('.pfa', '.inf')
        if baselineZonesWereRemoved or fontZonesWereReplaced:
            fl.GenerateFont(eval("ftTYPE1ASCII"), pfaPathTemp)
            fl[fl.ifont].modified = 0
            fl.Close(fl.ifont)
            fl.Open(pfaPathTemp)
            if os.path.exists(infPathTemp):
                # Delete the .INF file (bug in FL v5.1.x)
                os.remove(infPathTemp)

        # Load encoding file
        fl.font.encoding.Load(encPath)

        # Make sure the Font window is in 'Names mode'
        fl.CallCommand(fl_cmd.FontModeNames)

        # Sort glyphs by encoding
        fl.CallCommand(fl_cmd.FontSortByCodepage)

        # read derivedchars file, make components
        if makeDV:
            dvInput_module.run(verbose=False)

        convertT1toTT()
        changeTTfontSettings()

        # Switch the Font window to 'Index mode'
        fl.CallCommand(fl_cmd.FontModeIndex)

        # path to the folder containing the font, and the font's file name
        folderPath, fontFileName = os.path.split(pfaPath)
        ppmsFilePath = os.path.join(folderPath, kPPMsFileName)
        if os.path.exists(ppmsFilePath):
            hPPMs, vPPMs = readPPMsFile(ppmsFilePath)
            replaceStemsAndPPMs(hPPMs, vPPMs)

        tthintsFilePath = os.path.join(folderPath, kTTHintsFileName)
        if os.path.exists(tthintsFilePath):
            inputTTHints.run(folderPath)
            # readTTHintsFile(tthintsFilePath)
            # replaceTTHints()

        # FontLab 5.1.5 Mac Build 5714 does NOT respect the unchecked
        # option "Automatically add .null, CR and space characters"
        for gName in ["NULL", "CR"]:
            gIndex = fl.font.FindGlyph(gName)
            if gIndex != -1:
                del fl.font.glyphs[gIndex]

        vfbPath = pfaPath.replace('.pfa', '.vfb')
        fl.Save(vfbPath)

        # The filename of the TT output is hardcoded
        ttfPath = os.path.join(folderPath, kFontTTF)
        fl.GenerateFont(eval("ftTRUETYPE"), ttfPath)

        fl[fl.ifont].modified = 0
        fl.Close(fl.ifont)

        # The TT font generated with FontLab ends up with a few glyph names
        # changed. Fix the glyph names so that makeOTF does not fail.
        postProccessTTF(ttfPath)

        # Delete temporary Encoding file:
        if os.path.exists(encPath):
            os.remove(encPath)

        # Delete temp PFA:
        if os.path.exists(pfaPathTemp):
            os.remove(pfaPathTemp)

        # Cleanup after processing from TXT type1 font or UFO font
        if fontIsTXT or fontIsUFO:
            if os.path.exists(pfaPath):
                os.remove(pfaPath)
            if os.path.exists(ttfPath):
                finalTTFpath = ttfPath.replace('_TEMP_.ttf', '.ttf')
                if finalTTFpath != ttfPath:
                    if PC:
                        os.remove(finalTTFpath)
                    os.rename(ttfPath, finalTTFpath)

            if os.path.exists(vfbPath):
                finalVFBpath = vfbPath.replace('_TEMP_.vfb', '.vfb')
                if finalVFBpath != vfbPath:
                    if PC and os.path.exists(finalVFBpath):
                        os.remove(finalVFBpath)
                    os.rename(vfbPath, finalVFBpath)

            # remove FontLab leftovers
            pfmPath = pfaPathTemp.replace('.pfa', '.pfm')
            afmPath = pfaPathTemp.replace('.pfa', '.afm')
            if os.path.exists(pfmPath):
                os.remove(pfmPath)
            if os.path.exists(afmPath):
                os.remove(afmPath)
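
The derivedchars check near the top of the loop above simply tests each filename in the font's folder with re.search before deciding whether to run the dvInput step. A minimal standalone sketch of that filename test (the helper and sample names are ours, for illustration):

import re

def has_derivedchars_file(filenames):
    # Match 'derivedchars', optionally followed by any suffix, anchored at the end.
    return any(re.search(r'derivedchars(.+?)?$', name) for name in filenames)

print(has_derivedchars_file(['font.pfa', 'derivedchars.txt']))  # -> True
print(has_derivedchars_file(['font.pfa', 'features.fea']))      # -> False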

Example 65

Project: reviewboard Source File: siteconfig.py
def load_site_config(full_reload=False):
    """Load stored site configuration settings.

    This populates the Django settings object with any keys that need to be
    there.
    """
    def apply_setting(settings_key, db_key, default=None):
        """Apply the given siteconfig value to the Django settings object."""
        db_value = siteconfig.settings.get(db_key)

        if db_value:
            setattr(settings, settings_key, db_value)
        elif default:
            setattr(settings, settings_key, default)

    def update_haystack_settings():
        """Update the haystack settings in site config."""
        search_backend_id = (siteconfig.get('search_backend_id') or
                             defaults['search_backend_id'])
        search_backend = search_backend_registry.get_search_backend(
            search_backend_id)

        if not search_backend:
            raise ImproperlyConfigured(_(
                'The search engine "%s" could not be found. If this is '
                'provided by an extension, you will have to make sure that '
                'extension is enabled.'
                % search_backend_id
            ))

        apply_setting(
            'HAYSTACK_CONNECTIONS', None,
            {
                'default': search_backend.configuration,
            })

        # Re-initialize Haystack's connection information to use the updated
        # settings.
        connections.connections_info = settings.HAYSTACK_CONNECTIONS
        connections._connections = {}

    # If siteconfig needs to be saved back to the DB, set dirty=true
    dirty = False
    try:
        siteconfig = SiteConfiguration.objects.get_current()
    except SiteConfiguration.DoesNotExist:
        raise ImproperlyConfigured(
            "The site configuration entry does not exist in the database. "
            "Re-run `./manage.py` syncdb to fix this.")
    except Exception as e:
        # We got something else. Likely, this doesn't exist yet and we're
        # doing a syncdb or something, so silently ignore.
        logging.error('Could not load siteconfig: %s' % e)
        return

    # Populate defaults if they weren't already set.
    if not siteconfig.get_defaults():
        siteconfig.add_defaults(defaults)

    # The default value for DEFAULT_EMAIL_FROM (webmaster@localhost)
    # is less than good, so use a better one if it's set to that or if
    # we haven't yet set this value in siteconfig.
    mail_default_from = \
        siteconfig.settings.get('mail_default_from',
                                global_settings.DEFAULT_FROM_EMAIL)

    if (not mail_default_from or
            mail_default_from == global_settings.DEFAULT_FROM_EMAIL):
        domain = siteconfig.site.domain.split(':')[0]
        siteconfig.set('mail_default_from', 'noreply@' + domain)

    # STATIC_* and MEDIA_* must be different paths, and differ in meaning.
    # If site_static_* is empty or equal to media_static_*, we're probably
    # migrating from an earlier Review Board install.
    site_static_root = siteconfig.settings.get('site_static_root', '')
    site_media_root = siteconfig.settings.get('site_media_root')

    if site_static_root == '' or site_static_root == site_media_root:
        siteconfig.set('site_static_root', settings.STATIC_ROOT)

    site_static_url = siteconfig.settings.get('site_static_url', '')
    site_media_url = siteconfig.settings.get('site_media_url')

    if site_static_url == '' or site_static_url == site_media_url:
        siteconfig.set('site_static_url', settings.STATIC_URL)

    # Populate the settings object with anything relevant from the siteconfig.
    apply_django_settings(siteconfig, settings_map)

    if full_reload and not getattr(settings, 'RUNNING_TEST', False):
        # Logging may have changed, so restart logging.
        restart_logging()

    # Now for some more complicated stuff...

    update_haystack_settings()

    # Site administrator settings
    apply_setting("ADMINS", None, (
        (siteconfig.get("site_admin_name", ""),
         siteconfig.get("site_admin_email", "")),
    ))

    apply_setting("MANAGERS", None, settings.ADMINS)

    # Explicitly base this off the STATIC_URL
    apply_setting("ADMIN_MEDIA_PREFIX", None, settings.STATIC_URL + "admin/")

    # Set the auth backends
    auth_backend_id = siteconfig.settings.get("auth_backend", "builtin")
    builtin_backend_obj = auth_backends.get('backend_id', 'builtin')
    builtin_backend = "%s.%s" % (builtin_backend_obj.__module__,
                                 builtin_backend_obj.__name__)

    if auth_backend_id == "custom":
        custom_backends = siteconfig.settings.get("auth_custom_backends")

        if isinstance(custom_backends, six.string_types):
            custom_backends = (custom_backends,)
        elif isinstance(custom_backends, list):
            custom_backends = tuple(custom_backends)

        settings.AUTHENTICATION_BACKENDS = custom_backends

        if builtin_backend not in custom_backends:
            settings.AUTHENTICATION_BACKENDS += (builtin_backend,)
    else:
        backend = auth_backends.get('backend_id', auth_backend_id)

        if backend and backend is not builtin_backend_obj:
            settings.AUTHENTICATION_BACKENDS = \
                ("%s.%s" % (backend.__module__, backend.__name__),
                 builtin_backend)
        else:
            settings.AUTHENTICATION_BACKENDS = (builtin_backend,)

        # If we're upgrading from a 1.x LDAP configuration, populate
        # ldap_uid and clear ldap_uid_mask
        if auth_backend_id == "ldap":
            if not hasattr(settings, 'LDAP_UID'):
                if hasattr(settings, 'LDAP_UID_MASK'):
                    # Get the username attribute from the old UID mask
                    # LDAP attributes can contain only alphanumeric
                    # characters and the hyphen and must lead with an
                    # alphabetic character. This is not dependent upon
                    # locale.
                    m = re.search("([a-zA-Z][a-zA-Z0-9-]+)=%s",
                                  settings.LDAP_UID_MASK)
                    if m:
                        # Assign LDAP_UID the value of the retrieved attribute
                        settings.LDAP_UID = m.group(1)
                    else:
                        # Couldn't match the old value?
                        # This should be impossible, but in this case, let's
                        # just guess a sane default and hope for the best.
                        settings.LDAP_UID = 'uid'

                else:
                    # Neither the old nor new value?
                    # This should be impossible, but in this case, let's just
                    # guess a sane default and hope for the best.
                    settings.LDAP_UID = 'uid'

                # Remove the LDAP_UID_MASK value
                settings.LDAP_UID_MASK = None

                siteconfig.set('auth_ldap_uid', settings.LDAP_UID)
                siteconfig.set('auth_ldap_uid_mask', settings.LDAP_UID_MASK)
                # Set the dirty flag so we save this back
                dirty = True

    # Add APITokenBackend to the list of auth backends. This one is always
    # present, and is used only for API requests.
    settings.AUTHENTICATION_BACKENDS += (
        'reviewboard.webapi.auth_backends.TokenAuthBackend',
    )

    # Set the storage backend
    storage_backend = siteconfig.settings.get('storage_backend', 'builtin')

    if storage_backend in storage_backend_map:
        settings.DEFAULT_FILE_STORAGE = storage_backend_map[storage_backend]
    else:
        settings.DEFAULT_FILE_STORAGE = storage_backend_map['builtin']

    # These blow up if they're not exactly the right types
    settings.AWS_QUERYSTRING_AUTH = siteconfig.get('aws_querystring_auth')
    settings.AWS_ACCESS_KEY_ID = six.text_type(
        siteconfig.get('aws_access_key_id'))
    settings.AWS_SECRET_ACCESS_KEY = six.text_type(
        siteconfig.get('aws_secret_access_key'))
    settings.AWS_STORAGE_BUCKET_NAME = six.text_type(
        siteconfig.get('aws_s3_bucket_name'))
    try:
        settings.AWS_CALLING_FORMAT = int(siteconfig.get('aws_calling_format'))
    except ValueError:
        settings.AWS_CALLING_FORMAT = 0

    settings.SWIFT_AUTH_URL = six.text_type(
        siteconfig.get('swift_auth_url'))
    settings.SWIFT_USERNAME = six.text_type(
        siteconfig.get('swift_username'))
    settings.SWIFT_KEY = six.text_type(
        siteconfig.get('swift_key'))
    try:
        settings.SWIFT_AUTH_VERSION = int(siteconfig.get('swift_auth_version'))
    except:
        settings.SWIFT_AUTH_VERSION = 1
    settings.SWIFT_CONTAINER_NAME = six.text_type(
        siteconfig.get('swift_container_name'))

    if siteconfig.settings.get('site_domain_method', 'http') == 'https':
        os.environ[b'HTTPS'] = b'on'
    else:
        os.environ[b'HTTPS'] = b'off'

    # Save back changes if they have been made
    if dirty:
        siteconfig.save()

    site_settings_loaded.send(sender=None)

    return siteconfig
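
A minimal sketch of the LDAP upgrade path above, showing how re.search pulls the attribute name out of an old-style UID mask (the mask value here is illustrative, not read from a real settings object):

import re

# Old 1.x-style mask of the form "<attribute>=%s"; illustrative value.
ldap_uid_mask = "uid=%s"

m = re.search(r"([a-zA-Z][a-zA-Z0-9-]+)=%s", ldap_uid_mask)
# Take the captured attribute name, or fall back to a sane default.
ldap_uid = m.group(1) if m else "uid"

print(ldap_uid)  # -> uid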

Example 66

Project: crash Source File: tabulate.py
Function: tabulate
def tabulate(tabular_data, headers=(), tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------

    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, an iterable of dictionaries,
    a two-dimensional NumPy array, NumPy record array, or a Pandas'
    dataframe.


    Table headers
    -------------

    To print nice column headers, supply the second argument (`headers`):

      - `headers` can be an explicit list of column headers
      - if `headers="firstrow"`, then the first row of data is used
      - if `headers="keys"`, then dictionary keys or column indices are used

    Otherwise a headerless table is produced.

    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19


    Column alignment
    ----------------

    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).


    Table formats
    -------------

    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.

    `None` values are replaced with a `missingval` string:

    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----

    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
    'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains
    the list of currently supported formats.

    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451

    "simple" format is like Pandoc simple_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------

    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+

    >>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]],
    ...                ["text", "numbers", "other"], "grid"))
    +-------------+----------+-------+
    | text        |  numbers | other |
    +=============+==========+=======+
    | this        |  41.9999 | foo   |
    | is          |          | bar   |
    | a multiline |          |       |
    | text        |          |       |
    +-------------+----------+-------+
    | NULL        | 451      |       |
    +-------------+----------+-------+

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+

    "fancy_grid" draws a grid using box-drawing characters:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
    │ strings   │   numbers │
    ╞═══════════╪═══════════╡
    │ spam      │   41.9999 │
    ├───────────┼───────────┤
    │ eggs      │  451      │
    ╘═══════════╧═══════════╛

    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |

    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |


    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |

    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========

    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}

    "html" produces HTML markup:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="html"))
    <table>
    <tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
    <tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
    <tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
    </table>

    "latex" produces a tabular environment of LaTeX docuement markup:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\hline
    \\end{tabular}

    "latex_booktabs" produces a tabular environment of LaTeX docuement markup
    using the booktabs.sty package:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
    \\begin{tabular}{lr}
    \\toprule
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\bottomrule
    \\end{tabular}
    """
    if tabular_data is None:
        tabular_data = []
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
                            ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = _max_line_width

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
             for c,ct in zip(cols, coltypes)]

    # align columns
    aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        t_cols = cols or [['']] * len(headers)
        t_aligns = aligns or [stralign] * len(headers)
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, t_aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns)
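
The re.search call above only decides whether any ANSI escape codes are present in the data; the real _invisible_codes pattern is defined elsewhere in tabulate, so the regex below is a rough stand-in for illustration:

import re

# Approximate pattern for ANSI colour/reset escape sequences (stand-in for
# tabulate's _invisible_codes, which is defined elsewhere in the module).
ansi_codes = re.compile(r"\x1b\[[0-9;]*m")

plain = "spam\t41.9999"
colored = "\x1b[31mspam\x1b[0m\t41.9999"

print(bool(re.search(ansi_codes, plain)))    # False: plain-width functions suffice
print(bool(re.search(ansi_codes, colored)))  # True: switch to a visible-width function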

Example 67

Project: VizAlerts Source File: tabhttp.py
def export_view(view, format):

    # assign variables (clean this up later)
    username = view.subscriber_sysname
    sitename = unicode(view.site_name).replace('Default', '')
    if view.subscriber_domain != 'local': # leave it as None if Server uses local authentication
        subscriberdomain = view.subscriber_domain
    else:
        subscriberdomain = None

    timeout_s = view.timeout_s
    refresh = view.force_refresh
    attempts = 0
    pngwidth = view.viz_png_width
    pngheight = view.viz_png_height

    server = config.configs['server']
    encrypt = config.configs['server.ssl']
    certcheck = config.configs['server.certcheck']
    certfile = config.configs['server.certfile']
    tempdir = config.configs['temp.dir']
    if config.configs['trusted.useclientip']:
        clientip = config.configs['trusted.clientip']
    else:
        clientip = None

    # variables used later in the script
    response = None
    ticket = None

    # overrides for various url components
    if config.configs['server.ssl']:
        protocol = u'https'
    else:
        protocol = u'http'

    # viewurlsuffix may be of the form workbook/view
    # or workbook/view?param1=value1&param2=value2;
    # in the latter case, separate it out
    search = re.search(u'(.*?)\?(.*)', view.view_url_suffix)
    if search:
        viewurlsuffix = search.group(1)
        extraurlparameter = '?' + search.group(2)
    else:
        viewurlsuffix = view.view_url_suffix
        # always need a ? to add in the formatparam and potentially refresh URL parameters
        extraurlparameter = '?'

    # set up format
    # if the user hasn't overridden PNG with a size setting then use the default
    if format == Format.PNG and ':size=' not in extraurlparameter:
        formatparam = u'&:format=' + format + u'&:size={},{}'.format(str(pngwidth), str(pngheight))
    else:
        formatparam = u'&:format=' + format

    if sitename != '':
        sitepart = u'/t/' + sitename
    else:
        sitepart = sitename

    # get the full URL (minus the ticket) for logging and error reporting
    displayurl = protocol + u'://' + server + sitepart + u'/views/' + viewurlsuffix + extraurlparameter + formatparam
    if refresh:
        displayurl = displayurl + u'&:refresh=y'   # show admin/users that we forced a refresh

    while attempts < view.data_retrieval_tries:
        try:
            attempts += 1

            # get a trusted ticket
            ticket = get_trusted_ticket(server, sitename, username, encrypt, certcheck, certfile, subscriberdomain, clientip)

            # build final URL
            url = protocol + u'://' + server + u'/trusted/' + ticket + sitepart + u'/views/' + viewurlsuffix + extraurlparameter + formatparam
            if refresh:
                url = url + u'&:refresh=y'   # force a refresh of the data--we don't want alerts based on cached (stale) data

            log.logger.debug(u'Getting vizdata from: {}'.format(url))

            # Make the GET call to obtain the data
            response = None
            if subscriberdomain:
                # Tableau Server is using AD auth (is this even needed? May need to remove later)
                if certcheck:
                    log.logger.debug('Validating cert for this request using certfile {}'.format(certfile))
                    if not certfile:
                        certfile = requests.utils.DEFAULT_CA_BUNDLE_PATH
                    response = requests.get(url, auth=HttpNtlmAuth(subscriberdomain + u'\\' + username, ''), verify=certfile, timeout=timeout_s)
                else:
                    log.logger.debug('NOT Validating cert for this request')
                    requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # disable warnings for unverified certs
                    response = requests.get(url, auth=HttpNtlmAuth(subscriberdomain + u'\\' + username, ''), verify=False, timeout=timeout_s)
            else:
                # Server is using local auth
                if certcheck:
                    log.logger.debug('Validating cert for this request using certfile {}'.format(certfile))
                    if not certfile:
                        certfile = requests.utils.DEFAULT_CA_BUNDLE_PATH
                    response = requests.get(url, auth=(username, ''), verify=certfile, timeout=timeout_s)
                else:
                    log.logger.debug('NOT Validating cert for this request')
                    requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # disable warnings for unverified certs
                    response = requests.get(url, auth=(username, ''), verify=False, timeout=timeout_s)
            response.raise_for_status()

            # Create the temporary file. The datestring is down to the microsecond
            # to prevent duplicates, since we exclude any extra URL parameters from
            # the filename for space & security reasons (users might obfuscate
            # results by hiding URL parameters).
            datestring = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
            filename = datestring + '_' + threading.current_thread().name + '_' + viewurlsuffix.replace('/', '-') + '.' + format
            filepath = tempdir + filename

            log.logger.info(u'Attempting to write to: {}'.format(filepath))

            if format == Format.CSV:
                f = open(filepath, 'w')
                f.write(response.content.replace('\r\n', '\n')) # remove extra carriage returns
            else:
                f = open(filepath, 'wb')
                for block in response.iter_content(1024):
                    if not block:
                        break
                    f.write(block)
            f.close()
            return unicode(filepath)
        except requests.exceptions.Timeout as e:
            errormessage = cgi.escape(u'Timeout error. Could not retrieve vizdata from url {} within {} seconds, after {} tries'.format(displayurl, timeout_s, attempts))
            log.logger.error(errormessage)
            if attempts >= view.data_retrieval_tries:
                raise UserWarning(errormessage)
            else:
                continue
        except requests.exceptions.HTTPError as e:
            errormessage = cgi.escape(u'HTTP error getting vizdata from url {}. Code: {} Reason: {}'.format(displayurl, e.response.status_code, e.response.reason))
            log.logger.error(errormessage)
            if attempts >= view.data_retrieval_tries:
                raise UserWarning(errormessage)
            else:
                continue
        except requests.exceptions.SSLError as e:
            errormessage = cgi.escape(u'SSL error getting vizdata from url {}. Error: {}'.format(displayurl, e))
            log.logger.error(errormessage)
            if attempts >= view.data_retrieval_tries:
                raise UserWarning(errormessage)
            else:
                continue
        except requests.exceptions.RequestException as e:
            errormessage = cgi.escape(u'Request Exception getting vizdata from url {}. Error: {}'.format(displayurl, e))
            if response:
                errormessage += ' Response: {}'.format(response)
            if hasattr(e, 'code'):
                errormessage += ' Code: {}'.format(e.code)
            if hasattr(e, 'reason'):
                errormessage += ' Reason: {}'.format(e.reason)
            log.logger.error(errormessage)
            if attempts >= view.data_retrieval_tries:
                raise UserWarning(errormessage)
            else:
                continue
        except IOError as e:
            errormessage = cgi.escape(u'Unable to write the file {} for url {}, error: {}'.format(filepath, displayurl, e))
            log.logger.error(errormessage)
            if attempts >= view.data_retrieval_tries:
                raise UserWarning(errormessage)
            else:
                continue
        except Exception as e:
            errormessage = cgi.escape(u'Generic exception trying to export the url {} to {}, error: {}'.format(displayurl, format, e))
            if response:
                errormessage = errormessage + ', response: {}'.format(response)
            if hasattr(e, 'code'):
                errormessage += ' Code: {}'.format(e.code)
            if hasattr(e, 'reason'):
                errormessage += ' Reason: {}'.format(e.reason)
            log.logger.error(errormessage)
            if attempts >= view.data_retrieval_tries:
                raise UserWarning(errormessage)
            else:
                continue

        # got through with no errors
        break
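
The first re.search in this example splits the view URL suffix at the first "?" so the workbook/view part and the extra URL parameters can be handled separately. A minimal sketch of that split, using an illustrative suffix:

import re

view_url_suffix = u'MyWorkbook/MyView?Region=West&Year=2016'  # illustrative value

search = re.search(r'(.*?)\?(.*)', view_url_suffix)
if search:
    viewurlsuffix = search.group(1)            # u'MyWorkbook/MyView'
    extraurlparameter = '?' + search.group(2)  # u'?Region=West&Year=2016'
else:
    viewurlsuffix = view_url_suffix
    extraurlparameter = '?'  # still need a "?" for the format/refresh parameters

print(viewurlsuffix)
print(extraurlparameter)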

Example 68

Project: cylc Source File: updater_graph.py
    def update_graph(self):
        # TODO - check edges against resolved ones
        # (adding new ones, and nodes, if necessary)

        self.action_required = False
        try:
            self.oldest_point_string = (
                self.global_summary['oldest cycle point string'])
            self.newest_point_string = (
                self.global_summary['newest cycle point string'])
            if TASK_STATUS_RUNAHEAD not in self.updater.filter_states_excl:
                # Get a graph out to the max runahead point.
                try:
                    self.newest_point_string = (
                        self.global_summary[
                            'newest runahead cycle point string'])
                except KeyError:
                    # back compat <= 6.2.0
                    pass
        except KeyError:
            # Pre cylc-6 back compat.
            self.oldest_point_string = (
                self.global_summary['oldest cycle time'])
            self.newest_point_string = (
                self.global_summary['newest cycle time'])

        if self.focus_start_point_string:
            oldest = self.focus_start_point_string
            newest = self.focus_stop_point_string
        else:
            oldest = self.oldest_point_string
            newest = self.newest_point_string

        group_for_server = self.group
        if self.group == []:
            group_for_server = None

        ungroup_for_server = self.ungroup
        if self.ungroup == []:
            ungroup_for_server = None

        try:
            res = self.updater.suite_info_client.get_info(
                'get_graph_raw', start_point_string=oldest,
                stop_point_string=newest,
                group_nodes=group_for_server,
                ungroup_nodes=ungroup_for_server,
                ungroup_recursive=self.ungroup_recursive,
                group_all=self.group_all,
                ungroup_all=self.ungroup_all
            )
        except Exception as exc:
            print >> sys.stderr, str(exc)
            return False

        self.have_leaves_and_feet = True
        gr_edges, suite_polling_tasks, self.leaves, self.feet = res
        gr_edges = [tuple(edge) for edge in gr_edges]

        current_id = self.get_graph_id(gr_edges)
        needs_redraw = current_id != self.prev_graph_id

        if needs_redraw:
            self.graphw = CGraphPlain(
                self.cfg.suite, suite_polling_tasks)
            self.graphw.add_edges(
                gr_edges, ignore_suicide=self.ignore_suicide)

            nodes_to_remove = set()

            # Remove nodes representing filtered-out tasks.
            if (self.updater.filter_name_string or
                    self.updater.filter_states_excl):
                for node in self.graphw.nodes():
                    id = node.get_name()
                    # Don't need to guard against special nodes here (yet).
                    name, point_string = TaskID.split(id)
                    if name not in self.all_families:
                        # This node is a task, not a family.
                        if id in self.updater.filt_task_ids:
                            nodes_to_remove.add(node)
                        elif id not in self.updater.kept_task_ids:
                            # A base node - these only appear in the graph.
                            filter_string = self.updater.filter_name_string
                            if (filter_string and
                                    filter_string not in name and
                                    not re.search(filter_string, name)):
                                # A base node that fails the name filter.
                                nodes_to_remove.add(node)
                    elif id in self.fam_state_summary:
                        # Remove family nodes if all members filtered out.
                        remove = True
                        for mem in self.descendants[name]:
                            mem_id = TaskID.get(mem, point_string)
                            if mem_id in self.updater.kept_task_ids:
                                remove = False
                                break
                        if remove:
                            nodes_to_remove.add(node)
                    elif id in self.updater.full_fam_state_summary:
                        # An updater-filtered-out family.
                        nodes_to_remove.add(node)

            # Base node cropping.
            if self.crop:
                # Remove all base nodes.
                for node in (set(self.graphw.nodes()) - nodes_to_remove):
                    if node.get_name() not in self.state_summary:
                        nodes_to_remove.add(node)
            else:
                # Remove cycle points containing only base nodes.
                non_base_point_strings = set()
                point_string_nodes = {}
                for node in set(self.graphw.nodes()) - nodes_to_remove:
                    node_id = node.get_name()
                    name, point_string = TaskID.split(node_id)
                    point_string_nodes.setdefault(point_string, [])
                    point_string_nodes[point_string].append(node)
                    if (node_id in self.state_summary or
                            node_id in self.fam_state_summary):
                        non_base_point_strings.add(point_string)
                pure_base_point_strings = (
                    set(point_string_nodes) - non_base_point_strings)
                for point_string in pure_base_point_strings:
                    for node in point_string_nodes[point_string]:
                        nodes_to_remove.add(node)
            self.graphw.cylc_remove_nodes_from(list(nodes_to_remove))
            # TODO - remove base nodes only connected to other base nodes?
            # Should these even exist any more?

            # Make family nodes octagons.
            for node in self.graphw.nodes():
                node_id = node.get_name()
                try:
                    name, point_string = TaskID.split(node_id)
                except ValueError:
                    # Special node.
                    continue
                if name in self.all_families:
                    node.attr['shape'] = 'doubleoctagon'

            if self.subgraphs_on:
                self.graphw.add_cycle_point_subgraphs(gr_edges)

        # Set base node style defaults
        for node in self.graphw.nodes():
            node.attr.setdefault('style', 'filled')
            node.attr['color'] = '#888888'
            node.attr['fillcolor'] = 'white'
            node.attr['fontcolor'] = '#888888'

        for id in self.state_summary:
            try:
                node = self.graphw.get_node(id)
            except KeyError:
                continue
            self.set_live_node_attr(node, id)

        for id in self.fam_state_summary:
            try:
                node = self.graphw.get_node(id)
            except KeyError:
                continue
            self.set_live_node_attr(node, id)

        self.graphw.graph_attr['rankdir'] = self.orientation

        if self.write_dot_frames:
            arg = os.path.join(
                self.suite_share_dir, 'frame' + '-' +
                str(self.graph_frame_count) + '.dot')
            self.graphw.write(arg)
            self.graph_frame_count += 1

        self.prev_graph_id = current_id
        return not needs_redraw
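
In the name-filtering branch above, the filter string is tried first as a plain substring and then, via re.search, as a regular expression; a base node is removed only when both tests fail. A minimal sketch with illustrative task names:

import re

filter_string = r"foo\d+"  # illustrative filter
names = ["foo1", "bar", "prep_foo22"]

for name in names:
    keep = filter_string in name or re.search(filter_string, name)
    print(name, bool(keep))
# foo1 and prep_foo22 survive the filter; bar would be removed.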

Example 69

Project: terminal_markdown_viewer Source File: tabulate.py
Function: tabulate
def tabulate(tabular_data, headers=(), tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------

    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, an iterable of dictionaries,
    a two-dimensional NumPy array, NumPy record array, or a Pandas'
    dataframe.


    Table headers
    -------------

    To print nice column headers, supply the second argument (`headers`):

      - `headers` can be an explicit list of column headers
      - if `headers="firstrow"`, then the first row of data is used
      - if `headers="keys"`, then dictionary keys or column indices are used

    Otherwise a headerless table is produced.

    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19


    Column alignment
    ----------------

    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).


    Table formats
    -------------

    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.

    `None` values are replaced with a `missingval` string:

    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----

    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
    'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains
    the list of currently supported formats.

    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451

    "simple" format is like Pandoc simple_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------

    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+

    "fancy_grid" draws a grid using box-drawing characters:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
    │ strings   │   numbers │
    ╞═══════════╪═══════════╡
    │ spam      │   41.9999 │
    ├───────────┼───────────┤
    │ eggs      │  451      │
    ╘═══════════╧═══════════╛

    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |

    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |


    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |

    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========

    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}

    "html" produces HTML markup:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="html"))
    <table>
    <thead>
    <tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
    </thead>
    <tbody>
    <tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
    <tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
    </tbody>
    </table>

    "latex" produces a tabular environment of LaTeX docuement markup:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\hline
    \\end{tabular}

    "latex_booktabs" produces a tabular environment of LaTeX docuement markup
    using the booktabs.sty package:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
    \\begin{tabular}{lr}
    \\toprule
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\bottomrule
    \\end{tabular}
    """
    if tabular_data is None:
        tabular_data = []
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
                            ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = len

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
             for c,ct in zip(cols, coltypes)]

    # align columns
    aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        t_cols = cols or [['']] * len(headers)
        t_aligns = aligns or [stralign] * len(headers)
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, t_aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns)
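
This copy of tabulate falls back to plain len() when no ANSI codes are found; when they are found, a visible-width helper is used instead. A rough stand-in for that helper (the real _visible_width lives elsewhere in the module): strip the escape sequences with re.sub before measuring.

import re

ansi_codes = re.compile(r"\x1b\[[0-9;]*m")  # approximate ANSI escape pattern

def visible_width(s):
    # Width as rendered on screen, ignoring colour escape sequences.
    return len(re.sub(ansi_codes, "", s))

print(len("\x1b[31mspam\x1b[0m"))            # 13: raw length counts the escapes
print(visible_width("\x1b[31mspam\x1b[0m"))  # 4: what actually shows in the table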

Example 70

Project: rivescript-python Source File: sorting.py
def sort_trigger_set(triggers, exclude_previous=True, say=None):
    """Sort a group of triggers in optimal sorting order.

    The optimal sorting order is, briefly:
    * Atomic triggers (containing nothing but plain words and alternation
      groups) are on top, with triggers containing the most words coming
      first. Triggers with equal word counts are sorted by length, and then
      alphabetically if they have the same length.
    * Triggers containing optionals are sorted next, by word count like
      atomic triggers.
    * Triggers containing wildcards are next, with ``_`` (alphabetic)
      wildcards on top, then ``#`` (numeric) and finally ``*``.
    * At the bottom of the sorted list are triggers consisting of only a
      single wildcard, in the order: ``_``, ``#``, ``*``.

    Triggers that have ``{weight}`` tags are grouped together by weight
    value and sorted amongst themselves. Higher weighted groups are then
    ordered before lower weighted groups regardless of the normal sorting
    algorithm.

    Triggers that come from topics which inherit other topics are also
    sorted with higher priority than triggers from the inherited topics.

    Arguments:
        triggers ([]str): Array of triggers to sort.
        exclude_previous (bool): Create a sort buffer for 'previous' triggers.
        say (function): A reference to ``RiveScript._say()`` or provide your
            own function.
    """
    if say is None:
        say = lambda x: x

    # KEEP IN MIND: the `triggers` array is composed of array elements of the form
    # ["trigger text", pointer to trigger data]
    # So this code will use e.g. `trig[0]` when referring to the trigger text.

    # Create a priority map.
    prior = {
        0: []  # Default priority=0
    }

    for trig in triggers:
        if exclude_previous and trig[1]["previous"]:
            continue

        match, weight = re.search(RE.weight, trig[0]), 0
        if match:
            weight = int(match.group(1))
        if weight not in prior:
            prior[weight] = []

        prior[weight].append(trig)

    # Keep a running list of sorted triggers for this topic.
    running = []

    # Sort them by priority.
    for p in sorted(prior.keys(), reverse=True):
        say("\tSorting triggers with priority " + str(p))

        # So, some of these triggers may include {inherits} tags, if they
        # came from a topic which inherits another topic. Lower inherits
        # values mean higher priority on the stack.
        inherits = -1          # -1 means no {inherits} tag
        highest_inherits = -1  # highest inheritance number seen

        # Loop through and categorize these triggers.
        track = {
            inherits: init_sort_track()
        }

        for trig in prior[p]:
            pattern = trig[0]
            say("\t\tLooking at trigger: " + pattern)

            # See if it has an inherits tag.
            match = re.search(RE.inherit, pattern)
            if match:
                inherits = int(match.group(1))
                if inherits > highest_inherits:
                    highest_inherits = inherits
                say("\t\t\tTrigger belongs to a topic which inherits other topics: level=" + str(inherits))
                pattern = re.sub(RE.inherit, "", pattern)
                trig[0] = pattern
            else:
                inherits = -1

            # If this is the first time we've seen this inheritance level,
            # initialize its track structure.
            if inherits not in track:
                track[inherits] = init_sort_track()

            # Start inspecting the trigger's contents.
            if '_' in pattern:
                # Alphabetic wildcard included.
                cnt = utils.word_count(pattern)
                say("\t\t\tHas a _ wildcard with " + str(cnt) + " words.")
                if cnt > 1:
                    if cnt not in track[inherits]['alpha']:
                        track[inherits]['alpha'][cnt] = []
                    track[inherits]['alpha'][cnt].append(trig)
                else:
                    track[inherits]['under'].append(trig)
            elif '#' in pattern:
                # Numeric wildcard included.
                cnt = utils.word_count(pattern)
                say("\t\t\tHas a # wildcard with " + str(cnt) + " words.")
                if cnt > 1:
                    if cnt not in track[inherits]['number']:
                        track[inherits]['number'][cnt] = []
                    track[inherits]['number'][cnt].append(trig)
                else:
                    track[inherits]['pound'].append(trig)
            elif '*' in pattern:
                # Wildcard included.
                cnt = utils.word_count(pattern)
                say("\t\t\tHas a * wildcard with " + str(cnt) + " words.")
                if cnt > 1:
                    if cnt not in track[inherits]['wild']:
                        track[inherits]['wild'][cnt] = []
                    track[inherits]['wild'][cnt].append(trig)
                else:
                    track[inherits]['star'].append(trig)
            elif '[' in pattern:
                # Optionals included.
                cnt = utils.word_count(pattern)
                say("\t\t\tHas optionals and " + str(cnt) + " words.")
                if cnt not in track[inherits]['option']:
                    track[inherits]['option'][cnt] = []
                track[inherits]['option'][cnt].append(trig)
            else:
                # Totally atomic.
                cnt = utils.word_count(pattern)
                say("\t\t\tTotally atomic and " + str(cnt) + " words.")
                if cnt not in track[inherits]['atomic']:
                    track[inherits]['atomic'][cnt] = []
                track[inherits]['atomic'][cnt].append(trig)

        # Move the no-{inherits} triggers to the bottom of the stack.
        track[highest_inherits + 1] = track[-1]
        del(track[-1])

        # Add this group to the sort list.
        for ip in sorted(track.keys()):
            say("ip=" + str(ip))
            for kind in ['atomic', 'option', 'alpha', 'number', 'wild']:
                for wordcnt in sorted(track[ip][kind], reverse=True):
                    # Triggers with a matching word count should be sorted
                    # by length, descending.
                    running.extend(sorted(track[ip][kind][wordcnt], key=len, reverse=True))
            running.extend(sorted(track[ip]['under'], key=len, reverse=True))
            running.extend(sorted(track[ip]['pound'], key=len, reverse=True))
            running.extend(sorted(track[ip]['star'], key=len, reverse=True))
    return running
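
Both re.search calls above read numeric values out of inline tags ({weight=N} and {inherits=N}). The exact RE.weight and RE.inherit patterns live in RiveScript's regexp module; the patterns below are approximations for illustration:

import re

weight_re = re.compile(r"\{weight=(\d+)\}")     # approximation of RE.weight
inherit_re = re.compile(r"\{inherits=(\d+)\}")  # approximation of RE.inherit

trigger = "hello bot{weight=25}"  # illustrative trigger text
match = re.search(weight_re, trigger)
weight = int(match.group(1)) if match else 0
print(weight)  # 25

tagged = "{inherits=1}good morning"  # illustrative trigger from an inheriting topic
match = re.search(inherit_re, tagged)
inherits = -1
if match:
    inherits = int(match.group(1))
    tagged = re.sub(inherit_re, "", tagged)
print(inherits, repr(tagged))  # 1 'good morning'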

Example 71

Project: lulzbot-telegram-bot Source File: bot.py
def echo():
    global LAST_UPDATE_ID

    # Request updates from last updated_id
    for update in bot.getUpdates(offset=LAST_UPDATE_ID):
        if LAST_UPDATE_ID < update.update_id:
            # chat_id is required to reply any message
            chat_id = update.message.chat_id
            message = update.message.text

            if (message):
                if '/start' in message or '/help' in message or '/list' in message or '/commands' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    bot.sendMessage(chat_id=chat_id,text=help.encode('utf8'),disable_web_page_preview=True)

                ##################----- PublicPlugins -----##################

                #Require API keys

                '''Youtube search'''
                if '/yt' in message or '/youtube' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    replacer = {'/youtube':'','/yt':''}
                    search_term = replace_all(message,replacer)
                    if len(search_term)<1:
                       bot.sendMessage(chat_id=chat_id,text='Youtube API calls are costly. Use it like /yt keywords; Ex, /yt Iron Maiden')
                    else:
                       bot.sendMessage(chat_id=chat_id,text=youtube(search_term).encode('utf8'))

                '''Twitter latest tweets of user'''
                if '/twitter' in message or '/tw' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    replacer = {'/twitter':'','/tw':''}
                    username = replace_all(message,replacer)
                    if len(username)<1:
                        bot.sendMessage(chat_id=chat_id,text='Use it like: /tw username; Ex, /tw pytacular')
                    else:
                        bot.sendMessage(chat_id=chat_id,text=twitter(username).encode('utf8'))

                '''Gets twitter trends by country /tt countryname, ex /tt India '''
                if '/tt' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    replacer = {'/tt ':''}
                    place = replace_all(message,replacer)
                    bot.sendMessage(chat_id=chat_id,text=twittertrends(place).encode('utf8'))

                '''Search twitter for top 4 related tweets. /ts #Privacy'''
                if '/ts' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    replacer = {'/ts ':''}
                    search_term = replace_all(message,replacer)
                    bot.sendMessage(chat_id=chat_id,text=twittersearch(search_term).encode('utf8'))

                '''Instagram latest posts of user'''
                if '/insta' in message or '/instagram' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    bot.sendMessage(chat_id=chat_id,text='Instagram has restricted API access. This will not work anymore. Sorry :(')

                '''Game "Hot or Not" '''
                if '/hon' in message or '/hotornot' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    custom_keyboard = [[ telegram.Emoji.THUMBS_UP_SIGN, telegram.Emoji.THUMBS_DOWN_SIGN ]]
                    reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard,resize_keyboard=True,one_time_keyboard=True)
                    image = imgur_hon()
                    bot.sendMessage(chat_id=chat_id,text='Fetched from Imgur Subreddit r/models : '+image, reply_markup=reply_markup)

                '''Bing Image Search'''
                if '/image' in message or '/img' in message:
                    bot.sendChatAction(chat_id=chat_id,action=telegram.ChatAction.TYPING)
                    replacer = {'/image':'','/img':''}
                    search_term = replace_all(message,replacer)
                    pic_link = bingsearch(search_term,'Image')
                    bot.sendMessage(chat_id=chat_id,text=pic_link)

                '''Microsoft translator'''
                if '/translate' in message:
                    message = message.replace('/translate','').encode('utf8')
                    message_broken = shlex.split(message)
                    error = 'Not enough parameters. Use, /translate en hi "Hello world" or /translate help to know more'
                    if not len(message_broken)<1:
                        if message_broken[0] == 'help':
                            help_string = """ Example, /translate en hi "Hello world"
                                    ar-Arabic | bs-Latn-Bosnian (Latin) | bg-Bulgarian | ca-Catalan | zh-CHS-Chinese Simplified |
                                    zh-CHT-Chinese Traditional|hr-Croatian | cs-Czech | da-Danish | nl-Dutch |en-English | cy-Welsh |
                                    et-Estonian | fi-Finnish | fr-French | de-German | el-Greek | ht-Haitian Creole | he-Hebrew |
                                    hi-Hindi | mww-Hmong Daw | hu-Hungarian | id-Indonesian | it-Italian | ja-Japanese | tlh-Klingon |
                                    tlh - Qaak-Klingon (pIqaD) | ko-Korean | lv-Latvian | lt-Lithuanian | ms-Malay | mt-Maltese |
                                    no-Norwegian | fa-Persian | pl-Polish | pt-Portuguese | otq-Querétaro Otomi | ro-Romanian |
                                    ru-Russian | sr-Cyrl-Serbian (Cyrillic) | sr-Latn-Serbian (Latin) | sk-Slovak | sl-Slovenian |
                                    es-Spanish | sv-Swedish | th-Thai | tr-Turkish | uk-Ukrainian | ur-Urdu | vi-Vietnamese |
                                    """
                            bot.sendMessage(chat_id=chat_id,text=help_string)
                        else:
                            if len(message_broken)<3:
                                bot.sendMessage(chat_id=chat_id,text=error)
                            else:
                                lang_from = message_broken[0]
                                lang_to = message_broken[1]
                                lang_text = message_broken[2]
                                print lang_from+lang_to+lang_text
                                bot.sendMessage(chat_id=chat_id,text=btranslate(lang_text,lang_from,lang_to))
                    else:
                        bot.sendMessage(chat_id=chat_id,text=error)

                '''Random cat pic'''
                if '/cats' in message:
                    bot.sendMessage(chat_id=chat_id,text='Hold on, digging out a random cat pic!')
                    url = "http://thecatapi.com/api/images/get?api_key="+cat_API_key+"&format=xml"
                    xml_src = urllib2.urlopen(url)
                    data = xml_src.read()
                    xml_src.close()
                    data = xmltodict.parse(data)
                    piclink =  data['response']['data']['images']['image']['url']
                    source_url = data['response']['data']['images']['image']['source_url']
                    threadobjcats = UploadThread(bot,chat_id,piclink,caption=source_url)
                    threadobjcats.setName('catsthread')
                    threadobjcats.start()

                # Don't need an API key

                '''Google search'''
                if '/google' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    search_term = message.replace('/google','')
                    if len(search_term)<1:
                        bot.sendMessage(chat_id=chat_id,text='Use it like: /google what is a bot')
                    else:
                        bot.sendMessage(chat_id=chat_id,text=google(search_term))
                '''Wikipedia search'''
                if '/wiki' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    search_term = message.replace('/wiki ','')
                    if len(search_term)<1:
                        bot.sendMessage(chat_id=chat_id,text='Use it like: /wiki Anaconda')
                    else:
                        reply=wiki(search_term)
                        bot.sendMessage(chat_id=chat_id,text=reply)
                        if ("Cannot acces link!" in reply):
                            reply="No wikipedia article on that but got some google results for you \n"+google(message)
                            bot.sendMessage(chat_id=chat_id,text=reply)

                '''Weather by city,state'''
                if '/weather' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    reply=weather(message)
                    bot.sendMessage(chat_id=chat_id,text=reply)

                '''Github public feed of any user'''
                if '/github' in message or '/gh' in message:
                    bot.sendChatAction(chat_id=chat_id,action=telegram.ChatAction.TYPING)
                    replacer = {'/github':'','/gh':''}
                    username = replace_all(message,replacer)
                    if len(username)<1:
                        bot.sendMessage(chat_id=chat_id,text='Use it like: /github username Ex, /github bhavyanshu or /gh bhavyanshu')
                    else:
                        bot.sendMessage(chat_id=chat_id,text=gitfeed(username))

                '''Giphy to search for gif by keyword'''
                if '/giphy' in message or '/gif' in message:
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    replacer = {'/giphy':'','/gif':''}
                    search_term = replace_all(message,replacer)
                    if len(search_term)<1:
                        bot.sendMessage(chat_id=chat_id,text='Use it like: /giphy keyword ; Ex, /giphy cats or /gif cats')
                    else:
                        img = translate(search_term)
                        print img.fixed_height.downsampled.url
                        bot.sendMessage(chat_id=chat_id,text='Hang in there. Fetching gif..-Powered by GIPHY!')
                        bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.UPLOAD_PHOTO)
                        threadobjgiphy = UploadThread(bot,chat_id,img.fixed_height.downsampled.url.encode('utf-8'))
                        threadobjgiphy.setName('giphythread')
                        threadobjgiphy.start()

                '''Basic calculator'''
                if '/calc' in message:
                    head, sep, tail = message.partition('/')
                    input_nums = tail.replace('calc','')
                    input_nums = input_nums.replace('\'','')
                    finalexp = shlex.split(input_nums)
                    exp = finalexp[0]
                    bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
                    error = 'You think I can compute apple+mongo? Don\'t add alphabet in between please. Use like, /calc 2+2-5(4+8)'
                    if not exp:
                        bot.sendMessage(chat_id=chat_id,text='Y u no type math expression? >.<')
                    elif re.search('[a-zA-Z]', exp):
                        bot.sendMessage(chat_id=chat_id,text=error)
                    else:
                        bot.sendMessage(chat_id=chat_id,text=calculate(exp))

                # Updates global offset to get the new updates
                LAST_UPDATE_ID = update.update_id
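
The /calc handler uses re.search only as a guard: any expression containing letters is rejected before it reaches the calculator. A minimal sketch of that check with illustrative inputs:

import re

for exp in ("2+2-5*(4+8)", "2+apple"):
    if re.search('[a-zA-Z]', exp):
        print(exp + " -> rejected")
    else:
        print(exp + " -> ok to evaluate")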

Example 72

Project: magic-wormhole Source File: test_scripts.py
    @inlineCallbacks
    def _do_test(self, as_subprocess=False,
                 mode="text", addslash=False, override_filename=False):
        assert mode in ("text", "file", "directory")
        send_cfg = config("send")
        recv_cfg = config("receive")
        message = "blah blah blah ponies"

        for cfg in [send_cfg, recv_cfg]:
            cfg.hide_progress = True
            cfg.relay_url = self.relayurl
            cfg.transit_helper = ""
            cfg.listen = True
            cfg.code = "1-abc"
            cfg.stdout = io.StringIO()
            cfg.stderr = io.StringIO()

        send_dir = self.mktemp()
        os.mkdir(send_dir)
        receive_dir = self.mktemp()
        os.mkdir(receive_dir)

        if mode == "text":
            send_cfg.text = message

        elif mode == "file":
            send_filename = "testfile"
            with open(os.path.join(send_dir, send_filename), "w") as f:
                f.write(message)
            send_cfg.what = send_filename
            receive_filename = send_filename

            recv_cfg.accept_file = True
            if override_filename:
                recv_cfg.output_file = receive_filename = "outfile"

        elif mode == "directory":
            # $send_dir/
            # $send_dir/middle/
            # $send_dir/middle/$dirname/
            # $send_dir/middle/$dirname/[12345]
            # cd $send_dir && wormhole send middle/$dirname
            # cd $receive_dir && wormhole receive
            # expect: $receive_dir/$dirname/[12345]

            send_dirname = "testdir"
            def message(i):
                return "test message %d\n" % i
            os.mkdir(os.path.join(send_dir, "middle"))
            source_dir = os.path.join(send_dir, "middle", send_dirname)
            os.mkdir(source_dir)
            modes = {}
            for i in range(5):
                path = os.path.join(source_dir, str(i))
                with open(path, "w") as f:
                    f.write(message(i))
                if i == 3:
                    os.chmod(path, 0o755)
                modes[i] = stat.S_IMODE(os.stat(path).st_mode)
            send_dirname_arg = os.path.join("middle", send_dirname)
            if addslash:
                send_dirname_arg += os.sep
            send_cfg.what = send_dirname_arg
            receive_dirname = send_dirname

            recv_cfg.accept_file = True
            if override_filename:
                recv_cfg.output_file = receive_dirname = "outdir"

        if as_subprocess:
            wormhole_bin = self.find_executable()
            if send_cfg.text:
                content_args = ['--text', send_cfg.text]
            elif send_cfg.what:
                content_args = [send_cfg.what]

            send_args = [
                    '--relay-url', self.relayurl,
                    '--transit-helper', '',
                    'send',
                    '--hide-progress',
                    '--code', send_cfg.code,
                ] + content_args

            send_d = getProcessOutputAndValue(
                wormhole_bin, send_args,
                path=send_dir,
                env=dict(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8"),
            )
            recv_args = [
                '--relay-url', self.relayurl,
                '--transit-helper', '',
                'receive',
                '--hide-progress',
                '--accept-file',
                recv_cfg.code,
            ]
            if override_filename:
                recv_args.extend(['-o', receive_filename])

            receive_d = getProcessOutputAndValue(
                wormhole_bin, recv_args,
                path=receive_dir,
                env=dict(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8"),
            )

            (send_res, receive_res) = yield gatherResults([send_d, receive_d],
                                                          True)
            send_stdout = send_res[0].decode("utf-8")
            send_stderr = send_res[1].decode("utf-8")
            send_rc = send_res[2]
            receive_stdout = receive_res[0].decode("utf-8")
            receive_stderr = receive_res[1].decode("utf-8")
            receive_rc = receive_res[2]
            NL = os.linesep
            self.assertEqual((send_rc, receive_rc), (0, 0),
                             (send_res, receive_res))
        else:
            send_cfg.cwd = send_dir
            send_d = cmd_send.send(send_cfg)

            recv_cfg.cwd = receive_dir
            receive_d = cmd_receive.receive(recv_cfg)

            # The sender might fail, leaving the receiver hanging, or vice
            # versa. Make sure we don't wait on one side exclusively

            yield gatherResults([send_d, receive_d], True)
            send_stdout = send_cfg.stdout.getvalue()
            send_stderr = send_cfg.stderr.getvalue()
            receive_stdout = recv_cfg.stdout.getvalue()
            receive_stderr = recv_cfg.stderr.getvalue()

            # all output here comes from a StringIO, which uses \n for
            # newlines, even if we're on windows
            NL = "\n"

        self.maxDiff = None # show full output for assertion failures

        self.failUnlessEqual(send_stderr, "",
                             (send_stdout, send_stderr))
        self.failUnlessEqual(receive_stderr, "",
                             (receive_stdout, receive_stderr))

        # check sender
        if mode == "text":
            expected = ("Sending text message ({bytes:d} bytes){NL}"
                        "On the other computer, please run: "
                        "wormhole receive{NL}"
                        "Wormhole code is: {code}{NL}{NL}"
                        "text message sent{NL}").format(bytes=len(message),
                                                        code=send_cfg.code,
                                                        NL=NL)
            self.failUnlessEqual(send_stdout, expected)
        elif mode == "file":
            self.failUnlessIn("Sending {bytes:d} byte file named '{name}'{NL}"
                              .format(bytes=len(message), name=send_filename,
                                      NL=NL), send_stdout)
            self.failUnlessIn("On the other computer, please run: "
                              "wormhole receive{NL}"
                              "Wormhole code is: {code}{NL}{NL}"
                              .format(code=send_cfg.code, NL=NL),
                              send_stdout)
            self.failUnlessIn("File sent.. waiting for confirmation{NL}"
                              "Confirmation received. Transfer complete.{NL}"
                              .format(NL=NL), send_stdout)
        elif mode == "directory":
            self.failUnlessIn("Sending directory", send_stdout)
            self.failUnlessIn("named 'testdir'", send_stdout)
            self.failUnlessIn("On the other computer, please run: "
                              "wormhole receive{NL}"
                              "Wormhole code is: {code}{NL}{NL}"
                              .format(code=send_cfg.code, NL=NL), send_stdout)
            self.failUnlessIn("File sent.. waiting for confirmation{NL}"
                              "Confirmation received. Transfer complete.{NL}"
                              .format(NL=NL), send_stdout)

        # check receiver
        if mode == "text":
            self.failUnlessEqual(receive_stdout, message+NL)
        elif mode == "file":
            self.failUnlessIn("Receiving file ({bytes:d} bytes) into: {name}"
                              .format(bytes=len(message),
                                      name=receive_filename), receive_stdout)
            self.failUnlessIn("Received file written to ", receive_stdout)
            fn = os.path.join(receive_dir, receive_filename)
            self.failUnless(os.path.exists(fn))
            with open(fn, "r") as f:
                self.failUnlessEqual(f.read(), message)
        elif mode == "directory":
            want = (r"Receiving directory \(\d+ bytes\) into: {name}/"
                    .format(name=receive_dirname))
            self.failUnless(re.search(want, receive_stdout),
                            (want, receive_stdout))
            self.failUnlessIn("Received files written to {name}"
                              .format(name=receive_dirname), receive_stdout)
            fn = os.path.join(receive_dir, receive_dirname)
            self.failUnless(os.path.exists(fn), fn)
            for i in range(5):
                fn = os.path.join(receive_dir, receive_dirname, str(i))
                with open(fn, "r") as f:
                    self.failUnlessEqual(f.read(), message(i))
                self.failUnlessEqual(modes[i],
                                     stat.S_IMODE(os.stat(fn).st_mode))

        # check server stats
        self._rendezvous.get_stats()

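The directory-mode assertion above builds its pattern with str.format, escaping the literal parentheses and leaving \d+ for the byte count, then lets re.search confirm the receiver's output. A minimal standalone version of that check, with a made-up receive_stdout value:

import re

# A made-up line of receiver output, standing in for receive_stdout above.
receive_stdout = "Receiving directory (1234 bytes) into: testdir/\n"
receive_dirname = "testdir"

# Same idea as the directory-mode assertion above: escape the literal
# parentheses, leave \d+ for the byte count, and interpolate the name.
want = (r"Receiving directory \(\d+ bytes\) into: {name}/"
        .format(name=receive_dirname))
assert re.search(want, receive_stdout), (want, receive_stdout)

If the directory name could ever contain regex metacharacters, interpolating re.escape(receive_dirname) instead would be the safer choice.
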
Example 73

Project: RMG-Py Source File: adfparser.py
Function: extract
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile."""

        if line.find("INPUT FILE") >= 0:
        #check to make sure we aren't parsing Create jobs
            while line:

                self.updateprogress(inputfile, "Unsupported Information", self.fupdate)

                if line.find("INPUT FILE") >=0 and hasattr(self,"scftargets"):
                #does this file contain multiple calculations?
                #if so, print a warning and skip to end of file
                    self.logger.warning("Skipping remaining calculations")
                    inputfile.seek(0,2)
                    break

                if line.find("INPUT FILE") >= 0:
                    line2 = inputfile.next()
                else:
                    line2 = None

                if line2 and len(line2) <= 2:
                #make sure that it's not blank like in the NiCO4 regression
                    line2 = inputfile.next()

                if line2 and (line2.find("Create") < 0 and line2.find("create") < 0):
                    break

                line = inputfile.next()

        if line[1:10] == "Symmetry:":
            info = line.split()
            if info[1] == "NOSYM":
                self.nosymflag = True

        # Use this to read the subspecies of irreducible representations.
        # It will be a list, with each element representing one irrep.
        if line.strip() == "Irreducible Representations, including subspecies":
            dashes = inputfile.next()
            self.irreps = []
            line = inputfile.next()
            while line.strip() != "":
                self.irreps.append(line.split())
                line = inputfile.next()

        if line[4:13] == 'Molecule:':
            info = line.split()
            if info[1] == 'UNrestricted':
                self.unrestrictedflag = True

        if line[1:6] == "ATOMS":
        # Find the number of atoms and their atomic numbers
        # Also extract the starting coordinates (for a GeoOpt anyway)
            self.updateprogress(inputfile, "Attributes", self.cupdate)

            self.atomnos = []
            self.atomcoords = []
            self.coreelectrons = []

            underline = inputfile.next()  #clear pointless lines
            label1 = inputfile.next()     # 
            label2 = inputfile.next()     #
            line = inputfile.next()
            atomcoords = []
            while len(line)>2: #ensure that we are reading no blank lines
                info = line.split()
                element = info[1].split('.')[0]
                self.atomnos.append(self.table.number[element])
                atomcoords.append(map(float, info[2:5]))
                self.coreelectrons.append(int(float(info[5]) - float(info[6])))
                line = inputfile.next()
            self.atomcoords.append(atomcoords)

            self.natom = len(self.atomnos)
            self.atomnos = numpy.array(self.atomnos, "i")

        if line[1:10] == "FRAGMENTS":
            header = inputfile.next()

            self.frags = []
            self.fragnames = []

            line = inputfile.next()
            while len(line) > 2: #ensure that we are reading no blank lines
                info = line.split()

                if len(info) == 7: #fragment name is listed here
                    self.fragnames.append("%s_%s"%(info[1],info[0]))
                    self.frags.append([])
                    self.frags[-1].append(int(info[2]) - 1)

                elif len(info) == 5: #add atoms into last fragment
                    self.frags[-1].append(int(info[0]) - 1)

                line = inputfile.next()

        # Extract charge
        if line[1:11] == "Net Charge":
            self.charge = int(line.split()[2])
            line = inputfile.next()
            if len(line.strip()):
                #  Spin polar: 1 (Spin_A minus Spin_B electrons)
                self.mult = int(line.split()[2]) + 1
                 # (Not sure about this for higher multiplicities)
            else:
                self.mult = 1

        if line[1:22] == "S C F   U P D A T E S":
        # find targets for SCF convergence

            if not hasattr(self,"scftargets"):
                self.scftargets = []

            #underline, blank, nr
            for i in range(3):
                inputfile.next()

            line = inputfile.next()
            self.SCFconv = float(line.split()[-1])
            line = inputfile.next()
            self.sconv2 = float(line.split()[-1])

        if line[1:11] == "CYCLE    1":

            self.updateprogress(inputfile, "QM convergence", self.fupdate)

            newlist = []
            line = inputfile.next()

            if not hasattr(self,"geovalues"):
                # This is the first SCF cycle
                self.scftargets.append([self.sconv2*10, self.sconv2])
            elif self.finalgeometry in [self.GETLAST, self.NOMORE]:
                # This is the final SCF cycle
                self.scftargets.append([self.SCFconv*10, self.SCFconv])
            else:
                # This is an intermediate SCF cycle
                oldscftst = self.scftargets[-1][1]
                grdmax = self.geovalues[-1][1]
                scftst = max(self.SCFconv, min(oldscftst, grdmax/30, 10**(-self.accint)))
                self.scftargets.append([scftst*10, scftst])

            while line.find("SCF CONVERGED") == -1 and line.find("SCF not fully converged, result acceptable") == -1 and line.find("SCF NOT CONVERGED") == -1:
                if line[4:12] == "SCF test":
                    if not hasattr(self, "scfvalues"):
                        self.scfvalues = []

                    info = line.split()
                    newlist.append([float(info[4]), abs(float(info[6]))])
                try:
                    line = inputfile.next()
                except StopIteration: #EOF reached?
                    self.logger.warning("SCF did not converge, so attributes may be missing")
                    break            

            if line.find("SCF not fully converged, result acceptable") > 0:
                self.logger.warning("SCF not fully converged, results acceptable")

            if line.find("SCF NOT CONVERGED") > 0:
                self.logger.warning("SCF did not converge! moenergies and mocoeffs are unreliable")

            if hasattr(self, "scfvalues"):
                self.scfvalues.append(newlist)

        # Parse SCF energy for SP calcs from bonding energy decomposition section.
        # It seems ADF does not print it earlier for SP calculations.
        # If it does (does it?), parse that instead.
        # Check that scfenergies does not exist, because gopt runs also print this,
        #   repeating the values in the last "Geometry Convergence Tests" section.
        if "Total Bonding Energy:" in line:
            if not hasattr(self, "scfenergies"):
                energy = utils.convertor(float(line.split()[3]), "hartree", "eV")
                self.scfenergies = [energy]            

        if line[51:65] == "Final Geometry":
            self.finalgeometry = self.GETLAST

        if line[1:24] == "Coordinates (Cartesian)" and self.finalgeometry in [self.NOTFOUND, self.GETLAST]:
            # Get the coordinates from each step of the GeoOpt
            if not hasattr(self, "atomcoords"):
                self.atomcoords = []
            equals = inputfile.next()
            blank = inputfile.next()
            title = inputfile.next()
            title = inputfile.next()
            hyphens = inputfile.next()

            atomcoords = []
            line = inputfile.next()
            while line != hyphens:
                atomcoords.append(map(float, line.split()[5:8]))
                line = inputfile.next()
            self.atomcoords.append(atomcoords)
            if self.finalgeometry == self.GETLAST: # Don't get any more coordinates
                self.finalgeometry = self.NOMORE

        if line[1:27] == 'Geometry Convergence Tests':
        # Extract Geometry convergence information
            if not hasattr(self, "geotargets"):
                self.geovalues = []
                self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0], "d")
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
            equals = inputfile.next()
            blank = inputfile.next()
            line = inputfile.next()
            temp = inputfile.next().strip().split()
            self.scfenergies.append(utils.convertor(float(temp[-1]), "hartree", "eV"))
            for i in range(6):
                line = inputfile.next()
            values = []
            for i in range(5):
                temp = inputfile.next().split()
                self.geotargets[i] = float(temp[-3])
                values.append(float(temp[-4]))
            self.geovalues.append(values)

        if line[1:27] == 'General Accuracy Parameter':
            # Need to know the accuracy of the integration grid to
            # calculate the scftarget...note that it changes with time
            self.accint = float(line.split()[-1])

        if line.find('Orbital Energies, per Irrep and Spin') > 0 and not hasattr(self, "mosyms") and self.nosymflag and not self.unrestrictedflag:
        #Extracting orbital symmetries and energies, humos for nosym case
        #Should only be for restricted case because there is a better text block for unrestricted and nosym

            self.mosyms = [[]]

            self.moenergies = [[]]

            underline = inputfile.next()
            header = inputfile.next()
            underline = inputfile.next()
            label = inputfile.next()
            line = inputfile.next()

            info = line.split()

            if not info[0] == '1':
                self.logger.warning("MO info up to #%s is missing" % info[0])

            #handle case where MO information up to a certain orbital are missing
            while int(info[0]) - 1 != len(self.moenergies[0]):
                self.moenergies[0].append(99999)
                self.mosyms[0].append('A')

            humoA = None

            while len(line) > 10:
                info = line.split()
                self.mosyms[0].append('A')
                self.moenergies[0].append(utils.convertor(float(info[2]), 'hartree', 'eV'))
                if info[1] == '0.000' and not hasattr(self, 'humos'):
                    self.humos = [len(self.moenergies[0]) - 2]
                line = inputfile.next()

            self.moenergies = [numpy.array(self.moenergies[0], "d")]
            self.humos = numpy.array(self.humos, "i")

        if line[1:29] == 'Orbital Energies, both Spins' and not hasattr(self, "mosyms") and self.nosymflag and self.unrestrictedflag:
        #Extracting orbital symmetries and energies, humos for nosym case
        #should only be here if unrestricted and nosym

            self.mosyms = [[], []]

            moenergies = [[], []]

            underline = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()
            underline = inputfile.next()
            line = inputfile.next()

            humoa = 0
            humob = None

            while len(line) > 5:
                info = line.split()
                if info[2] == 'A': 
                    self.mosyms[0].append('A')
                    moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
                    if info[3] != '0.00':
                        humoa = len(moenergies[0]) - 1
                elif info[2] == 'B':
                    self.mosyms[1].append('A')
                    moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
                    if info[3] != '0.00':
                        humob = len(moenergies[1]) - 1
                else:
                    print "Error reading line: %s" % line

                line = inputfile.next()

            self.moenergies = [numpy.array(x, "d") for x in moenergies]
            self.humos = numpy.array([humoa, humob], "i")


        if line[1:29] == 'Orbital Energies, all Irreps' and not hasattr(self, "mosyms"):
        #Extracting orbital symmetries and energies, humos
            self.mosyms = [[]]
            self.symlist = {}

            self.moenergies = [[]]

            underline = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()
            underline2 = inputfile.next()
            line = inputfile.next()

            humoa = None
            humob = None

            #multiple = {'E':2, 'T':3, 'P':3, 'D':5}
            # The above is set if there are no special irreps
            names = [irrep[0].split(':')[0] for irrep in self.irreps]
            counts = [len(irrep) for irrep in self.irreps]
            multiple = dict(zip(names, counts))
            irrepspecies = {}
            for n in range(len(names)):
                indices = range(counts[n])
                subspecies = self.irreps[n]
                irrepspecies[names[n]] = dict(zip(indices, subspecies))

            while line.strip():
                info = line.split()
                if len(info) == 5: #this is restricted
                    #count = multiple.get(info[0][0],1)
                    count = multiple.get(info[0],1)
                    for repeat in range(count): # i.e. add E's twice, T's thrice
                        self.mosyms[0].append(self.normalisesym(info[0]))
                        self.moenergies[0].append(utils.convertor(float(info[3]), 'hartree', 'eV'))

                        sym = info[0]
                        if count > 1: # add additional sym label
                            sym = self.normalisedegenerates(info[0],repeat,ndict=irrepspecies)

                        try:
                            self.symlist[sym][0].append(len(self.moenergies[0])-1)
                        except KeyError:
                            self.symlist[sym]=[[]]
                            self.symlist[sym][0].append(len(self.moenergies[0])-1)

                    if info[2] == '0.00' and not hasattr(self, 'humos'):
                        self.humos = [len(self.moenergies[0]) - (count + 1)] #count, because need to handle degenerate cases
                    line = inputfile.next()
                elif len(info) == 6: #this is unrestricted
                    if len(self.moenergies) < 2: #if we don't have space, create it
                        self.moenergies.append([])
                        self.mosyms.append([])
#                    count = multiple.get(info[0][0], 1)
                    count = multiple.get(info[0], 1)
                    if info[2] == 'A':
                        for repeat in range(count): # i.e. add E's twice, T's thrice
                            self.mosyms[0].append(self.normalisesym(info[0]))
                            self.moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV'))

                            sym = info[0]
                            if count > 1: #add additional sym label
                                sym = self.normalisedegenerates(info[0],repeat)

                            try:
                                self.symlist[sym][0].append(len(self.moenergies[0])-1)
                            except KeyError:
                                self.symlist[sym]=[[],[]]
                                self.symlist[sym][0].append(len(self.moenergies[0])-1)

                        if info[3] == '0.00' and humoa == None:
                            humoa = len(self.moenergies[0]) - (count + 1) #count because degenerate cases need to be handled

                    if info[2] == 'B':
                        for repeat in range(count): # i.e. add E's twice, T's thrice
                            self.mosyms[1].append(self.normalisesym(info[0]))
                            self.moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV'))

                            sym = info[0]
                            if count > 1: #add additional sym label
                                sym = self.normalisedegenerates(info[0],repeat)

                            try:
                                self.symlist[sym][1].append(len(self.moenergies[1])-1)
                            except KeyError:
                                self.symlist[sym]=[[],[]]
                                self.symlist[sym][1].append(len(self.moenergies[1])-1)

                        if info[3] == '0.00' and humob == None:
                            humob = len(self.moenergies[1]) - (count + 1)

                    line = inputfile.next()

                else: #different number of lines
                    print "Error", info

            if len(info) == 6: #still unrestricted, despite being out of loop
                self.humos = [humoa, humob]

            self.moenergies = [numpy.array(x, "d") for x in self.moenergies]
            self.humos = numpy.array(self.humos, "i")

        if line[1:28] == "Vibrations and Normal Modes":
            # Section on extracting vibdisps
            # Also contains vibfreqs, but these are extracted in the
            # following section (see below)
            self.vibdisps = []
            equals = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()
            header = inputfile.next()
            blank = inputfile.next()
            blank = inputfile.next()

            freqs = inputfile.next()
            while freqs.strip()!="":
                minus = inputfile.next()
                p = [ [], [], [] ]
                for i in range(len(self.atomnos)):
                    broken = map(float, inputfile.next().split()[1:])
                    for j in range(0, len(broken), 3):
                        p[j/3].append(broken[j:j+3])
                self.vibdisps.extend(p[:(len(broken)/3)])
                blank = inputfile.next()
                blank = inputfile.next()
                freqs = inputfile.next()
            self.vibdisps = numpy.array(self.vibdisps, "d")

        if line[1:24] == "List of All Frequencies":
        # Start of the IR/Raman frequency section
            self.updateprogress(inputfile, "Frequency information", self.fupdate)

        #                 self.vibsyms = [] # Need to look into this a bit more
            self.vibirs = []
            self.vibfreqs = []
            for i in range(8):
                line = inputfile.next()
            line = inputfile.next().strip()
            while line:
                temp = line.split()
                self.vibfreqs.append(float(temp[0]))                    
                self.vibirs.append(float(temp[2])) # or is it temp[1]?
                line = inputfile.next().strip()
            self.vibfreqs = numpy.array(self.vibfreqs, "d")
            self.vibirs = numpy.array(self.vibirs, "d")
            if hasattr(self, "vibramans"):
                self.vibramans = numpy.array(self.vibramans, "d")


        #cuem**************************************************************************************************************8
        #delete this after new implementation using smat, eigvec print,eprint?
        if line[1:49] == "Total nr. of (C)SFOs (summation over all irreps)":
        # Extract the number of basis sets
            self.nbasis = int(line.split(":")[1].split()[0])

        # now that we're here, let's extract aonames

            self.fonames = []
            self.start_indeces = {}

            blank = inputfile.next()
            note = inputfile.next()
            symoffset = 0

            blank = inputfile.next() 
            blank = inputfile.next()
            if len(blank) > 2: #fix for ADF2006.01 as it has another note
                blank = inputfile.next()
                blank = inputfile.next()
            blank = inputfile.next()

            self.nosymreps = []
            while len(self.fonames) < self.nbasis:

                symline = inputfile.next()
                sym = symline.split()[1]
                line = inputfile.next()
                num = int(line.split(':')[1].split()[0])
                self.nosymreps.append(num)

                #read until line "--------..." is found
                while line.find('-----') < 0:
                    line = inputfile.next()

                line = inputfile.next() # the start of the first SFO

                while len(self.fonames) < symoffset + num:
                    info = line.split()

                    #index0 index1 occ2 energy3/4 fragname5 coeff6 orbnum7 orbname8 fragname9
                    if not sym in self.start_indeces.keys():
                    #have we already set the start index for this symmetry?
                        self.start_indeces[sym] = int(info[1])

                    orbname = info[8]
                    orbital = info[7] + orbname.replace(":", "")

                    fragname = info[5]
                    frag = fragname + info[9]

                    coeff = float(info[6])

                    line = inputfile.next()
                    while line.strip() and not line[:7].strip(): # while it's the same SFO
                        # i.e. while not completely blank, but blank at the start
                        info = line[43:].split()
                        if len(info)>0: # len(info)==0 for the second line of dvb_ir.adfout
                            frag += "+" + fragname + info[-1]
                            coeff = float(info[-4])
                            if coeff < 0:
                                orbital += '-' + info[-3] + info[-2].replace(":", "")
                            else:
                                orbital += '+' + info[-3] + info[-2].replace(":", "")
                        line = inputfile.next()
                    # At this point, we are either at the start of the next SFO or at
                    # a blank line...the end

                    self.fonames.append("%s_%s" % (frag, orbital))
                symoffset += num

                # blankline blankline
                inputfile.next(); inputfile.next()

        if line[1:32] == "S F O   P O P U L A T I O N S ,":
        #Extract overlap matrix

            self.fooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")

            symoffset = 0

            for nosymrep in self.nosymreps:

                line = inputfile.next()
                while line.find('===') < 10: #look for the symmetry labels
                    line = inputfile.next()
                #blank blank text blank col row
                for i in range(6):
                    inputfile.next()

                base = 0
                while base < nosymrep: #have we read all the columns?

                    for i in range(nosymrep - base):

                        self.updateprogress(inputfile, "Overlap", self.fupdate)
                        line = inputfile.next()
                        parts = line.split()[1:]
                        for j in range(len(parts)):
                            k = float(parts[j])
                            self.fooverlaps[base + symoffset + j, base + symoffset +i] = k
                            self.fooverlaps[base + symoffset + i, base + symoffset + j] = k

                    #blank, blank, column
                    for i in range(3):
                        inputfile.next()

                    base += 4

                symoffset += nosymrep
                base = 0

# The commented code below makes the atombasis attribute based on the BAS function in ADF,
#   but this is probably not so useful, since SFOs are used to build MOs in ADF.
#        if line[1:54] == "BAS: List of all Elementary Cartesian Basis Functions":
#
#            self.atombasis = []
#
#            # There will be some text, followed by a line:
#            #       (power of) X  Y  Z  R     Alpha  on Atom
#            while not line[1:11] == "(power of)":
#                line = inputfile.next()
#            dashes = inputfile.next()
#            blank = inputfile.next()
#            line = inputfile.next()
#            # There will be two blank lines when there are no more atom types.
#            while line.strip() != "":
#                atoms = [int(i)-1 for i in line.split()[1:]]
#                for n in range(len(atoms)):
#                    self.atombasis.append([])
#                dashes = inputfile.next()
#                line = inputfile.next()
#                while line.strip() != "":
#                    indices = [int(i)-1 for i in line.split()[5:]]
#                    for i in range(len(indices)):
#                        self.atombasis[atoms[i]].append(indices[i])
#                    line = inputfile.next()
#                line = inputfile.next()

        if line[48:67] == "SFO MO coefficients":

            self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d")]
            spin = 0
            symoffset = 0
            lastrow = 0

            # Section ends with "1" at the beginning of a line.
            while line[0] != "1":
                line = inputfile.next()

                # If spin is specified, then there will be two coefficient matrices. 
                if line.strip() == "***** SPIN 1 *****":
                    self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d"),
                                     numpy.zeros((self.nbasis, self.nbasis), "d")]

                # Bump up the spin.
                if line.strip() == "***** SPIN 2 *****":
                    spin = 1
                    symoffset = 0
                    lastrow = 0

                # Next symmetry.
                if line.strip()[:4] == "=== ":
                    sym = line.split()[1]
                    if self.nosymflag:
                        aolist = range(self.nbasis)
                    else:
                        aolist = self.symlist[sym][spin]
                    # Add to the symmetry offset of AO ordering.
                    symoffset += lastrow

                # Blocks with coefficient always start with "MOs :".
                if line[1:6] == "MOs :":
                    # Next line has the MO index contributed to.
                    monumbers = [int(n) for n in line[6:].split()]
                    occup = inputfile.next()
                    label = inputfile.next()
                    line = inputfile.next()
                    # The table can end with a blank line or "1".
                    row = 0
                    while not line.strip() in ["", "1"]:
                        info = line.split()

                        if int(info[0]) < self.start_indeces[sym]:
                        #check to make sure we aren't parsing CFs
                            line = inputfile.next()
                            continue

                        self.updateprogress(inputfile, "Coefficients", self.fupdate)
                        row += 1
                        coeffs = [float(x) for x in info[1:]]
                        moindices = [aolist[n-1] for n in monumbers]
                        # The AO index is 1 less than the row.
                        aoindex = symoffset + row - 1
                        for i in range(len(monumbers)):
                            self.mocoeffs[spin][moindices[i],aoindex] = coeffs[i]
                        line = inputfile.next()
                    lastrow = row

        if line[4:53] == "Final excitation energies from Davidson algorithm":

            # move forward in the file past some algorithm info

            # *   Final excitation energies from Davidson algorithm                    *
            # *                                                                        *
            # **************************************************************************

            #     Number of loops in Davidson routine     =   20                    
            #     Number of matrix-vector multiplications =   24                    
            #     Type of excitations = SINGLET-SINGLET 

            inputfile.next(); inputfile.next(); inputfile.next()
            inputfile.next(); inputfile.next(); inputfile.next()
            inputfile.next(); inputfile.next()

            symm = self.normalisesym(inputfile.next().split()[1])

            # move forward in file past some more txt and header info

            # Excitation energies E in a.u. and eV, dE wrt prev. cycle,
            # oscillator strengths f in a.u.

            # no.  E/a.u.        E/eV      f           dE/a.u.
            # -----------------------------------------------------

            inputfile.next(); inputfile.next(); inputfile.next()
            inputfile.next(); inputfile.next(); inputfile.next()

            # now start parsing etenergies and etoscs

            etenergies = []
            etoscs = []
            etsyms = []

            line = inputfile.next()
            while len(line) > 2:
                info = line.split()
                etenergies.append(utils.convertor(float(info[2]), "eV", "cm-1"))
                etoscs.append(float(info[3]))
                etsyms.append(symm)
                line = inputfile.next()

            # move past next section
            while line[1:53] != "Major MO -> MO transitions for the above excitations":
                line = inputfile.next()

            # move past headers

            #  Excitation  Occupied to virtual  Contribution                         
            #   Nr.          orbitals           weight        contribibutions to      
            #                                   (sum=1) transition dipole moment   
            #                                             x       y       z       

            inputfile.next(), inputfile.next(), inputfile.next()
            inputfile.next(), inputfile.next(), inputfile.next()

            # before we start handling transitions, we need
            # to create mosyms with indices
            # only restricted calcs are possible in ADF

            counts = {}
            syms = []
            for mosym in self.mosyms[0]:
                if counts.keys().count(mosym) == 0:
                    counts[mosym] = 1
                else:
                    counts[mosym] += 1

                syms.append(str(counts[mosym]) + mosym)

            import re
            etsecs = []
            printed_warning = False 

            for i in range(len(etenergies)):
                etsec = []
                line = inputfile.next()
                info = line.split()
                while len(info) > 0:

                    match = re.search('[^0-9]', info[1])
                    index1 = int(info[1][:match.start(0)])
                    text = info[1][match.start(0):]
                    symtext = text[0].upper() + text[1:]
                    sym1 = str(index1) + self.normalisesym(symtext)

                    match = re.search('[^0-9]', info[3])
                    index2 = int(info[3][:match.start(0)])
                    text = info[3][match.start(0):]
                    symtext = text[0].upper() + text[1:]
                    sym2 = str(index2) + self.normalisesym(symtext)

                    try:
                        index1 = syms.index(sym1)
                    except ValueError:
                        if not printed_warning:
                            self.logger.warning("Etsecs are not accurate!")
                            printed_warning = True

                    try:
                        index2 = syms.index(sym2)
                    except ValueError:
                        if not printed_warning:
                            self.logger.warning("Etsecs are not accurate!")
                            printed_warning = True

                    etsec.append([(index1, 0), (index2, 0), float(info[4])])

                    line = inputfile.next()
                    info = line.split()

                etsecs.append(etsec)


            if not hasattr(self, "etenergies"):
                self.etenergies = etenergies
            else:
                self.etenergies += etenergies

            if not hasattr(self, "etoscs"):
                self.etoscs = etoscs
            else:
                self.etoscs += etoscs

            if not hasattr(self, "etsyms"):
                self.etsyms = etsyms
            else:
                self.etsyms += etsyms

            if not hasattr(self, "etsecs"):
                self.etsecs = etsecs
            else:
                self.etsecs += etsecs

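The excitation parser above splits tokens such as '30A1' by locating the first non-digit with re.search('[^0-9]', ...) and slicing at match.start(0). A small sketch of the same idea; split_orbital_label is a hypothetical helper and assumes the token always ends with a letter label, as the parser above does.

import re

def split_orbital_label(token):
    # Locate the first non-digit character, then slice the token into a
    # numeric index and a symmetry label, as the excitation parser above
    # does with re.search('[^0-9]', info[1]) and match.start(0).
    # Assumes the token always ends with a letter label.
    match = re.search(r'[^0-9]', token)
    index = int(token[:match.start(0)])
    label = token[match.start(0):]
    return index, label[0].upper() + label[1:]

print(split_orbital_label("30a1"))   # (30, 'A1')
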
Example 74

Project: pywikibot-core Source File: solve_disambiguation.py
    def treat(self, refPage, disambPage):
        """Treat a page.

        @param disambPage: the disambiguation page or redirect we don't want
            anything to link to
        @type disambPage: pywikibot.Page
        @param refPage: a page linking to disambPage
        @type refPage: pywikibot.Page
        @return: False if the user pressed q to completely quit the program,
            True otherwise
        @rtype: bool

        """
        # TODO: break this function up into subroutines!

        self.current_page = refPage
        include = False
        unlink_counter = 0
        new_targets = []
        try:
            text = refPage.get()
            ignoreReason = self.checkContents(text)
            if ignoreReason:
                pywikibot.output('\n\nSkipping %s because it contains %s.\n\n'
                                 % (refPage.title(), ignoreReason))
            else:
                include = True
        except pywikibot.IsRedirectPage:
            pywikibot.output(u'%s is a redirect to %s'
                             % (refPage.title(), disambPage.title()))
            if disambPage.isRedirectPage():
                target = self.alternatives[0]
                if pywikibot.input_yn(u'Do you want to make redirect %s point '
                                      'to %s?' % (refPage.title(), target),
                                      default=False, automatic_quit=False):
                    redir_text = '#%s [[%s]]' \
                                 % (self.mysite.redirect(), target)
                    try:
                        refPage.put_async(redir_text, summary=self.comment)
                    except pywikibot.PageNotSaved as error:
                        pywikibot.output(u'Page not saved: %s' % error.args)
            else:
                choice = pywikibot.input_choice(
                    u'Do you want to work on pages linking to %s?'
                    % refPage.title(),
                    [('yes', 'y'), ('no', 'n'), ('change redirect', 'c')], 'n',
                    automatic_quit=False)
                if choice == 'y':
                    gen = ReferringPageGeneratorWithIgnore(
                        refPage, self.primary, main_only=self.main_only
                    )
                    preloadingGen = pagegenerators.PreloadingGenerator(gen)
                    for refPage2 in preloadingGen:
                        # run until the user selected 'quit'
                        if not self.treat(refPage2, refPage):
                            break
                elif choice == 'c':
                    text = refPage.get(get_redirect=True)
                    include = "redirect"
        except pywikibot.NoPage:
            pywikibot.output(
                u'Page [[%s]] does not seem to exist?! Skipping.'
                % refPage.title())
            include = False
        if include in (True, "redirect"):
            # make a backup of the original text so we can show the changes later
            original_text = text
            n = 0
            curpos = 0
            dn = False
            edited = False
            # This loop will run until we have finished the current page
            while True:
                m = self.linkR.search(text, pos=curpos)
                if not m:
                    if n == 0:
                        pywikibot.output(u"No changes necessary in %s"
                                         % refPage.title())
                        return True
                    else:
                        # stop loop and save page
                        break
                # Make sure that next time around we will not find this same hit.
                curpos = m.start() + 1
                try:
                    foundlink = pywikibot.Link(m.group('title'),
                                               disambPage.site)
                    foundlink.parse()
                except pywikibot.Error:
                    continue
                # ignore interwiki links
                if foundlink.site != disambPage.site:
                    continue
                # Check whether the link found is to disambPage.
                try:
                    if foundlink.canonical_title() != disambPage.title():
                        continue
                except pywikibot.Error:
                    # must be a broken link
                    pywikibot.log(u"Invalid link [[%s]] in page [[%s]]"
                                  % (m.group('title'), refPage.title()))
                    continue
                n += 1
                # how many bytes should be displayed around the current link
                context = 60
                # check if there's a dn-template here already
                if (self.dnSkip and self.dn_template_str and
                        self.dn_template_str[:-2] in text[m.end():m.end() +
                                                          len(self.dn_template_str) + 8]):
                    continue

                edit = EditOption('edit page', 'e', text, m.start(), disambPage.title())
                context_option = HighlightContextOption(
                    'more context', 'm', text, 60, start=m.start(), end=m.end())
                context_option.before_question = True

                options = [ListOption(self.alternatives, ''),
                           ListOption(self.alternatives, 'r'),
                           StandardOption('skip link', 's'),
                           edit,
                           StandardOption('next page', 'n'),
                           StandardOption('unlink', 'u')]
                if self.dn_template_str:
                    # '?', '/' for old choice
                    options += [AliasOption('tag template %s' % self.dn_template_str,
                                            ['t', '?', '/'])]
                options += [context_option]
                if not edited:
                    options += [ShowPageOption('show disambiguation page', 'd',
                                               m.start(), disambPage)]
                options += [
                    OutputProxyOption('list', 'l',
                                      SequenceOutputter(self.alternatives)),
                    AddAlternativeOption('add new', 'a',
                                         SequenceOutputter(self.alternatives))]
                if edited:
                    options += [StandardOption('save in this form', 'x')]

                # TODO: Output context on each question
                answer = pywikibot.input_choice('Option', options,
                                                default=self.always)
                if answer == 'x':
                    assert edited, 'invalid option before editing'
                    break
                elif answer == 's':
                    n -= 1  # TODO what's this for?
                    continue
                elif answer == 'e':
                    text = edit.new_text
                    edited = True
                    curpos = 0
                    continue
                elif answer == 'n':
                    # skip this page
                    if self.primary:
                        # If run with the -primary argument, skip this
                        # occurrence next time.
                        self.primaryIgnoreManager.ignore(refPage)
                    return True

                # The link looks like this:
                # [[page_title|link_text]]trailing_chars
                page_title = m.group('title')
                link_text = m.group('label')

                if not link_text:
                    # or like this: [[page_title]]trailing_chars
                    link_text = page_title
                if m.group('section') is None:
                    section = ''
                else:
                    section = m.group('section')
                trailing_chars = m.group('linktrail')
                if trailing_chars:
                    link_text += trailing_chars
                if answer == 't':
                    assert self.dn_template_str
                    # small chunk of text to search
                    search_text = text[m.end():m.end() + context]
                    # figure out where the link (and sentence) ends, put note
                    # there
                    end_of_word_match = re.search(r'\s', search_text)
                    if end_of_word_match:
                        position_split = end_of_word_match.start(0)
                    else:
                        position_split = 0
                    # insert dab needed template
                    text = (text[:m.end() + position_split] +
                            self.dn_template_str +
                            text[m.end() + position_split:])
                    dn = True
                    continue
                elif answer == 'u':
                    # unlink - we remove the section if there's any
                    text = text[:m.start()] + link_text + text[m.end():]
                    unlink_counter += 1
                    continue
                else:
                    # Check that no option from above was missed
                    assert isinstance(answer, tuple), 'only tuple answer left.'
                    assert answer[0] in ['r', ''], 'only valid tuple answers.'
                    if answer[0] == 'r':
                        # we want to throw away the original link text
                        replaceit = link_text == page_title
                    elif include == "redirect":
                        replaceit = True
                    else:
                        replaceit = False

                    new_page_title = answer[1]
                    repPl = pywikibot.Page(pywikibot.Link(new_page_title,
                                                          disambPage.site))
                    if (new_page_title[0].isupper() or
                            link_text[0].isupper()):
                        new_page_title = repPl.title()
                    else:
                        new_page_title = repPl.title()
                        new_page_title = first_lower(new_page_title)
                    if new_page_title not in new_targets:
                        new_targets.append(new_page_title)
                    if replaceit and trailing_chars:
                        newlink = "[[%s%s]]%s" % (new_page_title,
                                                  section,
                                                  trailing_chars)
                    elif replaceit or (new_page_title == link_text and
                                       not section):
                        newlink = "[[%s]]" % new_page_title
                    # check if we can create a link with trailing characters
                    # instead of a pipelink
                    elif (
                        (len(new_page_title) <= len(link_text)) and
                        (firstcap(link_text[:len(new_page_title)]) == firstcap(new_page_title)) and
                        (re.sub(self.trailR, '', link_text[len(new_page_title):]) == '') and
                        (not section)
                    ):
                        newlink = "[[%s]]%s" \
                                  % (link_text[:len(new_page_title)],
                                     link_text[len(new_page_title):])
                    else:
                        newlink = "[[%s%s|%s]]" \
                                  % (new_page_title, section, link_text)
                    text = text[:m.start()] + newlink + text[m.end():]
                    continue

                pywikibot.output(text[max(0, m.start() - 30):m.end() + 30])
            if text == original_text:
                pywikibot.output(u'\nNo changes have been made:\n')
            else:
                pywikibot.output(u'\nThe following changes have been made:\n')
                pywikibot.showDiff(original_text, text)
                pywikibot.output(u'')
                # save the page
                self.setSummaryMessage(disambPage, new_targets, unlink_counter,
                                       dn)
                try:
                    refPage.put_async(text, summary=self.comment)
                except pywikibot.LockedPage:
                    pywikibot.output(u'Page not saved: page is locked')
                except pywikibot.PageNotSaved as error:
                    pywikibot.output(u'Page not saved: %s' % error.args)
        return True

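When the user chooses to tag a link, the code above uses re.search(r'\s', search_text) to find the first whitespace after the link and inserts the dab-needed template at that position. A minimal sketch of that positioning logic, with a hypothetical insert_after_word helper and sample wikitext:

import re

def insert_after_word(text, link_end, template):
    # Insert 'template' at the first whitespace found after 'link_end'
    # (falling back to 'link_end' itself when no whitespace appears in a
    # 60-character window), the way the 't' answer above positions the
    # disambiguation-needed template after a link.
    search_text = text[link_end:link_end + 60]
    end_of_word = re.search(r'\s', search_text)
    split = end_of_word.start(0) if end_of_word else 0
    return text[:link_end + split] + template + text[link_end + split:]

sample = "See [[Mercury]] for details."
print(insert_after_word(sample, sample.index("]]") + 2, "{{dn}}"))
# See [[Mercury]]{{dn}} for details.
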
Example 75

Project: py-zfec Source File: scriptsetup.py
def do_scriptsetup(allusers=False):
    print "\nSetting up environment to run scripts for %s..." % (allusers and "all users" or "the current user")

    from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, HKEY_CLASSES_ROOT, \
        REG_SZ, REG_EXPAND_SZ, KEY_QUERY_VALUE, KEY_SET_VALUE, \
        OpenKey, CreateKey, QueryValueEx, SetValueEx, FlushKey, CloseKey

    USER_ENV = "Environment"
    try:
        user_env = OpenKey(HKEY_CURRENT_USER, USER_ENV, 0, KEY_QUERY_VALUE)
    except WindowsError, e:
        raise DistutilsSetupError("I could not read the user environment from the registry.\n%r" % (e,))

    SYSTEM_ENV = "SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment"
    try:
        system_env = OpenKey(HKEY_LOCAL_MACHINE, SYSTEM_ENV, 0, KEY_QUERY_VALUE)
    except WindowsError, e:
        raise DistutilsSetupError("I could not read the system environment from the registry.\n%r" % (e,))


    # HKEY_CLASSES_ROOT is a merged view that would only confuse us.
    # <http://technet.microsoft.com/en-us/library/cc739822(WS.10).aspx>

    USER_CLASSES = "SOFTWARE\\Classes"
    try:
        user_classes = OpenKey(HKEY_CURRENT_USER, USER_CLASSES, 0, KEY_QUERY_VALUE)
    except WindowsError, e:
        raise DistutilsSetupError("I could not read the user filetype associations from the registry.\n%r" % (e,))

    SYSTEM_CLASSES = "SOFTWARE\\Classes"
    try:
        system_classes = OpenKey(HKEY_LOCAL_MACHINE, SYSTEM_CLASSES, 0, KEY_QUERY_VALUE)
    except WindowsError, e:
        raise DistutilsSetupError("I could not read the system filetype associations from the registry.\n%r" % (e,))


    def query(key, subkey, what):
        try:
            (value, type) = QueryValueEx(key, subkey)
        except WindowsError, e:
            if e.winerror == 2:  # not found
                return None
            raise DistutilsSetupError("I could not read %s from the registry.\n%r" % (what, e))

        # It does not matter that we don't expand environment strings, in fact it's better not to.

        if type != REG_SZ and type != REG_EXPAND_SZ:
            raise DistutilsSetupError("I expected the registry entry for %s to have a string type (REG_SZ or REG_EXPAND_SZ), "
                                      "and was flummoxed by it having type code %r." % (what, type))
        return (value, type)


    def open_and_query(key, path, subkey, what):
        try:
            read_key = OpenKey(key, path, 0, KEY_QUERY_VALUE)
        except WindowsError, e:
            if e.winerror == 2:  # not found
                return None
            raise DistutilsSetupError("I could not read %s from the registry because I could not open "
                                      "the parent key.\n%r" % (what, e))

        try:
            return query(read_key, subkey, what)
        finally:
            CloseKey(read_key)


    def update(key_name_path, subkey, desired_value, desired_type, goal, what):
        (key, name, path) = key_name_path

        (old_value, old_type) = open_and_query(key, path, subkey, what) or (None, None)
        if (old_value, old_type) == (desired_value, desired_type):
            print "Already done: %s." % (goal,)
            return False

        try:
            update_key = OpenKey(key, path, 0, KEY_SET_VALUE|KEY_QUERY_VALUE)
        except WindowsError, e:
            if e.winerror != 2:
                raise DistutilsSetupError("I tried to %s, but was not successful because I could not open "
                                          "the registry key %s\\%s for writing.\n%r"
                                          % (goal, name, path, e))
            try:
                update_key = CreateKey(key, path)
            except WindowsError, e:
                raise DistutilsSetupError("I tried to %s, but was not successful because the registry key %s\\%s "
                                          "did not exist, and I was unable to create it.\n%r"
                                          % (goal, name, path, e))

        (new_value, new_type) = (None, None)
        try:
            SetValueEx(update_key, subkey, 0, desired_type, desired_value)
        except WindowsError, e:
            raise DistutilsSetupError("I tried to %s, but was not able to set the subkey %r under %s\\%s to be %r.\n%r"
                                      % (goal, subkey, name, path, desired_value))
        else:
            (new_value, new_type) = query(update_key, subkey, what) or (None, None)
        finally:
            FlushKey(update_key)
            CloseKey(update_key)

        if (new_value, new_type) != (desired_value, desired_type):
            raise DistutilsSetupError("I tried to %s by setting the subkey %r under %s\\%s to be %r, "
                                      "and the call to SetValueEx succeeded, but the value ended up as "
                                      "%r instead (it was previously %r). Maybe the update was unexpectedly virtualized?"
                                      % (goal, subkey, name, path, desired_value, new_value, old_value))

        print "Done: %s." % (goal,)
        return True


    # Maintenance hazard: 'add_to_environment' and 'associate' use very similar, but not identical logic.

    def add_to_environment(varname, addition, change_allusers):
        changed = False
        what = "the %s environment variable %s" % (change_allusers and "system" or "user", varname)
        goal = "add %s to %s" % (addition, what)

        system_valueandtype = query(system_env, varname, "the system environment variable %s" % (varname,))
        user_valueandtype   = query(user_env,   varname, "the user environment variable %s" % (varname,))

        if change_allusers:
            (value, type) = system_valueandtype or (u'', REG_SZ)
            key_name_path = (HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", SYSTEM_ENV)
        else:
            (value, type) = user_valueandtype or system_valueandtype or (u'', REG_SZ)
            key_name_path = (HKEY_CURRENT_USER, "HKEY_CURRENT_USER", USER_ENV)

        if addition.lower() in value.lower().split(u';'):
            print "Already done: %s." % (goal,)
        else:
            changed |= update(key_name_path, varname, value + u';' + addition, type, goal, what)

        if change_allusers:
            # Also change any overriding environment entry for the current user.
            (user_value, user_type) = user_valueandtype or (u'', REG_SZ)
            split_value = user_value.lower().split(u';')

            if not (addition.lower() in split_value or u'%'+varname.lower()+u'%' in split_value):
                now_what = "the overriding user environment variable %s" % (varname,)
                changed |= update((HKEY_CURRENT_USER, "HKEY_CURRENT_USER", USER_ENV),
                                  varname, user_value + u';' + addition, user_type,
                                  "add %s to %s" % (addition, now_what), now_what)

        return changed


    def associate(ext, target, change_allusers):
        changed = False
        what = "the %s association for %s" % (change_allusers and "system" or "user", ext)
        goal = "associate the filetype %s with %s for %s" % (ext, target, change_allusers and "all users" or "the current user")

        try:
            if change_allusers:
                target_key = OpenKey(HKEY_LOCAL_MACHINE, "%s\\%s" % (SYSTEM_CLASSES, target), 0, KEY_QUERY_VALUE)
            else:
                target_key = OpenKey(HKEY_CLASSES_ROOT, target, 0, KEY_QUERY_VALUE)
        except WindowsError, e:
            raise DistutilsSetupError("I was going to %s, but that won't work because the %s class does not exist in the registry, "
                                      "as far as I can tell.\n%r" % (goal, target, e))
        CloseKey(target_key)

        system_key_name_path = (HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", "%s\\%s" % (SYSTEM_CLASSES, ext))
        user_key_name_path   = (HKEY_CURRENT_USER,  "HKEY_CURRENT_USER",  "%s\\%s" % (USER_CLASSES,   ext))

        system_valueandtype = open_and_query(system_classes, ext, "", "the system association for %s" % (ext,))
        user_valueandtype   = open_and_query(user_classes,   ext, "", "the user association for %s" % (ext,))

        if change_allusers:
            (value, type) = system_valueandtype or (u'', REG_SZ)
            key_name_path = system_key_name_path
        else:
            (value, type) = user_valueandtype or system_valueandtype or (u'', REG_SZ)
            key_name_path = user_key_name_path

        if value == target:
            print "Already done: %s." % (goal,)
        else:
            changed |= update(key_name_path, "", unicode(target), REG_SZ, goal, what)

        if change_allusers:
            # Also change any overriding association for the current user.
            (user_value, user_type) = user_valueandtype or (u'', REG_SZ)

            if user_value != target:
                changed |= update(user_key_name_path, "", unicode(target), REG_SZ,
                                  "associate the filetype %s with %s for the current user " \
                                      "(because the system association is overridden)" % (ext, target),
                                  "the overriding user association for %s" % (ext,))

        return changed


    def broadcast_settingchange(change_allusers):
        print "Broadcasting that the environment has changed, please wait..."

        # <http://support.microsoft.com/kb/104011/en-us>
        # <http://msdn.microsoft.com/en-us/library/ms644952(VS.85).aspx>
        # LRESULT WINAPI SendMessageTimeoutW(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam,
        #                                    UINT fuFlags, UINT uTimeout, PDWORD_PTR lpdwResult);

        try:
            from ctypes import WINFUNCTYPE, POINTER, windll, addressof, c_wchar_p
            from ctypes.wintypes import LONG, HWND, UINT, WPARAM, LPARAM, DWORD

            SendMessageTimeout = WINFUNCTYPE(POINTER(LONG), HWND, UINT, WPARAM, LPARAM, UINT, UINT, POINTER(POINTER(DWORD))) \
                                     (("SendMessageTimeoutW", windll.user32))
            HWND_BROADCAST   = 0xFFFF
            WM_SETTINGCHANGE = 0x001A
            SMTO_ABORTIFHUNG = 0x0002
            SendMessageTimeout(HWND_BROADCAST, WM_SETTINGCHANGE, change_allusers and 1 or 0,
                               addressof(c_wchar_p(u"Environment")), SMTO_ABORTIFHUNG, 5000, None);
        except Exception, e:
            print "Warning: %r" % (e,)


    changed_assoc = associate(".pyscript", "Python.File", allusers)

    changed_env = False
    try:
        changed_env |= add_to_environment("PATHEXT", ".pyscript", allusers)
        changed_env |= add_to_environment("PATHEXT", ".pyw",      allusers)
    finally:
        CloseKey(user_env)
        CloseKey(system_env)

    if changed_assoc or changed_env:
        broadcast_settingchange(allusers)

    if changed_env:
        # whether logout is needed seems to randomly differ between installations
        # of XP, but it is not needed in Vista or later.
        try:
            import platform, re
            need_logout = not re.search(r'^[6-9]|([1-9][0-9]+)\.', platform.version())
        except Exception, e:
            e  # hush pyflakes
            need_logout = True

        if need_logout:
            print """
***********************************************************************
Changes have been made to the persistent environment, but they may not
take effect in this Windows session. Running installed Python scripts
from a Command Prompt may only work after you have logged out and back
in again, or rebooted.
***********************************************************************
"""
        else:
            print """
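
As a minimal, standalone sketch of the version test above (the helper name and the sample version strings are illustrative, not part of py-zfec):

import re

def needs_logout(version_string):
    # The pattern accepts either a leading digit 6-9 (Vista and later) or a
    # multi-digit version component followed by a dot (e.g. "10." on
    # Windows 10); if re.search finds neither, assume an older Windows
    # where a logout is still required.
    return not re.search(r'^[6-9]|([1-9][0-9]+)\.', version_string)

print(needs_logout("5.1.2600"))    # True  -> Windows XP, logout needed
print(needs_logout("6.1.7601"))    # False -> Windows 7
print(needs_logout("10.0.19041"))  # False -> Windows 10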

Example 76

Project: burp-ui Source File: burp1.py
    def __init__(self, server=None, conf=None, dummy=False):
        """The :class:`burpui.misc.backend.burp1.Burp` class provides a consistent
        backend for ``burp-1`` servers.

        It implements the :class:`burpui.misc.backend.interface.BUIbackend` class
        in order to have consistent data whatever backend is used.

        :param server: ``Burp-UI`` server instance in order to access logger
                       and/or some global settings
        :type server: :class:`burpui.server.BUIServer`

        :param conf: Configuration to use
        :type conf: :class:`burpui.config.BUIConfig`

        :param dummy: Does not instantiate the object (used for development
                      purpose)
        :type dummy: boolean
        """
        if dummy:
            return
        self.client_version = None
        self.server_version = None
        self.app = None
        self.zip64 = G_ZIP64
        self.host = G_BURPHOST
        self.port = G_BURPPORT
        self.burpbin = G_BURPBIN
        self.stripbin = G_STRIPBIN
        self.burpconfcli = G_BURPCONFCLI
        self.burpconfsrv = G_BURPCONFSRV
        self.includes = G_INCLUDES
        self.revoke = G_REVOKE
        self.enforce = G_ENFORCE
        self.running = []
        self.defaults = {
            'Burp1': {
                'bport': G_BURPPORT,
                'bhost': G_BURPHOST,
                'burpbin': G_BURPBIN,
                'stripbin': G_STRIPBIN,
                'bconfcli': G_BURPCONFCLI,
                'bconfsrv': G_BURPCONFSRV,
                'tmpdir': G_TMPDIR,
            },
            'Experimental': {
                'zip64': G_ZIP64,
            },
            'Security': {
                'includes': G_INCLUDES,
                'revoke': G_REVOKE,
                'enforce': G_ENFORCE,
            },
        }
        tmpdir = G_TMPDIR
        if conf is not None:
            conf.update_defaults(self.defaults)
            conf.default_section('Burp1')
            self.port = conf.safe_get('bport', 'integer')
            self.host = conf.safe_get('bhost')
            self.burpbin = self._get_binary_path(
                conf,
                'burpbin',
                G_BURPBIN
            )
            self.stripbin = self._get_binary_path(
                conf,
                'stripbin',
                G_STRIPBIN
            )
            confcli = conf.safe_get('bconfcli')
            confsrv = conf.safe_get('bconfsrv')
            tmpdir = conf.safe_get('tmpdir')

            # Experimental options
            self.zip64 = conf.safe_get(
                'zip64',
                'boolean',
                section='Experimental'
            )

            # Security options
            self.includes = conf.safe_get(
                'includes',
                'force_list',
                section='Security'
            )
            self.enforce = conf.safe_get(
                'enforce',
                'boolean',
                section='Security'
            )
            self.revoke = conf.safe_get(
                'revoke',
                'boolean',
                section='Security'
            )

            if confcli and not os.path.isfile(confcli):
                self.logger.warning("The file '%s' does not exist", confcli)
                confcli = None

            if confsrv and not os.path.isfile(confsrv):
                self.logger.warning("The file '%s' does not exist", confsrv)
                confsrv = None

            if self.host not in ['127.0.0.1', '::1']:
                self.logger.warning("Invalid value for 'bhost'. Must be '127.0.0.1' or '::1'. Falling back to '%s'", G_BURPHOST)
                self.host = G_BURPHOST

            self.burpconfcli = confcli
            self.burpconfsrv = confsrv

        if tmpdir and os.path.exists(tmpdir) and not os.path.isdir(tmpdir):
            self.logger.warning("'%s' is not a directory", tmpdir)
            if tmpdir == G_TMPDIR:
                raise IOError("Cannot use '{}' as tmpdir".format(tmpdir))
            tmpdir = G_TMPDIR
            if os.path.exists(tmpdir) and not os.path.isdir(tmpdir):
                raise IOError("Cannot use '{}' as tmpdir".format(tmpdir))
        if tmpdir and not os.path.exists(tmpdir):
            os.makedirs(tmpdir)

        self.tmpdir = tmpdir

        self.parser = Parser(self)

        self.family = Burp._get_inet_family(self.host)
        self._test_burp_server_address(self.host)

        try:
            cmd = [self.burpbin, '-v']
            self.client_version = subprocess.check_output(cmd, universal_newlines=True).rstrip().replace('burp-', '')
        except:
            pass

        try:
            cmd = [self.burpbin, '-a', 'l']
            if self.burpconfcli:
                cmd += ['-c', self.burpconfcli]
            for line in subprocess.check_output(cmd, universal_newlines=True).split('\n'):
                result = re.search(r'^.*Server version:\s+(\d+\.\d+\.\d+)', line)
                if result:
                    self.server_version = result.group(1)
                    break
        except:
            pass

        self.logger.info('burp port: {}'.format(self.port))
        self.logger.info('burp host: {}'.format(self.host))
        self.logger.info('burp binary: {}'.format(self.burpbin))
        self.logger.info('strip binary: {}'.format(self.stripbin))
        self.logger.info('burp conf cli: {}'.format(self.burpconfcli))
        self.logger.info('burp conf srv: {}'.format(self.burpconfsrv))
        self.logger.info('tmpdir: {}'.format(self.tmpdir))
        self.logger.info('zip64: {}'.format(self.zip64))
        self.logger.info('includes: {}'.format(self.includes))
        self.logger.info('enforce: {}'.format(self.enforce))
        self.logger.info('revoke: {}'.format(self.revoke))
        try:
            # make the connection
            self.status()
        except BUIserverException:
            pass
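
A minimal sketch of the server-version parsing above; the command output below is made up to stand in for what 'burp -a l' might print:

import re

sample_output = """status: ok
Server version: 1.4.40
exiting
"""

server_version = None
for line in sample_output.split('\n'):
    result = re.search(r'^.*Server version:\s+(\d+\.\d+\.\d+)', line)
    if result:
        # group(1) is the dotted version captured by the parentheses
        server_version = result.group(1)
        break

print(server_version)  # 1.4.40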

Example 77

Project: netgrph Source File: path.py
def get_routed_path(net1, net2, popt, rtype="NGTREE"):
    """
    Find the routed path between two CIDRs and return all interfaces and
    devices between the two. This query needs optimization.

    - net1 and net2 can be IPs, and it will find the CIDR
    - Uses Neo4j All Shortest Paths on ROUTED Relationships
    - Returns all distinct links along shortest paths along with distance

    """

    rtypes = ('CSV', 'TREE', 'JSON', 'YAML', 'NGTREE')

    if rtype in rtypes:

        if 'l2path' not in popt:
            popt['l2path'] = False
        if 'onepath' not in popt:
            popt['onepath'] = True
        if 'VRF' not in popt:
            popt['VRF'] = "default"
        if 'verbose' not in popt:
            popt['verbose'] = True
        if 'depth' not in popt:
            popt['depth'] = '10'
        


        if popt['verbose']:
            logger.info("Query: Finding Routed Paths (%s --> %s) for %s",
                        net1, net2, nglib.user)

        hopSet = set()

        # Translate IPs to CIDRs
        n1tree = nglib.query.net.get_net(net1, rtype="NGTREE", verbose=popt['verbose'])
        if n1tree:
            net1 = n1tree['data'][0]['Name']

        n2tree = nglib.query.net.get_net(net2, rtype="NGTREE", verbose=popt['verbose'])
        if n2tree:
            net2 = n2tree['data'][0]['Name']

        ngtree = nglib.ngtree.get_ngtree("Path", tree_type="L3-PATH")
        ngtree['Search Depth'] = popt['depth']
        ngtree["Path"] = net1 + " -> " + net2
        ngtree['Name'] = ngtree['Path']

        # Fixup Depth (double routed paths)
        popt['depth'] = str(int(popt['depth']) * 2)

        pathList = []
        pathRec = []

        # Finds all paths, then finds the relationships
        rtrp = nglib.py2neo_ses.cypher.execute(
            'MATCH (sn:Network)-[:ROUTED_BY|ROUTED_STANDBY]-(sr), '
            + '(dn:Network)-[:ROUTED_BY|ROUTED_STANDBY]-(dr), rp = allShortestPaths '
            + '((sr)-[:ROUTED*0..' + popt['depth'] + ']-(dr)) '
            + 'WHERE ALL(v IN rels(rp) WHERE v.vrf = {vrf}) '
            + 'AND sn.cidr =~ {net1} AND dn.cidr =~ {net2}'
            + 'UNWIND nodes(rp) as r1 UNWIND nodes(rp) as r2 '
            + 'MATCH (r1)<-[l1:ROUTED]-(n:Network {vrf:{vrf}})-[l2:ROUTED]->(r2) '
            + 'OPTIONAL MATCH (n)-[:L3toL2]->(v:VLAN) '
            + 'RETURN DISTINCT r1.name AS r1name, l1.gateway AS r1ip, '
            + 'r2.name AS r2name, l2.gateway as r2ip, v.vid AS vid, '
            + 'LENGTH(shortestPath((sn)<-[:ROUTED|ROUTED_BY|ROUTED_STANDBY*0..12]->(r1))) '
            + 'AS distance ORDER BY distance',
            {"net1": net1, "net2": net2, "vrf": popt['VRF']})

        # Empty Query
        if len(rtrp) == 0:
            return ngtree

        allpaths = dict()
        # Load all paths into tuples with distance value
        for rec in rtrp:
            p = (rec["r1name"], rec["r2name"])
            allpaths[p] = rec["distance"]


        # Find tuple with shortest distance (r1, core1) vs (core1, r1)
        # Save to pathRec for second pass of records to populate tree
        for en in allpaths:
            if allpaths[en] < allpaths[tuple(reversed(en))]:
                (r1, r2) = en
                distance = allpaths[en]
                pathRec.append((r1, r2, distance))

        # Sort path records by distance, src router, dst router
        pathRec = sorted(pathRec, key=lambda tup: (tup[2], tup[0], tup[1]))

        # Build Trees and pathList from pathRecs
        for path in pathRec:
            for rec in rtrp:
                if path[0] == rec['r1name'] and path[1] == rec['r2name']:
                    #print(path[0], rec['r1ip'], '-->', path[1], rec['r2ip'])
                    rtree = nglib.ngtree.get_ngtree("Hop", tree_type="L3-HOP")
                    rtree['From Router'] = rec['r1name']
                    rtree['From IP'] = rec['r1ip']
                    rtree['To Router'] = rec['r2name']
                    rtree['To IP'] = rec['r2ip']
                    rtree['VLAN'] = rec['vid']

                    # Calculate hop distance
                    # Distance of 1 is correct, other distances should be:
                    #   ((dist - 1) / 2) + 1
                    distance = rec['distance']
                    if distance != 1:
                        distance = int((distance - 1) / 2) + 1

                    # Save distance
                    rtree['distance'] = distance

                    # Rename rtree
                    rtree['Name'] = "#{:} {:}({:}) -> {:}({:})".format( \
                    distance, rec['r1name'], rec['r1ip'], rec['r2name'], rec['r2ip'])

                    if 'VLAN' in rtree and rtree['VLAN'] != '0':
                        rtree['Name'] = rtree['Name'] + ' [vid:' + str(rtree['VLAN']) + ']'

                    # Add Switchpath if requested
                    if popt['l2path']:
                        spath = get_switched_path(rec['r1name'], rec['r2name'], popt)
                        if spath:
                            for sp in spath['data']:
                                if '_rvlans' in sp:
                                    vrgx = r'[^0-9]*' + rec['vid'] + '[^0-9]*'
                                    if re.search(vrgx, sp['_rvlans']):
                                        nglib.ngtree.add_child_ngtree(rtree, sp)

                    # Single / Multi-path
                    if not popt['onepath'] or distance not in hopSet:
                        hopSet.add(distance)
                        nglib.ngtree.add_child_ngtree(ngtree, rtree)
                    pathList.append(rtree)

        # Check Results
        if pathList:

            ngtree['Hops'] = len(pathList)
            ngtree['Distance'] = max([s['distance'] for s in pathList])
            ngtree['VRF'] = popt['VRF']

            if popt['onepath']:
                ngtree['Traversal Type'] = 'Single Path'
                ngtree['Traversal Coverage'] = path_coverage(ngtree['Distance'], ngtree['Hops'])
            else:
                ngtree['Traversal Type'] = 'All Paths'

            # CSV Prints locally for now
            if rtype == "CSV":
                nglib.query.print_dict_csv(pathList)

            # Export NG Trees
            else:
                # Export NGTree
                ngtree = nglib.query.exp_ngtree(ngtree, rtype)
                return ngtree
        elif popt['verbose']:
            print("No results found for path between {:} and {:}".format(net1, net2), \
                file=sys.stderr)
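
A minimal sketch of the dynamic VLAN pattern used in the l2path branch above, with illustrative values standing in for rec['vid'] and sp['_rvlans']:

import re

vid = '220'
rvlans = '1,110,220,300-310'

# Same construction as above: non-digits (possibly none) around the VLAN id
vrgx = r'[^0-9]*' + vid + '[^0-9]*'
if re.search(vrgx, rvlans):
    print('VLAN %s appears in %s' % (vid, rvlans))

Because [^0-9]* can also match the empty string, re.search() succeeds whenever the id occurs anywhere as a substring; a stricter membership test would need boundaries such as (?<![0-9]) and (?![0-9]).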

Example 78

Project: CumulusCI Source File: release_notes.py
def create_release_notes():
    global ORG_NAME
    global REPO_NAME
    global USERNAME
    global PASSWORD
    global MASTER_BRANCH
    global LAST_REL_TAG
    global CURRENT_REL_TAG
    global PREFIX_BETA
    global PREFIX_RELEASE

    gh = Github(USERNAME, PASSWORD)
    try:
        org = gh.get_organization(ORG_NAME)
    except:
        org = gh.get_user(ORG_NAME)
    repo = org.get_repo(REPO_NAME)
    
    # If LAST_REL_TAG was not provided, find the last release tag and set it as LAST_REL_TAG
    if not LAST_REL_TAG:
        if CURRENT_REL_TAG.startswith(PREFIX_RELEASE):
            current_version = LooseVersion(CURRENT_REL_TAG.replace(PREFIX_RELEASE,''))
        else:
            current_version = LooseVersion(CURRENT_REL_TAG.replace(PREFIX_BETA,''))
    
        print 'LAST_REL_TAG not specified, finding last release tag'
        versions = []
        for tag in repo.get_tags():
            if re.search('%s[0-9][0-9]*\.[0-9][0-9]*' % PREFIX_RELEASE, tag.name):
                version = LooseVersion(tag.name.replace(PREFIX_RELEASE,''))
                # Skip the CURRENT_REL_TAG and any newer releases
                if version >= current_version:
                    continue
                versions.append(version)
        versions.sort()
        versions.reverse()
        if versions:
            LAST_REL_TAG = '%s%s' % (PREFIX_RELEASE, versions[0])
        print 'Found last release tag: %s' % LAST_REL_TAG
    
    # Find the start and end date for pull requests by finding the commits from the tags
    last_rel_commit = None
    if LAST_REL_TAG:
        last_rel_ref = call_api('/git/refs/tags/%s' % LAST_REL_TAG)
        if last_rel_ref['object']['type'] == 'tag':
            last_rel_tag = call_api('/git/tags/%s' % last_rel_ref['object']['sha'])
            last_rel_commit = call_api('/git/commits/%s' % last_rel_tag['object']['sha'])
        else:
            last_rel_commit = call_api('/git/commits/%s' % last_rel_ref['object']['sha'])

    current_rel_ref = call_api('/git/refs/tags/%s' % CURRENT_REL_TAG)
    print current_rel_ref
    if current_rel_ref['object']['type'] == 'tag':
        current_rel_tag = call_api('/git/tags/%s' % current_rel_ref['object']['sha'])
        current_rel_commit = call_api('/git/commits/%s' % current_rel_tag['object']['sha'])
    else:
        current_rel_commit = call_api('/git/commits/%s' % current_rel_ref['object']['sha'])
    
    if last_rel_commit:
        since_date = datetime.datetime.strptime(last_rel_commit['committer']['date'], "%Y-%m-%dT%H:%M:%SZ")
    else:
        since_date = datetime.datetime(1999, 1, 1)

    until_date = datetime.datetime.strptime(current_rel_commit['committer']['date'], "%Y-%m-%dT%H:%M:%SZ")
    
    # Get the released package version number
    if CURRENT_REL_TAG.startswith(PREFIX_RELEASE):
        release_version = CURRENT_REL_TAG.replace(PREFIX_RELEASE,'')
    else:
        release_version = '%s)' % CURRENT_REL_TAG.replace(PREFIX_BETA,'').replace('-', ' (').replace('Beta_', 'Beta ')
    
    # Unfortunately, there is no ability to filter pull requests by date merged so we have to fetch all and loop through them
    pulls = repo.get_pulls(state='closed')
    
    pulls = []
    
    for pull in repo.get_pulls(state='closed'):
        merged = pull.merged_at
        if not merged:
            continue
        if pull.base.ref != MASTER_BRANCH:
            continue
        if merged <= until_date and merged > since_date:
            pulls.append(pull)
    
    content = {
        'warning': [],
        'info': [],
        'issues': [],
    }
    
    pulls.reverse()
    
    for pull in pulls:
        section = None
        in_info = False
        in_issues = False
        for line in pull.body.split('\n'):
            if line.startswith('# Warning'):
                section = 'warning'
                continue
            if line.startswith('# Info'):
                section = 'info'
                continue
            if line.startswith('# Issues'):
                section = 'issues'
                continue
    
            # Ignore everything at the top of the pull request body until we hit a heading we care about
            if not section:
                continue
    
            # Skip empty lines and trim extra spaces from line end
            line = line.rstrip()
            if not line.strip():
                continue
    
            # If we got here, we are in a section and want to extract the line as content
            if section == 'issues':
                # Parse out the issue number as int
                issue = re.sub(r'.*[F|f]ix.* #([0-9][0-9]*).*$', r'\1', line)
                if issue:
                    issue = int(issue)
                    if issue not in content[section]:
                        content[section].append(issue)
            else:
                content[section].append(line)
    
    # If there is no content found, exit
    if not content['warning'] and not content['info'] and not content['issues']:
        print 'No release note content found, exiting'
        return
    
    # Sort issues by issue number
    content['issues'].sort()
    
    f = codecs.open('release_notes.md', encoding='utf-8', mode='w')
    
    if content['warning']:
        f.write(u'# Critical Changes\r\n')
        for line in content['warning']:
            f.write(u'{0}\r\n'.format(line,))
        if content['info'] or content['issues']:
            f.write(u'\r\n')
    if content['info']:
        f.write(u'# Changes\r\n')
        for line in content['info']:
            f.write(u'{0}\r\n'.format(line,))
        if content['issues']:
            f.write(u'\r\n')
    if content['issues']:
        f.write(u'# Issues Closed\r\n')
        for issue in content['issues']:
            # Get the issue title to include
            gh_issue = call_api('/issues/%s' % issue)
            f.write(u'#{0}: {1}\r\n'.format(issue, gh_issue['title']))
    
            # Ensure all issues have a comment on which release they were fixed
            gh_issue_comments = call_api('/issues/%s/comments' % issue)
            has_comment = False
            for comment in gh_issue_comments:
                if CURRENT_REL_TAG.startswith(PREFIX_RELEASE):
                    if comment['body'].find('Included in production release') != -1:
                        has_comment = True
                else:
                    if comment['body'].find('Included in beta release') != -1:
                        has_comment = True
            if not has_comment:
                if CURRENT_REL_TAG.startswith(PREFIX_RELEASE):
                    data = {'body': 'Included in production release version %s' % release_version}
                    print "Adding comment on issue #%s with the production release version" % issue
                else:
                    data = {'body': 'Included in beta release version %s and higher' % release_version}
                    print "Adding comment on issue #%s with the beta release version" % issue
                call_api('/issues/%s/comments' % issue, data=data)
    
    f.close()
    
    f = codecs.open('release_notes.md', encoding='utf-8', mode='r')
    release_notes = f.read()
    f.close()
    
    print '----- RELEASE NOTES -----'
    print release_notes.encode('utf-8')
    print '----- END RELEASE NOTES -----'
    
    # Add the release notes to the body of the release
    if not PRINT_ONLY:
        releases = call_api('/releases')
        for release in releases:
            if release['tag_name'] == CURRENT_REL_TAG:
                print 'Adding release notes to body of %s' % release['html_url']
        
                data = {
                    "tag_name": release['tag_name'],
                    "target_commitish": release['target_commitish'],
                    "name": release['name'],
                    "body": release['body'], 
                    "draft": release['draft'],
                    "prerelease": release['prerelease'],
                }
        
                if data['body']:
                    new_body = {
                        'pre': [],
                        'post': [],
                    }
                    found_release_notes = False
                    in_release_notes = False
                    
                    for line in data['body'].split('\n'):
                        if line.startswith(('# Critical Changes', '# Changes', '# Issues Closed')):
                            found_release_notes = True
                            in_release_notes = True
                       
                        # Skip empty lines 
                        elif not line.strip():
                            in_release_notes = False
                            continue

                        if not in_release_notes:
                            if found_release_notes:
                                new_body['post'].append(line)
                            else:
                                new_body['pre'].append(line)
                            
                    data['body'] = u'{0}\r\n{1}\r\n{2}'.format(
                        '\r\n'.join(new_body['pre']), 
                        release_notes, 
                        '\r\n'.join(new_body['post']),
                    )
                else:
                    data['body'] = release_notes
        
                call_api('/releases/%s' % release['id'], data=data)
                break
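
A minimal sketch of the tag filtering near the top of this example; the prefix and tag names are invented for illustration:

import re

PREFIX_RELEASE = 'release/'
tag_names = ['beta/1.2-Beta_3', 'release/1.1', 'release/1.2', 'uat/1.3']

# Keep only tags whose name contains the release prefix followed by a
# dotted major.minor version number.
release_tags = [name for name in tag_names
                if re.search(r'%s[0-9][0-9]*\.[0-9][0-9]*' % PREFIX_RELEASE, name)]
print(release_tags)  # ['release/1.1', 'release/1.2']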

Example 79

Project: MariMe Source File: mariMe.py
def sendMultipler(*args):
	##GET PARAMETERS
	duped = {}
	meshID = {}
	matnodeID = ""
	meshy = {}
	appVersion = cmds.about(v=True)
	if appVersion == "Preview Release 38":
		appVersion = "2015"
	appVersion = re.search("(\d\d\d\d)", appVersion)
	appVersion = int(appVersion.group(0))
	##fullfilepath = []
	obj_namelocal = []
	textureNameUnfiltered = ""
	bitDepth = ""
	remoteHostIP = cmds.textField('mariTextFieldHost',q=True,text=1)
	channelText = cmds.textField('mariChannelText',q=True,text=1)
	fileExtension = ".obj"
	alembicStat = cmds.checkBox("mariMeBoxCheckbox12",q=1,v=1)
	updatestat = cmds.checkBox("mariMeBoxCheckbox13",q=1,v=1)
	startFrame = cmds.playbackOptions(q=1,minTime=1)
	endFrame = cmds.playbackOptions(q=1,maxTime=1)
	if alembicStat:
		fileExtension = ".abc"
		if len(cmds.ls(sl=1)) > 1:
			cmds.error("Alembic export only supports single meshes for animation.")
	#UDIMstat = cmds.checkBox('mariMeBoxCheckbox10',q=1,v=1)
	UDIMstat = 1
	##mySelection = cmds.ls( sl=True )

	smoothStat = cmds.checkBox("mariMeBoxCheckbox1",q=1,v=1)
	linuxsend = cmds.checkBox("mariMeBoxCheckbox5",q=1,v=1)
	displaceMe = cmds.checkBox("mariMeBoxCheckbox3",q=1,v=1)
	smoothIterations = cmds.intSliderGrp("mariMeSlider1",q=1,v=1)
	textureRes = cmds.intSliderGrp("mariMeSlider2",q=1,v=1)
	smoothUVs = cmds.checkBox("mariMeBoxCheckbox3",q=1,v=1)
	bitDepthCB = cmds.checkBox("mariMeBoxCheckbox6",q=1,v=1)
	bitDepthCBTwo = cmds.checkBox("mariMeBoxCheckbox7",q=1,v=1)

	if (bitDepthCB):
		bitDepth = "8"
	elif (bitDepthCBTwo):
		bitDepth = "16"
	else:
		bitDepth = "32"
	texturePath = ""
	scalarVal = cmds.checkBox("mariMeBoxCheckbox9",q=1,v=1)
	scalarString = "False"
	if (scalarVal):
		scalarString = "True"
	sceneName = cmds.textField('mariTextField',q=True,text=1)
	sendDiff = cmds.checkBox("mariMeBoxCheckbox4",q=1,v=1)
	useExistingRes = cmds.checkBox("mariMeBoxCheckbox11",q=1,v=1)
	projPath = cmds.workspace(q=True, rd=True)
	cmds.sysFile((projPath + 'MariMe'), makeDir=True)
	
	dummyGeoPath = (projPath + 'MariMe/' + "dummyGeo")
	
	##IF SEND TEXTURES IS ON, GET CHANNEL NAME FROM DROPDOWN LIST
	if (cmds.checkBox("mariMeBoxCheckbox4", q=1, value=1 )):
		textureNameUnfiltered = cmds.optionMenuGrp("textureSelect", q=1, v=1 )
		channelNameSplit = textureNameUnfiltered.split()
		channelText = channelNameSplit[0]
	
	if (smoothStat):
		duped = cmds.duplicate(rr=1)
		pm.polySmooth( kb=1, suv=1, khe=0, ksb=1, c=1, sl=4, dpe=1, ch=0)
		parentobj = cmds.ls(sl=1, o=1)
		cmds.select(parentobj, r=1)

	if (displaceMe):
		intermed = cmds.duplicate(rr=1)
		pm.polySmooth( kb=1, suv=1, khe=0, ksb=1, c=1, sl=4, dpe=1, ch=0) 
		meshy = cmds.displacementToPoly()
		cmds.delete(intermed)

	##GET THE TRANSFORM NODE IF MESH NODE IS SELECTED
	mySelection = cmds.ls( sl=True, transforms=1 )

	#IF NEW SCENE, BUILD A SINGLE QUAD TO DELETE AT END OF LOOP (WORKAROUND FOR WINDOWS NOT GETTING TAGS FOR FIRST OBJECT)	
	if not updatestat:
		mariMeDummyGeo = cmds.polyPlane(w=1111, h=1111, sx=1, sy=1)
		dummyMat = cmds.shadingNode("surfaceShader", asShader=1, n='dummyMaterial')
		cmds.select(mariMeDummyGeo[0], r=1)
		cmds.hyperShade(assign=dummyMat)
		cmds.select(mySelection, add=1)
		#FINAL MYSELECTION INCLUDES THE DUMMY GEO AS FIRST OBJECT	
		mySelection = cmds.ls( sl=True, transforms=1 )
	
	###CHECK FOR MESH ID BEFORE APPLYING IT
	for n in range(0, int(len(mySelection))):		
		if not checkForMeshID(mySelection[n]):
			meshID[n] = applyUniqueIDToMesh(mySelection[n])
		else:
			meshID[n] = checkForMeshID(mySelection[n])	
	
	if not updatestat:
		for n in range(0, 1):##EXPORT DUMMY GEO IF NOT UPDATING SCENE
			if alembicStat:
				cmds.select( mySelection[n], r=1 )
				##AbcExport -j "-frameRange 1 15 -noNormals -uvWrite -root test -file /Users/beige/Desktop/test3.abc";
				mel.eval('AbcExport -j "-frameRange '+str(startFrame)+' '+str(endFrame)+' -uvWrite -root '+mySelection[n]+' -file '+(dummyGeoPath + ".abc")+'";')
				##fullfilepath.append(projPath + "MariMe/" + meshID[n]+ ".abc")
			else:
				cmds.select( mySelection[n], r=1 )
				pm.exportSelected((dummyGeoPath + ".obj"), f=1, pr = 1, typ = "OBJexport", es = 1, op="groups=1;ptgroups=1;materials=0;smoothing=1;normals=1")

		for n in range(1, int(len(mySelection))):##FOR ALL OBJECTS AFTER DUMMY GEO
			if alembicStat:
				cmds.select( mySelection[n], r=1 )
				##AbcExport -j "-frameRange 1 15 -noNormals -uvWrite -root test -file /Users/beige/Desktop/test3.abc";
				mel.eval('AbcExport -j "-frameRange '+str(startFrame)+' '+str(endFrame)+' -uvWrite -root '+mySelection[n]+' -file '+(projPath + "MariMe/" + meshID[n]+ ".abc")+'";')
				##fullfilepath.append(projPath + "MariMe/" + meshID[n]+ ".abc")
			else:
				cmds.select( mySelection[n], r=1 )
				pm.exportSelected((projPath + "MariMe/" + meshID[n] + ".obj"), f=1, pr = 1, typ = "OBJexport", es = 1, op="groups=1;ptgroups=1;materials=0;smoothing=1;normals=1")
			##fullfilepath.append(projPath + "MariMe/" + meshID[n]+ ".obj")
	else:
		for n in range(0, int(len(mySelection))):##NO NEED FOR DUMMY GEO
			if alembicStat:
				cmds.select( mySelection[n], r=1 )
				##AbcExport -j "-frameRange 1 15 -noNormals -uvWrite -root test -file /Users/beige/Desktop/test3.abc";
				mel.eval('AbcExport -j "-frameRange '+str(startFrame)+' '+str(endFrame)+' -uvWrite -root '+mySelection[n]+' -file '+(projPath + "MariMe/" + meshID[n]+ ".abc")+'";')
				##fullfilepath.append(projPath + "MariMe/" + meshID[n]+ ".abc")
			else:
				cmds.select( mySelection[n], r=1 )
				pm.exportSelected((projPath + "MariMe/" + meshID[n] + ".obj"), f=1, pr = 1, typ = "OBJexport", es = 1, op="groups=1;ptgroups=1;materials=0;smoothing=1;normals=1")
			##fullfilepath.append(projPath + "MariMe/" + meshID[n]+ ".obj")

	cmds.select(mySelection, add=1)
	
	##SOCKET STUFF
	skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)	
	if (linuxsend):
		skt.connect((remoteHostIP, 6100))
	else:
		skt.connect(('localhost', 6100))

	##GET MATERIAL ON SELECTED AND TAG MAT ID
	for n in range(0, int(len(mySelection))):
		mySelectionShapes = cmds.listRelatives(mySelection[n], s=1)
		try:
			textureNameUnfiltered = cmds.optionMenuGrp("textureSelect", q=1, v=1 )
			channelNameSplit = textureNameUnfiltered.split()
			shadingGroup = cmds.listSets(type=1,o=mySelectionShapes[0])
			material = cmds.listConnections(shadingGroup[0] + ".surfaceShader")
		except:
			shadingGroup = cmds.listSets(type=1,o=mySelectionShapes[0])
			material = cmds.listConnections(shadingGroup[0] + ".surfaceShader")
		
		###CHECK FOR MAT ID BEFORE APPLYING IT
		if not checkFormatnodeID(material[0]):
			matnodeID = applyUniqueIDToMat(material[0])
	
	##APPEND MESHES TO OBJ LIST FOR NEW SCENE GEO
	for n in range(0, int(len(mySelection))):
		#skt.send('obj_name.append("' + projPath + 'MariMe/' + meshID[n]+fileExtension+'")\x04')
		obj_namelocal.append('"'+projPath + 'MariMe/' + meshID[n]+fileExtension+'"')

	commandqueue = []
	
	# for n in range(0, 5):
	#     commandqueue.append('mari.geo.load ("/Users/beige/Desktop/temps/objs/voronoi_rock_'+str(n)+'.obj")')
	# commandqstring = '\n'.join(commandqueue)
	# skt.send(commandqstring+"\x04")

	##SEND WITHOUT TEXTURES	
	if (sendDiff == 0):
		if not updatestat:
			commandqueue.append("createNewSceneWithDummyObject('" + sceneName + "', '"+dummyGeoPath+fileExtension+"')")

			#SUBLOOP FOR PROPER MAT ID TAGGING OF SECONDARY GEOMETRY
			for n in range(1, int(len(mySelection))):##FOR ALL SECONDARY EXPORTED MESHES 
				mySelectionShapes = cmds.listRelatives(mySelection[n], s=1)##LOCAL VAR
				inLoopShadingGroup = cmds.listSets(type=1,o=mySelectionShapes[0])##LOCAL VAR
				inLoopMaterial = cmds.listConnections(inLoopShadingGroup[0] + ".surfaceShader")##LOCAL VAR
				inLoopMatnodeID = checkFormatnodeID(inLoopMaterial[0])
				commandqueue.append('importNewObjectAndMat('+obj_namelocal[n]+', "'+channelText+'", "null", '+str(textureRes)+', '+str(bitDepth)+')')
				commandqueue.append('matnodeTagger("'+inLoopMatnodeID+'")')
			
			
		elif updatestat:
			for n in range(0, int(len(mySelection))):##FOR ALL EXPORTED MESHES - DON'T NEED DUMMY GEO
				mySelectionShapes = cmds.listRelatives(mySelection[n], s=1)##LOCAL VAR
				inLoopShadingGroup = cmds.listSets(type=1,o=mySelectionShapes[0])##LOCAL VAR
				inLoopMaterial = cmds.listConnections(inLoopShadingGroup[0] + ".surfaceShader")##LOCAL VAR
				inLoopMatnodeID = checkFormatnodeID(inLoopMaterial[0])
				commandqueue.append('importNewObjectAndMat('+obj_namelocal[n]+', "'+channelText+'", "null", '+str(textureRes)+', '+str(bitDepth)+')')
				commandqueue.append('matnodeTagger("'+inLoopMatnodeID+'")')
				
		commandqueue.append('IDTagger()')
		commandqueue.append('projPathTagger("'+projPath+'")')		

	elif (sendDiff == 1):
		##IF CHANNEL LIST PICK ISN'T "ALL (AUTO-DETECT), SEND WITH TEXTURES USING SINGLE TEXTURE PROC:
		if textureNameUnfiltered != 'all (auto-detect)':
			##SEND THE FINAL COMMANDS TO CREATE SCENE AND LOAD MESHES AND TEXTURES
			if not updatestat:##FOR TEXTURES
				for n in range(0, 1):########SKIP FIRST MESH SINCE IT'S DUMMY GEO:
					commandqueue.append("createNewSceneWithDummyObject('" + sceneName + "', '"+dummyGeoPath+fileExtension+"')")
				# if (cmds.about(win=1)):
				# 	time.sleep(6)
				##THE FOLLOW-UP LOOP FOR OPEN SCENE:
				for n in range(1, int(len(mySelection))):###THIS USES SECOND OBJECT AND ON FOR LOOP
					#GET UNIQUE TEXTUREPATH FOR EACH MESH
					mySelectionShapes = cmds.listRelatives(mySelection[n], s=1)
					textureNameUnfiltered = cmds.optionMenuGrp("textureSelect", q=1, v=1 )
					channelNameSplit = textureNameUnfiltered.split()
					shadingGroup = cmds.listSets(type=1,o=mySelectionShapes[0])
					material = cmds.listConnections(shadingGroup[0] + ".surfaceShader")
					fileNode = cmds.listConnections(material[0] + "." + channelNameSplit[0])
					texturePath = cmds.getAttr(fileNode[0] + ".ftn")
					UDIMFileName = ""
					matchcase = ""
					if (UDIMstat):
						try:
							splitname = texturePath.split('.')
							matchcase = re.match("(\d\d\d\d)", splitname[-2])##find the match in whatever is before the file extension
						except:
							pass
						if matchcase is not None:
							splitname[-2] = '$UDIM'
							texturePath = '.'.join( splitname )##REPLACE texturePath with UDIM'ed filename
					if useExistingRes:
						try:
							allFileNodesOnChannel = findFileNodesConnectedToFileNode(fileNode[0])
							for file in allFileNodesOnChannel:
								textureResForNode = cmds.getAttr(file + '.outSizeX')
								if textureResForNode > textureRes:
									textureRes = textureResForNode ##FIND THE MAXIMUM RESOLUTION OF THE CONNECTED UDIMS AND USE THAT INSTEAD OF THE FIRST DETECTED
						except:
							pass
					commandqueue.append('importNewObjectAndMat('+obj_namelocal[n]+', "'+channelText+'", "'+texturePath+'", '+str(textureRes)+', '+str(bitDepth)+')')
					matnodeID = checkFormatnodeID(material[0])	
					commandqueue.append('matnodeTagger("'+matnodeID+'")')
					#END OLD LOOP


			##UPDATE SCENE - LOOP FOR TEXTURES 
			elif updatestat:
				for n in range(0, int(len(mySelection))):
					#GET UNIQUE TEXTUREPATH FOR EACH MESH
					mySelectionShapes = cmds.listRelatives(mySelection[n], s=1)
					textureNameUnfiltered = cmds.optionMenuGrp("textureSelect", q=1, v=1 )
					channelNameSplit = textureNameUnfiltered.split()
					shadingGroup = cmds.listSets(type=1,o=mySelectionShapes[0])
					material = cmds.listConnections(shadingGroup[0] + ".surfaceShader")
					fileNode = cmds.listConnections(material[0] + "." + channelNameSplit[0])
					texturePath = cmds.getAttr(fileNode[0] + ".ftn")
					UDIMFileName = ""
					matchcase = ""
					if (UDIMstat):
						try:
							splitname = texturePath.split('.')
							matchcase = re.match("(\d\d\d\d)", splitname[-2])##find the match in whatever is before the file extension
						except:
							pass
						if matchcase is not None:
							splitname[-2] = '$UDIM'
							texturePath = '.'.join( splitname )##REPLACE texturePath with UDIM'ed filename
					if useExistingRes:
						try:
							allFileNodesOnChannel = findFileNodesConnectedToFileNode(fileNode[0])
							for file in allFileNodesOnChannel:
								textureResForNode = cmds.getAttr(file + '.outSizeX')
								if textureResForNode > textureRes:
									textureRes = textureResForNode ##FIND THE MAXIMUM RESOLUTION OF THE CONNECTED UDIMS AND USE THAT INSTEAD OF THE FIRST DETECTED
						except:
							pass
					commandqueue.append('importNewObjectAndMat('+obj_namelocal[n]+', "'+channelText+'", "'+texturePath+'", '+str(textureRes)+', '+str(bitDepth)+')')
					matnodeID = checkFormatnodeID(material[0])	
					commandqueue.append('matnodeTagger("'+matnodeID+'")')
					#END OLD LOOP
			commandqueue.append('IDTagger()')
			commandqueue.append('projPathTagger("'+projPath+'")')
		##end - IF NAME ISN'T "ALL (AUTO-DETECT)"
		
		##LOOP FOR AUTO-DETECT CHANNELS:(UPDATE SCENE)
		if textureNameUnfiltered == 'all (auto-detect)':
			for n in range(0, int(len(mySelection))):
				##get unique texturepath for each mesh
				mySelectionShapes = cmds.listRelatives(mySelection[n], s=1)

				#SHADER PORTION FOR EACH MESH:
				shadingGroup = cmds.listSets(type=1, o=str(mySelectionShapes[0]))
				connectedShader = cmds.listConnections(shadingGroup[0] + ".surfaceShader")
				fileNodeList = findFileNodes(mySelectionShapes[0])
				##textureNodeItems = ""
				filesConnectedToShader = cmds.listConnections(connectedShader, c=1, type='file')
				##print filesConnectedToShader ##[u'VRayMtl1.color', u'pSphere12014140307125015_color_1011', u'VRayMtl1.bumpMap', u'pSphere12014140307125015_bumpMap_1011']
				channelsJustNames = []
				listChannels = []
				listChannels = filesConnectedToShader[::2]
				fileNodeForChannel = filesConnectedToShader[::-2]
				fileNodeForChannel.reverse()

				for n in range(0, int(len(listChannels)), 1): 	  
					t = (listChannels[n].split('.'))
					channelsJustNames.append(t[1])
				
				##LOAD UNIQUE MESHES INTO UNIQUE VARIABLE:
				commandqueue.append('geot'+str(n)+' = mari.geo.load('+obj_namelocal[n]+')')
				##skt.send('print geot'+str(n)+'\x04')
				##set to current (use zero list index because it's a tuple with mesh ID in [1])
				commandqueue.append('mari.geo.setCurrent(geot'+str(n)+'[0])')
				##GET RID OF THE DEFAULT "DIFFUSE" CHANNEL THAT'S CREATED FOR NEW GEO:
				commandqueue.append('delAction = mari.actions.find("/Mari/Channels/Remove Channel")')
				commandqueue.append('delAction.trigger()')

				for n in range(0, int(len(channelsJustNames))): 
					filepath = cmds.getAttr(fileNodeForChannel[n] + ".ftn")
					if useExistingRes:
						try:
							allFileNodesOnChannel = findFileNodesConnectedToFileNode(fileNode[0])
							for file in allFileNodesOnChannel:
								textureResForNode = cmds.getAttr(file + '.outSizeX')
								if textureResForNode > textureRes:
									textureRes = textureResForNode ##FIND THE MAXIMUM RESOLUTION OF THE CONNECTED UDIMS AND USE THAT INSTEAD OF THE FIRST DETECTED
						except:
							pass
					if (UDIMstat):
						splitname = filepath.split('.')
						splitname[-2] = '$UDIM'
						filepath = '.'.join( splitname )##REPLACE texturePath with UDIM'ed filename
					commandqueue.append('addChannelToCurrent("'+channelsJustNames[n]+'", "'+filepath+'", '+str(textureRes)+', '+str(bitDepth)+')')
		##END LOOP FOR AUTO-DETECT CHANNELS

	##CLEAN UP SMOOTH MESH AND DISPLACE MESH TEMPS
	if (smoothStat):
		cmds.delete(duped)
	if (displaceMe):
		cmds.delete(meshy)
	if not updatestat:
		commandqueue.append('dummyDeleter()')
	##CLEAN UP DUMMY STUFF AT ALL COSTS
	try:
		cmds.delete(mariMeDummyGeo)
		cmds.delete('dummyMaterial*')
	except:
		pass
			
	##JOIN ALL COMMANDS INTO MULTILINE QUEUE :
	commandqstring = '\n'.join(commandqueue)
	print commandqstring
	##SEND MULTILINE COMMAND QUEUE AND EVAL AFTER ALL COMMANDS TO THE MARI SIDE (this is where the magic happens)
	skt.send(commandqstring+"\x04")
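
A minimal sketch of the version extraction at the top of this example; the sample strings stand in for whatever cmds.about(v=True) returns:

import re

for about_string in ('2014 x64', 'Maya 2016 Extension 2'):
    m = re.search(r'(\d\d\d\d)', about_string)
    if m:
        # group(0) is the whole match, here the first four-digit run;
        # re.search finds it anywhere in the string, whereas the re.match
        # used later for UDIM detection only matches at the beginning.
        print(int(m.group(0)))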

Example 80

Project: virtmgr Source File: views.py
def pool(request, host_id, pool):

	if not request.user.is_authenticated():
		return HttpResponseRedirect('/')

	kvm_host = Host.objects.get(user=request.user.id, id=host_id)

	def add_error(msg, type_err):
		error_msg = Log(host_id=host_id, 
			            type=type_err, 
			            message=msg, 
			            user_id=request.user.id
			            )
		error_msg.save()

	def get_vms():
		try:
			vname = {}
			for id in conn.listDomainsID():
				id = int(id)
				dom = conn.lookupByID(id)
				vname[dom.name()] = dom.info()[0]
			for id in conn.listDefinedDomains():
				dom = conn.lookupByName(id)
				vname[dom.name()] = dom.info()[0]
			return vname
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_networks():
		try:
			networks = {}
			for name in conn.listNetworks():
				net = conn.networkLookupByName(name)
				status = net.isActive()
				networks[name] = status
			for name in conn.listDefinedNetworks():
				net = conn.networkLookupByName(name)
				status = net.isActive()
				networks[name] = status
			return networks
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def vm_conn():
		try:
			flags = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE]
			auth = [flags, creds, None]
			uri = 'qemu+tcp://' + kvm_host.ipaddr + '/system'
			conn = libvirt.openAuth(uri, auth, 0)
			return conn
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	if not kvm_host.login or not kvm_host.passwd:
		def creds(credentials, user_data):
			for credential in credentials:
				if credential[0] == libvirt.VIR_CRED_AUTHNAME:
					credential[4] = request.session['login_kvm']
					if len(credential[4]) == 0:
						credential[4] = credential[3]
				elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
					credential[4] = request.session['passwd_kvm']
				else:
					return -1
			return 0
	else:
		def creds(credentials, user_data):
			for credential in credentials:
				if credential[0] == libvirt.VIR_CRED_AUTHNAME:
					credential[4] = kvm_host.login
					if len(credential[4]) == 0:
						credential[4] = credential[3]
				elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
					credential[4] = kvm_host.passwd
				else:
					return -1
			return 0
			
	def get_conn_pool(pool):
		try:
			net = conn.networkLookupByName(pool)
			return net
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def pool_start():
		try:
			net.create()
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def pool_stop():
		try:
			net.destroy()
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def pool_delete():
		try:
			net.undefine()
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def net_set_autostart(pool):
		try:
			net = conn.networkLookupByName(pool)
			net.setAutostart(1)
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_net_info(get):
		try:
			if get == "bridge":
				return net.bridgeName()
			elif get == "status":
				return net.isActive()
			elif get == "start":
				return net.autostart()
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_ipv4_net():
		try:
			net = conn.networkLookupByName(pool)
			xml = net.XMLDesc(0)
			addrStr = util.get_xml_path(xml, "/network/ip/@address")
			netmaskStr = util.get_xml_path(xml, "/network/ip/@netmask")

			netmask = IP(netmaskStr)
			gateway = IP(addrStr)

			network = IP(gateway.int() & netmask.int())
			return IP(str(network) + "/" + netmaskStr)
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_ipv4_dhcp_range():
		try:
			net = conn.networkLookupByName(pool)
			xml = net.XMLDesc(0)
			dhcpstart = util.get_xml_path(xml, "/network/ip/dhcp/range[1]/@start")
			dhcpend = util.get_xml_path(xml, "/network/ip/dhcp/range[1]/@end")
			if not dhcpstart or not dhcpend:
				return None
			
			return [IP(dhcpstart), IP(dhcpend)]
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_ipv4_forward():
		try:
			xml = net.XMLDesc(0)
			fw = util.get_xml_path(xml, "/network/forward/@mode")
			forwardDev = util.get_xml_path(xml, "/network/forward/@dev")
			return [fw, forwardDev]
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def create_net_pool(name_pool, forward, ipaddr, netmask, dhcp, s_dhcp, e_dhcp):
		try:
			xml = """
				<network>
					<name>%s</name>""" % (name_pool)

			if forward == "nat" or forward == "route":
				xml += """<forward mode='%s'/>""" % (forward)

			xml += """<bridge stp='on' delay='0' />
						<ip address='%s' netmask='%s'>""" % (gw_ipaddr, netmask)

			if dhcp == "yes":
				xml += """<dhcp>
							<range start='%s' end='%s' />
						</dhcp>""" % (s_dhcp, e_dhcp)
					
			xml += """</ip>
				</network>"""
			conn.networkDefineXML(xml)
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	conn = vm_conn()

	if conn == None:
		return HttpResponseRedirect('/overview/%s/' % (host_id))

	pools = get_networks()
	all_vm = get_vms()
	errors = []

	if pool != 'new_net_pool':
		net = get_conn_pool(pool)
		bridge = get_net_info('bridge')
		status = get_net_info('status')
		if status == 1:
			start = get_net_info('start')
			network = get_ipv4_net()
			dhcprange = get_ipv4_dhcp_range()
			netmode = get_ipv4_forward()

	if request.method == 'POST':
		if request.POST.get('new_net_pool',''):
			name_pool = request.POST.get('name_pool','')
			net_addr = request.POST.get('net_addr','')
			forward = request.POST.get('forward','')
			dhcp = request.POST.get('dhcp','')
			simbol = re.search('[^a-zA-Z0-9\_]+', name_pool)
			if len(name_pool) > 20:
				msg = _('The name of the network pool must not exceed 20 characters')
				errors.append(msg)
			if simbol:
				msg = _('The name of the network pool must not contain any special characters or Russian characters')
				errors.append(msg)
			if not name_pool:
				msg = _('Enter the name of the pool')
				errors.append(msg)
			if not net_addr:
				msg = _('Enter the IP subnet')
				errors.append(msg)
			try:
				netmask = IP(net_addr).strNetmask()
				ipaddr = IP(net_addr)
				gw_ipaddr = ipaddr[1].strNormal()
				start_dhcp = ipaddr[2].strNormal()
				end_dhcp = ipaddr[254].strNormal()
			except:
				msg = _('IP subnet must be 192.168.1.0/24 or 192.168.1.0/26')
				errors.append(msg)
			if errors:
				return render_to_response('network.html', locals())
			if not errors:
				if create_net_pool(name_pool, forward, gw_ipaddr, netmask, dhcp, start_dhcp, end_dhcp) is "error":
					msg = _('Such a pool already exists')
					errors.append(msg)
				if not errors:
					net_set_autostart(name_pool)
					net = get_conn_pool(name_pool)
					if pool_start() is "error":
						msg = _('The pool was created, but it failed to start; you may have specified an existing network')
						errors.append(msg)
					else:
						msg = _('Creating a network pool: ') 
						msg = msg + name_pool
						add_error(msg, 'user')
						return HttpResponseRedirect('/network/%s/%s/' % (host_id, name_pool))
					if errors:
						return render_to_response('network.html', locals())
		if request.POST.get('stop_pool',''):
			msg = _('Stop network pool: ')
			msg = msg + pool
			pool_stop()
			add_error(msg, 'user')
		if request.POST.get('start_pool',''):
			msg = _('Start network pool: ')
			msg = msg + pool
			pool_start()
			add_error(msg, 'user')
		if request.POST.get('del_pool',''):
			msg = _('Delete network pool: ')
			msg = msg + pool
			pool_delete()
			add_error(msg, 'user')
			return HttpResponseRedirect('/network/%s/' % (host_id))
		return HttpResponseRedirect('/network/%s/%s/' % (host_id, pool))

	conn.close()

	return render_to_response('network.html', locals())
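
A minimal sketch of the pool-name validation above, with illustrative names; the same character class is written here as a raw string:

import re

for name_pool in ('lan_pool1', 'lan pool!'):
    # re.search returns a match object if the name contains anything other
    # than ASCII letters, digits or underscores, and None otherwise.
    simbol = re.search(r'[^a-zA-Z0-9_]+', name_pool)
    if simbol:
        print('%r contains a disallowed character: %r' % (name_pool, simbol.group(0)))
    else:
        print('%r is a valid pool name' % name_pool)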

Example 81

Project: oz Source File: Mageia.py
    def _modify_iso(self):
        """
        Method to make the boot ISO auto-boot with appropriate parameters.
        """
        self.log.debug("Modifying ISO")

        self.log.debug("Copying cfg file to floppy image")

        outname = os.path.join(self.iso_contents, "auto_inst.cfg")

        if self.default_auto_file():

            def _cfg_sub(line):
                """
                Method that is called back from oz.ozutil.copy_modify_file() to
                modify preseed files as appropriate for Mageia.
                """
                if re.search("'password' =>", line):
                    return "			'password' => '" + self.rootpw + "',\n"
                else:
                    return line

            oz.ozutil.copy_modify_file(self.auto, outname, _cfg_sub)
        else:
            shutil.copy(self.auto, outname)

        oz.ozutil.subprocess_check_output(["/sbin/mkfs.msdos", "-C",
                                           self.output_floppy, "1440"])
        oz.ozutil.subprocess_check_output(["mcopy", "-n", "-o", "-i",
                                           self.output_floppy, outname,
                                           "::AUTO_INST.CFG"])

        self.log.debug("Modifying isolinux.cfg")
        if self.tdl.update == "2":
            '''
            Mageia 2 dual   - isolinux/32.cfg
                              isolinux/64.cfg
                              isolinux/alt0/32/vmlinuz
                              isolinux/alt0/32/all.rdz
                              isolinux/alt0/64/vmlinuz
                              isolinux/alt0/64/all.rdz
            Mageia 2 x86_64 - x86_64/isolinux/isolinux.cfg
                              x86_64/isolinux/alt0/vmlinuz
                              x86_64/isolinux/alt0/all.rdz
            Mageia 2 i586   - i586/isolinux/isolinux.cfg
                              i586/isolinux/vmlinuz
                              i586/isolinux/all.rdz
            '''
            if os.path.exists(os.path.join(self.iso_contents, 'isolinux')):
                if self.tdl.arch == "i386":
                    mageia_arch = "32"
                else:
                    mageia_arch = "64"

                # This looks like a dual CD, so let's set things up that way.
                isolinuxcfg = os.path.join(self.iso_contents, 'isolinux', mageia_arch + ".cfg")
                self.isolinuxbin = os.path.join('isolinux', mageia_arch + ".bin")
                kernel = "alt0/" + mageia_arch + "/vmlinuz"
                initrd = "alt0/" + mageia_arch + "/all.rdz"
            else:
                # This looks like an i586 or x86_64 ISO, so set things up that way.
                mageia_arch = self.tdl.arch
                if self.tdl.arch == "i386":
                    mageia_arch = "i586"
                isolinuxcfg = os.path.join(self.iso_contents, mageia_arch, 'isolinux', 'isolinux.cfg')
                self.isolinuxbin = os.path.join(mageia_arch, 'isolinux', 'isolinux.bin')
                kernel = "alt0/vmlinuz"
                initrd = "alt0/all.rdz"
            flags = "ramdisk_size=128000 root=/dev/ram3 acpi=ht vga=788 automatic=method:cdrom"
        elif self.tdl.update == "3":
            '''
            Mageia 3 dual   - syslinux/32.cfg
                              syslinux/64.cfg
                              syslinux/alt0/32/vmlinuz
                              syslinux/alt0/32/all.rdz
                              syslinux/alt0/64/vmlinuz
                              syslinux/alt0/64/all.rdz
            Mageia 3 x86_64 - x86_64/isolinux/isolinux.cfg
                              x86_64/isolinux/alt0/vmlinuz
                              x86_64/isolinux/alt0/all.rdz
            Mageia 3 i586   - i586/isolinux/isolinux.cfg
                              i586/isolinux/alt0/vmlinuz
                              i586/isolinux/alt0/all.rdz
            '''
            if os.path.exists(os.path.join(self.iso_contents, 'syslinux')):
                if self.tdl.arch == "i386":
                    mageia_arch = "32"
                else:
                    mageia_arch = "64"
                isolinuxcfg = os.path.join(self.iso_contents, 'syslinux', mageia_arch + ".cfg")
                self.isolinuxbin = os.path.join('syslinux', mageia_arch + ".bin")
                kernel = "alt0/" + mageia_arch + "/vmlinuz"
                initrd = "alt0/" + mageia_arch + "/all.rdz"
            else:
                mageia_arch = self.tdl.arch
                if self.tdl.arch == "i386":
                    mageia_arch = "i586"
                isolinuxcfg = os.path.join(self.iso_contents, mageia_arch, 'isolinux', 'isolinux.cfg')
                self.isolinuxbin = os.path.join(mageia_arch, 'isolinux', 'isolinux.bin')
                kernel = "alt0/vmlinuz"
                initrd = "alt0/all.rdz"
            flags = "ramdisk_size=128000 root=/dev/ram3 acpi=ht vga=788 automatic=method:cdrom"
        elif self.tdl.update in ["4", "4.1", "5"]:
            '''
            Mageia 4 dual     - isolinux/i586.cfg
                                isolinux/x86_64.cfg
                                isolinux/i586/vmlinuz
                                isolinux/i586/all.rdz
                                isolinux/x86_64/vmlinuz
                                isolinux/x86_64/all.rdz
            Mageia 4 x86_64   - isolinux/isolinux.cfg
                                isolinux/x86_64/vmlinuz
                                isolinux/x86_64/all.rdz
            Mageia 4 i586     - isolinux/isolinux.cfg
                                isolinux/i586/vmlinuz
                                isolinux/i586/all.rdz
            Mageia 4.1 dual   - isolinux/i586.cfg
                                isolinux/x86_64.cfg
                                isolinux/i586/vmlinuz
                                isolinux/i586/all.rdz
                                isolinux/x86_64/vmlinuz
                                isolinux/x86_64/all.rdz
            Mageia 4.1 x86_64 - isolinux/isolinux.cfg
                                isolinux/x86_64/vmlinuz
                                isolinux/x86_64/all.rdz
            Mageia 4.1 i586 -   isolinux/isolinux.cfg
                                isolinux/i586/vmlinuz
                                isolinux/i586/all.rdz
            Mageia 5 dual     - isolinux/i586.cfg
                                isolinux/x86_64.cfg
                                isolinux/i586/vmlinuz
                                isolinux/i586/all.rdz
                                isolinux/x86_64/vmlinuz
                                isolinux/x86_64/all.rdz
            Mageia 5 x86_64   - isolinux/isolinux.cfg
                                isolinux/x86_64/vmlinuz
                                isolinux/x86_64/all.rdz
            Mageia 5 i586 -     isolinux/isolinux.cfg
                                isolinux/i586/vmlinuz
                                isolinux/i586/all.rdz
            '''
            # Starting with Mageia 4, things are a lot more regular.  The
            # directory always starts with isolinux.  If it is a dual ISO, then
            # there is an i586.cfg and x86_64.cfg describing how to boot each
            # of them.  Otherwise, there is just an isolinux.cfg.  The kernel
            # and initrd are always in the same place.
            mageia_arch = self.tdl.arch
            if self.tdl.arch == "i386":
                mageia_arch = "i586"
            if os.path.exists(os.path.join(self.iso_contents, 'isolinux', 'i586.cfg')):
                # A dual, so use the correct cfg
                isolinuxcfg = os.path.join(self.iso_contents, 'isolinux', mageia_arch + ".cfg")
                self.isolinuxbin = os.path.join('isolinux', mageia_arch + ".bin")
            else:
                isolinuxcfg = os.path.join(self.iso_contents, 'isolinux', 'isolinux.cfg')
                self.isolinuxbin = os.path.join('isolinux', 'isolinux.bin')
            kernel = mageia_arch + "/vmlinuz"
            initrd = mageia_arch + "/all.rdz"
            if self.tdl.installtype == "url":
                url = urlparse.urlparse(self.tdl.url)
                flags = "automatic=method:%s,ser:%s,dir:%s,int:eth0,netw:dhcp" % (url.scheme, url.hostname, url.path)
            else:
                flags = "automatic=method:cdrom"

        with open(isolinuxcfg, 'w') as f:
            f.write("""\
default customiso
timeout 1
prompt 0
label customiso
  kernel %s
  append initrd=%s kickstart=floppy %s
""" % (kernel, initrd, flags))

Example 82

Project: xypath Source File: tabulate.py
def tabulate(tabular_data, headers=[], tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=u""):
    """Format a fixed width table for pretty printing.

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------

    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable or iterables), a dictionary of
    iterables, a two-dimensional NumPy array, or a Pandas' dataframe.


    Table headers
    -------------

    To print nice column headers, supply the second argument (`headers`):

      - `headers` can be an explicit list of column headers
      - if `headers="firstrow"`, then the first row of data is used
      - if `headers="keys"`, then dictionary keys or column indices are used

    Otherwise a headerless table is produced.

    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19


    Column alignment
    ----------------

    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: right, center, left, decimal (only
    for `numalign`).


    Table formats
    -------------

    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.

    `None` values are replaced with a `missingval` string:

    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----

    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', and 'mediawiki'.

    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451

    "simple" format is like Pandoc simple_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------

    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+

    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |

    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |


    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |

    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========

    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}

    >>> print(tabulate([["eggs", 42], ["spam", 23]], tablefmt="mediawiki", stralign="left"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    | eggs || align="right"| 42
    |-
    | spam || align="right"| 23
    |}


    """

    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = u'\n'.join(['\t'.join(map(_text_type, headers))] + \
                            [u'\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = len

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c]
             for c,ct in zip(cols, coltypes)]

    # align columns
    aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
    minwidths = [width_fn(h)+2 for h in headers] if headers else [0]*len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns)
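
re.search is used here purely as a boolean test: the joined table text is scanned once for ANSI escape codes, and the smarter width function is enabled only when a match is found. A small sketch of that optimization, using a stand-in pattern for _invisible_codes (the real module defines its own regular expression):

import re

# Stand-in for tabulate's _invisible_codes: ANSI SGR colour sequences.
_invisible_codes = re.compile(r"\x1b\[\d*(?:;\d+)*m")

def _visible_width(s):
    # Width of the string with the colour codes stripped out.
    return len(_invisible_codes.sub("", s))

plain_text = "\x1b[31mspam\x1b[0m\t41.9999"
width_fn = _visible_width if re.search(_invisible_codes, plain_text) else len
print(width_fn("\x1b[31mspam\x1b[0m"))  # 4, not 13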

Example 83

Project: sqlipy Source File: SQLiPy.py
Function: startscan
  def startScan(self, button):
    hpp = ''
    cu = ''
    cdb = ''
    hostname = ''
    isdba = ''
    lusers = ''
    lpswds = ''
    lprivs = ''
    lroles = ''
    ldbs = ''
    textonly = ''
    postdata = None
    datacmd = ''
    cookiedata = None
    cookiecmd = ''
    uadata = None
    uacmd = ''
    custheaderdata = None
    custheadercmd = ''
    headerdata = None
    headercmd = ''
    refererdata = None
    referercmd = ''
    proxy = None
    proxycmd = ''
    dbms = None
    dbmscmd = ''
    os = None
    oscmd = ''
    tampercmd = ''
    tamperdata = None
    paramcmd = ''
    paramdata = None
    csrfurl = None
    csrftoken = None
    torcmd = ''
    tortypecmd = ''
    torportcmd = ''
    httpmethod = None
    httpmethodcmd = ''

    if self._jCheckTO.isSelected():
      textonly = ' --text-only'
      textonlystatus = True
    else:
      textonlystatus = False

    if self._jCheckHPP.isSelected():
      hpp = ' --hpp'
      hppstatus = True
    else:
      hppstatus = False

    if self._jCheckCU.isSelected():
      cu = ' --current-user'
      custatus = True
    else:
      custatus = False

    if self._jCheckDB.isSelected():
      cdb = ' --current-db'
      cdbstatus = True
    else:
      cdbstatus = False

    if self._jCheckHost.isSelected():
      hostname = ' --hostname'
      hostnamestatus = True
    else:
      hostnamestatus = False

    if self._jCheckDBA.isSelected():
      isdba = ' --is-dba'
      isdbastatus = True
    else:
      isdbastatus = False

    if self._jCheckUsers.isSelected():
      lusers = ' --users'
      lusersstatus = True
    else:
      lusersstatus = False

    if self._jCheckPswds.isSelected():
      lpswds = ' --passwords'
      lpswdsstatus = True
    else:
      lpswdsstatus = False

    if self._jCheckPrivs.isSelected():
      lprivs = ' --privileges'
      lprivsstatus = True
    else:
      lprivsstatus = False

    if self._jCheckRoles.isSelected():
      lroles = ' --roles'
      lrolesstatus = True
    else:
      lrolesstatus = False

    if self._jCheckDBs.isSelected():
      ldbs = ' --dbs'
      ldbsstatus = True
    else:
      ldbsstatus = False

    if self._jCheckTor.isSelected():
      torstatus = True
      torcmd = ' --tor'
      tortype = self._jComboTorType.getSelectedItem()
      tortypecmd = ' --tor-type=' + self._jComboTorType.getSelectedItem()
      torport = self._jTextFieldTorPort.getText()
      torportcmd = ' --tor-port=' + self._jTextFieldTorPort.getText()
    else:
      torstatus = False
      tortype = 'HTTP'
      torport = None

    if re.search('(http|https)\://', self._jTextFieldProxy.getText()) is not None:
      proxy = self._jTextFieldProxy.getText()
      proxycmd = ' --proxy=' + self._jTextFieldProxy.getText()

    if not re.search('^Default$', self._jComboHttpMethod.getSelectedItem()) is not None:
      httpmethod = self._jComboHttpMethod.getSelectedItem()
      httpmethodcmd = ' --method="' + self._jComboHttpMethod.getSelectedItem()+'"'

    if not re.search('^Any$', self._jComboDBMS.getSelectedItem()) is not None:
      dbms = self._jComboDBMS.getSelectedItem()
      dbmscmd = ' --dbms="' + self._jComboDBMS.getSelectedItem()+'"'

    if not re.search('^Any$', self._jComboOS.getSelectedItem()) is not None:
      os = self._jComboOS.getSelectedItem()
      oscmd = ' --os=' + self._jComboOS.getSelectedItem()

    if re.search('[a-zA-Z0-9]', self._jTextFieldTamper.getText()) is not None:
      tampercmd = ' --tamper="' + self._jTextFieldTamper.getText() + '"'
      tamperdata = self._jTextFieldTamper.getText()

    if re.search('[a-zA-Z0-9]', self._jTextData.getText()) is not None:
      postdata = self._jTextData.getText()
      datacmd = ' --data="' + self._jTextData.getText() + '"'

    if re.search('[a-zA-Z0-9]', self._jTextFieldCookie.getText()) is not None:
      cookiedata = self._jTextFieldCookie.getText()
      cookiecmd = ' --cookie="' + self._jTextFieldCookie.getText() + '"'

    if re.search('[a-zA-Z0-9]', self._jTextFieldUA.getText()) is not None:
      uadata = self._jTextFieldUA.getText()
      uacmd = ' --user-agent="' + self._jTextFieldUA.getText() + '"'

    if re.search('[a-zA-Z0-9]', self._jTextFieldCustHeader.getText()) is not None:
      custheaderdata = self._jTextFieldCustHeader.getText()
      custheadercmd = ' --headers="' + self._jTextFieldCustHeader.getText() + '"'

    if re.search('[a-zA-Z0-9]', self._jTextFieldReferer.getText()) is not None:
      refererdata = self._jTextFieldReferer.getText()
      referercmd = ' --referer="' + self._jTextFieldReferer.getText() + '"'

    if re.search('[a-zA-Z0-9]', self._jTextFieldParam.getText()) is not None:
      paramdata = self._jTextFieldParam.getText()
      paramcmd = ' -p "' + self._jTextFieldParam.getText() + '"'

    try:
      sqlmapcmd = 'sqlmap.py -u "' + self._jTextFieldURL.getText() + '"' + datacmd + httpmethodcmd + cookiecmd + uacmd + referercmd + custheadercmd + proxycmd + torcmd + tortypecmd + torportcmd + ' --delay=' + str(self._jComboDelay.getSelectedItem()) + ' --timeout=' + str(self._jComboTimeout.getSelectedItem()) + ' --retries=' + str(self._jComboRetry.getSelectedItem()) + paramcmd + dbmscmd + oscmd + tampercmd + ' --level=' + str(self._jComboLevel.getSelectedItem()) + ' --risk=' + str(self._jComboRisk.getSelectedItem()) + textonly + hpp + ' --threads=' + str(self._jComboThreads.getSelectedItem()) + ' --time-sec=' + str(self._jComboTimeSec.getSelectedItem()) + ' -b' + cu + cdb + hostname + isdba + lusers + lpswds + lprivs + lroles + ldbs + ' --batch --answers="crack=N,dict=N"\n\n'
      print 'SQLMap Command: ' + sqlmapcmd
      req = urllib2.Request('http://' + self._jTextFieldScanIPListen.getText() + ':' + self._jTextFieldScanPortListen.getText() + '/task/new')
      resp = json.load(urllib2.urlopen(req))

      if resp['success'] == True and resp['taskid']:
        sqlitask = resp['taskid']
        sqliopts = {'csrfUrl': csrfurl, 'csrfToken': csrftoken, 'getUsers': lusersstatus, 'getPasswordHashes': lpswdsstatus, 'delay': self._jComboDelay.getSelectedItem(), 'isDba': isdbastatus, 'risk': self._jComboRisk.getSelectedItem(), 'getCurrentUser': custatus, 'getRoles': lrolesstatus, 'getPrivileges': lprivsstatus, 'testParameter': paramdata, 'timeout': self._jComboTimeout.getSelectedItem(), 'torPort': torport, 'level': self._jComboLevel.getSelectedItem(), 'getCurrentDb': cdbstatus, 'answers': 'crack=N,dict=N', 'method': httpmethod, 'cookie': cookiedata, 'proxy': proxy, 'os': os, 'threads': self._jComboThreads.getSelectedItem(), 'url': self._jTextFieldURL.getText(), 'getDbs': ldbsstatus, 'tor': torstatus, 'torType': tortype, 'referer': refererdata, 'retries': self._jComboRetry.getSelectedItem(), 'headers': custheaderdata, 'timeSec': self._jComboTimeSec.getSelectedItem(), 'getHostname': hostnamestatus, 'agent': uadata, 'dbms': dbms, 'tamper': tamperdata, 'hpp': hppstatus, 'getBanner': 'true', 'data': postdata, 'textOnly': textonlystatus}

        print 'Created SQLMap Task: ' + sqlitask + '\n'

        try:
          req = urllib2.Request('http://' + self._jTextFieldScanIPListen.getText() + ':' + self._jTextFieldScanPortListen.getText() + '/option/' + sqlitask + '/set')
          req.add_header('Content-Type', 'application/json')
          resp = json.load(urllib2.urlopen(req, json.dumps(sqliopts)))

          if resp['success'] == True:
            print 'SQLMap options set on Task ' + sqlitask + ': ' + json.dumps(sqliopts) + '\n'
            sqliopts = {'url': self._jTextFieldURL.getText()}

            try:
              checkreq = urllib2.Request('http://' + self._jTextFieldScanIPListen.getText() + ':' + self._jTextFieldScanPortListen.getText() + '/option/' + sqlitask + '/list')
              checkresp = json.load(urllib2.urlopen(checkreq))
              print 'SQLMap options returned: ' + json.dumps(checkresp) + '\n'
            except:
              print 'Failed to get list of options from SQLMap API\n'

            try:
              req = urllib2.Request('http://' + self._jTextFieldScanIPListen.getText() + ':' + self._jTextFieldScanPortListen.getText() + '/scan/' + sqlitask + '/start')
              req.add_header('Content-Type', 'application/json')
              resp = json.load(urllib2.urlopen(req, json.dumps(sqliopts)))

              if resp['success'] == True:
                findings = ThreadExtender(self, self._jTextFieldScanIPListen.getText(), self._jTextFieldScanPortListen.getText(), sqlitask, self.scanUrl, self.scanMessage, self._callbacks)
                t = threading.Thread(target=findings.checkResults)
                self.threads.append(t)
                t.start()
                self._jComboLogs.addItem(sqlitask + '-' + self._jTextFieldURL.getText())
                self._jComboStopScan.addItem(sqlitask + '-' + self._jTextFieldURL.getText())
                self.scancmds[sqlitask] = sqlmapcmd
                print 'Started SQLMap Scan on Task ' + sqlitask +' with Engine ID: ' + str(resp['engineid']) + ' - ' + self._jTextFieldURL.getText() + '\n'
              else:
                print 'Failed to start SQLMap Scan for Task: ' + sqlitask + '\n'

            except:
              print 'Failed to start SQLMap Scan for Task: ' + sqlitask + '\n'

          else:
            print 'Failed to set options on SQLMap Task: ' + sqlitask + '\n'

        except:
          print 'Failed to set options on SQLMap Task: ' + sqlitask + '\n'

      else:
        print 'SQLMap task creation failed\n'

    except:
      print 'SQLMap task creation failed\n'
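
Nearly every optional field in this extension is handled with the same idiom: re.search(...) is not None decides whether the corresponding sqlmap flag gets appended to the command. A minimal sketch of that flag-building pattern with hypothetical field values (the real values come from the Burp UI text fields):

import re

def build_flags(proxy_text, tamper_text):
    """Append sqlmap-style flags only for fields that pass a re.search test."""
    cmd = ''
    # Accept the proxy only if it looks like an http:// or https:// URL.
    if re.search(r'https?://', proxy_text) is not None:
        cmd += ' --proxy=' + proxy_text
    # Treat a field as filled in if it contains at least one alphanumeric character.
    if re.search(r'[a-zA-Z0-9]', tamper_text) is not None:
        cmd += ' --tamper="' + tamper_text + '"'
    return cmd

print(build_flags('http://127.0.0.1:8080', 'space2comment'))
# ->  --proxy=http://127.0.0.1:8080 --tamper="space2comment"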

Example 84

Project: paginate Source File: __init__.py
Function: link_map
    def link_map(self, format='~2~', url=None, show_if_single_page=False, separator=' ',
              symbol_first='<<', symbol_last='>>', symbol_previous='<', symbol_next='>',
              link_attr=dict(), curpage_attr=dict(), dotdot_attr=dict()):
        """ Return map with links to other pages if default pager() function is not suitable solution.
        format:
            Format string that defines how the pager is normally rendered. Uses the same arguments as the pager()
            method, but returns a simple dictionary of the form:
            {'current_page': {'attrs': {},
                                     'href': 'http://example.org/foo/page=1',
                                     'value': 1},
                    'first_page': {'attrs': {},
                                   'href': 'http://example.org/foo/page=1',
                                   'type': 'first_page',
                                   'value': 1},
                    'last_page': {'attrs': {},
                                  'href': 'http://example.org/foo/page=8',
                                  'type': 'last_page',
                                  'value': 8},
                    'next_page': {'attrs': {}, 'href': 'HREF', 'type': 'next_page', 'value': 2},
                    'previous_page': None,
                    'range_pages': [{'attrs': {},
                                     'href': 'http://example.org/foo/page=1',
                                     'type': 'current_page',
                                     'value': 1},
                                     ....
                                    {'attrs': {}, 'href': '', 'type': 'span', 'value': '..'}]}


            The string can contain the following $-tokens that are substituted by the
            string.Template module:

            - $first_page: number of first reachable page
            - $last_page: number of last reachable page
            - $page: number of currently selected page
            - $page_count: number of reachable pages
            - $items_per_page: maximal number of items per page
            - $first_item: index of first item on the current page
            - $last_item: index of last item on the current page
            - $item_count: total number of items
            - $link_first: link to first page (unless this is first page)
            - $link_last: link to last page (unless this is last page)
            - $link_previous: link to previous page (unless this is first page)
            - $link_next: link to next page (unless this is last page)

            To render a range of pages the token '~3~' can be used. The
            number sets the radius of pages around the current page.
            Example for a range with radius 3:

            '1 .. 5 6 7 [8] 9 10 11 .. 50'

            Default: '~2~'

        url
            The URL that page links will point to. Make sure it contains the string
            $page which will be replaced by the actual page number.
            Must be given unless a url_maker is specified to __init__, in which
            case this parameter is ignored.

        symbol_first
            String to be displayed as the text for the $link_first link above.

            Default: '<<' (<<)

        symbol_last
            String to be displayed as the text for the $link_last link above.

            Default: '>>' (>>)

        symbol_previous
            String to be displayed as the text for the $link_previous link above.

            Default: '<' (<)

        symbol_next
            String to be displayed as the text for the $link_next link above.

            Default: '>' (>)

        separator:
            String that is used to separate page links/numbers in the above range of pages.

            Default: ' '

        show_if_single_page:
            if True the navigator will be shown even if there is only one page.

            Default: False

        link_attr (optional)
            A dictionary of attributes that get added to A-HREF links pointing to other pages. Can
            be used to define a CSS style or class to customize the look of links.

            Example: { 'style':'border: 1px solid green' }
            Example: { 'class':'pager_link' }

        curpage_attr (optional)
            A dictionary of attributes that get added to the current page number in the pager (which
            is obviously not a link). If this dictionary is not empty then the elements will be
            wrapped in a SPAN tag with the given attributes.

            Example: { 'style':'border: 3px solid blue' }
            Example: { 'class':'pager_curpage' }

        dotdot_attr (optional)
            A dictionary of attributes that get added to the '..' string in the pager (which is
            obviously not a link). If this dictionary is not empty then the elements will be wrapped
            in a SPAN tag with the given attributes.

            Example: { 'style':'color: #808080' }
            Example: { 'class':'pager_dotdot' }
        """
        self.curpage_attr = curpage_attr
        self.separator = separator
        self.link_attr = link_attr
        self.dotdot_attr = dotdot_attr
        self.url = url

        regex_res = re.search(r'~(\d+)~', format)
        if regex_res:
            radius = regex_res.group(1)
        else:
            radius = 2
        radius = int(radius)
        self.radius = radius

        # Compute the first and last page number within the radius
        # e.g. '1 .. 5 6 [7] 8 9 .. 12'
        # -> leftmost_page  = 5
        # -> rightmost_page = 9
        leftmost_page = max(self.first_page, (self.page-radius))
        rightmost_page = min(self.last_page, (self.page+radius))

        nav_items = {
            "first_page": None,
            "last_page": None,
            "previous_page": None,
            "next_page": None,
            "current_page": None,
            "radius": self.radius,
            "range_pages": []
        }

        nav_items["first_page"] = {"type": "first_page", "value": unicode(symbol_first), "attrs": self.link_attr,
                                   "number": self.first_page, "href": self.url_maker(self.first_page)}

        # Insert dots if there are pages between the first page
        # and the currently displayed page range
        if leftmost_page - self.first_page > 1:
            # Wrap in a SPAN tag if dotdot_attr is set
            nav_items["range_pages"].append({"type": "span", "value": '..', "attrs": self.dotdot_attr, "href": "",
                                             "number": None})

        for thispage in range(leftmost_page, rightmost_page+1):
            # Highlight the current page number and do not use a link
            if thispage == self.page:
                # Wrap in a SPAN tag if curpage_attr is set
                nav_items["range_pages"].append({"type": "current_page", "value": unicode(thispage), "number": thispage,
                                                 "attrs": self.curpage_attr, "href": self.url_maker(thispage)})
                nav_items["current_page"] = {"value": thispage, "attrs": self.curpage_attr,
                                             "type": "current_page", "href": self.url_maker(thispage)}
            # Otherwise create just a link to that page
            else:
                nav_items["range_pages"].append({"type": "page", "value": unicode(thispage), "number": thispage,
                                                 "attrs": self.link_attr, "href": self.url_maker(thispage)})

        # Insert dots if there are pages between the displayed
        # page numbers and the end of the page range
        if self.last_page - rightmost_page > 1:
            # Wrap in a SPAN tag if dotdot_attr is set
            nav_items["range_pages"].append({"type": "span", "value": '..', "attrs": self.dotdot_attr, "href": "",
                                             "number":None})

        # Create a link to the very last page (unless we are on the last
        # page or there would be no need to insert '..' spacers)
        nav_items["last_page"] = {"type": "last_page", "value": unicode(symbol_last), "attrs": self.link_attr,
                                  "href": self.url_maker(self.last_page), "number":self.last_page}

        nav_items["previous_page"] = {"type": "previous_page", "value": unicode(symbol_previous),
                                      "attrs": self.link_attr, "number": self.previous_page or self.first_page,
                                      "href": self.url_maker(self.previous_page or self.first_page)}

        nav_items["next_page"] = {"type": "next_page", "value": unicode(symbol_next),
                                  "attrs": self.link_attr, "number": self.next_page or self.last_page,
                                  "href": self.url_maker(self.next_page or self.last_page)}

        return nav_items
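
Here re.search extracts a single capture group: the ~N~ token in the format string gives the pager radius, with a fallback of 2 when the token is absent. A compact sketch of that capture-with-default idiom:

import re

def pager_radius(fmt, default=2):
    """Return the number inside a ~N~ token, or the default when there is none."""
    match = re.search(r'~(\d+)~', fmt)
    return int(match.group(1)) if match else default

print(pager_radius('~3~'))                 # 3
print(pager_radius('$link_previous ~5~'))  # 5
print(pager_radius('$link_next'))          # 2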

Example 85

Project: CouchPotatoV1 Source File: library.py
    def getMovies(self, folder = None, withMeta = True):
        log.debug('getMoviesStart')

        movies = []
        qualities = Qualities()

        movieFolder = unicode(folder) if folder else self.config.get('Renamer', 'destination')
        if not os.path.isdir(movieFolder):
            log.error('Can\'t find directory: %s' % movieFolder)
            return movies

        log.debug('os.walk(movieFolder) %s' % movieFolder)
        # Walk the tree once to catch any UnicodeDecodeErrors that might arise
        # from malformed file and directory names. Use the non-unicode version
        # of movieFolder if so.
        try:
            for x in os.walk(movieFolder): pass
            walker = os.walk(movieFolder)
        except UnicodeDecodeError:
            walker = os.walk(str(movieFolder))
        for root, subfiles, filenames in walker:
            if self.abort:
                log.debug('Aborting moviescan')
                return movies

            movie = {
                'movie': None,
                'queue': 0,
                'match': False,
                'meta': {},
                'info': {
                    'name': None,
                    'cpnfoImdb': None,
                    'ppScriptName': None,
                    'imdb': None,
                    'year': None,
                    'quality': '',
                    'resolution': None,
                    'sourcemedia': '',
                    'size': 0,
                    'codec': {
                        'video': '',
                        'audio': ''
                    },
                    'group': ''
                },
                'history': None,
                'path': root,
                'folder': root.split(os.path.sep)[-1:].pop(),
                'nfo':[], 'files':[], 'trailer':[], 'cpnfo':None,
                'subtitles':{
                    'files': [],
                    'extras': []
                }
            }

            if movie['folder'] == 'VIDEO_TS':
                movie['folder'] = movie['path'].split(os.path.sep)[-2:-1].pop()

            patterns = []
            for extType in self.extensions.itervalues():
                patterns.extend(extType)

            for pattern in patterns:
                for filename in fnmatch.filter(sorted(filenames), pattern):
                    fullFilePath = os.path.join(root, filename)
                    log.debug('Processing file: %s' % fullFilePath)

                    new = {
                       'filename': filename,
                       'ext': os.path.splitext(filename)[1].lower()[1:], #[1:]to remove . from extension
                    }

                    #cpnfo
                    if new.get('filename') in self.extensions['cpnfo']:
                        movie['cpnfo'] = new.get('filename')
                    #nfo file
                    if('*.' + new.get('ext') in self.extensions['nfo']):
                        movie['nfo'].append(filename)
                    #subtitle file
                    elif('*.' + new.get('ext') in self.extensions['subtitle']):
                        movie['subtitles']['files'].append(new)
                    #idx files
                    elif('*.' + new.get('ext') in self.extensions['subtitleExtras']):
                        movie['subtitles']['extras'].append(new)
                    #trailer file
                    elif re.search('(^|[\W_])trailer\d*[\W_]', filename.lower()) and self.filesizeBetween(fullFilePath, 2, 250):
                        movie['trailer'].append(new)
                    else:
                        #ignore movies files / or not
                        if self.keepFile(fullFilePath):
                            log.debug('self.keepFile(fullFilePath)')
                            new['hash'] = hashFile(fullFilePath) # Add movie hash
                            new['size'] = os.path.getsize(fullFilePath) # File size
                            movie['files'].append(new)

            if movie['files']:
                log.debug('Files found')
                #Find movie by cpnfo
                if movie['cpnfo']:
                    log.debug('Scanning cpnfo')
                    cpnfoFile = open(os.path.join(movie['path'], movie['cpnfo']), 'r').readlines()
                    cpnfoFile = [x.strip() for x in cpnfoFile]
                    movie['info']['ppScriptName'] = cpnfoFile[0]
                    movie['info']['cpnfoImdb'] = cpnfoFile[1]

                # Find movie by nfo
                if movie['nfo']:
                    log.debug('Scanning nfo')
                    for nfo in movie['nfo']:
                        nfoFile = open(os.path.join(movie['path'], nfo), 'r').read()
                        movie['info']['imdb'] = self.getImdb(nfoFile)

                # Find movie via files
                log.debug('self.determineMovie(movie)')
                movie['movie'] = self.determineMovie(movie)

                if movie['movie']:
                    movie['match'] = True

                    log.debug('self.getHistory(movie[movie])')
                    movie['history'] = self.getHistory(movie['movie'])
                    movie['queue'] = self.getQueue(movie['movie'])

                    movie['info']['name'] = movie['movie'].name
                    movie['info']['year'] = movie['movie'].year
                    try:
                        movie['info']['quality'] = qualities.types.get(movie['queue'].qualityType).get('label')
                    except:
                        movie['info']['quality'] = qualities.guess([os.path.join(movie['path'], file['filename']) for file in movie['files']])

                    for file in movie['files']:
                        movie['info']['size'] += file['size']

                    movie['info']['size'] = str(movie['info']['size'])
                    movie['info']['group'] = self.getGroup(movie['folder'])
                    movie['info']['codec']['video'] = self.getCodec(movie['folder'], self.codecs['video'])
                    movie['info']['codec']['audio'] = self.getCodec(movie['folder'], self.codecs['audio'])

                    #get metainfo about file
                    if withMeta:
                        log.debug('self.getHistory(movie[movie])')
                        testFile = os.path.join(movie['path'], movie['files'][0]['filename'])
                        try:
                            movie['meta'].update(self.getMeta(testFile))
                        except:
                            pass

                        #check the video file for its resolution
                        if movie['meta'].has_key('video stream'):
                            width = movie['meta']['video stream'][0]['image width']
                            height = movie['meta']['video stream'][0]['image height']

                            if width and height:
                                if width > 1900 and width < 2000 and height <= 1080:
                                    namedResolution = '1080p'
                                elif width > 1200 and width < 1300 and height <= 720:
                                    namedResolution = '720p'
                                else:
                                    namedResolution = None
                            else:
                                # width or height missing, so the resolution cannot be determined
                                namedResolution = None
                        else:
                            log.info("Unable to fetch audio/video details for %s" % testFile)
                            namedResolution = None

                        movie['info']['resolution'] = namedResolution
                        movie['info']['sourcemedia'] = self.getSourceMedia(testFile)

                # Create filename without cd1/cd2 etc
                log.debug('removeMultipart')
                movie['filename'] = self.removeMultipart(os.path.splitext(movie['files'][0]['filename'])[0])

                # Give back ids, not table rows
                if self.noTables:
                    log.debug('self.noTables')
                    movie['history'] = [h.id for h in movie['history']] if movie['history'] else movie['history']
                    movie['movie'] = movie['movie'].id if movie['movie'] else movie['movie']

                log.debug('movies.append(movie)')
                movies.append(movie)

        log.debug('getMoviesEnd')
        return movies
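
The trailer check pairs re.search with a size test: a lowercased filename matching (^|[\W_])trailer\d*[\W_] and passing filesizeBetween(fullFilePath, 2, 250) is treated as a trailer rather than a feature file. A short sketch of just the filename side of that test:

import re

TRAILER_RE = re.compile(r'(^|[\W_])trailer\d*[\W_]')

def looks_like_trailer(filename):
    # The original lowercases the filename instead of passing re.IGNORECASE.
    return TRAILER_RE.search(filename.lower()) is not None

print(looks_like_trailer('Big.Movie-Trailer2.mkv'))  # True
print(looks_like_trailer('Big.Movie.2012.mkv'))      # False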

Example 86

Project: blender_mmd_tools Source File: importer.py
def import_pmd(**kwargs):
    """ Import pmd file
    """
    target_path = kwargs['filepath']
    pmd_model = pmd.load(target_path)


    logging.info('')
    logging.info('****************************************')
    logging.info(' mmd_tools.import_pmd module')
    logging.info('----------------------------------------')
    logging.info(' Start converting pmd data into pmx data')
    logging.info('              by the mmd_tools.pmd module.')
    logging.info('')

    pmx_model = pmx.Model()

    pmx_model.name = pmd_model.name
    pmx_model.name_e = pmd_model.name_e
    pmx_model.comment = pmd_model.comment
    pmx_model.comment_e = pmd_model.comment_e

    pmx_model.vertices = []

    # convert vertices
    logging.info('')
    logging.info('------------------------------')
    logging.info(' Convert Vertices')
    logging.info('------------------------------')
    for v in pmd_model.vertices:
        pmx_v = pmx.Vertex()
        pmx_v.co = v.position
        pmx_v.normal = v.normal
        pmx_v.uv = v.uv
        pmx_v.additional_uvs= []
        pmx_v.edge_scale = 1

        weight = pmx.BoneWeight()
        if v.bones[0] != v.bones[1]:
            weight.type = pmx.BoneWeight.BDEF2
            weight.bones = v.bones
            weight.weights = [float(v.weight)/100.0]
        else:
            weight.type = pmx.BoneWeight.BDEF1
            weight.bones = [v.bones[0]]
            weight.weights = [float(v.weight)/100.0]

        pmx_v.weight = weight

        pmx_model.vertices.append(pmx_v)
    logging.info('----- Converted %d vertices', len(pmx_model.vertices))

    logging.info('')
    logging.info('------------------------------')
    logging.info(' Convert Faces')
    logging.info('------------------------------')
    for f in pmd_model.faces:
        pmx_model.faces.append(f)
    logging.info('----- Converted %d faces', len(pmx_model.faces))

    knee_bones = []

    logging.info('')
    logging.info('------------------------------')
    logging.info(' Convert Bones')
    logging.info('------------------------------')
    for i, bone in enumerate(pmd_model.bones):
        pmx_bone = pmx.Bone()
        pmx_bone.name = bone.name
        pmx_bone.name_e = bone.name_e
        pmx_bone.location = bone.position
        pmx_bone.parent = bone.parent
        if bone.type != 9 and bone.type != 8:
            pmx_bone.displayConnection = bone.tail_bone
        else:
            pmx_bone.displayConnection = -1
        if pmx_bone.displayConnection <= 0:
            pmx_bone.displayConnection = [0.0, 0.0, 0.0]
        pmx_bone.isIK = False
        if bone.type == 0:
            pmx_bone.isMovable = False
        elif bone.type == 1:
            pass
        elif bone.type == 2:
            pmx_bone.transform_order = 1
        elif bone.type == 4:
            pmx_bone.isMovable = False
        elif bone.type == 5:
            pmx_bone.hasAdditionalRotate = True
            pmx_bone.additionalTransform = (bone.ik_bone, 1.0)
        elif bone.type == 7:
            pmx_bone.visible = False
        elif bone.type == 8:
            pmx_bone.isMovable = False
            tail_loc=mathutils.Vector(pmd_model.bones[bone.tail_bone].position)
            loc = mathutils.Vector(bone.position)
            vec = tail_loc - loc
            vec.normalize()
            pmx_bone.axis=list(vec)
        elif bone.type == 9:
            pmx_bone.visible = False
            pmx_bone.hasAdditionalRotate = True
            pmx_bone.additionalTransform = (bone.tail_bone, float(bone.ik_bone)/100.0)

        if bone.type >= 4:
            pmx_bone.transform_order = 2

        pmx_model.bones.append(pmx_bone)

        if re.search(u'ひざ$', pmx_bone.name):
            knee_bones.append(i)

    for i in pmx_model.bones:
        if i.parent != -1 and pmd_model.bones[i.parent].type == 2:
            i.transform_order = 1
    logging.info('----- Converted %d bones', len(pmx_model.bones))

    logging.info('')
    logging.info('------------------------------')
    logging.info(' Convert IKs')
    logging.info('------------------------------')
    applied_ik_bones = []
    for ik in pmd_model.iks:
        if ik.bone in applied_ik_bones:
            logging.info('The bone %s is targeted by two or more IK bones.', pmx_model.bones[ik.bone].name)
            b = pmx_model.bones[ik.bone]
            t = copy.deepcopy(b)
            t.name += '+'
            t.parent = ik.bone
            t.ik_links = []
            pmx_model.bones.append(t)
            ik.bone = len(pmx_model.bones) - 1
            logging.info('Duplicate the bone: %s -> %s', b.name, t.name)
        pmx_bone = pmx_model.bones[ik.bone]
        logging.debug('Add IK settings to the bone %s', pmx_bone.name)
        pmx_bone.isIK = True
        pmx_bone.target = ik.target_bone
        pmx_bone.loopCount = ik.iterations
        for i in ik.ik_child_bones:
            ik_link = pmx.IKLink()
            ik_link.target = i
            if i in knee_bones:
                ik_link.maximumAngle = [-0.5, 0.0, 0.0]
                ik_link.minimumAngle = [-180.0, 0.0, 0.0]
                logging.info('  Add knee constraints to %s', i)
            logging.debug('  IKLink: %s(index: %d)', pmx_model.bones[i].name, i)
            pmx_bone.ik_links.append(ik_link)
        applied_ik_bones.append(ik.bone)
    logging.info('----- Converted %d IKs', len(pmd_model.iks))

    texture_map = {}
    logging.info('')
    logging.info('------------------------------')
    logging.info(' Convert Materials')
    logging.info('------------------------------')
    for i, mat in enumerate(pmd_model.materials):
        pmx_mat = pmx.Material()
        pmx_mat.name = '材質%d'%(i+1)
        pmx_mat.name_e = 'Material%d'%(i+1)
        pmx_mat.diffuse = mat.diffuse
        pmx_mat.specular = mat.specular + [mat.specular_intensity]
        pmx_mat.ambient = mat.ambient
        pmx_mat.enabled_self_shadow = True # pmd doesn't support this
        pmx_mat.enabled_self_shadow_map = abs(mat.diffuse[3] - 0.98) > 1e-7 # consider precision error
        pmx_mat.enabled_toon_edge = (mat.edge_flag != 0)
        pmx_mat.vertex_count = mat.vertex_count
        if len(mat.texture_path) > 0:
            tex_path = mat.texture_path
            if tex_path not in texture_map:
                logging.info('  Create pmx.Texture %s', tex_path)
                tex = pmx.Texture()
                tex.path = os.path.normpath(os.path.join(os.path.dirname(target_path), tex_path))
                pmx_model.textures.append(tex)
                texture_map[tex_path] = len(pmx_model.textures) - 1
            pmx_mat.texture = texture_map[tex_path]
        if len(mat.sphere_path) > 0:
            tex_path = mat.sphere_path
            if tex_path not in texture_map:
                logging.info('  Create pmx.Texture %s', tex_path)
                tex = pmx.Texture()
                tex.path = os.path.normpath(os.path.join(os.path.dirname(target_path), tex_path))
                pmx_model.textures.append(tex)
                texture_map[tex_path] = len(pmx_model.textures) - 1
            pmx_mat.sphere_texture = texture_map[tex_path]
            pmx_mat.sphere_texture_mode = mat.sphere_mode
        pmx_model.materials.append(pmx_mat)
    logging.info('----- Converted %d materials', len(pmx_model.materials))

    logging.info('')
    logging.info('------------------------------')
    logging.info(' Convert Morphs')
    logging.info('------------------------------')
    t = list(filter(lambda x: x.type == 0, pmd_model.morphs))
    if len(t) == 0:
        logging.error('Base morph not found.')
        logging.error('Skipping conversion of vertex morphs.')
    else:
        if len(t) > 1:
            logging.warning('Found two or more base morphs.')
        vertex_map = []
        for i in t[0].data:
            vertex_map.append(i.index)

        for morph in pmd_model.morphs:
            logging.debug('Vertex Morph: %s', morph.name)
            if morph.type == 0:
                continue
            pmx_morph = pmx.VertexMorph(morph.name, morph.name_e, morph.type)
            for i in morph.data:
                mo = pmx.VertexMorphOffset()
                mo.index = vertex_map[i.index]
                mo.offset = i.offset
                pmx_morph.offsets.append(mo)
            pmx_model.morphs.append(pmx_morph)
    logging.info('----- Converted %d morphs', len(pmx_model.morphs))

    logging.info('')
    logging.info('------------------------------')
    logging.info(' Convert Rigid bodies')
    logging.info('------------------------------')
    for rigid in pmd_model.rigid_bodies:
        pmx_rigid = pmx.Rigid()

        pmx_rigid.name = rigid.name

        pmx_rigid.bone = rigid.bone
        pmx_rigid.collision_group_number = rigid.collision_group_number
        pmx_rigid.collision_group_mask = rigid.collision_group_mask
        pmx_rigid.type = rigid.type

        pmx_rigid.size = rigid.size

        # a location parameter of pmd.RigidBody is the offset from the relational bone or the center bone.
        if rigid.bone == -1:
            t = 0
        else:
            t = rigid.bone
        pmx_rigid.location = mathutils.Vector(pmx_model.bones[t].location) + mathutils.Vector(rigid.location)
        pmx_rigid.rotation = rigid.rotation

        pmx_rigid.mass = rigid.mass
        pmx_rigid.velocity_attenuation = rigid.velocity_attenuation
        pmx_rigid.rotation_attenuation = rigid.rotation_attenuation
        pmx_rigid.bounce = rigid.bounce
        pmx_rigid.friction = rigid.friction
        pmx_rigid.mode = rigid.mode

        pmx_model.rigids.append(pmx_rigid)
    logging.info('----- Converted %d rigid bodies', len(pmx_model.rigids))

    logging.info('')
    logging.info('------------------------------')
    logging.info(' Convert Joints')
    logging.info('------------------------------')
    for joint in pmd_model.joints:
        pmx_joint = pmx.Joint()

        pmx_joint.name = joint.name
        pmx_joint.src_rigid = joint.src_rigid
        pmx_joint.dest_rigid = joint.dest_rigid

        pmx_joint.location = joint.location
        pmx_joint.rotation = joint.rotation

        pmx_joint.maximum_location = joint.minimum_location
        pmx_joint.minimum_location = joint.maximum_location
        pmx_joint.maximum_rotation = joint.minimum_rotation
        pmx_joint.minimum_rotation = joint.maximum_rotation

        pmx_joint.spring_constant = joint.spring_constant
        pmx_joint.spring_rotation_constant = joint.spring_rotation_constant

        pmx_model.joints.append(pmx_joint)
    logging.info('----- Converted %d joints', len(pmx_model.joints))

    logging.info(' Finish converting pmd into pmx.')
    logging.info('----------------------------------------')
    logging.info(' mmd_tools.import_pmd module')
    logging.info('****************************************')

    importer = import_pmx.PMXImporter()
    kwargs['pmx'] = pmx_model
    importer.execute(**kwargs)
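
re.search appears only once in this importer: it tags knee bones by matching the Japanese suffix ひざ ("knee") at the end of a bone name, so IK angle limits can be attached to those bones later. A minimal sketch of that suffix test over some sample PMD-style bone names:

# -*- coding: utf-8 -*-
import re

bone_names = [u'左ひざ', u'右ひざ', u'左足首']  # sample bone names, not from a real model
knee_indices = [i for i, name in enumerate(bone_names)
                if re.search(u'ひざ$', name)]
print(knee_indices)  # [0, 1]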

Example 87

Project: stonix Source File: DisableRemoveableStorage.py
    def fixMac(self):
        '''This method will attempt to disable certain storage ports by moving
        certain kernel extensions.  If the check box is checked we will
        move the kernel (if present) associated with that storage port/device
        into a folder designated for those disabled extensions.  If the
        check box is unchecked, we will assume the user doesn't want this
        disabled and if the kernel is no longer where it should be, we will
        check the disabled extensions folder to see if it was previously
        disabled.  If it's in that folder, we will move it back.
        @author: bemalmbe
        @return: bool
        @change: dwalker 8/19/2014
        '''
        debug = ""
        check = "/usr/sbin/kextstat "
        unload = "/sbin/kextunload "
        load = "/sbin/kextload "
        filepath = "/System/Library/Extensions/"
        success = True
        #created1 = False
        created2 = False
        if not os.path.exists(self.plistpath):
            createFile(self.plistpath, self.logger)
        self.iditerator += 1
        myid = iterate(self.iditerator, self.rulenumber)
        cmd = "/bin/launchctl unload " + self.plistpath
        event = {"eventtype": "commandstring",
                 "command": cmd}
        self.statechglogger.recordchgevent(myid, event)
        #created1 = True
        self.iditerator += 1
        myid = iterate(self.iditerator, self.rulenumber)
        event = {"eventtype": "creation",
                 "filepath": self.plistpath}
        self.statechglogger.recordchgevent(myid, event)
        if os.path.exists(self.plistpath):
            uid, gid = "", ""
            statdata = os.stat(self.plistpath)
            mode = stat.S_IMODE(statdata.st_mode)
            ownergrp = getUserGroupName(self.plistpath)
            owner = ownergrp[0]
            group = ownergrp[1]
            if grp.getgrnam("wheel")[2] != "":
                gid = grp.getgrnam("wheel")[2]
            if pwd.getpwnam("root")[2] != "":
                uid = pwd.getpwnam("root")[2]
#             if not created1:
#                 if mode != 420 or owner != "root" or group != "wheel":
#                     origuid = statdata.st_uid
#                     origgid = statdata.st_gid
#                     if gid:
#                         if uid:
#                             self.iditerator += 1
#                             myid = iterate(self.iditerator,
#                                            self.rulenumber)
#                             event = {"eventtype": "perm",
#                                      "startstate": [origuid,
#                                                     origgid, mode],
#                                      "endstate": [uid, gid, 420],
#                                      "filepath": self.plistpath}
            contents = readFile(self.plistpath, self.logger)
            contentstring = ""
            for line in contents:
                contentstring += line.strip()
            if not re.search(self.plistregex, contentstring):
                tmpfile = self.plistpath + ".tmp"
                if not writeFile(tmpfile, self.plistcontents, self.logger):
                    success = False
#                 elif not created1:
#                     self.iditerator += 1
#                     myid = iterate(self.iditerator, self.rulenumber)
#                     event = {"eventtype": "conf",
#                              "filepath": self.plistpath}
#                     self.statechglogger.recordchgevent(myid, event)
#                     self.statechglogger.recordfilechange(self.plistpath,
#                                                          tmpfile, myid)
#                     os.rename(tmpfile, self.plistpath)
#                     if uid and gid:
#                         os.chown(self.plistpath, uid, gid)
#                     os.chmod(self.plistpath, 420)
                else:
                    os.rename(tmpfile, self.plistpath)
                    if uid and gid:
                        os.chown(self.plistpath, uid, gid)
                    os.chmod(self.plistpath, 420)
        if not os.path.exists(self.daemonpath):
            if not createFile(self.daemonpath, self.logger):
                success = False
                self.detailedresults += "Unable to create the disablestorage python file\n"
        self.iditerator += 1
        myid = iterate(self.iditerator, self.rulenumber)
        event = {"eventtype": "creation",
                 "filepath": self.daemonpath}
        self.statechglogger.recordchgevent(myid, event)
        if os.path.exists(self.daemonpath):
            uid, gid = "", ""
            statdata = os.stat(self.daemonpath)
            mode = stat.S_IMODE(statdata.st_mode)
            ownergrp = getUserGroupName(self.daemonpath)
            owner = ownergrp[0]
            group = ownergrp[1]
            if grp.getgrnam("admin")[2] != "":
                gid = grp.getgrnam("admin")[2]
            if pwd.getpwnam("root")[2] != "":
                uid = pwd.getpwnam("root")[2]
            #if we didn't have to create the file then we want to record
            #incorrect permissions as state event
            if not created2:
                if mode != 509 or owner != "root" or group != "admin":
                    origuid = statdata.st_uid
                    origgid = statdata.st_gid
                    if gid:
                        if uid:
                            self.iditerator += 1
                            myid = iterate(self.iditerator,
                                           self.rulenumber)
                            event = {"eventtype": "perm",
                                     "startstate": [origuid,
                                                    origgid, mode],
                                     "endstate": [uid, gid, 509],
                                     "filepath": self.daemonpath}
            contents = readFile(self.daemonpath, self.logger)
            contentstring = ""
            for line in contents:
                contentstring += line
            if contentstring != self.daemoncontents:
                tmpfile = self.daemonpath + ".tmp"
                if writeFile(tmpfile, self.daemoncontents, self.logger):
                    if not created2:
                        self.iditerator += 1
                        myid = iterate(self.iditerator, self.rulenumber)
                        event = {"eventtype": "conf",
                                 "filepath": self.daemonpath}
                        self.statechglogger.recordchgevent(myid, event)
                        self.statechglogger.recordfilechange(self.daemonpath,
                                                             tmpfile, myid)
                        os.rename(tmpfile, self.daemonpath)
                        if uid and gid:
                            os.chown(self.daemonpath, uid, gid)
                        os.chmod(self.daemonpath, 509)
                    else:
                        os.rename(tmpfile, self.daemonpath)
                        if uid and gid:
                            os.chown(self.daemonpath, uid, gid)
                        os.chmod(self.daemonpath, 509)
                else:
                    success = False
            elif not checkPerms(self.daemonpath, [0, 0, 509], self.logger):
                if not setPerms(self.daemonpath, [0, 0, 509], self.logger):
                    success = False
        if re.search("^10.11", self.environ.getosver()):
            usb = "IOUSBMassStorageDriver"
        else:
            usb = "IOUSBMassStorageClass"
        cmd = check + "| grep " + usb
        self.ch.executeCommand(cmd)

        # if return code is 0, the kernel module is loaded, thus we need
        # to disable it
        if self.ch.getReturnCode() == 0:
            cmd = unload + filepath + usb + ".kext/"
            if not self.ch.executeCommand(cmd):
                debug += "Unable to disable USB\n"
                success = False
            else:
                self.iditerator += 1
                myid = iterate(self.iditerator, self.rulenumber)
                undo = load + filepath + usb + ".kext/"
                event = {"eventtype": "comm",
                         "command": undo}
                self.statechglogger.recordchgevent(myid, event)
        fw = "IOFireWireSerialBusProtocolTransport"
        cmd = check + "| grep " + fw
        self.ch.executeCommand(cmd)

        # if return code is 0, the kernel module is loaded, thus we need
        # to disable it
        if self.ch.getReturnCode() == 0:
            cmd = unload + filepath + fw + ".kext/"
            if not self.ch.executeCommand(cmd):
                debug += "Unable to disable Firewire\n"
                success = False
            else:
                self.iditerator += 1
                myid = iterate(self.iditerator, self.rulenumber)
                undo = load + filepath + fw + ".kext/"
                event = {"eventtype": "comm",
                         "command": undo}
                self.statechglogger.recordchgevent(myid, event)
        tb = "AppleThunderboltUTDM"
        cmd = check + "| grep " + tb
        self.ch.executeCommand(cmd)

        # if return code is 0, the kernel module is loaded, thus we need
        # to disable it
        if self.ch.getReturnCode() == 0:
            cmd = unload + "/System/Library/Extensions/" + tb + ".kext/"
            if not self.ch.executeCommand(cmd):
                debug += "Unable to disable Thunderbolt\n"
                success = False
            else:
                self.iditerator += 1
                myid = iterate(self.iditerator, self.rulenumber)
                undo = load + filepath + tb + ".kext/"
                event = {"eventtype": "comm",
                         "command": undo}
                self.statechglogger.recordchgevent(myid, event)
        sd = "AppleSDXC"
        cmd = check + "| grep " + sd
        self.ch.executeCommand(cmd)

        # if return code is 0, the kernel module is loaded, thus we need
        # to disable it
        if self.ch.getReturnCode() == 0:
            cmd = unload + "/System/Library/Extensions/" + sd + ".kext/"
            if not self.ch.executeCommand(cmd):
                debug += "Unable to disable SD Card functionality\n"
                success = False
            else:
                self.iditerator += 1
                myid = iterate(self.iditerator, self.rulenumber)
                undo = load + filepath + sd + ".kext/"
                event = {"eventtype": "comm",
                         "command": undo}
                self.statechglogger.recordchgevent(myid, event)
        cmd = ["/bin/launchctl", "load", self.plistpath]
        if not self.ch.executeCommand(cmd):
            debug += "Unable to load the launchctl job to regularly " + \
                "disable removeable storage.  May need to be done manually\n"
            success = False
        if debug:
            self.logger.log(LogPriority.DEBUG, debug)
        return success
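
The fix() method above uses re.search twice: once with self.plistregex to verify the launchd plist already holds the expected contents, and once with "^10.11" to pick the right USB kext name for the OS version. A minimal standalone sketch of the version check, using made-up version strings (the osver values below are assumptions, not STONIX output):

    import re

    for osver in ("10.11.6", "10.10.5"):
        # "^10.11" is anchored at the start of the string, so a version string
        # starting with 10.11 (El Capitan) selects IOUSBMassStorageDriver.
        if re.search("^10.11", osver):
            usb = "IOUSBMassStorageDriver"
        else:
            usb = "IOUSBMassStorageClass"
        print(osver, usb)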

Example 88

Project: youtube-dl Source File: http.py
Function: real_download
    def real_download(self, filename, info_dict):
        url = info_dict['url']
        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        add_headers = info_dict.get('http_headers')
        if add_headers:
            headers.update(add_headers)
        basic_request = sanitized_Request(url, None, headers)
        request = sanitized_Request(url, None, headers)

        is_test = self.params.get('test', False)

        if is_test:
            request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', True):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                data = self.ydl.urlopen(request)
                # When trying to resume, the Content-Range header of the response has to be
                # checked against the requested Range header, because some webservers
                # don't support resuming and serve the whole file with no Content-Range
                # set in the response despite the requested Range (see
                # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
                if resume_len > 0:
                    content_range = data.headers.get('Content-Range')
                    if content_range:
                        content_range_m = re.search(r'bytes (\d+)-', content_range)
                        # Content-Range is present and matches requested Range, resume is possible
                        if content_range_m and resume_len == int(content_range_m.group(1)):
                            break
                    # Content-Range is either not present or invalid. Assuming remote webserver is
                    # trying to send the whole file, resume is not possible, so wiping the local file
                    # and performing entire redownload
                    self.report_unable_to_resume()
                    resume_len = 0
                    open_mode = 'wb'
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = self.ydl.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                                'downloaded_bytes': resume_len,
                                'total_bytes': resume_len,
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            resume_len = 0
                            open_mode = 'wb'
                            break
            except socket.error as e:
                if e.errno != errno.ECONNRESET:
                    # Connection reset is no problem, just retry
                    raise

            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error('giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)

        # Range HTTP header may be ignored/unsupported by a webserver
        # (e.g. extractor/scivee.py, extractor/bambuser.py).
        # However, for a test we still would like to download just a piece of a file.
        # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
        # block size when downloading a file.
        if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
            data_len = self._TEST_FILE_SIZE

        if data_len is not None:
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get('min_filesize')
            max_data_len = self.params.get('max_filesize')
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()

        # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
        now = None  # needed for slow_down() in the first loop run
        before = start  # start measuring
        while True:

            # Download and write
            data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
            byte_counter += len(data_block)

            # exit loop when download is finished
            if len(data_block) == 0:
                break

            # Open destination file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error('unable to open for writing: %s' % str(err))
                    return False

                if self.params.get('xattr_set_filesize', False) and data_len is not None:
                    try:
                        write_xattr(tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
                    except (XAttrUnavailableError, XAttrMetadataError) as err:
                        self.report_error('unable to set filesize xattr: %s' % str(err))

            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr('\n')
                self.report_error('unable to write data: %s' % str(err))
                return False

            # Apply rate limit
            self.slow_down(start, now, byte_counter - resume_len)

            # end measuring of one loop run
            now = time.time()
            after = now

            # Adjust block size
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            before = after

            # Progress message
            speed = self.calc_speed(start, now, byte_counter - resume_len)
            if data_len is None:
                eta = None
            else:
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)

            self._hook_progress({
                'status': 'downloading',
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'eta': eta,
                'speed': speed,
                'elapsed': now - start,
            })

            if is_test and byte_counter == data_len:
                break

        if stream is None:
            self.to_stderr('\n')
            self.report_error('Did not get any data blocks')
            return False
        if tmpfilename != '-':
            stream.close()

        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
            'elapsed': time.time() - start,
        })

        return True
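
The resume logic above hinges on re.search(r'bytes (\d+)-', content_range) to pull the starting byte offset out of the Content-Range response header. A small standalone sketch with a made-up header value:

    import re

    resume_len = 1024
    content_range = 'bytes 1024-2047/2048'  # hypothetical response header
    m = re.search(r'bytes (\d+)-', content_range)
    if m and int(m.group(1)) == resume_len:
        print('server honoured the Range request, resuming at byte', resume_len)
    else:
        print('Content-Range missing or wrong, restarting the download')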

Example 89

Project: script.tvshowtime Source File: default.py
    def onNotification(self, sender, method, data):
        log('onNotification')
        log('method=%s' % method)
        if (method == 'Player.OnPlay'):
            self._setUp()
            self._total_time = player.getTotalTime()
            self._tracker.start()
            log('Player.OnPlay')
            if player.http == 'true' and player.getPlayingFile()[:4] == 'http' and re.search(r'[sS][0-9]*[eE][0-9]*', os.path.basename(player.getPlayingFile()), flags=0) :
                player.http_playing = True
                player.filename = os.path.basename(player.getPlayingFile())
                self.startcut = player.filename.find("%5B")
                self.endcut = player.filename.find("%5D")
                self.tocut = player.filename[self.startcut:self.endcut]
                player.filename = player.filename.replace(self.tocut, "")
                player.filename = player.filename.replace("%5B", "")
                player.filename = player.filename.replace("%5D", "")
                player.filename = player.filename.replace("%20", ".")
                log('tvshowtitle=%s' % player.filename)
                player.episode = FindEpisode(player.token, 0, player.filename)
                log('episode.is_found=%s' % player.episode.is_found)
                if player.episode.is_found:
                    if player.notifications == 'true':                        
                        if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                            return
                        if player.notif_scrobbling == 'false':
                            return
                        notif('%s %s %sx%s' % (__language__(32904), player.episode.showname, player.episode.season_number, player.episode.number), time=2500)
                else:
                    if player.notifications == 'true':
                        if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                            return
                        notif(__language__(32905), time=2500)
            else:
                player.http_playing = False
                response = json.loads(data) 
                log('%s' % response)
                if response.get('item').get('type') == 'episode':
                    xbmc_id = response.get('item').get('id')
                    item = self.getEpisodeTVDB(xbmc_id)    
                    log('showtitle=%s' % item['showtitle'])
                    log('season=%s' % item['season'])
                    log('episode=%s' % item['episode'])
                    log('episode_id=%s' % item['episode_id'])
                    if len(item['showtitle']) > 0 and item['season'] > 0 and item['episode'] > 0 and item['episode_id'] > 0:                   
                        player.filename = '%s.S%.2dE%.2d' % (formatName(item['showtitle']), float(item['season']), float(item['episode']))
                        log('tvshowtitle=%s' % player.filename)
                        player.episode = FindEpisode(player.token, item['episode_id'])
                        log('episode.is_found=%s' % player.episode.is_found)
                        if player.episode.is_found:
                            if player.notifications == 'true':                        
                                if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                    return
                                if player.notif_scrobbling == 'false':
                                    return
                                notif('%s %s %sx%s' % (__language__(32904), player.episode.showname, player.episode.season_number, player.episode.number), time=2500)
                        else:
                            if player.notifications == 'true':
                                if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                    return
                                notif(__language__(32905), time=2500)
                    else:
                        if player.notifications == 'true':
                            if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                return
                            notif(__language__(32905), time=2500)              
        if (method == 'Player.OnStop'): 
            self._tearDown()
            actual_percent = (self._last_pos/self._total_time)*100
            log('last_pos / total_time : %s / %s = %s %%' % (self._last_pos, self._total_time, actual_percent)) 
            log('Player.OnStop') 
            if player.http == 'true' and player.http_playing == True :
                if player.progress == 'true':
                    player.episode = FindEpisode(player.token, 0, player.filename)
                    log('episode.is_found=%s' % player.episode.is_found)
                    if player.episode.is_found:
                        log('progress=%s' % self._last_pos)
                        self.progress = SaveProgress(player.token, player.episode.id, self._last_pos)   
                        log('progress.is_set:=%s' % self.progress.is_set)  
                        if actual_percent > 90:
                            log('MarkAsWatched(*, %s, %s, %s)' % (player.filename, player.facebook, player.twitter))
                            checkin = MarkAsWatched(player.token, player.episode.id, player.facebook, player.twitter)
                            log('checkin.is_marked:=%s' % checkin.is_marked)
                            if checkin.is_marked:
                                if player.emotion == 'true':
                                    self.emotion = xbmcgui.Dialog().select('%s: %s %sx%s' % (__language__(33909), player.episode.showname, player.episode.season_number, player.episode.number), [__language__(35311), __language__(35312), __language__(35313), __language__(35314), __language__(35316), __language__(35317)])
                                    if self.emotion < 0: return
                                    if self.emotion == 0:
                                        self.emotion = 1
                                    elif self.emotion == 1:
                                        self.emotion = 2
                                    elif self.emotion == 2:
                                        self.emotion = 3
                                    elif self.emotion == 3:
                                        self.emotion = 4
                                    elif self.emotion == 4:
                                        self.emotion = 6
                                    elif self.emotion == 5:
                                        self.emotion = 7
                                    SetEmotion(player.token, player.episode.id, self.emotion)
                                if player.notifications == 'true':
                                    if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                        return
                                    if player.notif_scrobbling == 'false':
                                        return
                                    notif('%s %s %sx%s' % (__language__(32906), player.episode.showname, player.episode.season_number, player.episode.number), time=2500) 
            else:       
                response = json.loads(data) 
                log('%s' % response)
                if player.progress == 'true':
                    if response.get('item').get('type') == 'episode':
                        xbmc_id = response.get('item').get('id')
                        item = self.getEpisodeTVDB(xbmc_id)    
                        log('showtitle=%s' % item['showtitle'])
                        log('season=%s' % item['season'])
                        log('episode=%s' % item['episode'])
                        log('episode_id=%s' % item['episode_id'])
                        if len(item['showtitle']) > 0 and item['season'] > 0 and item['episode'] > 0 and item['episode_id'] > 0:                   
                            player.filename = '%s.S%.2dE%.2d' % (formatName(item['showtitle']), float(item['season']), float(item['episode']))
                            log('tvshowtitle=%s' % player.filename)
                        log('progress=%s' % self._last_pos)
                        self.progress = SaveProgress(player.token, item['episode_id'], self._last_pos)   
                        log('progress.is_set:=%s' % self.progress.is_set)                                
        if (method == 'VideoLibrary.OnUpdate'):
            log('VideoLibrary.OnUpdate')
            response = json.loads(data) 
            log('%s' % response)
            if response.get('item').get('type') == 'episode':
                xbmc_id = response.get('item').get('id')
                playcount = response.get('playcount') 
                log('playcount=%s' % playcount)
                item = self.getEpisodeTVDB(xbmc_id)    
                log('showtitle=%s' % item['showtitle'])
                log('season=%s' % item['season'])
                log('episode=%s' % item['episode'])
                log('episode_id=%s' % item['episode_id'])
                log('playcount=%s' % playcount)
                if len(item['showtitle']) > 0 and item['season'] > 0 and item['episode'] > 0 and item['episode_id'] > 0:
                    self.filename = '%s.S%.2dE%.2d' % (formatName(item['showtitle']), float(item['season']), float(item['episode']))
                    log('tvshowtitle=%s' % self.filename)
                    self.episode = FindEpisode(player.token, item['episode_id'])
                    log('episode.is_found=%s' % self.episode.is_found)
                    if self.episode.is_found:
                        if playcount == 1:
                            log('MarkAsWatched(*, %s, %s, %s)' % (self.filename, player.facebook, player.twitter))
                            checkin = MarkAsWatched(player.token, item['episode_id'], player.facebook, player.twitter)
                            log('checkin.is_marked:=%s' % checkin.is_marked)
                            if checkin.is_marked:
                                if player.emotion == 'true':
                                    self.emotion = xbmcgui.Dialog().select('%s: %s' % (__language__(33909), self.filename), [__language__(35311), __language__(35312), __language__(35313), __language__(35314), __language__(35316), __language__(35317)])
                                    if self.emotion < 0: return
                                    if self.emotion == 0:
                                        self.emotion = 1
                                    elif self.emotion == 1:
                                        self.emotion = 2
                                    elif self.emotion == 2:
                                        self.emotion = 3
                                    elif self.emotion == 3:
                                        self.emotion = 4
                                    elif self.emotion == 4:
                                        self.emotion = 6
                                    elif self.emotion == 5:
                                        self.emotion = 7
                                    SetEmotion(player.token, item['episode_id'], self.emotion)
                                if player.notifications == 'true':
                                    if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                        return
                                    if player.notif_scrobbling == 'false':
                                        return
                                    notif('%s %s %sx%s' % (__language__(32906), self.episode.showname, self.episode.season_number, self.episode.number), time=2500)
                            else:
                                if player.notifications == 'true':
                                    if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                        return
                                    notif(__language__(32907), time=2500)
                        if playcount == 0:
                            log('MarkAsUnWatched(*, %s)' % (self.filename))
                            checkin = MarkAsUnWatched(player.token, item['episode_id'])
                            log('checkin.is_unmarked:=%s' % checkin.is_unmarked)
                            if checkin.is_unmarked:
                                if player.notifications == 'true':
                                    if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                        return
                                    if player.notif_scrobbling == 'false':
                                        return
                                    notif('%s %s %sx%s' % (__language__(32908), self.episode.showname, self.episode.season_number, self.episode.number), time=2500)
                            else:
                                if player.notifications == 'true':
                                    if player.notif_during_playback == 'false' and player.isPlaying() == 1:
                                        return
                                    notif(__language__(32907), time=2500)
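
The Player.OnPlay branch above relies on re.search(r'[sS][0-9]*[eE][0-9]*', ...) to decide whether a streamed filename carries a season/episode marker. A quick illustration with hypothetical filenames:

    import re

    pattern = r'[sS][0-9]*[eE][0-9]*'
    print(bool(re.search(pattern, 'Some.Show.S01E02.720p.mkv')))  # True
    print(bool(re.search(pattern, 'Some.Movie.2016.mkv')))        # False
    # Both quantifiers are *, so a bare "s" followed by "e" with no digits
    # (e.g. in "house.mkv") also matches; the check is cheap rather than strict.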

Example 90

Project: translate Source File: dtd.py
Function: parse
    def parse(self, dtdsrc):
        """read the first dtd element from the source code into this object, return linesprocessed"""
        self.comments = []
        # make all the lists the same
        self._locfilenotes = self.comments
        self._locgroupstarts = self.comments
        self._locgroupends = self.comments
        self._locnotes = self.comments
        # self._locfilenotes = []
        # self._locgroupstarts = []
        # self._locgroupends = []
        # self._locnotes = []
        # self.comments = []
        self.entity = None
        self.definition = ''
        if not dtdsrc:
            return 0
        lines = dtdsrc.split("\n")
        linesprocessed = 0
        comment = ""
        for line in lines:
            line += "\n"
            linesprocessed += 1
            if not self.incomment:
                if (line.find('<!--') != -1):
                    self.incomment = True
                    self.continuecomment = False
                    # now work out the type of comment, and save it (remember we're not in the comment yet)
                    (comment, dummy) = quote.extract(line, "<!--", "-->", None, 0)
                    if comment.find('LOCALIZATION NOTE') != -1:
                        l = quote.findend(comment, 'LOCALIZATION NOTE')
                        while (comment[l] == ' '):
                            l += 1
                        if comment.find('FILE', l) == l:
                            self.commenttype = "locfile"
                        elif comment.find('BEGIN', l) == l:
                            self.commenttype = "locgroupstart"
                        elif comment.find('END', l) == l:
                            self.commenttype = "locgroupend"
                        else:
                            self.commenttype = "locnote"
                    else:
                        # plain comment
                        self.commenttype = "comment"
                #FIXME: bloody entity might share a line with something important
                elif not self.inentity and re.search("%.*;", line):
                    # now work out the type of comment, and save it (remember we're not in the comment yet)
                    self.comments.append(("comment", line))
                    line = ""
                    continue

            if self.incomment:
                # some kind of comment
                (comment, self.incomment) = quote.extract(line, "<!--", "-->", None, self.continuecomment)
                self.continuecomment = self.incomment
                # strip the comment out of what will be parsed
                line = line.replace(comment, "", 1)
                # add an end of line if this is the end of the comment
                if not self.incomment:
                    if line.isspace():
                        comment += line
                        line = ''
                    else:
                        comment += '\n'
                # check if there's actually an entity definition that's commented out
                # TODO: parse these, store as obsolete messages
                # if comment.find('<!ENTITY') != -1:
                #     # remove the entity from the comment
                #     comment, dummy = quote.extractwithoutquotes(comment, ">", "<!ENTITY", None, 1)
                # depending on the type of comment (worked out at the start), put it in the right place
                # make it record the comment and type as a tuple
                commentpair = (self.commenttype, comment)
                if self.commenttype == "locfile":
                    self._locfilenotes.append(commentpair)
                elif self.commenttype == "locgroupstart":
                    self._locgroupstarts.append(commentpair)
                elif self.commenttype == "locgroupend":
                    self._locgroupends.append(commentpair)
                elif self.commenttype == "locnote":
                    self._locnotes.append(commentpair)
                elif self.commenttype == "comment":
                    self.comments.append(commentpair)

            if not self.inentity and not self.incomment:
                entitypos = line.find('<!ENTITY')
                if entitypos != -1:
                    self.inentity = True
                    beforeentity = line[:entitypos].strip()
                    if beforeentity.startswith("#"):
                        self.hashprefix = beforeentity
                    self.entitypart = "start"
                else:
                    self.unparsedlines.append(line)

            if self.inentity:
                if self.entitypart == "start":
                    # the entity definition
                    e = quote.findend(line, '<!ENTITY')
                    line = line[e:]
                    self.entitypart = "name"
                    self.entitytype = "internal"
                if self.entitypart == "name":
                    s = 0
                    e = 0
                    while (e < len(line) and line[e].isspace()):
                        e += 1
                    self.space_pre_entity = ' ' * (e - s)
                    s = e
                    self.entity = ''
                    if (e < len(line) and line[e] == '%'):
                        self.entitytype = "external"
                        self.entityparameter = ""
                        e += 1
                        while (e < len(line) and line[e].isspace()):
                            e += 1
                    while (e < len(line) and not line[e].isspace()):
                        self.entity += line[e]
                        e += 1
                    s = e

                    assert quote.rstripeol(self.entity) == self.entity
                    while (e < len(line) and line[e].isspace()):
                        e += 1
                    self.space_pre_definition = ' ' * (e - s)
                    if self.entity:
                        if self.entitytype == "external":
                            self.entitypart = "parameter"
                        else:
                            self.entitypart = "definition"
                        # remember the start position and the quote character
                        if e == len(line):
                            self.entityhelp = None
                            e = 0
                            continue
                        elif self.entitypart == "definition":
                            self.entityhelp = (e, line[e])
                            self.instring = False
                if self.entitypart == "parameter":
                    while (e < len(line) and line[e].isspace()):
                        e += 1
                    paramstart = e
                    while (e < len(line) and line[e].isalnum()):
                        e += 1
                    self.entityparameter += line[paramstart:e]
                    while (e < len(line) and line[e].isspace()):
                        e += 1
                    line = line[e:]
                    e = 0
                    if not line:
                        continue
                    if line[0] in ('"', "'"):
                        self.entitypart = "definition"
                        self.entityhelp = (e, line[e])
                        self.instring = False
                if self.entitypart == "definition":
                    if self.entityhelp is None:
                        e = 0
                        while (e < len(line) and line[e].isspace()):
                            e += 1
                        if e == len(line):
                            continue
                        self.entityhelp = (e, line[e])
                        self.instring = False
                    # actually the lines below should remember instring, rather than using it as dummy
                    e = self.entityhelp[0]
                    if (self.entityhelp[1] == "'"):
                        (defpart, self.instring) = quote.extract(line[e:], "'", "'", startinstring=self.instring, allowreentry=False)
                    elif (self.entityhelp[1] == '"'):
                        (defpart, self.instring) = quote.extract(line[e:], '"', '"', startinstring=self.instring, allowreentry=False)
                    else:
                        raise ValueError("Unexpected quote character... %r" % (self.entityhelp[1]))
                    # for any following lines, start at the beginning of the line. remember the quote character
                    self.entityhelp = (0, self.entityhelp[1])
                    self.definition += defpart
                    if not self.instring:
                        self.closing = line[e+len(defpart):].rstrip("\n\r")
                        self.inentity = False
                        break

        return linesprocessed
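
In the parser above, re.search("%.*;", line) is the quick test for a DTD parameter-entity reference (such as %brandDTD;) on a line that is not inside a comment or an entity definition. A standalone illustration with hypothetical DTD lines:

    import re

    for line in ('%brandDTD;\n', '<!ENTITY app.name "Example">\n'):
        print(bool(re.search('%.*;', line)))
    # True for the parameter-entity reference, False for the ordinary entity.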

Example 91

Project: mycli Source File: tabulate.py
Function: tabulate
def tabulate(tabular_data, headers=[], tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------

    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, an iterable of dictionaries,
    a two-dimensional NumPy array, NumPy record array, or a Pandas'
    dataframe.


    Table headers
    -------------

    To print nice column headers, supply the second argument (`headers`):

      - `headers` can be an explicit list of column headers
      - if `headers="firstrow"`, then the first row of data is used
      - if `headers="keys"`, then dictionary keys or column indices are used

    Otherwise a headerless table is produced.

    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19


    Column alignment
    ----------------

    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).


    Table formats
    -------------

    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.

    `None` values are replaced with a `missingval` string:

    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----

    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
     'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
    currently supported formats.

    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451

    "simple" format is like Pandoc simple_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------

    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+

    "fancy_grid" draws a grid using box-drawing characters:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
    │ strings   │   numbers │
    ╞═══════════╪═══════════╡
    │ spam      │   41.9999 │
    ├───────────┼───────────┤
    │ eggs      │  451      │
    ╘═══════════╧═══════════╛

    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |

    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |


    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |

    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========

    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}

    "html" produces HTML markup:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="html"))
    <table>
    <tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
    <tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
    <tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
    </table>

    "latex" produces a tabular environment of LaTeX docuement markup:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\hline
    \\end{tabular}

    "latex_booktabs" produces a tabular environment of LaTeX docuement markup
    using the booktabs.sty package:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
    \\begin{tabular}{lr}
    \\toprule
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\bottomrule
    \\end{tabular}

    Also returns a tuple of the raw rows pulled from tabular_data
    """
    if tabular_data is None:
        tabular_data = []
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c]
             for c,ct in zip(cols, coltypes)]

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
                            ['\t'.join(map(_text_type, row)) for row in cols])
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = wcswidth

    # align columns
    aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        t_cols = cols or [['']] * len(headers)
        t_aligns = aligns or [stralign] * len(headers)
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, t_aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns), rows
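
Here re.search(_invisible_codes, plain_text) decides whether the table data contains ANSI escape sequences, in which case column widths must be measured on visible characters only. _invisible_codes is defined elsewhere in tabulate.py; the pattern below is a rough stand-in (an assumption, not the module's actual regex) that shows the idea:

    import re

    ansi_like = r'\x1b\[\d*(?:;\d+)*m'  # hypothetical stand-in for _invisible_codes
    plain = 'spam\t41.9999'
    coloured = '\x1b[31mspam\x1b[0m\t41.9999'
    print(bool(re.search(ansi_like, plain)))     # False: plain wcswidth is enough
    print(bool(re.search(ansi_like, coloured)))  # True: switch to the ANSI-aware width function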

Example 92

Project: tp-qemu Source File: floppy.py
Function: run
@error.context_aware
def run(test, params, env):
    """
    Test virtual floppy of guest:

    1) Create a floppy disk image on host
    2) Start the guest with this floppy image.
    3) Make a file system on guest virtual floppy.
    4) Calculate md5sum value of a file and copy it into floppy.
    5) Verify whether the md5sum does match.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    source_file = params["source_file"]
    dest_file = params["dest_file"]
    login_timeout = int(params.get("login_timeout", 360))
    floppy_prepare_timeout = int(params.get("floppy_prepare_timeout", 360))
    guest_floppy_path = params["guest_floppy_path"]

    def create_floppy(params, prepare=True):
        """
        Creates 'new' floppy with one file on it

        :param params: parameters for test
        :param prepare: if True, prepare the floppy image.

        :return: path to new floppy file.
        """
        error.context("creating test floppy", logging.info)
        floppy = params["floppy_name"]
        if not os.path.isabs(floppy):
            floppy = os.path.join(data_dir.get_data_dir(), floppy)
        if prepare:
            utils.run("dd if=/dev/zero of=%s bs=512 count=2880" % floppy)
        return floppy

    def cleanup_floppy(path):
        """ Removes created floppy """
        error.context("cleaning up temp floppy images", logging.info)
        os.remove("%s" % path)

    def lazy_copy(vm, dst_path, check_path, copy_timeout=None, dsize=None):
        """
        Start disk load: cyclically append data to dst_path and check_path.

        :param vm: VM where to find a disk.
        :param dst_path: Destination file for the generated data.
        :param check_path: Second file that receives the same data.
        :param copy_timeout: Timeout for copy
        :param dsize: Size of data block which is periodically copied.
        """
        if copy_timeout is None:
            copy_timeout = 120
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = ('nohup bash -c "while [ true ]; do echo \"1\" | '
               'tee -a %s >> %s; sleep 0.1; done" 2> /dev/null &' %
               (check_path, dst_path))
        pid = re.search(r"\[.+\] (.+)",
                        session.cmd_output(cmd, timeout=copy_timeout))
        return pid.group(1)

    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            exc_info = None
            if args is None:
                args = []
            try:
                try:
                    ret = self.test(*args, **kargs)
                except Exception:
                    exc_info = sys.exc_info()
            finally:
                if hasattr(self, "clean"):
                    try:
                        self.clean()
                    except Exception:
                        if exc_info is None:
                            raise
                    if exc_info:
                        raise exc_info[0], exc_info[1], exc_info[2]
            return ret

    class test_singlehost(MiniSubtest):

        def test(self):
            create_floppy(params)
            params["start_vm"] = "yes"
            vm_name = params.get("main_vm", "vm1")
            env_process.preprocess_vm(test, params, env, vm_name)
            vm = env.get_vm(vm_name)
            vm.verify_alive()
            self.session = vm.wait_for_login(timeout=login_timeout)

            self.dest_dir = params.get("mount_dir")
            # If mount_dir specified, treat guest as a Linux OS
            # Some Linux distribution does not load floppy at boot and Windows
            # needs time to load and init floppy driver
            if self.dest_dir:
                lsmod = self.session.cmd("lsmod")
                if 'floppy' not in lsmod:
                    self.session.cmd("modprobe floppy")
            else:
                time.sleep(20)

            error.context("Formating floppy disk before using it")
            format_cmd = params["format_floppy_cmd"]
            self.session.cmd(format_cmd, timeout=120)
            logging.info("Floppy disk formatted successfully")

            if self.dest_dir:
                error.context("Mounting floppy")
                self.session.cmd("mount %s %s" % (guest_floppy_path,
                                                  self.dest_dir))
            error.context("Testing floppy")
            self.session.cmd(params["test_floppy_cmd"])

            error.context("Copying file to the floppy")
            md5_cmd = params.get("md5_cmd")
            if md5_cmd:
                md5_source = self.session.cmd("%s %s" % (md5_cmd, source_file))
                try:
                    md5_source = md5_source.split(" ")[0]
                except IndexError:
                    error.TestError("Failed to get md5 from source file,"
                                    " output: '%s'" % md5_source)
            else:
                md5_source = None

            self.session.cmd("%s %s %s" % (params["copy_cmd"], source_file,
                                           dest_file))
            logging.info("Succeed to copy file '%s' into floppy disk" %
                         source_file)

            error.context("Checking if the file is unchanged after copy")
            if md5_cmd:
                md5_dest = self.session.cmd("%s %s" % (md5_cmd, dest_file))
                try:
                    md5_dest = md5_dest.split(" ")[0]
                except IndexError:
                    error.TestError("Failed to get md5 from dest file,"
                                    " output: '%s'" % md5_dest)
                if md5_source != md5_dest:
                    raise error.TestFail("File changed after copy to floppy")
            else:
                md5_dest = None
                self.session.cmd("%s %s %s" % (params["diff_file_cmd"],
                                               source_file, dest_file))

        def clean(self):
            clean_cmd = "%s %s" % (params["clean_cmd"], dest_file)
            self.session.cmd(clean_cmd)
            if self.dest_dir:
                self.session.cmd("umount %s" % self.dest_dir)
            self.session.close()

    class Multihost(MiniSubtest):

        def test(self):
            error.context("Preparing migration env and floppies.", logging.info)
            mig_protocol = params.get("mig_protocol", "tcp")
            self.mig_type = migration.MultihostMigration
            if mig_protocol == "fd":
                self.mig_type = migration.MultihostMigrationFd
            if mig_protocol == "exec":
                self.mig_type = migration.MultihostMigrationExec
            if "rdma" in mig_protocol:
                self.mig_type = migration.MultihostMigrationRdma

            self.vms = params.get("vms").split(" ")
            self.srchost = params["hosts"][0]
            self.dsthost = params["hosts"][1]
            self.is_src = params["hostid"] == self.srchost
            self.mig = self.mig_type(test, params, env, False, )

            if self.is_src:
                vm = env.get_vm(self.vms[0])
                vm.destroy()
                self.floppy = create_floppy(params)
                self.floppy_dir = os.path.dirname(self.floppy)
                params["start_vm"] = "yes"
                env_process.process(test, params, env,
                                    env_process.preprocess_image,
                                    env_process.preprocess_vm)
                vm = env.get_vm(self.vms[0])
                vm.wait_for_login(timeout=login_timeout)
            else:
                self.floppy = create_floppy(params, False)
                self.floppy_dir = os.path.dirname(self.floppy)

        def clean(self):
            self.mig.cleanup()
            if self.is_src:
                cleanup_floppy(self.floppy)

    class test_multihost_write(Multihost):

        def test(self):
            super(test_multihost_write, self).test()

            copy_timeout = int(params.get("copy_timeout", 480))
            self.mount_dir = params["mount_dir"]
            format_floppy_cmd = params["format_floppy_cmd"]
            check_copy_path = params["check_copy_path"]

            pid = None
            sync_id = {'src': self.srchost,
                       'dst': self.dsthost,
                       "type": "file_trasfer"}
            filename = "orig"
            src_file = os.path.join(self.mount_dir, filename)

            if self.is_src:  # Starts in source
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)

                if self.mount_dir:
                    session.cmd("rm -f %s" % (src_file))
                    session.cmd("rm -f %s" % (check_copy_path))
                # If mount_dir is specified, treat the guest as a Linux OS.
                # Some Linux distributions do not load the floppy module at boot,
                # and Windows needs time to load and init the floppy driver.
                error.context("Prepare floppy for writing.", logging.info)
                if self.mount_dir:
                    lsmod = session.cmd("lsmod")
                    if 'floppy' not in lsmod:
                        session.cmd("modprobe floppy")
                else:
                    time.sleep(20)

                session.cmd(format_floppy_cmd)

                error.context("Mount and copy data.", logging.info)
                if self.mount_dir:
                    session.cmd("mount %s %s" % (guest_floppy_path,
                                                 self.mount_dir),
                                timeout=30)

                error.context("File copying test.", logging.info)

                pid = lazy_copy(vm, src_file, check_copy_path, copy_timeout)

            sync = SyncData(self.mig.master_id(), self.mig.hostid,
                            self.mig.hosts, sync_id, self.mig.sync_server)

            pid = sync.sync(pid, timeout=floppy_prepare_timeout)[self.srchost]

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:  # Starts in destination
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                error.context("Wait for copy finishing.", logging.info)
                status = session.cmd_status("kill %s" % pid,
                                            timeout=copy_timeout)
                if status != 0:
                    raise error.TestFail("Copy process was terminatted with"
                                         " error code %s" % (status))

                session.cmd_status("kill -s SIGINT %s" % (pid),
                                   timeout=copy_timeout)

                error.context("Check floppy file checksum.", logging.info)
                md5_cmd = params.get("md5_cmd", "md5sum")
                if md5_cmd:
                    md5_floppy = session.cmd("%s %s" % (md5_cmd, src_file))
                    try:
                        md5_floppy = md5_floppy.split(" ")[0]
                    except IndexError:
                        error.TestError("Failed to get md5 from source file,"
                                        " output: '%s'" % md5_floppy)
                    md5_check = session.cmd("%s %s" % (md5_cmd, check_copy_path))
                    try:
                        md5_check = md5_check.split(" ")[0]
                    except IndexError:
                        error.TestError("Failed to get md5 from dst file,"
                                        " output: '%s'" % md5_floppy)
                    if md5_check != md5_floppy:
                        raise error.TestFail("There is mistake in copying, "
                                             "it is possible to check file on vm.")

                session.cmd("rm -f %s" % (src_file))
                session.cmd("rm -f %s" % (check_copy_path))

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'finish_floppy_test', login_timeout)

        def clean(self):
            super(test_multihost_write, self).clean()

    class test_multihost_eject(Multihost):

        def test(self):
            super(test_multihost_eject, self).test()

            self.mount_dir = params.get("mount_dir", None)
            format_floppy_cmd = params["format_floppy_cmd"]
            floppy = params["floppy_name"]
            second_floppy = params["second_floppy_name"]
            if not os.path.isabs(floppy):
                floppy = os.path.join(data_dir.get_data_dir(), floppy)
            if not os.path.isabs(second_floppy):
                second_floppy = os.path.join(data_dir.get_data_dir(),
                                             second_floppy)
            if not self.is_src:
                self.floppy = create_floppy(params)

            pid = None
            sync_id = {'src': self.srchost,
                       'dst': self.dsthost,
                       "type": "file_trasfer"}
            filename = "orig"
            src_file = os.path.join(self.mount_dir, filename)

            if self.is_src:  # Starts in source
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)

                if self.mount_dir:   # If linux
                    session.cmd("rm -f %s" % (src_file))
                # If mount_dir is specified, treat the guest as a Linux OS.
                # Some Linux distributions do not load the floppy module at boot,
                # and Windows needs time to load and init the floppy driver.
                error.context("Prepare floppy for writing.", logging.info)
                if self.mount_dir:   # If linux
                    lsmod = session.cmd("lsmod")
                    if 'floppy' not in lsmod:
                        session.cmd("modprobe floppy")
                else:
                    time.sleep(20)

                if floppy not in vm.monitor.info("block"):
                    raise error.TestFail("Wrong floppy image is placed in vm.")

                try:
                    session.cmd(format_floppy_cmd)
                except aexpect.ShellCmdError, e:
                    if e.status == 1:
                        logging.error("First access to floppy failed, "
                                      " Trying a second time as a workaround")
                        session.cmd(format_floppy_cmd)

                error.context("Check floppy")
                if self.mount_dir:   # If linux
                    session.cmd("mount %s %s" % (guest_floppy_path,
                                                 self.mount_dir), timeout=30)
                    session.cmd("umount %s" % (self.mount_dir), timeout=30)

                written = None
                if self.mount_dir:
                    filepath = os.path.join(self.mount_dir, "test.txt")
                    session.cmd("echo 'test' > %s" % (filepath))
                    output = session.cmd("cat %s" % (filepath))
                    written = "test\n"
                else:   # Windows version.
                    filepath = "A:\\test.txt"
                    session.cmd("echo test > %s" % (filepath))
                    output = session.cmd("type %s" % (filepath))
                    written = "test \n\n"
                if output != written:
                    raise error.TestFail("Data read from the floppy differs"
                                         "from the data written to it."
                                         " EXPECTED: %s GOT: %s" %
                                         (repr(written), repr(output)))

                error.context("Change floppy.")
                vm.monitor.cmd("eject floppy0")
                vm.monitor.cmd("change floppy %s" % (second_floppy))
                session.cmd(format_floppy_cmd)

                error.context("Mount and copy data")
                if self.mount_dir:   # If linux
                    session.cmd("mount %s %s" % (guest_floppy_path,
                                                 self.mount_dir), timeout=30)

                if second_floppy not in vm.monitor.info("block"):
                    raise error.TestFail("Wrong floppy image is placed in vm.")

            sync = SyncData(self.mig.master_id(), self.mig.hostid,
                            self.mig.hosts, sync_id, self.mig.sync_server)

            pid = sync.sync(pid, timeout=floppy_prepare_timeout)[self.srchost]

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:  # Starts in destination
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                written = None
                if self.mount_dir:
                    filepath = os.path.join(self.mount_dir, "test.txt")
                    session.cmd("echo 'test' > %s" % (filepath))
                    output = session.cmd("cat %s" % (filepath))
                    written = "test\n"
                else:   # Windows version.
                    filepath = "A:\\test.txt"
                    session.cmd("echo test > %s" % (filepath))
                    output = session.cmd("type %s" % (filepath))
                    written = "test \n\n"
                if output != written:
                    raise error.TestFail("Data read from the floppy differs"
                                         "from the data written to it."
                                         " EXPECTED: %s GOT: %s" %
                                         (repr(written), repr(output)))

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'finish_floppy_test', login_timeout)

        def clean(self):
            super(test_multihost_eject, self).clean()

    test_type = params.get("test_type", "test_singlehost")
    if (test_type in locals()):
        tests_group = locals()[test_type]
        tests_group()
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " migration_with_dst_problem test" % test_type)

Example 93

Project: vcli Source File: tabulate.py
Function: tabulate
def tabulate(tabular_data, headers=[], tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------

    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, an iterable of dictionaries,
    a two-dimensional NumPy array, NumPy record array, or a Pandas'
    dataframe.


    Table headers
    -------------

    To print nice column headers, supply the second argument (`headers`):

      - `headers` can be an explicit list of column headers
      - if `headers="firstrow"`, then the first row of data is used
      - if `headers="keys"`, then dictionary keys or column indices are used

    Otherwise a headerless table is produced.

    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19


    Column alignment
    ----------------

    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).


    Table formats
    -------------

    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.

    `None` values are replaced with a `missingval` string:

    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----

    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
     'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
    currently supported formats.

    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451

    "simple" format is like Pandoc simple_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------

    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+

    "fancy_grid" draws a grid using box-drawing characters:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
    │ strings   │   numbers │
    ╞═══════════╪═══════════╡
    │ spam      │   41.9999 │
    ├───────────┼───────────┤
    │ eggs      │  451      │
    ╘═══════════╧═══════════╛

    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |

    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |


    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |

    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========

    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}

    "html" produces HTML markup:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="html"))
    <table>
    <tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
    <tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
    <tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
    </table>

    "latex" produces a tabular environment of LaTeX docuement markup:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\hline
    \\end{tabular}

    "latex_booktabs" produces a tabular environment of LaTeX docuement markup
    using the booktabs.sty package:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
    \\begin{tabular}{lr}
    \\toprule
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\bottomrule
    \\end{tabular}
    """
    if tabular_data is None:
        tabular_data = []
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
                            ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = wcswidth

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c]
             for c,ct in zip(cols, coltypes)]

    # align columns
    aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        t_cols = cols or [['']] * len(headers)
        t_aligns = aligns or [stralign] * len(headers)
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, t_aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns)
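
In the tabulate example, re.search is run once over the whole flattened table to decide whether ANSI escape sequences are present, so the more expensive "visible width" functions are only enabled when needed. The real _invisible_codes pattern is defined elsewhere in tabulate.py; the sketch below uses a stand-in pattern to show the same optimization.

import re

ANSI_CODES = re.compile(r"\x1b\[\d*(?:;\d+)*m")   # stand-in for tabulate's _invisible_codes

def visible_width(text):
    # Measure width with ANSI color codes stripped out.
    return len(ANSI_CODES.sub("", text))

def pick_width_fn(rows):
    # One re.search over the whole flattened table instead of one per cell.
    plain_text = "\n".join("\t".join(map(str, row)) for row in rows)
    return visible_width if re.search(ANSI_CODES, plain_text) else len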

Example 94

Project: viewfinder Source File: analyze_merged_logs.py
@gen.engine
def ProcessFiles(merged_store, logs_paths, filenames, callback):
  """Fetch and process each file contained in 'filenames'."""

  def _ProcessOneFile(contents, day_stats, device_entries, trace_entries):
    """Iterate over the contents of a processed file: one entry per line. Increment stats for specific entries."""
    buf = cStringIO.StringIO(contents)
    buf.seek(0)
    # Max len is +1 since we include the current line. It allows us to call 'continue' in the middle of the loop.
    context_before = deque(maxlen=options.options.trace_context_num_lines + 1)
    # Traces that still need "after" context.
    pending_traces = []
    def _AddTrace(trace_type, timestamp, module, message):
      # context_before also has the current line, so grab only :-1.
      trace = {'type': trace_type,
               'timestamp': timestamp,
               'module': module,
               'trace': msg,
               'context_before': list(context_before)[:-1],
               'context_after': []}
      if options.options.trace_context_num_lines == 0:
        trace_entries.append(trace)
      else:
        pending_traces.append(trace)

    def _CheckPendingTraces(line):
      for t in pending_traces:
        t['context_after'].append(line)
      while pending_traces and len(pending_traces[0]['context_after']) >= options.options.trace_context_num_lines:
        trace_entries.append(pending_traces.pop(0))

    while True:
      line = buf.readline()
      if not line:
        break
      line = line.rstrip('\n')
      # The deque automatically pops elements from the front when maxlen is reached.
      context_before.append(line)
      _CheckPendingTraces(line)

      parsed = logs_util.ParseLogLine(line)
      if not parsed:
        continue
      day, time, module, msg = parsed
      timestamp = logs_util.DayTimeStringsToUTCTimestamp(day, time)

      if options.options.process_traceback and re.search(kTracebackRE, line):
        _AddTrace('traceback', timestamp, module, msg)

      if module.startswith('user_op_manager:') or module.startswith('operation:'):
        # Found op status line.
        if msg.startswith('SUCCESS'):
          # Success message. eg: SUCCESS: user: xx, device: xx, op: xx, method: xx.yy in xxs
          parsed = logs_util.ParseSuccessMsg(msg)
          if not parsed:
            continue
          user, device, op, class_name, method_name = parsed
          method = '%s.%s' % (class_name, method_name)
          day_stats.ActiveAll(user)
          if method in ('Follower.UpdateOperation', 'UpdateFollowerOperation.Execute'):
            day_stats.ActiveView(user)
          elif method in ('Comment.PostOperation', 'PostCommentOperation.Execute'):
            day_stats.ActivePost(user)
          elif method in ('Episode.ShareExistingOperation', 'Episode.ShareNewOperation',
                          'ShareExistingOperation.Execute', 'ShareNewOperation.Execute'):
            day_stats.ActiveShare(user)
        elif msg.startswith('EXECUTE'):
          # Exec message. eg: EXECUTE: user: xx, device: xx, op: xx, method: xx.yy: <req>
          parsed = logs_util.ParseExecuteMsg(msg)
          if not parsed:
            continue
          user, device, op, class_name, method_name, request = parsed
          method = '%s.%s' % (class_name, method_name)
          if method in ('Device.UpdateOperation', 'User.RegisterOperation', 'RegisterUserOperation.Execute'):
            try:
              req_dict = eval(request)
              device_entries.append({'method': method, 'timestamp': timestamp, 'request': req_dict})
            except Exception as e:
              continue
        elif msg.startswith('ABORT'):
          if options.options.process_op_abort:
            # Abort message, save the entire line as well as context.
            _AddTrace('abort', timestamp, module, msg)
        # FAILURE status is already handled by Traceback processing.
      elif module.startswith('base:') and msg.startswith('/ping OK:'):
        # Ping message. Extract full request dict.
        req_str = logs_util.ParsePingMsg(msg)
        if not req_str:
          continue
        try:
          req_dict = json.loads(req_str)
          device_entries.append({'method': 'ping', 'timestamp': timestamp, 'request': req_dict})
        except Exception as e:
          continue
      elif module.startswith('ping:') and msg.startswith('ping OK:'):
        # Ping message in new format. Extract full request and response dicts.
        (req_str, resp_str) = logs_util.ParseNewPingMsg(msg)
        if not req_str or not resp_str:
          continue
        try:
          req_dict = json.loads(req_str)
          resp_dict = json.loads(resp_str)
          device_entries.append({'method': 'ping', 'timestamp': timestamp, 'request': req_dict, 'response': resp_dict})
        except Exception as e:
          continue


    # No more context. Flush the pending traces into the list.
    trace_entries.extend(pending_traces)
    buf.close()

  today = util.NowUTCToISO8601()
  # Group filenames by day.
  files_by_day = defaultdict(list)
  for filename in filenames:
    day = logs_paths.MergedLogPathToDate(filename)
    if not day:
      logging.error('filename cannot be parsed as processed log: %s' % filename)
      continue
    if options.options.compute_today or today != day:
      files_by_day[day].append(filename)

  # Sort the list of days. This is important both for --max_days_to_process, and to know the last
  # day for which we wrote the file.
  day_list = sorted(files_by_day.keys())
  if options.options.max_days_to_process is not None:
    day_list = day_list[:options.options.max_days_to_process]

  last_day_written = None
  for day in day_list:
    files = files_by_day[day]
    day_stats = logs_util.DayUserRequestStats(day)
    device_entries = []
    trace_entries = []
    for f in files:
      # Let exceptions surface.
      contents = yield gen.Task(merged_store.Get, f)
      logging.info('Processing %d bytes from %s' % (len(contents), f))
      _ProcessOneFile(contents, day_stats, device_entries, trace_entries)

    if not options.options.dry_run:
      # Write the json-ified stats.
      req_contents = json.dumps(day_stats.ToDotDict())
      req_file_path = 'processed_data/user_requests/%s' % day
      dev_contents = json.dumps(device_entries)
      dev_file_path = 'processed_data/device_details/%s' % day
      try:
        trace_contents = json.dumps(trace_entries)
      except Exception as e:
        trace_contents = None
      trace_file_path = 'processed_data/traces/%s' % day


      @gen.engine
      def _MaybePut(path, contents, callback):
        if contents:
          yield gen.Task(merged_store.Put, path, contents)
          logging.info('Wrote %d bytes to %s' % (len(contents), path))
        callback()


      yield [gen.Task(_MaybePut, req_file_path, req_contents),
             gen.Task(_MaybePut, dev_file_path, dev_contents),
             gen.Task(_MaybePut, trace_file_path, trace_contents)]

      last_day_written = day_stats.day

  callback(last_day_written)
  return
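
Example 94 flags interesting log lines by running re.search(kTracebackRE, line) on every line and only then building a trace entry with surrounding context. The real kTracebackRE is defined elsewhere in the viewfinder sources; the sketch below uses an assumed pattern just to show the same line-scanning shape.

import re

TRACEBACK_RE = re.compile(r"Traceback \(most recent call last\)")  # assumed pattern

def find_tracebacks(log_text):
    hits = []
    for lineno, line in enumerate(log_text.splitlines(), 1):
        # re.search scans anywhere in the line, unlike re.match.
        if re.search(TRACEBACK_RE, line):
            hits.append((lineno, line))
    return hits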

Example 95

Project: pyriscope Source File: processor.py
def process(args):
    # Make sure there are args, do a primary check for help.
    if len(args) == 0 or args[0] in ARGLIST_HELP:
        show_help()

    # Default arg flag settings.
    url_parts_list = []
    ffmpeg = True
    convert = False
    clean = False
    rotate = False
    agent_mocking = False
    name = ""
    live_duration = ""
    req_headers = {}

    # Check for ffmpeg.
    if shutil.which("ffmpeg") is None:
        ffmpeg = False

    # Read in args and set appropriate flags.
    cont = None
    for i in range(len(args)):
        if cont == ARGLIST_NAME:
            if args[i][0] in ('\'', '\"'):
                if args[i][-1:] == args[i][0]:
                    cont = None
                    name = args[i][1:-1]
                else:
                    cont = args[i][0]
                    name = args[i][1:]
            else:
                cont = None
                name = args[i]
            continue
        if cont in ('\'', '\"'):
            if args[i][-1:] == cont:
                cont = None
                name += " {}".format(args[i][:-1])
            else:
                name += " {}".format(args[i])
            continue
        if cont == ARGLIST_TIME:
            cont = None
            live_duration = args[i]

        if re.search(URL_PATTERN, args[i]) is not None:
            url_parts_list.append(dissect_url(args[i]))
        if args[i] in ARGLIST_HELP:
            show_help()
        if args[i] in ARGLIST_CONVERT:
            convert = True
        if args[i] in ARGLIST_CLEAN:
            convert = True
            clean = True
        if args[i] in ARGLIST_ROTATE:
            convert = True
            rotate = True
        if args[i] in ARGLIST_AGENTMOCK:
            agent_mocking = True
        if args[i] in ARGLIST_NAME:
            cont = ARGLIST_NAME
        if args[i] in ARGLIST_TIME:
            cont = ARGLIST_TIME


    # Check for URLs found.
    if len(url_parts_list) < 1:
        print("\nError: No valid URLs entered.")
        sys.exit(1)

    # Disable conversion/rotation if ffmpeg is not found.
    if convert and not ffmpeg:
        print("ffmpeg not found: Disabling conversion/rotation.")
        convert = False
        clean = False
        rotate = False

    # Set a mocked user agent.
    if agent_mocking:
        stdout("Getting mocked User-Agent.")
        req_headers['User-Agent'] = get_mocked_user_agent()
    else:
        req_headers['User-Agent'] = DEFAULT_UA


    url_count = 0
    for url_parts in url_parts_list:
        url_count += 1

        # Disable custom naming for multiple URLs.
        if len(url_parts_list) > 1:
            name = ""

        # Public Periscope API call to get information about the broadcast.
        if url_parts['token'] == "":
            req_url = PERISCOPE_GETBROADCAST.format("broadcast_id", url_parts['broadcast_id'])
        else:
            req_url = PERISCOPE_GETBROADCAST.format("token", url_parts['token'])

        stdout("Downloading broadcast information.")
        response = requests.get(req_url, headers=req_headers)
        broadcast_public = json.loads(response.text)

        if 'success' in broadcast_public and broadcast_public['success'] == False:
            print("\nError: Video expired/deleted/wasn't found: {}".format(url_parts['url']))
            continue

        # Loaded the correct JSON. Create file name.
        if name[-3:] == ".ts":
            name = name[:-3]
        if name[-4:] == ".mp4":
            name = name[:-4]
        if name == "":
            broadcast_start_time_end = broadcast_public['broadcast']['start'].rfind('.')
            timezone = broadcast_public['broadcast']['start'][broadcast_start_time_end:]
            timezone_start = timezone.rfind('-') if timezone.rfind('-') != -1 else timezone.rfind('+')
            timezone = timezone[timezone_start:].replace(':', '')
            to_zone = tz.tzlocal()
            broadcast_start_time = broadcast_public['broadcast']['start'][:broadcast_start_time_end]
            broadcast_start_time = "{}{}".format(broadcast_start_time, timezone)
            broadcast_start_time_dt = datetime.strptime(broadcast_start_time, '%Y-%m-%dT%H:%M:%S%z')
            broadcast_start_time_dt = broadcast_start_time_dt.astimezone(to_zone)
            broadcast_start_time = "{}-{:02d}-{:02d} {:02d}-{:02d}-{:02d}".format(
                broadcast_start_time_dt.year, broadcast_start_time_dt.month, broadcast_start_time_dt.day,
                broadcast_start_time_dt.hour, broadcast_start_time_dt.minute, broadcast_start_time_dt.second)
            name = "{} ({})".format(broadcast_public['broadcast']['username'], broadcast_start_time)

        name = sanitize(name)

        # Get ready to start capturing.
        if broadcast_public['broadcast']['state'] == 'RUNNING':
            # Cannot record live stream without ffmpeg.
            if not ffmpeg:
                print("\nError: Cannot record live stream without ffmpeg: {}".format(url_parts['url']))
                continue

            # The stream is live, start live capture.
            name = "{}.live".format(name)

            if url_parts['token'] == "":
                req_url = PERISCOPE_GETACCESS.format("broadcast_id", url_parts['broadcast_id'])
            else:
                req_url = PERISCOPE_GETACCESS.format("token", url_parts['token'])

            stdout("Downloading live stream information.")
            response = requests.get(req_url, headers=req_headers)
            access_public = json.loads(response.text)

            if 'success' in access_public and access_public['success'] == False:
                print("\nError: Video expired/deleted/wasn't found: {}".format(url_parts['url']))
                continue

            time_argument = ""
            if not live_duration == "":
                time_argument = " -t {}".format(live_duration)

            live_url = FFMPEG_LIVE.format(
                url_parts['url'],
                req_headers['User-Agent'],
                access_public['hls_url'],
                time_argument,
                name)

            # Start downloading live stream.
            stdout("Recording stream to {}.ts".format(name))

            Popen(live_url, shell=True, stdout=PIPE).stdout.read()

            stdoutnl("{}.ts Downloaded!".format(name))

            # Convert video to .mp4.
            if convert:
                stdout("Converting to {}.mp4".format(name))

                if rotate:
                    Popen(FFMPEG_ROT.format(name), shell=True, stdout=PIPE).stdout.read()
                else:
                    Popen(FFMPEG_NOROT.format(name), shell=True, stdout=PIPE).stdout.read()

                stdoutnl("Converted to {}.mp4!".format(name))

                if clean and os.path.exists("{}.ts".format(name)):
                    os.remove("{}.ts".format(name))
            continue

        else:
            if not broadcast_public['broadcast']['available_for_replay']:
                print("\nError: Replay unavailable: {}".format(url_parts['url']))
                continue

            # Broadcast replay is available.
            if url_parts['token'] == "":
                req_url = PERISCOPE_GETACCESS.format("broadcast_id", url_parts['broadcast_id'])
            else:
                req_url = PERISCOPE_GETACCESS.format("token", url_parts['token'])

            stdout("Downloading replay information.")
            response = requests.get(req_url, headers=req_headers)
            access_public = json.loads(response.text)

            if 'success' in access_public and access_public['success'] == False:
                print("\nError: Video expired/deleted/wasn't found: {}".format(url_parts['url']))
                continue

            base_url = access_public['replay_url']
            base_url_parts = dissect_replay_url(base_url)

            req_headers['Cookie'] = "{}={};{}={};{}={}".format(access_public['cookies'][0]['Name'],
                                                               access_public['cookies'][0]['Value'],
                                                               access_public['cookies'][1]['Name'],
                                                               access_public['cookies'][1]['Value'],
                                                               access_public['cookies'][2]['Name'],
                                                               access_public['cookies'][2]['Value'])
            req_headers['Host'] = "replay.periscope.tv"

            # Get the list of chunks to download.
            stdout("Downloading chunk list.")
            response = requests.get(access_public['replay_url'], headers=req_headers)
            chunks = response.text
            chunk_pattern = re.compile(r'chunk_\d+\.ts')

            download_list = []
            for chunk in re.findall(chunk_pattern, chunks):
                download_list.append(
                    {
                        'url': REPLAY_URL.format(base_url_parts['key'], chunk),
                        'file_name': chunk
                    }
                )

            # Download chunk .ts files and append them.
            pool = ThreadPool(name, DEFAULT_DL_THREADS, len(download_list))

            temp_dir_name = ".pyriscope.{}".format(name)
            if not os.path.exists(temp_dir_name):
                os.makedirs(temp_dir_name)

            stdout("Downloading replay {}.ts.".format(name))

            for chunk_info in download_list:
                temp_file_path = "{}/{}".format(temp_dir_name, chunk_info['file_name'])
                chunk_info['file_path'] = temp_file_path
                pool.add_task(download_chunk, chunk_info['url'], req_headers, temp_file_path)

            pool.wait_completion()

            if os.path.exists("{}.ts".format(name)):
                try:
                    os.remove("{}.ts".format(name))
                except:
                    stdoutnl("Failed to delete preexisting {}.ts.".format(name))

            with open("{}.ts".format(name), 'wb') as handle:
                for chunk_info in download_list:
                    file_path = chunk_info['file_path']
                    if not os.path.exists(file_path) or os.path.getsize(file_path) == 0:
                        break
                    with open(file_path, 'rb') as ts_file:
                        handle.write(ts_file.read())

            # don't delete temp if the download had missing chunks, just in case
            if pool.is_complete() and os.path.exists(temp_dir_name):
                try:
                    shutil.rmtree(temp_dir_name)
                except:
                    stdoutnl("Failed to delete temp folder: {}.".format(temp_dir_name))

            if pool.is_complete():
                stdoutnl("{}.ts Downloaded!".format(name))
            else:
                stdoutnl("{}.ts partially Downloaded!".format(name))

            # Convert video to .mp4.
            if convert:
                stdout("Converting to {}.mp4".format(name))

                if rotate:
                    Popen(FFMPEG_ROT.format(name), shell=True, stdout=PIPE).stdout.read()
                else:
                    Popen(FFMPEG_NOROT.format(name), shell=True, stdout=PIPE).stdout.read()

                stdoutnl("Converted to {}.mp4!".format(name))

                if clean and os.path.exists("{}.ts".format(name)):
                    try:
                        os.remove("{}.ts".format(name))
                    except:
                        stdout("Failed to delete {}.ts.".format(name))

    sys.exit(0)
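
The pyriscope example treats any argument matching URL_PATTERN as a broadcast URL (re.search(...) is not None) and later pulls chunk names out of the playlist with re.findall(r'chunk_\d+\.ts', ...). URL_PATTERN itself is defined elsewhere in processor.py, so the pattern below is only a stand-in illustrating the argument-splitting idea.

import re

# Stand-in for processor.py's URL_PATTERN; the real pattern is defined elsewhere.
URL_RE = re.compile(r"^https?://(www\.)?(periscope|pscp)\.tv/", re.IGNORECASE)

def split_args(args):
    urls, flags = [], []
    for arg in args:
        # re.search returns a match object or None, so it can be used directly as a test.
        (urls if re.search(URL_RE, arg) else flags).append(arg)
    return urls, flags

# split_args(["-c", "https://www.periscope.tv/w/abc"])
# -> (["https://www.periscope.tv/w/abc"], ["-c"])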

Example 96

Project: tp-libvirt Source File: virsh_setvcpus.py
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    count = params.get("setvcpus_count", "")
    convert_err = "Can't convert {0} to integer type"
    try:
        count = int(count)
    except ValueError:
        # 'count' may not be a valid number in negative tests
        logging.debug(convert_err.format(count))
    current_vcpu = int(params.get("setvcpus_current", "1"))
    try:
        current_vcpu = int(current_vcpu)
    except ValueError:
        raise error.TestError(convert_err.format(current_vcpu))
    max_vcpu = int(params.get("setvcpus_max", "4"))
    try:
        max_vcpu = int(max_vcpu)
    except ValueError:
        raise error.TestError(convert_err.format(max_vcpu))
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_prompt = params.get("remote_prompt", "#")
    tmpxml = os.path.join(test.tmpdir, 'tmp.xml')
    set_topology = "yes" == params.get("set_topology", "no")
    sockets = params.get("topology_sockets")
    cores = params.get("topology_cores")
    threads = params.get("topology_threads")
    start_vm_after_set = "yes" == params.get("start_vm_after_set", "no")
    start_vm_expect_fail = "yes" == params.get("start_vm_expect_fail", "no")

    # Early death
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        raise error.TestNAError("remote/local ip parameters not set.")

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is undergoing
    # lots of change, from using the hmp "cpu_set" command in 1.5
    # to a new qmp "cpu-add" command added in 1.6, where the "cpu_set" command
    # seems to have been deprecated, making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # for specific machine type versions. For the purposes of this test that
    # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from
    # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which
    # version of qemu/kvm was used to initially create/generate the XML for
    # the machine this could result in a newer qemu still using 1.4 or earlier
    # for the machine type.
    #

    try:
        if vm.is_alive():
            vm.destroy()

        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encountering the 'requested vcpus is greater than max allowable vcpus' error
        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        # Get the number of cpus, current value if set, and machine type
        orig_count, orig_current, mtype = get_xmldata(vm_name, tmpxml, options)
        logging.debug("Before run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", orig_count, orig_current, mtype)

        # Set cpu topology
        if set_topology:
            vmcpu_xml = vm_xml.VMCPUXML()
            vmcpu_xml['topology'] = {'sockets': sockets, 'cores': cores,
                                     'threads': threads}
            vmxml['cpu'] = vmcpu_xml
            vmxml.sync()

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if orig_count == 1 and count == 1:
            logging.debug("Original vCPU count is 1, just checking if setvcpus "
                          "can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test
        if vm_ref == "remote":
            (setvcpu_exit_status, status_error,
             setvcpu_exit_stderr) = remote_test(remote_ip,
                                                local_ip,
                                                remote_pwd,
                                                remote_prompt,
                                                vm_name,
                                                status_error)
        else:
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpus_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpus_invalid_id") is not None:
                    dom_option = params.get("setvcpus_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpus_invalid_uuid") is not None:
                    dom_option = params.get("setvcpus_invalid_uuid")
            else:
                dom_option = vm_ref

            option_list = options.split(" ")
            for item in option_list:
                if virsh.has_command_help_match(command, item) is None:
                    raise error.TestNAError("The current libvirt version"
                                            " doesn't support '%s' option"
                                            % item)
            status = virsh.setvcpus(dom_option, count_option, options,
                                    ignore_status=True, debug=True)
            setvcpu_exit_status = status.exit_status
            setvcpu_exit_stderr = status.stderr.strip()

            # Start VM after set vcpu
            if start_vm_after_set:
                if vm.is_alive():
                    logging.debug("VM already started")
                else:
                    result = virsh.start(vm_name, ignore_status=True,
                                         debug=True)
                    libvirt.check_exit_status(result, start_vm_expect_fail)

    finally:
        new_count, new_current, mtype = get_xmldata(vm_name, tmpxml, options)
        logging.debug("After run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", new_count, new_current, mtype)

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error == "yes":
        if setvcpu_exit_status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                raise error.TestNAError("guest <os> machine property '%s' "
                                        "may be too old to allow hotplug."
                                        % mtype)

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                raise error.TestNAError("virsh setvcpu hotplug unsupported, "
                                        " mtype=%s" % mtype)

            # Otherwise, it seems we have a real error
            raise error.TestFail("Run failed with right command mtype=%s"
                                 " stderr=%s" % (mtype, setvcpu_exit_stderr))
        else:
            if "--maximum" in options:
                if new_count != count:
                    raise error.TestFail("Changing guest maximum vcpus failed"
                                         " while virsh command return 0")
            else:
                if new_current != count:
                    raise error.TestFail("Changing guest current vcpus failed"
                                         " while virsh command return 0")

Example 97

Project: tp-qemu Source File: migration_with_dst_problem.py
@error.context_aware
def run(test, params, env):
    """
    KVM migration with destination problems.
    Contains group of test for testing qemu behavior if some
    problems happens on destination side.

    Tests are described right in test classes comments down in code.

    Test needs params: nettype = bridge.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    login_timeout = int(params.get("login_timeout", 360))
    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")

    test_rand = None
    mount_path = None
    while mount_path is None or os.path.exists(mount_path):
        test_rand = utils.generate_random_string(3)
        mount_path = ("%s/ni_mount_%s" %
                      (data_dir.get_data_dir(), test_rand))

    mig_dst = os.path.join(mount_path, "mig_dst")

    migration_exec_cmd_src = params.get("migration_exec_cmd_src",
                                        "gzip -c > %s")
    migration_exec_cmd_src = (migration_exec_cmd_src % (mig_dst))

    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            exc_info = None
            if args is None:
                args = []
            try:
                try:
                    ret = self.test(*args, **kargs)
                except Exception:
                    exc_info = sys.exc_info()
            finally:
                if hasattr(self, "clean"):
                    try:
                        self.clean()
                    except Exception:
                        if exc_info is None:
                            raise
                    if exc_info:
                        raise exc_info[0], exc_info[1], exc_info[2]
            return ret

    def control_service(session, service, init_service, action, timeout=60):
        """
        Control a service on the guest.

        :param session: guest session.
        :param service: service to control.
        :param init_service: name of the service for old-style service control.
        :param action: action to take on the service (start|stop|restart).
        """
        status = utils_misc.get_guest_service_status(session, service,
                                                     service_former=init_service)
        if action == "start" and status == "active":
            logging.debug("%s already started, no need start it again.",
                          service)
            return
        if action == "stop" and status == "inactive":
            logging.debug("%s already stopped, no need stop it again.",
                          service)
            return
        try:
            session.cmd("systemctl --version", timeout=timeout)
            session.cmd("systemctl %s %s.service" % (action, service),
                        timeout=timeout)
        except:
            session.cmd("service %s %s" % (init_service, action),
                        timeout=timeout)

    def set_nfs_server(vm, share_cfg):
        """
        Start nfs server on guest.

        :param vm: Virtual machine for vm.
        """
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = "echo '%s' > /etc/exports" % (share_cfg)
        control_service(session, "nfs-server", "nfs", "stop")
        session.cmd(cmd)
        control_service(session, "nfs-server", "nfs", "start")
        session.cmd("iptables -F")
        session.close()

    def umount(mount_path):
        """
        Unmount the NFS share at mount_path.

        :param mount_path: path where the nfs dir was mounted.
        """
        utils.run("umount -f %s" % (mount_path))

    def create_file_disk(dst_path, size):
        """
        Create a file of the given size and create an ext3 filesystem on it.

        :param dst_path: Path to file.
        :param size: Size of file in MB
        """
        utils.run("dd if=/dev/zero of=%s bs=1M count=%s" % (dst_path, size))
        utils.run("mkfs.ext3 -F %s" % (dst_path))

    def mount(disk_path, mount_path, options=None):
        """
        Mount Disk to path

        :param disk_path: Path to disk
        :param mount_path: Path where disk will be mounted.
        :param options: String with options for mount
        """
        if options is None:
            options = ""
        else:
            options = "%s" % options

        utils.run("mount %s %s %s" % (options, disk_path, mount_path))

    def find_disk_vm(vm, disk_serial):
        """
        Find the disk on the vm whose id ends with disk_serial.

        :param vm: VM where to find a disk.
        :param disk_serial: suffix of the disk id.

        :return: string Disk path
        """
        session = vm.wait_for_login(timeout=login_timeout)

        disk_path = os.path.join("/", "dev", "disk", "by-id")
        disks = session.cmd("ls %s" % disk_path).split("\n")
        session.close()
        disk = filter(lambda x: x.endswith(disk_serial), disks)
        if not disk:
            return None
        return os.path.join(disk_path, disk[0])

    def prepare_disk(vm, disk_path, mount_path):
        """
        Create an ext3 filesystem on the disk and mount it.

        :param vm: VM where to find a disk.
        :param disk_path: Path to the disk in the guest system.
        :param mount_path: Path where the disk will be mounted.
        """
        session = vm.wait_for_login(timeout=login_timeout)
        session.cmd("mkfs.ext3 -F %s" % (disk_path))
        session.cmd("mount %s %s" % (disk_path, mount_path))
        session.close()

    def disk_load(vm, src_path, dst_path, copy_timeout=None, dsize=None):
        """
        Start disk load. Cyclic copy from src_path to dst_path.

        :param vm: VM where to find a disk.
        :param src_path: Source of data
        :param dst_path: Path to destination
        :param copy_timeout: Timeout for copy
        :param dsize: Size of the data block which is periodically copied.
        """
        if dsize is None:
            dsize = 100
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = ("nohup /bin/bash -c 'while true; do dd if=%s of=%s bs=1M "
               "count=%s; done;' 2> /dev/null &" % (src_path, dst_path, dsize))
        pid = re.search(r"\[.+\] (.+)",
                        session.cmd_output(cmd, timeout=copy_timeout))
        return pid.group(1)

    class IscsiServer_tgt(object):

        """
        Class to set up and start an iSCSI server.
        """

        def __init__(self):
            self.server_name = "autotest_guest_" + test_rand
            self.user = "user1"
            self.passwd = "pass"
            self.config = """
<target %s:dev01>
    backing-store %s
    incominguser %s %s
</target>
"""

        def set_iscsi_server(self, vm_ds, disk_path, disk_size):
            """
            Set up the iscsi server with the given parameters.

            :param vm_ds: VM where the iscsi server should be started.
            :param disk_path: path where the disk should be placed.
            :param disk_size: size of the new disk.
            """
            session = vm_ds.wait_for_login(timeout=login_timeout)

            session.cmd("dd if=/dev/zero of=%s bs=1M count=%s" % (disk_path,
                                                                  disk_size))
            status, output = session.cmd_status_output("setenforce 0")
            if status not in [0, 127]:
                logging.warn("Function setenforce fails.\n %s" % (output))

            config = self.config % (self.server_name, disk_path,
                                    self.user, self.passwd)
            cmd = "cat > /etc/tgt/conf.d/virt.conf << EOF" + config + "EOF"
            control_service(session, "tgtd", "tgtd", "stop")
            session.sendline(cmd)
            control_service(session, "tgtd", "tgtd", "start")
            session.cmd("iptables -F")
            session.close()

        def find_disk(self):
            disk_path = os.path.join("/", "dev", "disk", "by-path")
            disks = utils.run("ls %s" % disk_path).stdout.split("\n")
            disk = filter(lambda x: self.server_name in x, disks)
            if not disk:
                return None
            return os.path.join(disk_path, disk[0].strip())

        def connect(self, vm_ds):
            """
            Connect to iscsi server on guest.

            :param vm_ds: Guest where is iscsi server running.

            :return: path where disk is connected.
            """
            ip_dst = vm_ds.get_address()
            utils.run("iscsiadm -m discovery -t st -p %s" % (ip_dst))

            server_ident = ('iscsiadm -m node --targetname "%s:dev01"'
                            ' --portal %s' % (self.server_name, ip_dst))
            utils.run("%s --op update --name node.session.auth.authmethod"
                      " --value CHAP" % (server_ident))
            utils.run("%s --op update --name node.session.auth.username"
                      " --value %s" % (server_ident, self.user))
            utils.run("%s --op update --name node.session.auth.password"
                      " --value %s" % (server_ident, self.passwd))
            utils.run("%s --login" % (server_ident))
            time.sleep(1.0)
            return self.find_disk()

        def disconnect(self):
            server_ident = ('iscsiadm -m node --targetname "%s:dev01"' %
                            (self.server_name))
            utils.run("%s --logout" % (server_ident))

    class IscsiServer(object):

        """
        Iscsi server implementation interface.
        """

        def __init__(self, iscsi_type, *args, **kargs):
            if iscsi_type == "tgt":
                self.ic = IscsiServer_tgt(*args, **kargs)
            else:
                raise NotImplementedError()

        def __getattr__(self, name):
            if self.ic:
                return self.ic.__getattribute__(name)
            raise AttributeError("Cannot find attribute %s in class" % name)

    class test_read_only_dest(MiniSubtest):

        """
        Migration to read-only destination by using a migration to file.

        1) Start guest with NFS server.
        2) Config NFS server share for read-only.
        3) Mount the read-only share to host.
        4) Start second guest and try to migrate to read-only dest.

        result) Migration should fail with error message about read-only dst.
        """

        def test(self):
            if params.get("nettype") != "bridge":
                raise error.TestNAError("Unable to start test without param"
                                        " nettype=bridge.")

            vm_ds = env.get_vm("virt_test_vm2_data_server")
            vm_guest = env.get_vm("virt_test_vm1_guest")
            ro_timeout = int(params.get("read_only_timeout", "480"))
            exp_str = r".*Read-only file system.*"
            utils.run("mkdir -p %s" % (mount_path))

            vm_ds.verify_alive()
            vm_guest.create()
            vm_guest.verify_alive()

            set_nfs_server(vm_ds, "/mnt *(ro,async,no_root_squash)")

            mount_src = "%s:/mnt" % (vm_ds.get_address())
            mount(mount_src, mount_path,
                  "-o hard,timeo=14,rsize=8192,wsize=8192")
            vm_guest.migrate(mig_timeout, mig_protocol,
                             not_wait_for_migration=True,
                             migration_exec_cmd_src=migration_exec_cmd_src,
                             env=env)

            if not utils_misc.wait_for(lambda: process_output_check(
                                       vm_guest.process, exp_str),
                                       timeout=ro_timeout, first=2):
                raise error.TestFail("The Read-only file system warning not"
                                     " come in time limit.")

        def clean(self):
            if os.path.exists(mig_dst):
                os.remove(mig_dst)
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)

    class test_low_space_dest(MiniSubtest):

        """
        Migrate to destination with low space.

        1) Start guest.
        2) Create disk with low space.
        3) Try to migrate to the disk.

        result) Migration should fail with a 'No space left on device' warning.
        """

        def test(self):
            self.disk_path = None
            while self.disk_path is None or os.path.exists(self.disk_path):
                self.disk_path = ("%s/disk_%s" %
                                  (test.tmpdir, utils.generate_random_string(3)))

            disk_size = utils.convert_data_size(params.get("disk_size", "10M"),
                                                default_sufix='M')
            disk_size /= 1024 * 1024    # To MB.

            exp_str = r".*gzip: stdout: No space left on device.*"
            vm_guest = env.get_vm("virt_test_vm1_guest")
            utils.run("mkdir -p %s" % (mount_path))

            vm_guest.verify_alive()
            vm_guest.wait_for_login(timeout=login_timeout)

            create_file_disk(self.disk_path, disk_size)
            mount(self.disk_path, mount_path, "-o loop")

            vm_guest.migrate(mig_timeout, mig_protocol,
                             not_wait_for_migration=True,
                             migration_exec_cmd_src=migration_exec_cmd_src,
                             env=env)

            if not utils_misc.wait_for(lambda: process_output_check(
                                       vm_guest.process, exp_str),
                                       timeout=60, first=1):
                raise error.TestFail("The migration to destination with low "
                                     "storage space didn't fail as it should.")

        def clean(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)
            if os.path.exists(self.disk_path):
                os.remove(self.disk_path)

    class test_extensive_io(MiniSubtest):

        """
        Migrate after extensive I/O (abstract base class). It only defines the
        basic functionality and the interface for the other tests.

        1) Start ds_guest which starts data server.
        2) Create disk for data stress in ds_guest.
        3) Share and prepare disk from ds_guest.
        4) Mount the disk to mount_path.
        5) Create disk for second guest in the mounted path.
        6) Start second guest with prepared disk.
        7) Start stress on the prepared disk on second guest.
        8) Wait a few seconds.
        9) Restart the data server.
        10) Migrate second guest.

        result) Migration should be successful.
        """

        def test(self):
            self.copier_pid = None
            if params.get("nettype") != "bridge":
                raise error.TestNAError("Unable to start test without param"
                                        " nettype=bridge.")

            self.disk_serial = params.get("drive_serial_image2_vm1",
                                          "nfs-disk-image2-vm1")
            self.disk_serial_src = params.get("drive_serial_image1_vm1",
                                              "root-image1-vm1")
            self.guest_mount_path = params.get("guest_disk_mount_path", "/mnt")
            self.copy_timeout = int(params.get("copy_timeout", "1024"))

            self.copy_block_size = params.get("copy_block_size", "100M")
            self.copy_block_size = utils.convert_data_size(
                self.copy_block_size,
                "M")
            self.disk_size = "%s" % (self.copy_block_size * 1.4)
            self.copy_block_size /= 1024 * 1024

            self.server_recover_timeout = (
                int(params.get("server_recover_timeout", "240")))

            utils.run("mkdir -p %s" % (mount_path))

            self.test_params()
            self.config()

            self.vm_guest_params = params.copy()
            self.vm_guest_params["images_base_dir_image2_vm1"] = mount_path
            self.vm_guest_params["image_name_image2_vm1"] = "ni_mount_%s/test" % (test_rand)
            self.vm_guest_params["image_size_image2_vm1"] = self.disk_size
            self.vm_guest_params = self.vm_guest_params.object_params("vm1")
            self.image2_vm_guest_params = (self.vm_guest_params.
                                           object_params("image2"))

            env_process.preprocess_image(test,
                                         self.image2_vm_guest_params,
                                         env)
            self.vm_guest.create(params=self.vm_guest_params)

            self.vm_guest.verify_alive()
            self.vm_guest.wait_for_login(timeout=login_timeout)
            self.workload()

            self.restart_server()

            self.vm_guest.migrate(mig_timeout, mig_protocol, env=env)

            try:
                self.vm_guest.verify_alive()
                self.vm_guest.wait_for_login(timeout=login_timeout)
            except aexpect.ExpectTimeoutError:
                raise error.TestFail("Migration should be successful.")

        def test_params(self):
            """
            Test specific params. Could be implemented in inherited class.
            """
            pass

        def config(self):
            """
            Test specific config.
            """
            raise NotImplementedError()

        def workload(self):
            disk_path = find_disk_vm(self.vm_guest, self.disk_serial)
            if disk_path is None:
                raise error.TestFail("It was impossible to find disk on VM")

            prepare_disk(self.vm_guest, disk_path, self.guest_mount_path)

            disk_path_src = find_disk_vm(self.vm_guest, self.disk_serial_src)
            dst_path = os.path.join(self.guest_mount_path, "test.data")
            self.copier_pid = disk_load(self.vm_guest, disk_path_src, dst_path,
                                        self.copy_timeout, self.copy_block_size)

        def restart_server(self):
            raise NotImplementedError()

        def clean_test(self):
            """
            Test specific cleanup.
            """
            pass

        def clean(self):
            if self.copier_pid:
                try:
                    if self.vm_guest.is_alive():
                        session = self.vm_guest.wait_for_login(timeout=login_timeout)
                        session.cmd("kill -9 %s" % (self.copier_pid))
                except:
                    logging.warn("It was impossible to stop copier. Something "
                                 "probably happened with GUEST or NFS server.")

            if params.get("kill_vm") == "yes":
                if self.vm_guest.is_alive():
                    self.vm_guest.destroy()
                    utils_misc.wait_for(lambda: self.vm_guest.is_dead(), 30,
                                        2, 2, "Waiting for dying of guest.")
                qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                                mount_path,
                                                None)
                qemu_img.check_image(self.image2_vm_guest_params,
                                     mount_path)

            self.clean_test()

    class test_extensive_io_nfs(test_extensive_io):

        """
        Migrate after extensive io.

        1) Start ds_guest which starts NFS server.
        2) Create disk for data stress in ds_guest.
        3) Share disk over NFS.
        4) Mount the disk to mount_path
        5) Create disk for second guest in the mounted path.
        6) Start second guest with prepared disk.
        7) Start stress on the prepared disk on second guest.
        8) Wait a few seconds.
        9) Restart NFS server.
        10) Migrate second guest.

        result) Migration should be successful.
        """

        def config(self):
            vm_ds = env.get_vm("virt_test_vm2_data_server")
            self.vm_guest = env.get_vm("vm1")
            self.image2_vm_guest_params = None
            self.copier_pid = None
            self.qemu_img = None

            vm_ds.verify_alive()
            self.control_session_ds = vm_ds.wait_for_login(timeout=login_timeout)

            set_nfs_server(vm_ds, "/mnt *(rw,async,no_root_squash)")

            mount_src = "%s:/mnt" % (vm_ds.get_address())
            mount(mount_src, mount_path,
                  "-o hard,timeo=14,rsize=8192,wsize=8192")

        def restart_server(self):
            time.sleep(10)  # Wait a while until the copy starts working.
            control_service(self.control_session_ds, "nfs-server",
                            "nfs", "stop")  # Stop NFS server
            time.sleep(5)
            control_service(self.control_session_ds, "nfs-server",
                            "nfs", "start")  # Start NFS server

            """
            Touch waits until all previous requests are invalidated
            (NFS grace period). Without the grace period the qemu start
            takes too long and the timers for machine creation die.
            """
            qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                            mount_path,
                                            None)
            utils.run("touch %s" % (qemu_img.image_filename),
                      self.server_recover_timeout)

        def clean_test(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)

    class test_extensive_io_iscsi(test_extensive_io):

        """
        Migrate after extensive io.

        1) Start ds_guest which starts iscsi server.
        2) Create disk for data stress in ds_guest.
        3) Share disk over iscsi.
        4) Join to disk on host.
        5) Prepare partition on the disk.
        6) Mount the disk to mount_path
        7) Create disk for second guest in the mounted path.
        8) Start second guest with prepared disk.
        9) Start stress on the prepared disk on second guest.
        10) Wait a few seconds.
        11) Restart iscsi server.
        12) Migrate second guest.

        result) Migration should be successful.
        """

        def test_params(self):
            self.iscsi_variant = params.get("iscsi_variant", "tgt")
            self.ds_disk_path = os.path.join(self.guest_mount_path, "test.img")

        def config(self):
            vm_ds = env.get_vm("virt_test_vm2_data_server")
            self.vm_guest = env.get_vm("vm1")
            self.image2_vm_guest_params = None
            self.copier_pid = None
            self.qemu_img = None

            vm_ds.verify_alive()
            self.control_session_ds = vm_ds.wait_for_login(timeout=login_timeout)

            self.isci_server = IscsiServer("tgt")
            disk_path = os.path.join(self.guest_mount_path, "disk1")
            self.isci_server.set_iscsi_server(vm_ds, disk_path,
                                              (int(float(self.disk_size) * 1.1) / (1024 * 1024)))
            self.host_disk_path = self.isci_server.connect(vm_ds)

            utils.run("mkfs.ext3 -F %s" % (self.host_disk_path))
            mount(self.host_disk_path, mount_path)

        def restart_server(self):
            time.sleep(10)  # Wait a while until the copy starts working.
            control_service(self.control_session_ds, "tgtd",
                            "tgtd", "stop", 240)  # Stop Iscsi server
            time.sleep(5)
            control_service(self.control_session_ds, "tgtd",
                            "tgtd", "start", 240)  # Start Iscsi server

            """
            Wait until the iSCSI server has restarted and is accessible
            again.
            """
            qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                            mount_path,
                                            None)
            utils.run("touch %s" % (qemu_img.image_filename),
                      self.server_recover_timeout)

        def clean_test(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)
            if os.path.exists(self.host_disk_path):
                self.isci_server.disconnect()

    test_type = params.get("test_type")
    if (test_type in locals()):
        tests_group = locals()[test_type]
        tests_group()
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " migration_with_dst_problem test" % test_type)

Example 98

Project: RMG-Py Source File: gamessparser.py
Function: extract
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile."""

        if line [1:12] == "INPUT CARD>":
            return

        # We are looking for this line:
        #           PARAMETERS CONTROLLING GEOMETRY SEARCH ARE
        #           ...
        #           OPTTOL = 1.000E-04          RMIN   = 1.500E-03
        if line[10:18] == "OPTTOL =":
            if not hasattr(self, "geotargets"):
                opttol = float(line.split()[2])
                self.geotargets = numpy.array([opttol, 3. / opttol], "d")
                        
        if line.find("FINAL") == 1:
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
        # Has to deal with such lines as:
        #  FINAL R-B3LYP ENERGY IS     -382.0507446475 AFTER  10 ITERATIONS
        #  FINAL ENERGY IS     -379.7594673378 AFTER   9 ITERATIONS
        # ...so take the number after the "IS"
            temp = line.split()
            self.scfenergies.append(utils.convertor(float(temp[temp.index("IS") + 1]), "hartree", "eV"))

        # Total energies after Moller-Plesset corrections
        if (line.find("RESULTS OF MOLLER-PLESSET") >= 0 or
            line[6:37] == "SCHWARZ INEQUALITY TEST SKIPPED"):
            # Output looks something like this:
            # RESULTS OF MOLLER-PLESSET 2ND ORDER CORRECTION ARE
            #         E(0)=      -285.7568061536
            #         E(1)=         0.0
            #         E(2)=        -0.9679419329
            #       E(MP2)=      -286.7247480864
            # where E(MP2) = E(0) + E(2)
            #
            # with GAMESS-US 12 Jan 2009 (R3) the preceding text is different:
            ##      DIRECT 4-INDEX TRANSFORMATION 
            ##      SCHWARZ INEQUALITY TEST SKIPPED          0 INTEGRAL BLOCKS
            ##                     E(SCF)=       -76.0088477471
            ##                       E(2)=        -0.1403745370
            ##                     E(MP2)=       -76.1492222841            
            if not hasattr(self, "mpenergies"):
                self.mpenergies = []
            # Each iteration has a new print-out
            self.mpenergies.append([])
            # GAMESS-US presently supports only second order corrections (MP2)
            # PC GAMESS also has higher levels (3rd and 4th), with different output
            # Only the highest level MP4 energy is gathered (SDQ or SDTQ)            
            while re.search("DONE WITH MP(\d) ENERGY", line) is None:
                line = inputfile.next()
                if len(line.split()) > 0:
                    # Only up to MP2 correction
                    if line.split()[0] == "E(MP2)=":
                        mp2energy = float(line.split()[1])
                        self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))
                    # MP2 before higher order calculations
                    if line.split()[0] == "E(MP2)":
                        mp2energy = float(line.split()[2])
                        self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))
                    if line.split()[0] == "E(MP3)":
                        mp3energy = float(line.split()[2])
                        self.mpenergies[-1].append(utils.convertor(mp3energy, "hartree", "eV"))
                    if line.split()[0] in ["E(MP4-SDQ)", "E(MP4-SDTQ)"]:
                        mp4energy = float(line.split()[2])
                        self.mpenergies[-1].append(utils.convertor(mp4energy, "hartree", "eV"))
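
            # Note: re.search acts as a sentinel here -- it keeps returning
            # None until a line such as "DONE WITH MP2 ENERGY" appears, which
            # ends the loop.  Illustrative check (not part of the parser):
            #   >>> re.search(r"DONE WITH MP(\d) ENERGY", "DONE WITH MP2 ENERGY").group(1)
            #   '2'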

        # Total energies after Coupled Cluster calculations
        # Only the highest Coupled Cluster level result is gathered
        if line[12:23] == "CCD ENERGY:":
            if not hasattr(self, "ccenergies"):
                self.ccenergies = []
            ccenergy = float(line.split()[2])
            self.ccenergies.append(utils.convertor(ccenergy, "hartree", "eV"))
        if line.find("CCSD") >= 0 and line.split()[0:2] == ["CCSD", "ENERGY:"]:
            if not hasattr(self, "ccenergies"):
                self.ccenergies = []
            ccenergy = float(line.split()[2])
            line = inputfile.next()
            if line[8:23] == "CCSD[T] ENERGY:":
                ccenergy = float(line.split()[2])
                line = inputfile.next()
                if line[8:23] == "CCSD(T) ENERGY:":
                    ccenergy = float(line.split()[2])
            self.ccenergies.append(utils.convertor(ccenergy, "hartree", "eV"))
        # Also collect MP2 energies, which are always calculated before CC
        if line [8:23] == "MBPT(2) ENERGY:":
            if not hasattr(self, "mpenergies"):
                self.mpenergies = []
            self.mpenergies.append([])
            mp2energy = float(line.split()[2])
            self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))

        # Extract charge and multiplicity
        if line[1:19] == "CHARGE OF MOLECULE":
            self.charge = int(line.split()[-1])
            self.mult = int(inputfile.next().split()[-1])

        # etenergies (used only for CIS runs now)
        if "EXCITATION ENERGIES" in line and line.find("DONE WITH") < 0:
            if not hasattr(self, "etenergies"):
                self.etenergies = []
            header = inputfile.next().rstrip()
            get_etosc = False
            if header.endswith("OSC. STR."):
                # water_cis_dets.out does not have the oscillator strength
                # in this table...it is extracted from a different section below
                get_etosc = True
                self.etoscs = []
            dashes = inputfile.next()
            line = inputfile.next()
            broken = line.split()
            while len(broken) > 0:
                # Take hartree value with more numbers, and convert.
                # Note that the values listed after this are also less exact!
                etenergy = float(broken[1])
                self.etenergies.append(utils.convertor(etenergy, "hartree", "cm-1"))
                if get_etosc:
                    etosc = float(broken[-1])
                    self.etoscs.append(etosc)
                broken = inputfile.next().split()

        # Detect the CI hamiltonian type, if applicable.
        # Should always be detected if CIS is done.
        if line[8:64] == "RESULTS FROM SPIN-ADAPTED ANTISYMMETRIZED PRODUCT (SAPS)":
            self.cihamtyp = "saps"
        if line[8:64] == "RESULTS FROM DETERMINANT BASED ATOMIC ORBITAL CI-SINGLES":
            self.cihamtyp = "dets"

        # etsecs (used only for CIS runs for now)
        if line[1:14] == "EXCITED STATE":
            if not hasattr(self, 'etsecs'):
                self.etsecs = []
            if not hasattr(self, 'etsyms'):
                self.etsyms = []
            statenumber = int(line.split()[2])
            spin = int(float(line.split()[7]))
            if spin == 0:
                sym = "Singlet"
            if spin == 1:
                sym = "Triplet"
            sym += '-' + line.split()[-1]
            self.etsyms.append(sym)
            # skip 5 lines
            for i in range(5):
                line = inputfile.next()
            line = inputfile.next()
            CIScontribs = []
            while line.strip()[0] != "-":
                MOtype = 0
                # alpha/beta are specified for hamtyp=dets
                if self.cihamtyp == "dets":
                    if line.split()[0] == "BETA":
                        MOtype = 1
                fromMO = int(line.split()[-3])-1
                toMO = int(line.split()[-2])-1
                coeff = float(line.split()[-1])
                # With the SAPS hamiltonian, the coefficients are multiplied
                #   by sqrt(2) so that they normalize to 1.
                # With DETS, both alpha and beta excitations are printed.
                # if self.cihamtyp == "saps":
                #    coeff /= numpy.sqrt(2.0)
                CIScontribs.append([(fromMO,MOtype),(toMO,MOtype),coeff])
                line = inputfile.next()
            self.etsecs.append(CIScontribs)

        # etoscs (used only for CIS runs now)
        if line[1:50] == "TRANSITION FROM THE GROUND STATE TO EXCITED STATE":
            if not hasattr(self, "etoscs"):
                self.etoscs = []
            statenumber = int(line.split()[-1])
            # skip 7 lines
            for i in range(8):
                line = inputfile.next()
            strength = float(line.split()[3])
            self.etoscs.append(strength)

        # TD-DFT for GAMESS-US
        if line[14:29] == "LET EXCITATIONS": # TRIPLET and SINGLET
            self.etenergies = []
            self.etoscs = []
            self.etsecs = []
            etsyms = []
            minus = inputfile.next()
            blank = inputfile.next()
            line = inputfile.next()
            # Loop starts on the STATE line
            while line.find("STATE") >= 0:
                broken = line.split()
                self.etenergies.append(utils.convertor(float(broken[-2]), "eV", "cm-1"))
                broken = inputfile.next().split()
                self.etoscs.append(float(broken[-1]))
                sym = inputfile.next() # Not always present
                if sym.find("SYMMETRY")>=0:
                    etsyms.append(sym.split()[-1])
                    header = inputfile.next()
                minus = inputfile.next()
                CIScontribs = []
                line = inputfile.next()
                while line.strip():
                    broken = line.split()
                    fromMO, toMO = [int(broken[x]) - 1 for x in [2, 4]]
                    CIScontribs.append([(fromMO, 0), (toMO, 0), float(broken[1])])
                    line = inputfile.next()
                self.etsecs.append(CIScontribs)
                line = inputfile.next()
            if etsyms: # Not always present
                self.etsyms = etsyms
         
        # Maximum and RMS gradients.
        if "MAXIMUM GRADIENT" in line or "RMS GRADIENT" in line:

            if not hasattr(self, "geovalues"):
                self.geovalues = []

            parts = line.split()

            # Newer versions (around 2006) have both maximum and RMS on one line:
            #       MAXIMUM GRADIENT =  0.0531540    RMS GRADIENT = 0.0189223
            if len(parts) == 8:
                maximum = float(parts[3])
                rms = float(parts[7])
            
            # In older versions of GAMESS, this spanned two lines, like this:
            #       MAXIMUM GRADIENT =    0.057578167
            #           RMS GRADIENT =    0.027589766
            if len(parts) == 4:
                maximum = float(parts[3])
                line = inputfile.next()
                parts = line.split()
                rms = float(parts[3])


            # FMO also prints two final one- and two-body gradients (see exam37):
            #   (1) MAXIMUM GRADIENT =  0.0531540    RMS GRADIENT = 0.0189223
            if len(parts) == 9:
                maximum = float(parts[4])
                rms = float(parts[8])

            self.geovalues.append([maximum, rms])

        if line[11:50] == "ATOMIC                      COORDINATES":
            # This is the input orientation, which is the only data available for
            # SP calcs, but which should be overwritten by the standard orientation
            # values, which is the only information available for all geoopt cycles.
            if not hasattr(self, "atomcoords"):
                self.atomcoords = []
                self.atomnos = []
            line = inputfile.next()
            atomcoords = []
            atomnos = []
            line = inputfile.next()
            while line.strip():
                temp = line.strip().split()
                atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in temp[2:5]])
                atomnos.append(int(round(float(temp[1])))) # Don't use the atom name as this is arbitrary
                line = inputfile.next()
            self.atomnos = numpy.array(atomnos, "i")
            self.atomcoords.append(atomcoords)

        if line[12:40] == "EQUILIBRIUM GEOMETRY LOCATED":
            # Prevent extraction of the final geometry twice
            self.geooptfinished = True
        
        if line[1:29] == "COORDINATES OF ALL ATOMS ARE" and not self.geooptfinished:
            # This is the standard orientation, which is the only coordinate
            # information available for all geometry optimisation cycles.
            # The input orientation will be overwritten if this is a geometry optimisation
            # We assume that a previous Input Orientation has been found and
            # used to extract the atomnos
            if self.firststdorient:
                self.firststdorient = False
                # Wipes out the single input coordinate at the start of the file
                self.atomcoords = []
                
            line = inputfile.next()
            hyphens = inputfile.next()

            atomcoords = []
            line = inputfile.next()                

            for i in range(self.natom):
                temp = line.strip().split()
                atomcoords.append(map(float, temp[2:5]))
                line = inputfile.next()
            self.atomcoords.append(atomcoords)
        
        # Section with SCF information.
        #
        # The space at the start of the search string is to differentiate from MCSCF.
        # Everything before the search string is stored as the type of SCF.
        # SCF types may include: BLYP, RHF, ROHF, UHF, etc.
        #
        # For example, in exam17 the section looks like this (note that this is GVB):
        #          ------------------------
        #          ROHF-GVB SCF CALCULATION
        #          ------------------------
        # GVB STEP WILL USE    119875 WORDS OF MEMORY.
        #
        #     MAXIT=  30   NPUNCH= 2   SQCDF TOL=1.0000E-05
        #     NUCLEAR ENERGY=        6.1597411978
        #     EXTRAP=T   DAMP=F   SHIFT=F   RSTRCT=F   DIIS=F  SOSCF=F
        #
        # ITER EX     TOTAL ENERGY       E CHANGE        SQCDF       DIIS ERROR
        #   0  0      -38.298939963   -38.298939963   0.131784454   0.000000000
        #   1  1      -38.332044339    -0.033104376   0.026019716   0.000000000
        # ... and will be terminated by a blank line.
        if line.rstrip()[-16:] == " SCF CALCULATION":

            # Remember the type of SCF.
            self.scftype = line.strip()[:-16]

            dashes = inputfile.next()

            while line [:5] != " ITER":

                # GVB uses SQCDF for checking convergence (for example in exam17).
                if "GVB" in self.scftype and "SQCDF TOL=" in line:
                    scftarget = float(line.split("=")[-1])

                # Normally however the density is used as the convergence criterion.
                # Deal with various versions:
                #   (GAMESS VERSION = 12 DEC 2003)
                #     DENSITY MATRIX CONV=  2.00E-05  DFT GRID SWITCH THRESHOLD=  3.00E-04
                #   (GAMESS VERSION = 22 FEB 2006)
                #     DENSITY MATRIX CONV=  1.00E-05
                #   (PC GAMESS version 6.2, Not DFT?)
                #     DENSITY CONV=  1.00E-05
                elif "DENSITY CONV" in line or "DENSITY MATRIX CONV" in line:
                    scftarget = float(line.split()[-1])

                line = inputfile.next()

            if not hasattr(self, "scftargets"):
                self.scftargets = []

            self.scftargets.append([scftarget])

            if not hasattr(self,"scfvalues"):
                self.scfvalues = []

            line = inputfile.next()

            # Normally the iterations print in 6 columns.
            # For ROHF, however, it is 5 columns, thus this extra parameter.
            if "ROHF" in self.scftype:
                valcol = 4
            else:
                valcol = 5

            # SCF iterations are terminated by a blank line.
            # The first four characters usually contains the step number.
            # However, lines can also contain messages, including:
            #   * * *   INITIATING DIIS PROCEDURE   * * *
            #   CONVERGED TO SWOFF, SO DFT CALCULATION IS NOW SWITCHED ON
            #   DFT CODE IS SWITCHING BACK TO THE FINER GRID
            values = []
            while line.strip():
                try:
                    temp = int(line[0:4])
                except ValueError:
                    pass
                else:
                    values.append([float(line.split()[valcol])])
                line = inputfile.next()
            self.scfvalues.append(values)

        if line.find("NORMAL COORDINATE ANALYSIS IN THE HARMONIC APPROXIMATION") >= 0:
        # GAMESS has...
        # MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
        #
        #     FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2,
        #     REDUCED MASSES IN AMU.
        #
        #                          1           2           3           4           5
        #       FREQUENCY:        52.49       41.45       17.61        9.23       10.61  
        #    REDUCED MASS:      3.92418     3.77048     5.43419     6.44636     5.50693
        #    IR INTENSITY:      0.00013     0.00001     0.00004     0.00000     0.00003

        # ...or in the case of a numerical Hessian job...

        # MODES 1 TO 5 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
        #
        #     FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2,
        #     REDUCED MASSES IN AMU.
        #
        #                          1           2           3           4           5
        #       FREQUENCY:         0.05        0.03        0.03       30.89       30.94  
        #    REDUCED MASS:      8.50125     8.50137     8.50136     1.06709     1.06709

        
        # whereas PC-GAMESS has...
        # MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
        #
        #     FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2
        #
        #                          1           2           3           4           5
        #       FREQUENCY:         5.89        1.46        0.01        0.01        0.01  
        #    IR INTENSITY:      0.00000     0.00000     0.00000     0.00000     0.00000
        
        # If Raman is present we have (for PC-GAMESS)...
        # MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
        #
        #     FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2
        #     RAMAN INTENSITIES IN ANGSTROM**4/AMU, DEPOLARIZATIONS ARE DIMENSIONLESS
        #
        #                          1           2           3           4           5
        #       FREQUENCY:         5.89        1.46        0.04        0.03        0.01  
        #    IR INTENSITY:      0.00000     0.00000     0.00000     0.00000     0.00000
        # RAMAN INTENSITY:       12.675       1.828       0.000       0.000       0.000
        #  DEPOLARIZATION:        0.750       0.750       0.124       0.009       0.750

        # If PC-GAMESS has not reached the stationary point we have
        # MODES 1 TO 5 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
        #
        #     FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2
        #
        #     *******************************************************
        #     * THIS IS NOT A STATIONARY POINT ON THE MOLECULAR PES *
        #     *     THE VIBRATIONAL ANALYSIS IS NOT VALID !!!       *
        #     *******************************************************
        #
        #                          1           2           3           4           5
        
        # MODES 2 TO 7 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.

            self.vibfreqs = []
            self.vibirs = []
            self.vibdisps = []

            # Need to get to the modes line
            warning = False
            while line.find("MODES") == -1:
                line = inputfile.next()
                if line.find("THIS IS NOT A STATIONARY POINT")>=0:
                    warning = True
            startrot = int(line.split()[1])
            endrot = int(line.split()[3])
            blank = inputfile.next()

            line = inputfile.next() # FREQUENCIES, etc.
            while line != blank:
                line = inputfile.next()
            if warning: # Get past the second warning
                line = inputfile.next()
                while line!= blank:
                    line = inputfile.next()
                self.logger.warning("This is not a stationary point on the molecular"
                                    "PES. The vibrational analysis is not valid.")
            
            freqNo = inputfile.next()
            while freqNo.find("SAYVETZ") == -1:
                freq = inputfile.next().strip().split()[1:]
            # May include imaginary frequencies
            #       FREQUENCY:       825.18 I    111.53       12.62       10.70        0.89
                newfreq = []
                for i, x in enumerate(freq):
                    if x!="I":
                        newfreq.append(float(x))
                    else:
                        newfreq[-1] = -newfreq[-1]
                self.vibfreqs.extend(newfreq)
                line = inputfile.next()
                if line.find("REDUCED") >= 0: # skip the reduced mass (not always present)
                    line = inputfile.next()
                if line.find("IR INTENSITY") >= 0:
                    # Not present if a numerical Hessian calculation
                    irIntensity = map(float, line.strip().split()[2:])
                    self.vibirs.extend([utils.convertor(x, "Debye^2/amu-Angstrom^2", "km/mol") for x in irIntensity])
                    line = inputfile.next()
                if line.find("RAMAN") >= 0:
                    if not hasattr(self,"vibramans"):
                        self.vibramans = []
                    ramanIntensity = line.strip().split()
                    self.vibramans.extend(map(float, ramanIntensity[2:]))
                    depolar = inputfile.next()
                    line = inputfile.next()
                assert line == blank

                # Extract the Cartesian displacement vectors
                p = [ [], [], [], [], [] ]
                for j in range(len(self.atomnos)):
                    q = [ [], [], [], [], [] ]
                    for k in range(3): # x, y, z
                        line = inputfile.next()[21:]
                        broken = map(float, line.split())
                        for l in range(len(broken)):
                            q[l].append(broken[l])
                    for k in range(len(broken)):
                        p[k].append(q[k])
                self.vibdisps.extend(p[:len(broken)])

                # Skip the Sayvetz stuff at the end
                for j in range(10):
                    line = inputfile.next()
                blank = inputfile.next()
                freqNo = inputfile.next()
            # Exclude rotations and translations
            self.vibfreqs = numpy.array(self.vibfreqs[:startrot-1]+self.vibfreqs[endrot:], "d")
            self.vibirs = numpy.array(self.vibirs[:startrot-1]+self.vibirs[endrot:], "d")
            self.vibdisps = numpy.array(self.vibdisps[:startrot-1]+self.vibdisps[endrot:], "d")
            if hasattr(self, "vibramans"):
                self.vibramans = numpy.array(self.vibramans[:startrot-1]+self.vibramans[endrot:], "d")

        if line[5:21] == "ATOMIC BASIS SET":
            self.gbasis = []
            line = inputfile.next()
            while line.find("SHELL")<0:
                line = inputfile.next()
            blank = inputfile.next()
            atomname = inputfile.next()
            # shellcounter stores the shell no of the last shell
            # in the previous set of primitives
            shellcounter = 1
            while line.find("TOTAL NUMBER")<0:
                blank = inputfile.next()
                line = inputfile.next()
                shellno = int(line.split()[0])
                shellgap = shellno - shellcounter
                gbasis = [] # Stores basis sets on one atom
                shellsize = 0
                while len(line.split())!=1 and line.find("TOTAL NUMBER")<0:
                    shellsize += 1
                    coeff = {}
                    # coefficients and symmetries for a block of rows
                    while line.strip():
                        temp = line.strip().split()
                        sym = temp[1]
                        assert sym in ['S', 'P', 'D', 'F', 'G', 'L']
                        if sym == "L": # L refers to SP
                            if len(temp)==6: # GAMESS US
                                coeff.setdefault("S", []).append( (float(temp[3]), float(temp[4])) )
                                coeff.setdefault("P", []).append( (float(temp[3]), float(temp[5])) )
                            else: # PC GAMESS
                                assert temp[6][-1] == temp[9][-1] == ')'
                                coeff.setdefault("S", []).append( (float(temp[3]), float(temp[6][:-1])) )
                                coeff.setdefault("P", []).append( (float(temp[3]), float(temp[9][:-1])) )
                        else:
                            if len(temp)==5: # GAMESS US
                                coeff.setdefault(sym, []).append( (float(temp[3]), float(temp[4])) )
                            else: # PC GAMESS
                                assert temp[6][-1] == ')'
                                coeff.setdefault(sym, []).append( (float(temp[3]), float(temp[6][:-1])) )
                        line = inputfile.next()
                    # either a blank or a continuation of the block
                    if sym == "L":
                        gbasis.append( ('S', coeff['S']))
                        gbasis.append( ('P', coeff['P']))
                    else:
                        gbasis.append( (sym, coeff[sym]))
                    line = inputfile.next()
                # either the start of the next block or the start of a new atom or
                # the end of the basis function section
                
                numtoadd = 1 + (shellgap / shellsize)
                shellcounter = shellno + shellsize
                for x in range(numtoadd):
                    self.gbasis.append(gbasis)

        if line.find("EIGENVECTORS") == 10 or line.find("MOLECULAR OBRITALS") == 10:
            # The details returned come from the *final* report of evalues and
            #   the last list of symmetries in the log file.
            # Should be followed by lines like this:
            #           ------------
            #           EIGENVECTORS
            #           ------------
            # 
            #                       1          2          3          4          5
            #                   -10.0162   -10.0161   -10.0039   -10.0039   -10.0029
            #                      BU         AG         BU         AG         AG  
            #     1  C  1  S    0.699293   0.699290  -0.027566   0.027799   0.002412
            #     2  C  1  S    0.031569   0.031361   0.004097  -0.004054  -0.000605
            #     3  C  1  X    0.000908   0.000632  -0.004163   0.004132   0.000619
            #     4  C  1  Y   -0.000019   0.000033   0.000668  -0.000651   0.005256
            #     5  C  1  Z    0.000000   0.000000   0.000000   0.000000   0.000000
            #     6  C  2  S   -0.699293   0.699290   0.027566   0.027799   0.002412
            #     7  C  2  S   -0.031569   0.031361  -0.004097  -0.004054  -0.000605
            #     8  C  2  X    0.000908  -0.000632  -0.004163  -0.004132  -0.000619
            #     9  C  2  Y   -0.000019  -0.000033   0.000668   0.000651  -0.005256
            #    10  C  2  Z    0.000000   0.000000   0.000000   0.000000   0.000000
            #    11  C  3  S   -0.018967  -0.019439   0.011799  -0.014884  -0.452328
            #    12  C  3  S   -0.007748  -0.006932   0.000680  -0.000695  -0.024917
            #    13  C  3  X    0.002628   0.002997   0.000018   0.000061  -0.003608
            # and so forth... with blanks lines between blocks of 5 orbitals each.
            # Warning! There are subtle differences between GAMESS-US and PC-GAMESS
            #   in the formatting of the first four columns.
            #
            # Watch out for F orbitals...
            # PC GAMESS
            #   19  C   1 YZ   0.000000   0.000000   0.000000   0.000000   0.000000
            #   20  C    XXX   0.000000   0.000000   0.000000   0.000000   0.002249
            #   21  C    YYY   0.000000   0.000000  -0.025555   0.000000   0.000000
            #   22  C    ZZZ   0.000000   0.000000   0.000000   0.002249   0.000000
            #   23  C    XXY   0.000000   0.000000   0.001343   0.000000   0.000000
            # GAMESS US
            #   55  C  1 XYZ   0.000000   0.000000   0.000000   0.000000   0.000000
            #   56  C  1XXXX  -0.000014  -0.000067   0.000000   0.000000   0.000000
            #
            # This is fine for GeoOpt and SP, but may be weird for TD and Freq.

            # This is the stuff that we can read from these blocks.
            self.moenergies = [[]]
            self.mosyms = [[]]
            if not hasattr(self, "nmo"):
                self.nmo = self.nbasis
            self.mocoeffs = [numpy.zeros((self.nmo, self.nbasis), "d")]
            readatombasis = False
            if not hasattr(self, "atombasis"):
                self.atombasis = []
                self.aonames = []
                for i in range(self.natom):
                    self.atombasis.append([])
                self.aonames = []
                readatombasis = True

            dashes = inputfile.next()
            for base in range(0, self.nmo, 5):

                line = inputfile.next()
                # Make sure that this section does not end prematurely - checked by regression test 2CO.ccsd.aug-cc-pVDZ.out.
                if line.strip() != "":
                    break
                
                numbers = inputfile.next() # Eigenvector numbers.

                # Sometimes there are some blank lines here.
                while not line.strip():
                    line = inputfile.next()

                # Eigenvalues for these orbitals (in hartrees).
                try:
                    self.moenergies[0].extend([utils.convertor(float(x), "hartree", "eV") for x in line.split()])
                except:
                    self.logger.warning('MO section found but could not be parsed!')
                    break

                # Orbital symmetries.
                line = inputfile.next()
                if line.strip():
                    self.mosyms[0].extend(map(self.normalisesym, line.split()))
                
                # Now we have nbasis lines.
                # Going to use the same method as for normalise_aonames()
                # to extract basis set information.
                p = re.compile("(\d+)\s*([A-Z][A-Z]?)\s*(\d+)\s*([A-Z]+)")
                oldatom ='0'
                for i in range(self.nbasis):
                    line = inputfile.next()

                    # If line is empty, break (ex. for FMO in exam37).
                    if not line.strip(): break

                    # Fill atombasis and aonames only first time around
                    if readatombasis and base == 0:
                        aonames = []
                        start = line[:17].strip()
                        m = p.search(start)
                        if m:
                            g = m.groups()
                            aoname = "%s%s_%s" % (g[1].capitalize(), g[2], g[3])
                            oldatom = g[2]
                            atomno = int(g[2])-1
                            orbno = int(g[0])-1
                        else: # For F orbitals, as shown above
                            g = [x.strip() for x in line.split()]
                            aoname = "%s%s_%s" % (g[1].capitalize(), oldatom, g[2])
                            atomno = int(oldatom)-1
                            orbno = int(g[0])-1
                        self.atombasis[atomno].append(orbno)
                        self.aonames.append(aoname)
                    coeffs = line[15:] # Strip off the crud at the start.
                    j = 0
                    while j*11+4 < len(coeffs):
                        self.mocoeffs[0][base+j, i] = float(coeffs[j * 11:(j + 1) * 11])
                        j += 1

            line = inputfile.next()
            # If it's restricted and no more properties:
            #  ...... END OF RHF/DFT CALCULATION ......
            # If there are more properties (DENSITY MATRIX):
            #               --------------
            #
            # If it's unrestricted we have:
            #
            #  ----- BETA SET ----- 
            #
            #          ------------
            #          EIGENVECTORS
            #          ------------
            #
            #                      1          2          3          4          5
            # ... and so forth.
            line = inputfile.next()
            if line[2:22] == "----- BETA SET -----":
                self.mocoeffs.append(numpy.zeros((self.nmo, self.nbasis), "d"))
                self.moenergies.append([])
                self.mosyms.append([])
                for i in range(4):
                    line = inputfile.next()
                for base in range(0, self.nmo, 5):
                    blank = inputfile.next()
                    line = inputfile.next() # Eigenvector no
                    line = inputfile.next()
                    self.moenergies[1].extend([utils.convertor(float(x), "hartree", "eV") for x in line.split()])
                    line = inputfile.next()
                    self.mosyms[1].extend(map(self.normalisesym, line.split()))
                    for i in range(self.nbasis):
                        line = inputfile.next()
                        temp = line[15:] # Strip off the crud at the start
                        j = 0
                        while j * 11 + 4 < len(temp):
                            self.mocoeffs[1][base+j, i] = float(temp[j * 11:(j + 1) * 11])
                            j += 1
                line = inputfile.next()
            self.moenergies = [numpy.array(x, "d") for x in self.moenergies]

        # Natural orbitals - presently support only CIS.
        # Looks basically the same as eigenvectors, without symmetry labels.
        if line[10:30] == "CIS NATURAL ORBITALS":

            self.nocoeffs = numpy.zeros((self.nmo, self.nbasis), "d")

            dashes = inputfile.next()
            for base in range(0, self.nmo, 5):

                blank = inputfile.next()
                numbers = inputfile.next() # Eigenvector numbers.

                # Eigenvalues for these natural orbitals (not in hartrees!).
                # Sometimes there are some blank lines before it.
                line = inputfile.next()
                while not line.strip():
                    line = inputfile.next()
                eigenvalues = line

                # Orbital symmetry labels are normally here for MO coefficients.
                line = inputfile.next()
                
                # Now we have nbasis lines with the coefficients.
                for i in range(self.nbasis):

                    line = inputfile.next()
                    coeffs = line[15:]
                    j = 0
                    while j*11+4 < len(coeffs):
                        self.nocoeffs[base+j, i] = float(coeffs[j * 11:(j + 1) * 11])
                        j += 1

        # We cannot trust this self.humos until we come to the phrase:
        #   SYMMETRIES FOR INITIAL GUESS ORBITALS FOLLOW
        # which either is followed by "ALPHA" or "BOTH" at which point we can say
        # for certain that it is an un/restricted calculation.
        # Note that MCSCF calcs also print this search string, so make sure
        #   that self.humos does not exist yet.
        if line[1:28] == "NUMBER OF OCCUPIED ORBITALS" and not hasattr(self,'humos'):
            humos = [int(line.split()[-1])-1]
            line = inputfile.next()
            humos.append(int(line.split()[-1])-1)
            self.humos = numpy.array(humos, "i")

        
        if line.find("SYMMETRIES FOR INITIAL GUESS ORBITALS FOLLOW") >= 0:
            # Not unrestricted, so lop off the second index.
            # In case the search string above was not used (ex. FMO in exam38),
            #   we can try to use the next line which should also contain the
            #   number of occupied orbitals.
            if line.find("BOTH SET(S)") >= 0:
                nextline = inputfile.next()
                if "ORBITALS ARE OCCUPIED" in nextline:
                    humos = int(nextline.split()[0])-1
                    if hasattr(self,"humos"):
                        try:
                            assert self.humos[0] == humos
                        except AssertionError:
                            self.logger.warning("Number of occupied orbitals not consistent. This is normal for ECP and FMO jobs.")
                    else:
                        self.humos = [humos]
                self.humos = numpy.resize(self.humos, [1])

        # Set the total number of atoms, only once.
        # Normally GAMESS print TOTAL NUMBER OF ATOMS, however in some cases
        #   this is slightly different (ex. lower case for FMO in exam37).
        if not hasattr(self,"natom") and "NUMBER OF ATOMS" in line.upper():
            self.natom = int(line.split()[-1])
            
        if line.find("NUMBER OF CARTESIAN GAUSSIAN BASIS") == 1 or line.find("TOTAL NUMBER OF BASIS FUNCTIONS") == 1:
            # The first is from Julien's Example and the second is from Alexander's
            # I think it happens if you use a polar basis function instead of a cartesian one
            self.nbasis = int(line.strip().split()[-1])
                
        elif line.find("SPHERICAL HARMONICS KEPT IN THE VARIATION SPACE") >= 0:
            # Note that this line is present if ISPHER=1, e.g. for C_bigbasis
            self.nmo = int(line.strip().split()[-1])
            
        elif line.find("TOTAL NUMBER OF MOS IN VARIATION SPACE") == 1:
            # Note that this line is not always present, so by default
            # NBsUse is set equal to NBasis (see below).
            self.nmo = int(line.split()[-1])

        elif line.find("OVERLAP MATRIX") == 0 or line.find("OVERLAP MATRIX") == 1:
            # The first is for PC-GAMESS, the second for GAMESS
            # Read 1-electron overlap matrix
            if not hasattr(self, "aooverlaps"):
                self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
            else:
                self.logger.info("Reading additional aooverlaps...")
            base = 0
            while base < self.nbasis:
                blank = inputfile.next()
                line = inputfile.next() # Basis fn number
                blank = inputfile.next()
                for i in range(self.nbasis - base): # Fewer lines each time
                    line = inputfile.next()
                    temp = line.split()
                    for j in range(4, len(temp)):
                        self.aooverlaps[base+j-4, i+base] = float(temp[j])
                        self.aooverlaps[i+base, base+j-4] = float(temp[j])
                base += 5

        # ECP Pseudopotential information
        if "ECP POTENTIALS" in line:
            if not hasattr(self, "coreelectrons"):
                self.coreelectrons = [0]*self.natom
            dashes = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()
            while header.split()[0] == "PARAMETERS":
                name = header[17:25]
                atomnum = int(header[34:40])
                # The pseudopotential is given explicitly
                if header[40:50] == "WITH ZCORE":
                    zcore = int(header[50:55])
                    lmax = int(header[63:67])
                    self.coreelectrons[atomnum-1] = zcore
                # The pseudopotential is copied from another atom
                if header[40:55] == "ARE THE SAME AS":
                    atomcopy = int(header[60:])
                    self.coreelectrons[atomnum-1] = self.coreelectrons[atomcopy-1]
                line = inputfile.next()
                while line.split() != []:
                    line = inputfile.next()
                header = inputfile.next()
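
The parser above keys each record off fixed line.find offsets, which is sensitive to the exact spacing and casing that GAMESS emits. As a rough, hypothetical sketch (the helper name parse_counts and the sample lines are made up for illustration and are not part of cclib), the same counts could instead be pulled out with re.search and re.IGNORECASE, in the spirit of the other examples on this page:

import re

# Hypothetical helper: match GAMESS count lines regardless of case or spacing.
def parse_counts(line, counts):
    patterns = {
        "natom": r"NUMBER OF ATOMS\s*=?\s*(\d+)",
        "nbasis": r"NUMBER OF (?:CARTESIAN GAUSSIAN )?BASIS(?: FUNCTIONS)?\s*=?\s*(\d+)",
    }
    for key, pattern in patterns.items():
        match = re.search(pattern, line, re.IGNORECASE)
        if match and key not in counts:
            counts[key] = int(match.group(1))
    return counts

counts = {}
parse_counts(" TOTAL NUMBER OF ATOMS                        =    3", counts)
parse_counts(" NUMBER OF CARTESIAN GAUSSIAN BASIS FUNCTIONS =   19", counts)
print(counts)  # {'natom': 3, 'nbasis': 19}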

Example 99

Project: babble Source File: test_mmap.py
def test_both():
    "Test mmap module on Unix systems and Windows"

    # Create a file to be mmap'ed.
    if os.path.exists(TESTFN):
        os.unlink(TESTFN)
    f = open(TESTFN, 'w+')

    try:    # unlink TESTFN no matter what
        # Write 2 pages worth of data to the file
        f.write('\0'* PAGESIZE)
        f.write('foo')
        f.write('\0'* (PAGESIZE-3) )
        f.flush()
        m = mmap.mmap(f.fileno(), 2 * PAGESIZE)
        f.close()

        # Simple sanity checks

        print type(m)  # SF bug 128713:  segfaulted on Linux
        print '  Position of foo:', m.find('foo') / float(PAGESIZE), 'pages'
        vereq(m.find('foo'), PAGESIZE)

        print '  Length of file:', len(m) / float(PAGESIZE), 'pages'
        vereq(len(m), 2*PAGESIZE)

        print '  Contents of byte 0:', repr(m[0])
        vereq(m[0], '\0')
        print '  Contents of first 3 bytes:', repr(m[0:3])
        vereq(m[0:3], '\0\0\0')

        # Modify the file's content
        print "\n  Modifying file's content..."
        m[0] = '3'
        m[PAGESIZE +3: PAGESIZE +3+3] = 'bar'

        # Check that the modification worked
        print '  Contents of byte 0:', repr(m[0])
        vereq(m[0], '3')
        print '  Contents of first 3 bytes:', repr(m[0:3])
        vereq(m[0:3], '3\0\0')
        print '  Contents of second page:',  repr(m[PAGESIZE-1 : PAGESIZE + 7])
        vereq(m[PAGESIZE-1 : PAGESIZE + 7], '\0foobar\0')

        m.flush()

        # Test doing a regular expression match in an mmap'ed file
        match = re.search('[A-Za-z]+', m)
        if match is None:
            print '  ERROR: regex match on mmap failed!'
        else:
            start, end = match.span(0)
            length = end - start

            print '  Regex match on mmap (page start, length of match):',
            print start / float(PAGESIZE), length

            vereq(start, PAGESIZE)
            vereq(end, PAGESIZE + 6)

        # test seeking around (try to overflow the seek implementation)
        m.seek(0,0)
        print '  Seek to zeroth byte'
        vereq(m.tell(), 0)
        m.seek(42,1)
        print '  Seek to 42nd byte'
        vereq(m.tell(), 42)
        m.seek(0,2)
        print '  Seek to last byte'
        vereq(m.tell(), len(m))

        print '  Try to seek to negative position...'
        try:
            m.seek(-1)
        except ValueError:
            pass
        else:
            verify(0, 'expected a ValueError but did not get it')

        print '  Try to seek beyond end of mmap...'
        try:
            m.seek(1,2)
        except ValueError:
            pass
        else:
            verify(0, 'expected a ValueError but did not get it')

        print '  Try to seek to negative position...'
        try:
            m.seek(-len(m)-1,2)
        except ValueError:
            pass
        else:
            verify(0, 'expected a ValueError but did not get it')

        # Try resizing map
        print '  Attempting resize()'
        try:
            m.resize(512)
        except SystemError:
            # resize() not supported
            # No messages are printed, since the output of this test suite
            # would then be different across platforms.
            pass
        else:
            # resize() is supported
            verify(len(m) == 512,
                    "len(m) is %d, but expecting 512" % (len(m),) )
            # Check that we can no longer seek beyond the new size.
            try:
                m.seek(513,0)
            except ValueError:
                pass
            else:
                verify(0, 'Could seek beyond the new size')

            # Check that the underlying file is truncated too
            # (bug #728515)
            f = open(TESTFN)
            f.seek(0, 2)
            verify(f.tell() == 512, 'Underlying file not truncated')
            f.close()
            verify(m.size() == 512, 'New size not reflected in file')

        m.close()

    finally:
        try:
            f.close()
        except OSError:
            pass
        try:
            os.unlink(TESTFN)
        except OSError:
            pass

    # Test for "access" keyword parameter
    try:
        mapsize = 10
        print "  Creating", mapsize, "byte test data file."
        open(TESTFN, "wb").write("a"*mapsize)
        print "  Opening mmap with access=ACCESS_READ"
        f = open(TESTFN, "rb")
        m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_READ)
        verify(m[:] == 'a'*mapsize, "Readonly memory map data incorrect.")

        print "  Ensuring that readonly mmap can't be slice assigned."
        try:
            m[:] = 'b'*mapsize
        except TypeError:
            pass
        else:
            verify(0, "Able to write to readonly memory map")

        print "  Ensuring that readonly mmap can't be item assigned."
        try:
            m[0] = 'b'
        except TypeError:
            pass
        else:
            verify(0, "Able to write to readonly memory map")

        print "  Ensuring that readonly mmap can't be write() to."
        try:
            m.seek(0,0)
            m.write('abc')
        except TypeError:
            pass
        else:
            verify(0, "Able to write to readonly memory map")

        print "  Ensuring that readonly mmap can't be write_byte() to."
        try:
            m.seek(0,0)
            m.write_byte('d')
        except TypeError:
            pass
        else:
            verify(0, "Able to write to readonly memory map")

        print "  Ensuring that readonly mmap can't be resized."
        try:
            m.resize(2*mapsize)
        except SystemError:   # resize is not universally supported
            pass
        except TypeError:
            pass
        else:
            verify(0, "Able to resize readonly memory map")
        del m, f
        verify(open(TESTFN, "rb").read() == 'a'*mapsize,
               "Readonly memory map data file was modified")

        print "  Opening mmap with size too big"
        import sys
        f = open(TESTFN, "r+b")
        try:
            m = mmap.mmap(f.fileno(), mapsize+1)
        except ValueError:
            # we do not expect a ValueError on Windows
            # CAUTION:  This also changes the size of the file on disk, and
            # later tests assume that the length hasn't changed.  We need to
            # repair that.
            if sys.platform.startswith('win'):
                verify(0, "Opening mmap with size+1 should work on Windows.")
        else:
            # we expect a ValueError on Unix, but not on Windows
            if not sys.platform.startswith('win'):
                verify(0, "Opening mmap with size+1 should raise ValueError.")
            m.close()
        f.close()
        if sys.platform.startswith('win'):
            # Repair damage from the resizing test.
            f = open(TESTFN, 'r+b')
            f.truncate(mapsize)
            f.close()

        print "  Opening mmap with access=ACCESS_WRITE"
        f = open(TESTFN, "r+b")
        m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_WRITE)
        print "  Modifying write-through memory map."
        m[:] = 'c'*mapsize
        verify(m[:] == 'c'*mapsize,
               "Write-through memory map memory not updated properly.")
        m.flush()
        m.close()
        f.close()
        f = open(TESTFN, 'rb')
        stuff = f.read()
        f.close()
        verify(stuff == 'c'*mapsize,
               "Write-through memory map data file not updated properly.")

        print "  Opening mmap with access=ACCESS_COPY"
        f = open(TESTFN, "r+b")
        m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_COPY)
        print "  Modifying copy-on-write memory map."
        m[:] = 'd'*mapsize
        verify(m[:] == 'd' * mapsize,
               "Copy-on-write memory map data not written correctly.")
        m.flush()
        verify(open(TESTFN, "rb").read() == 'c'*mapsize,
               "Copy-on-write test data file should not be modified.")
        try:
            print "  Ensuring copy-on-write maps cannot be resized."
            m.resize(2*mapsize)
        except TypeError:
            pass
        else:
            verify(0, "Copy-on-write mmap resize did not raise exception.")
        del m, f
        try:
            print "  Ensuring invalid access parameter raises exception."
            f = open(TESTFN, "r+b")
            m = mmap.mmap(f.fileno(), mapsize, access=4)
        except ValueError:
            pass
        else:
            verify(0, "Invalid access code should have raised exception.")

        if os.name == "posix":
            # Try incompatible flags, prot and access parameters.
            f = open(TESTFN, "r+b")
            try:
                m = mmap.mmap(f.fileno(), mapsize, flags=mmap.MAP_PRIVATE,
                              prot=mmap.PROT_READ, access=mmap.ACCESS_WRITE)
            except ValueError:
                pass
            else:
                verify(0, "Incompatible parameters should raise ValueError.")
            f.close()
    finally:
        try:
            os.unlink(TESTFN)
        except OSError:
            pass

    print '  Try opening a bad file descriptor...'
    try:
        mmap.mmap(-2, 4096)
    except mmap.error:
        pass
    else:
        verify(0, 'expected a mmap.error but did not get it')

    # Do a tougher .find() test.  SF bug 515943 pointed out that, in 2.2,
    # searching for data with embedded \0 bytes didn't work.
    f = open(TESTFN, 'w+')

    try:    # unlink TESTFN no matter what
        data = 'aabaac\x00deef\x00\x00aa\x00'
        n = len(data)
        f.write(data)
        f.flush()
        m = mmap.mmap(f.fileno(), n)
        f.close()

        for start in range(n+1):
            for finish in range(start, n+1):
                slice = data[start : finish]
                vereq(m.find(slice), data.find(slice))
                vereq(m.find(slice + 'x'), -1)
        m.close()

    finally:
        os.unlink(TESTFN)

    # make sure a double close doesn't crash on Solaris (Bug# 665913)
    f = open(TESTFN, 'w+')

    try:    # unlink TESTFN no matter what
        f.write(2**16 * 'a') # Arbitrary character
        f.close()

        f = open(TESTFN)
        mf = mmap.mmap(f.fileno(), 2**16, access=mmap.ACCESS_READ)
        mf.close()
        mf.close()
        f.close()

    finally:
        os.unlink(TESTFN)

    # test mapping of entire file by passing 0 for map length
    if hasattr(os, "stat"):
        print "  Ensuring that passing 0 as map length sets map size to current file size."
        f = open(TESTFN, "w+")

        try:
            f.write(2**16 * 'm') # Arbitrary character
            f.close()

            f = open(TESTFN, "rb+")
            mf = mmap.mmap(f.fileno(), 0)
            verify(len(mf) == 2**16, "Map size should equal file size.")
            vereq(mf.read(2**16), 2**16 * "m")
            mf.close()
            f.close()

        finally:
            os.unlink(TESTFN)

    # make sure move works everywhere (64-bit format problem earlier)
    f = open(TESTFN, 'w+')

    try:    # unlink TESTFN no matter what
        f.write("ABCDEabcde") # Arbitrary character
        f.flush()

        mf = mmap.mmap(f.fileno(), 10)
        mf.move(5, 0, 5)
        verify(mf[:] == "ABCDEABCDE", "Map move should have duplicated front 5")
        mf.close()
        f.close()

    finally:
        os.unlink(TESTFN)

    # Test that setting access to PROT_READ gives exception
    # rather than crashing
    if hasattr(mmap, "PROT_READ"):
        try:
            mapsize = 10
            open(TESTFN, "wb").write("a"*mapsize)
            f = open(TESTFN, "rb")
            m = mmap.mmap(f.fileno(), mapsize, prot=mmap.PROT_READ)
            try:
                m.write("foo")
            except TypeError:
                pass
            else:
                verify(0, "PROT_READ is not working")
        finally:
            os.unlink(TESTFN)
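
What makes this example relevant to this page is the call re.search('[A-Za-z]+', m): re.search accepts any buffer-like object, so it can scan the memory map directly rather than a copied string. A minimal sketch of the same idea, assuming Python 3, where the pattern must be bytes because mmap exposes a bytes-like buffer (the temporary-file setup below is only for illustration):

import mmap
import re
import tempfile

# Minimal sketch, assuming Python 3: scan a memory-mapped file with re.search.
with tempfile.TemporaryFile() as f:
    f.write(b"\0" * 4096 + b"foo" + b"\0" * 4093)
    f.flush()
    m = mmap.mmap(f.fileno(), 0)
    match = re.search(rb"[A-Za-z]+", m)     # bytes pattern against the mmap
    if match is not None:
        print(match.span(), match.group())  # (4096, 4099) b'foo'
    m.close()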

Example 100

Project: youtube-dl-GUI Source File: rtmp.py
Function: real_download
    def real_download(self, filename, info_dict):
        def run_rtmpdump(args):
            start = time.time()
            resume_percent = None
            resume_downloaded_data_len = None
            proc = subprocess.Popen(args, stderr=subprocess.PIPE)
            cursor_in_new_line = True
            proc_stderr_closed = False
            while not proc_stderr_closed:
                # read line from stderr
                line = ''
                while True:
                    char = proc.stderr.read(1)
                    if not char:
                        proc_stderr_closed = True
                        break
                    if char in [b'\r', b'\n']:
                        break
                    line += char.decode('ascii', 'replace')
                if not line:
                    # proc_stderr_closed is True
                    continue
                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                if mobj:
                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                    percent = float(mobj.group(2))
                    if not resume_percent:
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        data_len = int(downloaded_data_len * 100 / percent)
                    data_len_str = '~' + format_bytes(data_len)
                    self.report_progress(percent, data_len_str, speed, eta)
                    cursor_in_new_line = False
                    self._hook_progress({
                        'downloaded_bytes': downloaded_data_len,
                        'total_bytes': data_len,
                        'tmpfilename': tmpfilename,
                        'filename': filename,
                        'status': 'downloading',
                        'eta': eta,
                        'speed': speed,
                    })
                else:
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                    if mobj:
                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
                        self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
                        cursor_in_new_line = False
                        self._hook_progress({
                            'downloaded_bytes': downloaded_data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'status': 'downloading',
                            'speed': speed,
                        })
                    elif self.params.get('verbose', False):
                        if not cursor_in_new_line:
                            self.to_screen('')
                        cursor_in_new_line = True
                        self.to_screen('[rtmpdump] ' + line)
            proc.wait()
            if not cursor_in_new_line:
                self.to_screen('')
            return proc.returncode

        url = info_dict['url']
        player_url = info_dict.get('player_url', None)
        page_url = info_dict.get('page_url', None)
        app = info_dict.get('app', None)
        play_path = info_dict.get('play_path', None)
        tc_url = info_dict.get('tc_url', None)
        flash_version = info_dict.get('flash_version', None)
        live = info_dict.get('rtmp_live', False)
        conn = info_dict.get('rtmp_conn', None)
        protocol = info_dict.get('rtmp_protocol', None)

        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        test = self.params.get('test', False)

        # Check for rtmpdump first
        if not check_executable('rtmpdump', ['-h']):
            self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if app is not None:
            basic_args += ['--app', app]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            basic_args += ['--tcUrl', url]
        if test:
            basic_args += ['--stop', '1']
        if flash_version is not None:
            basic_args += ['--flashVer', flash_version]
        if live:
            basic_args += ['--live']
        if isinstance(conn, list):
            for entry in conn:
                basic_args += ['--conn', entry]
        elif isinstance(conn, compat_str):
            basic_args += ['--conn', conn]
        if protocol is not None:
            basic_args += ['--protocol', protocol]
        args = basic_args + [[], ['--resume', '--skip', '1']][not live and self.params.get('continuedl', False)]

        if sys.platform == 'win32' and sys.version_info < (3, 0):
            # Windows subprocess module does not actually support Unicode
            # on Python 2.x
            # See http://stackoverflow.com/a/9951851/35070
            subprocess_encoding = sys.getfilesystemencoding()
            args = [a.encode(subprocess_encoding, 'ignore') for a in args]
        else:
            subprocess_encoding = None

        if self.params.get('verbose', False):
            if subprocess_encoding:
                str_args = [
                    a.decode(subprocess_encoding) if isinstance(a, bytes) else a
                    for a in args]
            else:
                str_args = args
            try:
                import pipes
                shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
            except ImportError:
                shell_quote = repr
            self.to_screen('[debug] rtmpdump command line: ' + shell_quote(str_args))

        RD_SUCCESS = 0
        RD_FAILED = 1
        RD_INCOMPLETE = 2
        RD_NO_CONNECT = 3

        retval = run_rtmpdump(args)

        if retval == RD_NO_CONNECT:
            self.report_error('[rtmpdump] Could not connect to RTMP server.')
            return False

        while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] %s bytes' % prevsize)
            time.sleep(5.0)  # This seems to be needed
            retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED])
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == RD_FAILED:
                break
            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024:
                self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = RD_SUCCESS
                break
        if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr('\n')
            self.report_error('rtmpdump exited with code %d' % retval)
            return False
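
For reference, the progress pattern inside run_rtmpdump can be exercised on its own. The stderr line below is fabricated for illustration; the regex and the kB-to-bytes and percent conversions mirror the code in this example:

import re

# Fabricated rtmpdump stderr line (for illustration only).
line = '3215.123 kB / 12.42 sec (87.3%)'

mobj = re.search(
    r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)',
    line)
if mobj:
    downloaded_data_len = int(float(mobj.group(1)) * 1024)  # 3292285 bytes
    percent = float(mobj.group(2))                          # 87.3
    print(downloaded_data_len, percent)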