sys.stdout

Here are examples of the Python API sys.stdout, taken from open source projects.

152 Examples (page 7)
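Most of the examples below share one idiom: when the caller passes '-' (or no output file at all), the code falls back to sys.stdout instead of opening a file, and only closes the handle if it opened one itself (see Examples 101, 105 and 107). A minimal sketch of that pattern, with hypothetical names, looks like this:

import sys

def write_report(lines, out_path='-'):
    # Hypothetical helper: '-' (or None) means "write to standard output".
    fou = sys.stdout if out_path in (None, '-') else open(out_path, 'w')
    try:
        fou.writelines(line + '\n' for line in lines)
    finally:
        # Close only handles we opened ourselves; sys.stdout stays open.
        if fou is not sys.stdout:
            fou.close()

write_report(['alpha', 'beta'])             # printed to the console
write_report(['alpha', 'beta'], 'out.txt')  # written to a file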

Example 101

Project: fusioncatcher Source File: analyze_bowtie2.py
def merge_bowtie2(psl_in, psl_ou):
    #
    psl = []
    fou = None
    if psl_ou == '-':
        fou = sys.stdout
    else:
        fou = open(psl_ou,'w')

    limit_psl = 10**5

    for bucket in chunks(psl_in,min_count = 2):

        for box in itertools.combinations(bucket,2):

            if box[0][psl_strand] == box[1][psl_strand]:

                merged = None

                temp = box[0][:]

                r1_start = int(box[0][psl_qStart])
                r2_start = int(box[1][psl_qStart])
                if r1_start > r2_start:
                    box = (box[1],box[0])

                r1_start = int(box[0][psl_qStart])
                r1_end = int(box[0][psl_qEnd])
                r2_start = int(box[1][psl_qStart])
                r2_end = int(box[1][psl_qEnd])

                t1_start = int(box[0][psl_tStart])
                t1_end = int(box[0][psl_tEnd])
                t2_start = int(box[1][psl_tStart])
                t2_end = int(box[1][psl_tEnd])

                if t1_start > t2_start:
                    continue

                wiggle = 9
                if r1_end + wiggle > r2_start and r1_end < r2_start:
                    dif = r2_start - r1_end

                    # extend the first
                    #box[0][psl_matches] = str(int(box[0][psl_matches]))
                    #box[0][psl_misMatches] = str(int(box[0][psl_misMatches]) + dif)

                    box[0][psl_qEnd] = str(int(box[0][psl_qEnd]) + dif)
                    box[0][psl_tEnd] = str(int(box[0][psl_tEnd]) + dif)

                    t = box[0][psl_blockSizes].split(',')
                    t[-2] = str(int(t[-2]) + dif)
                    box[0][psl_blockSizes] = ','.join(t)

                    # recompute
                    r1_start = int(box[0][psl_qStart])
                    r1_end = int(box[0][psl_qEnd])

                    t1_start = int(box[0][psl_tStart])
                    t1_end = int(box[0][psl_tEnd])

                elif r1_end > r2_start and r1_end < r2_start + wiggle:
                    dif = r2_start - r1_end

                    # cut the second
                    box[1][psl_matches] = str(int(box[1][psl_matches]) - dif)
                    box[1][psl_misMatches] = str(int(box[1][psl_misMatches]) + dif)

                    box[1][psl_qStart] = str(int(box[1][psl_qStart]) + dif)
                    box[1][psl_tStart] = str(int(box[1][psl_tStart]) + dif)

                    t = box[1][psl_blockSizes].split(',')
                    t[0] = str(int(t[0]) - dif)
                    box[1][psl_blockSizes] = ','.join(t)

                    t = box[1][psl_qStarts].split(',')
                    t[0] = str(int(t[0]) + dif)
                    box[1][psl_qStarts] = ','.join(t)

                    t = box[1][psl_tStarts].split(',')
                    t[0] = str(int(t[0]) + dif)
                    box[1][psl_tStarts] = ','.join(t)

                    # recompute
                    r2_start = int(box[1][psl_qStart])
                    r2_end = int(box[1][psl_qEnd])

                    t2_start = int(box[1][psl_tStart])
                    t2_end = int(box[1][psl_tEnd])

                if r1_end <= r2_start and t1_end <= t2_start: #and box[0][psl_strand] == "+" :
                    temp[psl_matches] = int(box[0][psl_matches]) + int(box[1][psl_matches])
                    temp[psl_misMatches] = int(box[0][psl_misMatches]) - int(box[1][psl_matches])

                    temp[psl_qNumInsert] = int(box[0][psl_qNumInsert]) + int(box[1][psl_qNumInsert])
                    temp[psl_qBaseInsert] = int(box[0][psl_qBaseInsert]) + int(box[1][psl_qBaseInsert])
                    temp[psl_tNumInsert] = int(box[0][psl_tNumInsert]) + int(box[1][psl_tNumInsert])
                    temp[psl_tBaseInsert] = int(box[0][psl_tBaseInsert]) + int(box[1][psl_tBaseInsert])

                    temp[psl_qStart] = r1_start
                    temp[psl_qEnd] = r2_end

                    temp[psl_tStart] = t1_start
                    temp[psl_tEnd] = t2_end

                    temp[psl_blockCount] = int(box[0][psl_blockCount]) + int(box[1][psl_blockCount])
                    temp[psl_blockSizes] = box[0][psl_blockSizes] + box[1][psl_blockSizes]

                    temp[psl_qStarts] = box[0][psl_qStarts] + box[1][psl_qStarts]

                    temp[psl_tStarts] = box[0][psl_tStarts] + box[1][psl_tStarts]
                    temp[psl_tNumInsert] = '1'

                    merged = temp

#                elif r1_end <= r2_start and box[0][psl_strand] == "-" and t2_end <= t1_start:
#
#                    temp[psl_matches] = int(box[0][psl_matches]) + int(box[1][psl_matches])
#                    temp[psl_misMatches] = int(box[0][psl_misMatches]) - int(box[1][psl_matches])
#
#                    temp[psl_qNumInsert] = int(box[0][psl_qNumInsert]) + int(box[1][psl_qNumInsert])
#                    temp[psl_qBaseInsert] = int(box[0][psl_qBaseInsert]) + int(box[1][psl_qBaseInsert])
#                    temp[psl_tNumInsert] = int(box[0][psl_tNumInsert]) + int(box[1][psl_tNumInsert])
#                    temp[psl_tBaseInsert] = int(box[0][psl_tBaseInsert]) + int(box[1][psl_tBaseInsert])
#
#                    temp[psl_qStart] = r1_start
#                    temp[psl_qEnd] = r2_end
#
#                    temp[psl_tStart] = t2_start
#                    temp[psl_tEnd] = t1_end
#
#                    temp[psl_blockCount] = int(box[0][psl_blockCount]) + int(box[1][psl_blockCount])
#                    temp[psl_blockSizes] = box[1][psl_blockSizes] + box[0][psl_blockSizes]
#
#                    temp[psl_qStarts] = box[0][psl_qStarts] + box[1][psl_qStarts]
#
#                    temp[psl_tStarts] = box[1][psl_tStarts] + box[0][psl_tStarts]
#                    temp[psl_tNumInsert] = '1'
#
#                    merged = temp

                if merged:
                    gc.disable()
                    psl.append(map(str,merged))
                    gc.enable()
                    if len(psl) >= limit_psl:
                        fou.writelines(['\t'.join(line)+'\n' for line in psl])
                        psl = []
    # output PSL
    if psl:
        fou.writelines(['\t'.join(line)+'\n' for line in psl])

Example 102

Project: openfisca-france Source File: merge_ipp_tax_and_benefit_tables_with_parameters.py
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--ipp-translations',
        default = os.path.join(parameters_dir, 'ipp-tax-and-benefit-tables-to-parameters.yaml'),
        help = 'path of YAML file containing the association between IPP fields and OpenFisca parameters')
    parser.add_argument('-o', '--origin', default = os.path.join(parameters_dir, 'param.xml'),
        help = 'path of XML file containing the original OpenFisca parameters')
    parser.add_argument('-p', '--param-translations',
        default = os.path.join(parameters_dir, 'param-to-parameters.yaml'),
        help = 'path of YAML file containing the association between param elements and OpenFisca parameters')
    parser.add_argument('-s', '--source-dir', default = 'yaml-clean',
        help = 'path of source directory containing clean IPP YAML files')
    parser.add_argument('-t', '--target', default = os.path.join(parameters_dir, 'parameters.xml'),
        help = 'path of generated YAML file containing the association between IPP fields with OpenFisca parameters')
    parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
    args = parser.parse_args()
    logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)

    file_system_encoding = sys.getfilesystemencoding()

    original_element_tree = etree.parse(args.origin)
    original_root_element = original_element_tree.getroot()

    # Apply translations to original parameters.
    with open(args.param_translations) as param_translations_file:
        param_translations = yaml.load(param_translations_file)
    for old_path, new_path in param_translations.iteritems():
        parent_element = None
        element = original_root_element
        for name in old_path.split('.'):
            for child in element:
                if child.get('code') == name:
                    parent_element = element
                    element = child
                    break
            else:
                assert False, 'Path "{}" not found in "{}"'.format(old_path, args.origin)
        parent_element.remove(element)
        if new_path is not None:
            parent_element = original_root_element
            split_new_path = new_path.split('.')
            for name in split_new_path[:-1]:
                for child in parent_element:
                    if child.get('code') == name:
                        parent_element = child
                        break
                else:
                    parent_element = etree.SubElement(parent_element, 'NODE', attrib = dict(
                        code = name,
                        ))
            name = split_new_path[-1]
            assert all(
                child.get('code') != name
                for child in parent_element
                ), 'Path "{}" already exists in "{}"'.format(new_path, args.origin)
            element.set('code', name)
            parent_element.append(element)

    with open(args.ipp_translations) as ipp_translations_file:
        ipp_translations = yaml.load(ipp_translations_file)

    tree = collections.OrderedDict()
    for source_dir_encoded, directories_name_encoded, filenames_encoded in os.walk(args.source_dir):
        directories_name_encoded.sort()
        for filename_encoded in sorted(filenames_encoded):
            if not filename_encoded.endswith('.yaml'):
                continue
            filename = filename_encoded.decode(file_system_encoding)
            sheet_name = os.path.splitext(filename)[0]
            source_file_path_encoded = os.path.join(source_dir_encoded, filename_encoded)
            relative_file_path_encoded = source_file_path_encoded[len(args.source_dir):].lstrip(os.sep)
            relative_file_path = relative_file_path_encoded.decode(file_system_encoding)
            if sheet_name.isupper():
                continue
            assert sheet_name.islower(), sheet_name
            log.info(u'Loading file {}'.format(relative_file_path))
            with open(source_file_path_encoded) as source_file:
                data = yaml.load(source_file)
            rows = data.get(u"Valeurs")
            if rows is None:
                log.info(u'  Skipping file {} without "Valeurs"'.format(relative_file_path))
                continue
            row_by_start = {}
            for row in rows:
                start = row.get(u"Date d'effet")
                if start is None:
                    for date_name in date_names:
                        start = row.get(date_name)
                        if start is not None:
                            break
                    else:
                        # No date found. Skip row.
                        continue
                elif not isinstance(start, datetime.date):
                    start = start[u"Année Revenus"]
                row_by_start[start] = row
            sorted_row_by_start = sorted(row_by_start.iteritems())

            unsorted_relative_ipp_paths = set()
            relative_ipp_paths_by_start = {}
            for start, row in sorted_row_by_start:
                relative_ipp_paths_by_start[start] = start_relative_ipp_paths = []
                for name, child in row.iteritems():
                    if name in date_names:
                        continue
                    if name in note_names:
                        continue
                    if name in reference_names:
                        continue
                    start_relative_ipp_paths.extend(
                        (name,) + tuple(path)
                        for path, value in iter_ipp_values(child)
                        )
                unsorted_relative_ipp_paths.update(start_relative_ipp_paths)

            def compare_relative_ipp_paths(x, y):
                if x == y:
                    return 0
                for relative_ipp_paths in relative_ipp_paths_by_start.itervalues():
                    try:
                        return cmp(relative_ipp_paths.index(x), relative_ipp_paths.index(y))
                    except ValueError:
                        # Either x or y paths are missing in relative_ipp_paths => Their order can't be compared.
                        continue
                return -1

            sorted_relative_ipp_paths = sorted(unsorted_relative_ipp_paths, cmp = compare_relative_ipp_paths)

            for start, row in sorted_row_by_start:
                for relative_ipp_path in sorted_relative_ipp_paths:
                    value = row
                    for fragment in relative_ipp_path:
                        value = value.get(fragment)
                        if value is None:
                            break

                    if value in (u'-', u'na', u'nc'):
                        # Value is unknown. Previous value must be propagated.
                        continue
                    ipp_path = relative_file_path.split(os.sep)[:-1] + [sheet_name] + list(relative_ipp_path)

                    remaining_path = ipp_path[:]
                    skip_ipp_path = False
                    sub_tree = tree
                    translations = ipp_translations
                    translated_path = []
                    while remaining_path:
                        fragment = remaining_path.pop(0)
                        type = None
                        if translations is not None:
                            translations = translations.get(fragment, fragment)
                            if translations is None:
                                skip_ipp_path = True
                                break
                            elif isinstance(translations, dict):
                                translation = translations.get('RENAME')
                                if translation is not None:
                                    fragment = translation
                                type = translations.get('TYPE')
                                assert type in (None, u'BAREME')
                            else:
                                fragment = translations
                                translations = None
                        sub_path = [fragment] if isinstance(fragment, basestring) else fragment[:]
                        while sub_path:
                            fragment = sub_path.pop(0)
                            translated_path.append(fragment)
                            if fragment == u'ASSIETTE':
                                assert sub_tree.get('TYPE') == u'BAREME', str((translated_path, sub_path, sub_tree))
                                assert not sub_path
                                slice_name = remaining_path.pop(0)
                                assert not remaining_path
                                sub_tree = sub_tree.setdefault(u'ASSIETTE', collections.OrderedDict()).setdefault(
                                    slice_name, [])
                            elif fragment == u'BAREME':
                                existing_type = sub_tree.get('TYPE')
                                if existing_type is None:
                                    sub_tree['TYPE'] = fragment
                                else:
                                    assert existing_type == fragment
                            elif fragment == u'MONTANT':
                                assert sub_tree.get('TYPE') == u'BAREME', str((translated_path, sub_path, sub_tree))
                                assert not sub_path
                                slice_name = remaining_path.pop(0)
                                assert not remaining_path
                                sub_tree = sub_tree.setdefault(u'MONTANT', collections.OrderedDict()).setdefault(
                                    slice_name, [])
                            elif fragment == u'SEUIL':
                                assert sub_tree.get('TYPE') == u'BAREME', str((translated_path, sub_path, sub_tree))
                                assert not sub_path
                                slice_name = remaining_path.pop(0)
                                assert not remaining_path
                                sub_tree = sub_tree.setdefault(u'SEUIL', collections.OrderedDict()).setdefault(
                                    slice_name, [])
                            elif fragment == u'TAUX':
                                assert sub_tree.get('TYPE') == u'BAREME', str((translated_path, sub_path, sub_tree))
                                assert not sub_path
                                slice_name = remaining_path.pop(0)
                                assert not remaining_path
                                sub_tree = sub_tree.setdefault(u'TAUX', collections.OrderedDict()).setdefault(
                                    slice_name, [])
                            elif sub_path or remaining_path:
                                sub_tree = sub_tree.setdefault(fragment, collections.OrderedDict())
                                if type is not None:
                                    existing_type = sub_tree.get('TYPE')
                                    if existing_type is None:
                                        sub_tree['TYPE'] = type
                                    else:
                                        assert existing_type == type
                            else:
                                sub_tree = sub_tree.setdefault(fragment, [])
                    if skip_ipp_path:
                        continue
                    if sub_tree:
                        last_leaf = sub_tree[-1]
                        if last_leaf['value'] == value:
                            continue
                        last_leaf['stop'] = start - datetime.timedelta(days = 1)
                    sub_tree.append(dict(
                        start = start,
                        value = value,
                        ))

    root_element = transform_node_to_element(u'root', tree)
    root_element.set('deb', original_root_element.get('deb'))
    root_element.set('fin', original_root_element.get('fin'))
    merge_elements(root_element, original_root_element)
    sort_elements(root_element)
    reindent(root_element)

    element_tree = etree.ElementTree(root_element)
    element_tree.write(args.target, encoding = 'utf-8')

    return 0

Example 103

Project: karesansui Source File: add_bonding.py
    def process(self):
        (opts, args) = getopts()
        chkopts(opts)
        self.up_progress(10)

        dev_list = comma_split(opts.dev)
        if len(dev_list) < 2:
            # TRANSLATORS:
            #    There are too few devices for bonding
            raise KssCommandOptException('ERROR: Small device for bonding. - dev=%s' % (opts.dev))

        interface_list = get_ifconfig_info()
        for dev in dev_list:
            if dev not in interface_list:
                raise KssCommandOptException('ERROR: Bonding target device not found. - dev=%s' % (dev))

        if opts.primary not in dev_list:
            raise KssCommandOptException('ERROR: Primary device not found in bonding device. - primary=%s dev=%s' % (opts.primary, opts.dev))

        exist_bond_max_num = -1
        exist_bond_list = get_ifconfig_info("regex:^bond")
        for bond_name in exist_bond_list.keys():
            try:
                num = int(bond_name.replace("bond",""))
            except ValueError:
                continue

            if exist_bond_max_num < num:
                exist_bond_max_num = num

        self.up_progress(10)
        physical_bond_name = "bond%s" % (exist_bond_max_num + 1)
        bridge_bond_name = "bondbr%s" % (exist_bond_max_num + 1)
        bond_options = '"mode=%s primary=%s miimon=%s"' % (opts.mode, opts.primary, BONDING_CONFIG_MII_DEFAULT)
        self.up_progress(10)

        dop = DictOp()
        ifcfg_parser = ifcfgParser()
        modprobe_parser = modprobe_confParser()

        dop.addconf("ifcfg", ifcfg_parser.read_conf())
        if dop.getconf("ifcfg") == {}:
            raise KssCommandException('Failure read network config file.')

        dop.addconf("modprobe_conf", modprobe_parser.read_conf())
        if dop.getconf("modprobe_conf") == {}:
            raise KssCommandException('Failure read modprobe config file.')

        self.up_progress(10)
        eth_conf_copykey = ["HWADDR",
                            "BOOTPROTO",
                            "ONBOOT",
                            "USERCTL",
                            ]
        bond_conf_nocopykey = ["TYPE",
                               "HWADDR",
                               "MACADDR",
                               "ETHTOOL_OPTS",
                               "ESSID",
                               "CHANNEL",
                               ]

        self.up_progress(10)
        for dev in dev_list:
            conf = dop.get("ifcfg", dev)
            if dev == opts.primary:
                primary_conf = copy.deepcopy(conf)

            dop.unset("ifcfg", dev)
            dop.set("ifcfg", [dev, "DEVICE"], conf["DEVICE"]["value"])
            for key in eth_conf_copykey:
                if key in conf:
                    dop.set("ifcfg", [dev, key], conf[key]["value"])
            dop.set("ifcfg", [dev, "MASTER"], physical_bond_name)
            dop.set("ifcfg", [dev, "SLAVE"], "yes")
            dop.set("ifcfg", [dev, "BOOTPROTO"], "none")

            if dop.get("ifcfg", "p%s" % (dev)):
                hwaddr = dop.get("ifcfg", ["p%s" % (dev), "HWADDR"])
                if hwaddr:
                    dop.set("ifcfg", [dev, "HWADDR"], hwaddr)
                dop.unset("ifcfg", "p%s" % (dev))

        for key in bond_conf_nocopykey:
            if key in primary_conf:
                del primary_conf[key]

        dop.set("ifcfg", bridge_bond_name, primary_conf)
        dop.set("ifcfg", [bridge_bond_name, "DEVICE"], bridge_bond_name)
        dop.set("ifcfg", [bridge_bond_name, "TYPE"], "Bridge")

        dop.set("ifcfg", [physical_bond_name, "DEVICE"], physical_bond_name)
        dop.set("ifcfg", [physical_bond_name, "BRIDGE"], bridge_bond_name)
        dop.set("ifcfg", [physical_bond_name, "BOOTPROTO"], "none")
        dop.set("ifcfg", [physical_bond_name, "ONBOOT"], dop.get("ifcfg", [bridge_bond_name, "ONBOOT"]))
        dop.set("ifcfg", [physical_bond_name, "BONDING_OPTS"], bond_options)

        self.up_progress(10)
        dop.set("modprobe_conf", ["alias", physical_bond_name], "bonding")

        for dev in dev_list:
            if os.path.isfile("%s/ifcfg-%s" % (NETWORK_IFCFG_DIR, dev)):
                copy_file("%s/ifcfg-%s" % (NETWORK_IFCFG_DIR, dev), VENDOR_DATA_BONDING_EVACUATION_DIR)
            if os.path.isfile("%s/ifcfg-p%s" % (NETWORK_IFCFG_DIR, dev)):
                move_file("%s/ifcfg-p%s" % (NETWORK_IFCFG_DIR, dev), VENDOR_DATA_BONDING_EVACUATION_DIR)

        if ifcfg_parser.write_conf(dop.getconf("ifcfg")) is False:
            raise KssCommandException('Failure write network config file.')

        if modprobe_parser.write_conf(dop.getconf("modprobe_conf")) is False:
            raise KssCommandException('Failure write modprobe config file.')

        self.up_progress(10)
        #
        # Delete bridge device
        #
        bridge_list = get_bridge_info()
        for dev in dev_list:
            if dev in bridge_list:
                ifdown_cmd = (NETWORK_IFDOWN_COMMAND,
                              dev,
                              )
                (ifdown_rc, ifdown_res) = execute_command(ifdown_cmd)
                if ifdown_rc != 0:
                    raise KssCommandException('Failure stop interface. interface:%s' % (dev))

                for brif in bridge_list[dev]:
                    brctl_delif_cmd = (NETWORK_BRCTL_COMMAND,
                                       "delif",
                                       dev,
                                       brif,
                                       )
                    (brctl_rc, brctl_res) = execute_command(brctl_delif_cmd)
                    if brctl_rc != 0:
                        raise KssCommandException('Failure delete bridge port. bridge:%s port:%s' % (dev, brif))

                brctl_delbr_cmd = (NETWORK_BRCTL_COMMAND,
                                   "delbr",
                                   dev,
                                   )
                (brctl_rc, brctl_res) = execute_command(brctl_delbr_cmd)
                if brctl_rc != 0:
                    raise KssCommandException('Failure delete bridge. bridge:%s' % (dev,))

        self.up_progress(10)
        #
        # Restart network
        #
        network_restart_cmd = (NETWORK_COMMAND,
                               "restart",
                               )
        (net_rc, net_res) = execute_command(network_restart_cmd)
        if net_rc != 0:
            raise KssCommandException('Failure restart network.')

        self.logger.info("Created bonding device. - dev=%s bond=%s" % (opts.dev, bridge_bond_name))
        print >>sys.stdout, _("Created bonding device. - dev=%s bond=%s" % (opts.dev, bridge_bond_name))

        return True
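
The print >>sys.stdout, ... statement on the last line above is Python 2 redirection syntax. In Python 3 the same redirection is expressed with the file argument of the print() function, for example:

import sys

# Python 3 equivalent of `print >>sys.stdout, message`
print("Created bonding device.", file=sys.stdout)
# The same mechanism selects the error stream instead:
print("Something went wrong.", file=sys.stderr)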

Example 104

Project: filmkodi Source File: datetimeUtils.py
def convDate(language, datestr, frmt, newfrmt = '', offsetStr = ''):
    '''
    locale.setlocale(locale.LC_ALL, '')
    try:
        c = time.strptime(str(datestr).rstrip(),str(smart_unicode(frmt)).rstrip())
    except:
        xbmc.output('conversion failed')
        return datestr

    if c.tm_year != 1900:
        return time.strftime("%y/%m/%d",c)
    else:
        return time.strftime("%m/%d",c)
    '''

    try:
        datestr = datestr.encode('utf-8')
    except:
        datestr = datestr

    monthsEN = {
        'January':  1,
        'February': 2,
        'March':    3,
        'April':    4,
        'May':      5,
        'June':     6,
        'July':     7,
        'August':   8,
        'September':9,
        'October':  10,
        'November': 11,
        'December': 12
    }

    monthsDE = {
        'Januar':   1,
        'Februar':  2,
        u'März':    3,
        'Maerz':    3,
        'April':    4,
        'Mai':      5,
        'Juni':     6,
        'Juli':     7,
        'August':   8,
        'September':9,
        'Oktober':  10,
        'November': 11,
        'Dezember': 12
    }


    datesyms = {
        #DAY
        '%d':'\d{1,2}',
        '%a':'\w{3}',
        '%A':'[A-Za-z]{3,}',

        #MONTH
        '%m':'\d{2}',
        '%b':'\w{3}',
        '%B':'\w{3,}',

        #YEAR
        '%y':'\d{2}',
        '%Y':'\d{4}',

        #HOUR
        '%H':'\d{2}',
        '%I':'\d{1,2}',

        #AM/PM
        '%p':'\w{2}',
        '%P':'\w{2}',

        #MINUTE/SECOND
        '%M':'\d{2}',
        '%S':'\d{2}'
    }

    patFrmt = '(%\w)'
    idxFrmt = re.findall(patFrmt,frmt, re.DOTALL + re.IGNORECASE)

    try:
        for item in idxFrmt:
            if datesyms.has_key(item):
                frmt = frmt.replace(item,'(' + datesyms[item] + ')')

        p = re.compile(frmt, re.DOTALL + re.IGNORECASE)
        try:
            datestr = datestr.replace('ä','ae')  # ä
        except:
            datestr = datestr.replace(u'ä','ae')   # ä

        try:
            datestr = datestr.replace('\xe4','ae')
        except:
            pass

        m = p.match(datestr)
        if not m:
            return datestr

        second = 0
        minute = 0
        hour = 0
        dayhalf = ''
        day = 1
        month = 1
        year = 1900

        for item in m.groups(0):
            if not (idxFrmt[list(m.groups(0)).index(item)] is None):
                sym = idxFrmt[list(m.groups(0)).index(item)]
                if sym == '%B':
                    if monthsDE.has_key(item.capitalize()):
                        month = monthsDE[item.capitalize()]
                        continue
                    if monthsEN.has_key(item.capitalize()):
                        month = monthsEN[item.capitalize()]
                        continue
                elif sym == '%m':
                    month = int(item)
                elif sym == '%d':
                    day = int(item)
                elif sym == '%y' or sym == '%Y':
                    year = int(item)
                elif sym in ['%H','%I']:
                    hour = int(item)
                elif sym == '%M':
                    minute = int(item)
                elif sym == '%S':
                    second = int(item)
                elif sym == '%P':
                    dayhalf = str(item)

        if dayhalf != '' and dayhalf.lower() == 'pm' and hour < 12:
            hour = hour + 12
        if dayhalf != '' and dayhalf.lower() == 'am' and hour == 12:
            hour = 0
        date = datetime.datetime(year, month, day, hour, minute, second)

        if offsetStr:
            date = datetimeoffset(date, offsetStr)

        if newfrmt == '':
            if date.year != 1900:
                newfrmt = "%y/%m/%d"
            else:
                newfrmt = "%m/%d"

        return date.strftime(newfrmt)
    except:
        traceback.print_exc(file = sys.stdout)
        return datestr
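
traceback.print_exc writes to sys.stderr by default; the file=sys.stdout argument used in the except block above sends the formatted traceback to standard output instead. A self-contained sketch:

import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    # Redirect the traceback to stdout instead of the default stderr.
    traceback.print_exc(file=sys.stdout)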

Example 105

Project: Tardis Source File: Regenerate.py
def recoverObject(regenerator, info, bset, outputdir, path, linkDB, name=None, authenticate=True):
    """
    Main recovery routine.  Recover an object, based on the info object, and put it in outputdir.
    Note that path is for debugging only.
    """
    retCode = 0
    outname = None
    skip = False
    hasher = None
    try:
        if info:
            realname = info['name']
            if args.crypt and crypt:
                realname = crypt.decryptFilename(realname)
            realname = realname.decode('utf-8')

            if name:
                # This should only happen when only one file is specified.
                outname = name
            elif outputdir:
                outname = os.path.abspath(os.path.join(outputdir, realname))

            if outname and not checkOverwrite(outname, info):
                skip = True
                logger.warning("Skipping existing file: %s %s", Util.shortPath(path), notSame(path, outname, '(' + Util.shortPath(outname) + ')'))


            # First, determine if we're in a linking situation
            if linkDB is not None and info['nlinks'] > 1 and not info['dir']:
                key = (info['inode'], info['device'])
                if key in linkDB:
                    logger.info("Linking %s to %s", outname, linkDB[key])
                    os.link(linkDB[key], outname)
                    skip = True
                else:
                    linkDB[key] = outname

            # If it's a directory, create the directory, and recursively process it
            if info['dir']:
                if not outname:
                    #logger.error("Cannot regenerate directory %s without outputdir specified", path)
                    raise Exception("Cannot regenerate directory %s without outputdir specified" % (path))

                logger.info("Processing directory %s", Util.shortPath(path))

                contents = tardis.readDirectory((info['inode'], info['device']), bset)

                # Make sure an output directory is specified (really only useful at the top level)
                if not os.path.exists(outname):
                    os.mkdir(outname)

                dirInode = (info['inode'], info['device'])
                # For each file in the directory, regenerate it.
                for i in contents:
                    name = i['name']
                    # Get the Info
                    childInfo = tardis.getFileInfoByName(name, dirInode, bset)

                    # Decrypt filename, and make it UTF-8.
                    if args.crypt and crypt:
                        name = crypt.decryptFilename(name)
                    name = name.decode('utf-8')

                    # Recurse into the child, if it exists.
                    if childInfo:
                        if args.recurse or not childInfo['dir']:
                            recoverObject(regenerator, childInfo, bset, outname, os.path.join(path, name), linkDB, authenticate=authenticate)
                    else:
                        retCode += 1
            elif not skip:
                myname = outname if outname else "stdout"
                logger.info("Recovering file %s %s", Util.shortPath(path), notSame(path, myname, " => " + Util.shortPath(myname)))

                checksum = info['checksum']
                i = regenerator.recoverChecksum(checksum, authenticate)

                if i:
                    if authenticate:
                        hasher = Util.getHash(crypt)

                    if info['link']:
                        # read and make a link
                        i.seek(0)
                        x = i.read(16 * 1024)
                        if outname:
                            os.symlink(x, outname)
                        else:
                            logger.warning("No name specified for link: %s", x)
                        if hasher:
                            hasher.update(x)
                        pass
                    else:
                        if outname:
                            # Generate an output name
                            logger.debug("Writing output to %s", outname)
                            output = file(outname,  "wb")
                        else:
                            output = sys.stdout
                        try:
                            x = i.read(16 * 1024)
                            while x:
                                output.write(x)
                                if hasher:
                                    hasher.update(x)
                                x = i.read(16 * 1024)
                        except Exception as e:
                            logger.error("Unable to read file: {}: {}".format(i, repr(e)))
                            raise
                        finally:
                            i.close()
                            if output is not sys.stdout:
                                output.close()

                        if authenticate:
                            outname = doAuthenticate(outname, checksum, hasher.hexdigest())

            if outname and args.setperm:
                try:
                    os.chmod(outname, info['mode'])
                except Exception as e:
                    logger.warning("Unable to set permissions for %s", outname)
                try:
                    # Change the group, then the owner.
                    # Change the group first, as only root can change owner, and that might fail.
                    os.chown(outname, -1, info['gid'])
                    os.chown(outname, info['uid'], -1)
                except Exception as e:
                    logger.warning("Unable to set owner and group of %s", outname)
            if outname and args.setattrs and 'attr' in info and info['attr']:
                try:
                    f = regenerator.recoverChecksum(info['attr'], authenticate)
                    xattrs = json.loads(f.read())
                    x = xattr.xattr(outname)
                    for attr in xattrs.keys():
                        value = base64.b64decode(xattrs[attr])
                        try:
                            x.set(attr, value)
                        except IOError:
                            logger.warning("Unable to set extended attribute %s on %s", attr, outname)
                except Exception as e:
                    logger.warning("Unable to process extended attributes for %s", outname)
            if outname and args.setacl and 'acl' in info and info['acl']:
               try:
                   f = regenerator.recoverChecksum(info['acl'], authenticate)
                   acl = json.loads(f.read())
                   a = posix1e.ACL(text=acl)
                   a.applyto(outname)
               except Exception as e:
                   logger.warning("Unable to process extended attributes for %s", outname)

    except Exception as e:
        logger.error("Recovery of %s failed. %s", outname, e)
        #logger.exception(e)
        retCode += 1

    return retCode

Example 106

Project: xml4h Source File: writer.py
Function: write_node
def write_node(node, writer=None, encoding='utf-8', indent=0, newline='',
        omit_declaration=False, node_depth=0, quote_char='"'):
    """
    Serialize an *xml4h* DOM node and its descendants to text, writing
    the output to a given *writer* or to stdout.

    :param node: the DOM node whose content and descendants will
        be serialized.
    :type node: an :class:`xml4h.nodes.Node` or subclass
    :param writer: an object such as a file or stream to which XML text
        is sent. If *None* text is sent to :attr:`sys.stdout`.
    :type writer: a file, stream, etc or None
    :param string encoding: the character encoding for serialized text.
    :param indent: indentation prefix to apply to descendent nodes for
        pretty-printing. The value can take many forms:

        - *int*: the number of spaces to indent. 0 means no indent.
        - *string*: a literal prefix for indented nodes, such as ``\\t``.
        - *bool*: no indent if *False*, four spaces indent if *True*.
        - *None*: no indent.
    :type indent: string, int, bool, or None
    :param newline: the string value used to separate lines of output.
        The value can take a number of forms:

        - *string*: the literal newline value, such as ``\\n`` or ``\\r``.
          An empty string means no newline.
        - *bool*: no newline if *False*, ``\\n`` newline if *True*.
        - *None*: no newline.
    :type newline: string, bool, or None
    :param boolean omit_declaration: if *True* the XML declaration header
        is omitted, otherwise it is included. Note that the declaration is
        only output when serializing an :class:`xml4h.nodes.Document` node.
    :param int node_depth: the indentation level to start at, such as 2 to
        indent output as if the given *node* has two ancestors.
        This parameter will only be useful if you need to output XML text
        fragments that can be assembled into a document.  This parameter
        has no effect unless indentation is applied.
    :param string quote_char: the character that delimits quoted content.
        You should never need to mess with this.
    """
    def _sanitize_write_value(value):
        """Return XML-encoded value."""
        if not value:
            return value
        return (value
            .replace("&", "&amp;")
            .replace("<", "&lt;")
            .replace("\"", "&quot;")
            .replace(">", "&gt;")
            )

    def _write_node_impl(node, node_depth):
        """
        Internal write implementation that does the real work while keeping
        track of node depth.
        """
        # Output document declaration if we're outputting the whole doc
        if node.is_document:
            if not omit_declaration:
                writer.write(
                    '<?xml version=%s1.0%s' % (quote_char, quote_char))
                if encoding:
                    writer.write(' encoding=%s%s%s'
                        % (quote_char, encoding, quote_char))
                writer.write('?>%s' % newline)
            for child in node.children:
                _write_node_impl(child,
                    node_depth)  # node_depth not incremented
            writer.write(newline)
        elif node.is_document_type:
            writer.write("<!DOCTYPE %s SYSTEM %s%s%s"
                % (node.name, quote_char, node.public_id))
            if node.system_id is not None:
                writer.write(
                    " %s%s%s" % (quote_char, node.system_id, quote_char))
            if node.children:
                writer.write("[")
                for child in node.children:
                    _write_node_impl(child, node_depth + 1)
                writer.write("]")
            writer.write(">")
        elif node.is_text:
            writer.write(_sanitize_write_value(node.value))
        elif node.is_cdata:
            if ']]>' in node.value:
                raise ValueError("']]>' is not allowed in CDATA node value")
            writer.write("<![CDATA[%s]]>" % node.value)
        #elif node.is_entity_reference:  # TODO
        elif node.is_entity:
            writer.write(newline + indent * node_depth)
            writer.write("<!ENTITY ")
            if node.is_paremeter_entity:
                writer.write('%% ')
            writer.write("%s %s%s%s>"
                % (node.name, quote_char, node.value, quote_char))
        elif node.is_processing_instruction:
            writer.write(newline + indent * node_depth)
            writer.write("<?%s %s?>" % (node.target, node.data))
        elif node.is_comment:
            if '--' in node.value:
                raise ValueError("'--' is not allowed in COMMENT node value")
            writer.write("<!--%s-->" % node.value)
        elif node.is_notation:
            writer.write(newline + indent * node_depth)
            writer.write("<!NOTATION %s" % node.name)
            if node.is_system_identifier:
                writer.write(" system %s%s%s>"
                    % (quote_char, node.external_id, quote_char))
            elif node.is_system_identifier:
                writer.write(" system %s%s%s %s%s%s>"
                    % (quote_char, node.external_id, quote_char,
                    quote_char, node.uri, quote_char))
        elif node.is_attribute:
            writer.write(" %s=%s" % (node.name, quote_char))
            writer.write(_sanitize_write_value(node.value))
            writer.write(quote_char)
        elif node.is_element:
            # Only need a preceding newline if we're in a sub-element
            if node_depth > 0:
                writer.write(newline)
            writer.write(indent * node_depth)
            writer.write("<" + node.name)

            for attr in node.attribute_nodes:
                _write_node_impl(attr, node_depth)
            if node.children:
                found_indented_child = False
                writer.write(">")
                for child in node.children:
                    _write_node_impl(child, node_depth + 1)
                    if not (child.is_text
                            or child.is_comment
                            or child.is_cdata):
                        found_indented_child = True
                if found_indented_child:
                    writer.write(newline + indent * node_depth)
                writer.write('</%s>' % node.name)
            else:
                writer.write('/>')
        else:
            raise exceptions.Xml4hImplementationBug(
                'Cannot write node with class: %s' % node.__class__)

    # Sanitize whitespace parameters
    if indent is True:
        indent = ' ' * 4
    elif indent is False:
        indent = ''
    elif isinstance(indent, int):
        indent = ' ' * indent
    # If indent but no newline set, always apply a newline (it makes sense)
    if indent and not newline:
        newline = True

    if newline is None or newline is False:
        newline = ''
    elif newline is True:
        newline = '\n'

    # We always need a writer, use stdout by default
    if writer is None:
        writer = sys.stdout

    # Apply a text encoding if we have one
    if encoding is None:
        writer = writer
    else:
        writer = codecs.getwriter(encoding)(writer)

    # Do the business...
    _write_node_impl(node, node_depth)
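
A minimal usage sketch for the function above (the doc variable is a hypothetical xml4h document node obtained elsewhere, e.g. by parsing or building a document): leaving writer as None sends the serialized XML to sys.stdout, as the docstring describes.

import sys

write_node(doc)                      # writer=None falls back to sys.stdout
write_node(doc, writer=sys.stdout,   # same destination, stated explicitly,
           indent=4, newline=True)   # pretty-printed with a 4-space indent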

Example 107

Project: pdf-table-extract Source File: core.py
def process_page(infile, pgs, 
    outfilename=None,
    greyscale_threshold=25,
    page=None,
    crop=None,
    line_length=0.17,
    bitmap_resolution=300,
    name=None,
    pad=2,
    white=None,
    black=None,
    bitmap=False, 
    checkcrop=False, 
    checklines=False, 
    checkdivs=False,
    checkcells=False,
    whitespace="normalize",
    boxes=False) :
    
  outfile = open(outfilename,'w') if outfilename else sys.stdout
  page=page or []
  (pg,frow,lrow) = (map(int,(pgs.split(":")))+[None,None])[0:3]
  # check that pdftoppm exists by running a simple command
  check_for_required_executable("pdftoppm",["pdftoppm","-h"])
  #end check

  p = popen("pdftoppm", ("pdftoppm -gray -r %d -f %d -l %d %s " %
      (bitmap_resolution,pg,pg,quote(infile))),
      stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True )

#-----------------------------------------------------------------------
# image load section.

  (maxval, width, height, data) = readPNM(p.stdout)

  pad = int(pad)
  height+=pad*2
  width+=pad*2
  
# re-embed the image with a white pad.
  bmp = ones( (height,width) , dtype=bool )
  bmp[pad:height-pad,pad:width-pad] = ( data[:,:] > int(255.0*greyscale_threshold/100.0) )

# Set up debugging image.
  img = zeros( (height,width,3) , dtype=uint8 )
  img[:,:,0] = bmp*255
  img[:,:,1] = bmp*255
  img[:,:,2] = bmp*255

#-----------------------------------------------------------------------
# Find bounding box.
  t=0
  while t < height and sum(bmp[t,:]==0) == 0 :
    t=t+1
  if t > 0 :
    t=t-1
  
  b=height-1
  while b > t and sum(bmp[b,:]==0) == 0 :
    b=b-1
  if b < height-1:
    b = b+1
  
  l=0
  while l < width and sum(bmp[:,l]==0) == 0 :
    l=l+1
  if l > 0 :
    l=l-1
  
  r=width-1
  while r > l and sum(bmp[:,r]==0) == 0 :
    r=r-1
  if r < width-1 :
    r=r+1
  
# Mark bounding box.
  bmp[t,:] = 0
  bmp[b,:] = 0
  bmp[:,l] = 0
  bmp[:,r] = 0

  def boxOfString(x,p) :
    s = x.split(":")
    if len(s) < 4 :
      raise ValueError("boxes have format left:top:right:bottom[:page]")
    return ([bitmap_resolution * float(x) + pad for x in s[0:4] ]
                + [ p if len(s)<5 else int(s[4]) ] ) 


# translate crop to paint white.
  whites = []
  if crop :
    (l,t,r,b,p) = boxOfString(crop,pg) 
    whites.extend( [ (0,0,l,height,p), (0,0,width,t,p),
                     (r,0,width,height,p), (0,b,width,height,p) ] )

# paint white ...
  if white :
    whites.extend( [ boxOfString(b, pg) for b in white ] )

  for (l,t,r,b,p) in whites :
    if p == pg :
      bmp[ t:b+1,l:r+1 ] = 1
      img[ t:b+1,l:r+1 ] = [255,255,255]
  
# paint black ...
  if black :
    for b in black :
      (l,t,r,b) = [bitmap_resolution * float(x) + pad for x in b.split(":") ]
      bmp[ t:b+1,l:r+1 ] = 0
      img[ t:b+1,l:r+1 ] = [0,0,0]

  if checkcrop :
    dumpImage(outfile,bmp,img, bitmap, pad)
    return True
    
#-----------------------------------------------------------------------
# Line finding section.
#
# Find all vertical or horizontal lines that are longer than lthresh;
# these are considered lines on the table grid.

  lthresh = int(line_length * bitmap_resolution)
  vs = zeros(width, dtype=int)
  for i in range(width) :
    dd = diff( where(bmp[:,i])[0] ) 
    if len(dd)>0:
      v = max ( dd )
      if v > lthresh :
        vs[i] = 1
    else:
# it was a solid black line.
      if bmp[0,i] == 0 :
        vs[i] = 1
  vd= ( where(diff(vs[:]))[0] +1 )

  hs = zeros(height, dtype=int)
  for j in range(height) :
    dd = diff( where(bmp[j,:]==1)[0] )
    if len(dd) > 0 :
      h = max ( dd )
      if h > lthresh :
        hs[j] = 1
    else:
# it was a solid black line.
      if bmp[j,0] == 0 :
        hs[j] = 1
  hd=(  where(diff(hs[:]==1))[0] +1 )

#-----------------------------------------------------------------------
# Look for dividers that are too large.
  maxdiv=10
  i=0

  while i < len(vd) :
    if vd[i+1]-vd[i] > maxdiv :
      vd = delete(vd,i)
      vd = delete(vd,i)
    else:
      i=i+2
  
  j = 0 
  while j < len(hd):
    if hd[j+1]-hd[j] > maxdiv :
      hd = delete(hd,j)
      hd = delete(hd,j)
    else:
      j=j+2
  
  if checklines :
    for i in vd :
      img[:,i] = [255,0,0] # red
  
    for j in hd :
      img[j,:] = [0,0,255] # blue
    dumpImage(outfile,bmp,img)
    return True
#-----------------------------------------------------------------------
# divider checking.
#
# at this point vd holds the x coordinates of vertical divider transitions and
# hd holds the y coordinates of horizontal divider transitions for the
# vertical and horizontal lines in the table grid.

  def isDiv(a, l,r,t,b) :
          # if any col or row (in axis) is all zeros ...
    return sum( sum(bmp[t:b, l:r], axis=a)==0 ) >0 

  if checkdivs :
    img = img / 2
    for j in range(0,len(hd),2):
      for i in range(0,len(vd),2):
        if i>0 :
          (l,r,t,b) = (vd[i-1], vd[i],   hd[j],   hd[j+1]) 
          img[ t:b, l:r, 1 ] = 192
          if isDiv(1, l,r,t,b) :
            img[ t:b, l:r, 0 ] = 0
            img[ t:b, l:r, 2 ] = 255
          
        if j>0 :
          (l,r,t,b) = (vd[i],   vd[i+1], hd[j-1], hd[j] )
          img[ t:b, l:r, 1 ] = 128
          if isDiv(0, l,r,t,b) :
            img[ t:b, l:r, 0 ] = 255
            img[ t:b, l:r, 2 ] = 0
    dumpImage(outfile,bmp,img)
    return True
#-----------------------------------------------------------------------
# Cell finding section.
# This algorithm is width hungry, and always generates rectangular
# boxes.

  cells =[] 
  touched = zeros( (len(hd), len(vd)),dtype=bool )
  j = 0
  while j*2+2 < len (hd) :
    i = 0
    while i*2+2 < len(vd) :
      u = 1
      v = 1
      if not touched[j,i] :
        while 2+(i+u)*2 < len(vd) and \
            not isDiv( 0, vd[ 2*(i+u) ], vd[ 2*(i+u)+1],
               hd[ 2*(j+v)-1 ], hd[ 2*(j+v) ] ):
          u=u+1
        bot = False
        while 2+(j+v)*2 < len(hd) and not bot :
          bot = False
          for k in range(1,u+1) :
            bot |= isDiv( 1, vd[ 2*(i+k)-1 ], vd[ 2*(i+k)],
               hd[ 2*(j+v) ], hd[ 2*(j+v)+1 ] )
          if not bot :
            v=v+1
        cells.append( (i,j,u,v) )
        touched[ j:j+v, i:i+u] = True
      i = i+1
    j=j+1
  
  
  if checkcells :
    nc = len(cells)+0.
    img = img / 2
    for k in range(len(cells)):
      (i,j,u,v) = cells[k]
      (l,r,t,b) = ( vd[2*i+1] , vd[ 2*(i+u) ], hd[2*j+1], hd[2*(j+v)] )
      img[ t:b, l:r ] += col( k/nc )
    dumpImage(outfile,bmp,img)
    return True
  
#-----------------------------------------------------------------------
# fork out to extract text for each cell.

  whitespace = re.compile( r'\s+')
   
  def getCell( (i,j,u,v) ):
    (l,r,t,b) = ( vd[2*i+1] , vd[ 2*(i+u) ], hd[2*j+1], hd[2*(j+v)] )
    p = popen("pdftotext", 
              "pdftotext -r %d -x %d -y %d -W %d -H %d -layout -nopgbrk -f %d -l %d %s -" % (bitmap_resolution, l-pad, t-pad, r-l, b-t, pg, pg, quote(infile)),
              stdout=subprocess.PIPE, 
              shell=True )
    
    ret = p.communicate()[0]
    if whitespace != 'raw' :
      ret = whitespace.sub( "" if whitespace == "none" else " ", ret )
      if len(ret) > 0 :
        ret = ret[ (1 if ret[0]==' ' else 0) : 
                   len(ret) - (1 if ret[-1]==' ' else 0) ]
    return (i,j,u,v,pg,ret)
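
A minimal invocation sketch for process_page (the PDF path is a placeholder): with outfilename left as None, the first line of the function selects sys.stdout, so the extracted table data goes to standard output.

# Hypothetical invocation; "sample.pdf" stands in for a real input file.
process_page("sample.pdf", "1", outfilename=None)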

Example 108

Project: appdaemon Source File: appdaemon.py
def main():

  global config
  global config_file
  global config_file_modified

  #import appdaemon.stacktracer
  #appdaemon.stacktracer.trace_start("/tmp/trace.html")
  
  # Windows does not support SIGUSR1 or SIGUSR2
  if platform.system() != "Windows":
    signal.signal(signal.SIGUSR1, handle_sig)
    signal.signal(signal.SIGUSR2, handle_sig)

  
  # Get command line args

  parser = argparse.ArgumentParser()

  parser.add_argument("-c", "--config", help="full path to config file", type=str, default = None)
  parser.add_argument("-p", "--pidfile", help="full path to PID File", default = "/tmp/hapush.pid")
  parser.add_argument("-t", "--tick", help = "time in seconds that a tick in the schedular lasts", default = 1, type = float)
  parser.add_argument("-s", "--starttime", help = "start time for scheduler <YYYY-MM-DD HH:MM:SS>", type = str)
  parser.add_argument("-e", "--endtime", help = "end time for scheduler <YYYY-MM-DD HH:MM:SS>",type = str, default = None)
  parser.add_argument("-i", "--interval", help = "multiplier for scheduler tick", type = float, default = 1)
  parser.add_argument("-D", "--debug", help="debug level", default = "INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"])
  parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
  
  # Windows does not have Daemonize package so disallow
  if platform.system() != "Windows":
    parser.add_argument("-d", "--daemon", help="run as a background process", action="store_true")


  args = parser.parse_args()
  
  conf.tick = args.tick
  conf.interval = args.interval
  
  if args.starttime != None:
    conf.now = datetime.datetime.strptime(args.starttime, "%Y-%m-%d %H:%M:%S").timestamp()
  else:
    conf.now = datetime.datetime.now().timestamp()
    
  if args.endtime != None:
    conf.endtime = datetime.datetime.strptime(args.endtime, "%Y-%m-%d %H:%M:%S")
  
  if conf.tick != 1 or conf.interval != 1 or args.starttime != None:
    conf.realtime = False
  
  config_file = args.config

  
  if config_file == None:
    config_file = find_path("appdaemon.cfg")
  
  if platform.system() != "Windows":
    isdaemon = args.daemon
  else:
    isdaemon = False

  # Read Config File

  config = configparser.ConfigParser()
  config.read_file(open(config_file))

  assert "AppDaemon" in config, "[AppDaemon] section required in {}".format(config_file)

  conf.config = config
  conf.ha_url = config['AppDaemon']['ha_url']
  conf.ha_key = config['AppDaemon'].get('ha_key', "")
  conf.logfile = config['AppDaemon'].get("logfile")
  conf.errorfile = config['AppDaemon'].get("errorfile")
  conf.app_dir = config['AppDaemon'].get("app_dir")
  conf.threads = int(config['AppDaemon']['threads'])
  conf.latitude = float(config['AppDaemon']['latitude'])
  conf.longitude = float(config['AppDaemon']['longitude'])
  conf.elevation = float(config['AppDaemon']['elevation'])
  conf.timezone = config['AppDaemon'].get("timezone")
  conf.time_zone = config['AppDaemon'].get("time_zone")
  conf.certpath = config['AppDaemon'].get("cert_path")
  
  if conf.timezone == None and conf.time_zone == None:
    raise KeyError("time_zone")

  if conf.time_zone == None:
    conf.time_zone = conf.timezone

  # Use the supplied timezone
  os.environ['TZ'] = conf.time_zone
  
  if conf.logfile == None:
    conf.logfile = "STDOUT"

  if conf.errorfile == None:
    conf.errorfile = "STDERR"
   
  if isdaemon and (conf.logfile == "STDOUT" or conf.errorfile == "STDERR" or conf.logfile == "STDERR" or conf.errorfile == "STDOUT"):
    raise ValueError("STDOUT and STDERR not allowed with -d")
    
  # Setup Logging

  conf.logger = logging.getLogger("log1")
  numeric_level = getattr(logging, args.debug, None)
  conf.logger.setLevel(numeric_level)
  conf.logger.propagate = False
  #formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

  # Send to file if we are daemonizing, else send to console
  
  if conf.logfile != "STDOUT":
    fh = RotatingFileHandler(conf.logfile, maxBytes=1000000, backupCount=3)
    fh.setLevel(numeric_level)
    #fh.setFormatter(formatter)
    conf.logger.addHandler(fh)
  else:
    # Default for StreamHandler() is sys.stderr
    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setLevel(numeric_level)
    #ch.setFormatter(formatter)
    conf.logger.addHandler(ch)

  # Setup compile output

  conf.error = logging.getLogger("log2")
  numeric_level = getattr(logging, args.debug, None)
  conf.error.setLevel(numeric_level)
  conf.error.propagate = False
  #formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

  if conf.errorfile != "STDERR":
    efh = RotatingFileHandler(conf.errorfile, maxBytes=1000000, backupCount=3)
  else:
    efh = logging.StreamHandler()

  efh.setLevel(numeric_level)
  #efh.setFormatter(formatter)
  conf.error.addHandler(efh)

  # Now we have logging, warn about timezone
  if conf.timezone != None:
    ha.log(conf.logger, "WARNING", "'timezone' directive is deprecated, please use time_zone instead")

  
  init_sun()

  config_file_modified = os.path.getmtime(config_file)

  # Add appdir  and subdirs to path
  if conf.app_dir == None:
    conf.app_dir = find_path("apps")
  
  for root, subdirs, files in os.walk(conf.app_dir):
    if root[-11:] != "__pycache__":
      sys.path.insert(0, root)
  

  # Start main loop

  ha.log(conf.logger, "INFO", "AppDaemon Version {} starting".format(__version__))
  
  if isdaemon:
    keep_fds = [fh.stream.fileno(), efh.stream.fileno()]
    pid = args.pidfile
    daemon = Daemonize(app="appdaemon", pid=pid, action=run, keep_fds=keep_fds)
    daemon.start()
    while True:
      time.sleep(1)
  else:
    run()
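
The example above sends the "log1" logger either to a RotatingFileHandler or, when the configured destination is the sentinel value "STDOUT", to a StreamHandler bound to sys.stdout. A minimal, self-contained sketch of that pattern (the setup_logger name and the "STDOUT" sentinel are illustrative, not part of AppDaemon's public API):

import logging
import sys
from logging.handlers import RotatingFileHandler

def setup_logger(logfile="STDOUT", level=logging.INFO):
    # Route records to a rotating file, or to sys.stdout when the
    # destination is the special value "STDOUT".
    logger = logging.getLogger("example")
    logger.setLevel(level)
    logger.propagate = False
    if logfile != "STDOUT":
        handler = RotatingFileHandler(logfile, maxBytes=1000000, backupCount=3)
    else:
        # StreamHandler defaults to sys.stderr, so pass sys.stdout explicitly
        handler = logging.StreamHandler(stream=sys.stdout)
    handler.setLevel(level)
    logger.addHandler(handler)
    return logger

setup_logger().info("logging to stdout")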

Example 109

Project: bagpipe-bgp Source File: peer.py
	def _run (self,max_wait_open=10.0):
		try:
			if self.supervisor.processes.broken(self.neighbor.peer_address):
				# XXX: we should perhaps try to restart the process ??
				raise Failure('ExaBGP lost the helper process for this peer - peer down')

			self.bgp = Protocol(self)
			self.bgp.connect()

			self._reset_skip()

			_open = self.bgp.new_open(self._restarted,self._asn4)
			logger.message(self.me('>> %s' % _open))
			yield None

			start = time.time()
			while True:
				self.open = self.bgp.read_open(_open,self.neighbor.peer_address.ip)
				if time.time() - start > max_wait_open:
					logger.message(self.me('Waited for an OPEN for too long - killing the session'))
					raise Notify(1,1,'The client took over %s seconds to send the OPEN, closing' % str(max_wait_open))
				# OPEN or NOP
				if self.open.TYPE == NOP.TYPE:
					yield None
					continue
				# This test is already done in read_open
				#if self.open.TYPE != Open.TYPE:
				#	raise Notify(5,1,'We are expecting an OPEN message')
				logger.message(self.me('<< %s' % self.open))
				if not self.open.capabilities.announced(Capabilities.FOUR_BYTES_ASN) and _open.asn.asn4():
					self._asn4 = False
					raise Notify(2,0,'peer does not speak ASN4 - restarting in compatibility mode')
				if _open.capabilities.announced(Capabilities.MULTISESSION_BGP):
					if not self.open.capabilities.announced(Capabilities.MULTISESSION_BGP):
						raise Notify(2,7,'peer does not support MULTISESSION')
					local_sessionid = set(_open.capabilities[Capabilities.MULTISESSION_BGP])
					remote_sessionid = self.open.capabilities[Capabilities.MULTISESSION_BGP]
					# Empty capability is the same as MultiProtocol (which is what we send)
					if not remote_sessionid:
						remote_sessionid.append(Capabilities.MULTIPROTOCOL_EXTENSIONS)
					remote_sessionid = set(remote_sessionid)
					# As we only send one MP per session, if the matching fails, we have nothing in common
					if local_sessionid.intersection(remote_sessionid) != local_sessionid:
						raise Notify(2,8,'peer did not reply with the sessionid we sent')
					# We can not collide due to the way we generate the configuration
				yield None
				break

			message = self.bgp.new_keepalive(force=True)
			logger.message(self.me('>> KEEPALIVE (OPENCONFIRM)'))
			yield True

			while True:
				message = self.bgp.read_keepalive()
				# KEEPALIVE or NOP
				if message.TYPE == KeepAlive.TYPE:
					logger.message(self.me('<< KEEPALIVE (ESTABLISHED)'))
					break
				yield None

			try:
				for name in self.supervisor.processes.notify(self.neighbor.peer_address):
					self.supervisor.processes.write(name,'neighbor %s up\n' % self.neighbor.peer_address)
			except ProcessError:
				# Can not find any better error code than 6,0 !
				raise Notify(6,0,'ExaBGP Internal error, sorry.')

			count = 0
			for count in self.bgp.new_announce():
				yield True
			self._updates = self.bgp.buffered()
			if count:
				logger.message(self.me('>> %d UPDATE(s)' % count))

			eor = False
			if self.neighbor.graceful_restart and \
				self.open.capabilities.announced(Capabilities.MULTIPROTOCOL_EXTENSIONS) and \
				self.open.capabilities.announced(Capabilities.GRACEFUL_RESTART):

				families = []
				for family in self.open.capabilities[Capabilities.GRACEFUL_RESTART].families():
					if family in self.neighbor.families():
						families.append(family)
				self.bgp.new_eors(families)
				if families:
					eor = True
					logger.message(self.me('>> EOR %s' % ', '.join(['%s %s' % (str(afi),str(safi)) for (afi,safi) in families])))

			if not eor:
				# If we are not sending an EOR, send a keepalive as soon as we have finished
				# so the other routers know that we have no (more) routes to send ...
				# (is that behaviour documented somewhere ??)
				c,k = self.bgp.new_keepalive(True)
				if k: logger.message(self.me('>> KEEPALIVE (no more UPDATE and no EOR)'))

			seen_update = False
			while self._running:
				self._now = time.time()
				if self._now > self._next_info:
					self._next_info = self._now + self.update_time
					display_update = True
				else:
					display_update = False

				c,k = self.bgp.new_keepalive()
				if k: logger.message(self.me('>> KEEPALIVE'))

				if display_update:
					logger.timers(self.me('Sending Timer %d second(s) left' % c))

				message = self.bgp.read_message()
				# let's read if we have keepalive before doing the timer check
				c = self.bgp.check_keepalive()

				if display_update:
					logger.timers(self.me('Receive Timer %d second(s) left' % c))

				if message.TYPE == KeepAlive.TYPE:
					logger.message(self.me('<< KEEPALIVE'))
				elif message.TYPE == Update.TYPE:
					seen_update = True
					self._received_routes.extend(message.routes)
					if message.routes:
						logger.message(self.me('<< UPDATE'))
						self._route_parsed += len(message.routes)
						if self._route_parsed:
							for route in message.routes:
								logger.routes(LazyFormat(self.me(''),str,route))
					else:
						logger.message(self.me('<< UPDATE (not parsed)'))
				elif message.TYPE not in (NOP.TYPE,):
					 logger.message(self.me('<< %d' % ord(message.TYPE)))

				if seen_update and display_update:
					logger.supervisor(self.me('processed %d routes' % self._route_parsed))
					seen_update = False

				if self._updates:
					count = 0
					for count in self.bgp.new_update():
						yield True
					logger.message(self.me('>> UPDATE (%d)' % count))
					self._updates = self.bgp.buffered()

				yield None

			if self.neighbor.graceful_restart and self.open.capabilities.announced(Capabilities.GRACEFUL_RESTART):
				logger.warning('Closing the connection without notification')
				self.bgp.close()
				return

			# User closing the connection
			raise Notify(6,3)
		except NotConnected, e:
			logger.warning('we can not connect to the peer %s' % str(e))
			self._more_skip()
			try:
				self.bgp.close()
			except Failure:
				pass
			return
		except Notify,e:
			logger.warning(self.me('Sending Notification (%d,%d) [%s] %s' % (e.code,e.subcode,str(e),e.data)))
			try:
				self.bgp.new_notification(e)
			except Failure:
				pass
			try:
				self.bgp.close()
			except Failure:
				pass
			return
		except Notification, e:
			logger.warning(self.me('Received Notification (%d,%d) from peer %s' % (e.code,e.subcode,str(e))))
			try:
				self.bgp.close()
			except Failure:
				pass
			return
		except Failure, e:
			logger.warning(self.me(str(e)),'connection')
			self._more_skip()
			try:
				self.bgp.close()
			except Failure:
				pass
			return
		except Exception, e:
			logger.warning(self.me('UNHANDLED EXCEPTION'))
			self._more_skip()
			if self.debug_trace:
				# should really go to syslog
				traceback.print_exc(file=sys.stdout)
				raise
			else:
				logger.warning(self.me(str(e)))
			if self.bgp: self.bgp.close()
			return
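
When debug tracing is enabled, the peer loop above dumps the full stack trace to sys.stdout before re-raising. A small sketch of that idiom (the debug_trace flag here is just an illustrative local variable, not ExaBGP's configuration):

import sys
import traceback

debug_trace = True

try:
    1 / 0
except Exception as e:
    if debug_trace:
        # Print the full traceback to stdout instead of the default stderr
        traceback.print_exc(file=sys.stdout)
    else:
        print(str(e))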

Example 110

Project: kay Source File: frontend.py
    def extract(self, argv):
        """Subcommand for extracting messages from source files and generating
        a POT file.

        :param argv: the command arguments
        """
        parser = OptionParser(usage=self.usage % ('extract', 'dir1 <dir2> ...'),
                              description=self.commands['extract'])
        parser.add_option('--charset', dest='charset',
                          help='charset to use in the output (default '
                               '"%default")')
        parser.add_option('-k', '--keyword', dest='keywords', action='append',
                          help='keywords to look for in addition to the '
                               'defaults. You can specify multiple -k flags on '
                               'the command line.')
        parser.add_option('--no-default-keywords', dest='no_default_keywords',
                          action='store_true',
                          help="do not include the default keywords")
        parser.add_option('--mapping', '-F', dest='mapping_file',
                          help='path to the extraction mapping file')
        parser.add_option('--no-location', dest='no_location',
                          action='store_true',
                          help='do not include location comments with filename '
                               'and line number')
        parser.add_option('--omit-header', dest='omit_header',
                          action='store_true',
                          help='do not include msgid "" entry in header')
        parser.add_option('-o', '--output', dest='output',
                          help='path to the output POT file')
        parser.add_option('-w', '--width', dest='width', type='int',
                          help="set output line width (default 76)")
        parser.add_option('--no-wrap', dest='no_wrap', action = 'store_true',
                          help='do not break long message lines, longer than '
                               'the output line width, into several lines')
        parser.add_option('--sort-output', dest='sort_output',
                          action='store_true',
                          help='generate sorted output (default False)')
        parser.add_option('--sort-by-file', dest='sort_by_file',
                          action='store_true',
                          help='sort output by file location (default False)')
        parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address',
                          metavar='EMAIL@ADDRESS',
                          help='set report address for msgid')
        parser.add_option('--copyright-holder', dest='copyright_holder',
                          help='set copyright holder in output')
        parser.add_option('--project', dest='project',
                          help='set project name in output')
        parser.add_option('--version', dest='version',
                          help='set project version in output')
        parser.add_option('--add-comments', '-c', dest='comment_tags',
                          metavar='TAG', action='append',
                          help='place comment block with TAG (or those '
                               'preceding keyword lines) in output file. One '
                               'TAG per argument call')
        parser.add_option('--strip-comment-tags', '-s',
                          dest='strip_comment_tags', action='store_true',
                          help='Strip the comment tags from the comments.')

        parser.set_defaults(charset='utf-8', keywords=[],
                            no_default_keywords=False, no_location=False,
                            omit_header = False, width=None, no_wrap=False,
                            sort_output=False, sort_by_file=False,
                            comment_tags=[], strip_comment_tags=False)
        options, args = parser.parse_args(argv)
        if not args:
            parser.error('incorrect number of arguments')

        if options.output not in (None, '-'):
            outfile = open(options.output, 'w')
        else:
            outfile = sys.stdout

        keywords = DEFAULT_KEYWORDS.copy()
        if options.no_default_keywords:
            if not options.keywords:
                parser.error('you must specify new keywords if you disable the '
                             'default ones')
            keywords = {}
        if options.keywords:
            keywords.update(parse_keywords(options.keywords))

        if options.mapping_file:
            fileobj = open(options.mapping_file, 'U')
            try:
                method_map, options_map = parse_mapping(fileobj)
            finally:
                fileobj.close()
        else:
            method_map = DEFAULT_MAPPING
            options_map = {}

        if options.width and options.no_wrap:
            parser.error("'--no-wrap' and '--width' are mutually exclusive.")
        elif not options.width and not options.no_wrap:
            options.width = 76

        if options.sort_output and options.sort_by_file:
            parser.error("'--sort-output' and '--sort-by-file' are mutually "
                         "exclusive")

        try:
            catalog = Catalog(project=options.project,
                              version=options.version,
                              msgid_bugs_address=options.msgid_bugs_address,
                              copyright_holder=options.copyright_holder,
                              charset=options.charset)

            for dirname in args:
                if not os.path.isdir(dirname):
                    parser.error('%r is not a directory' % dirname)

                def callback(filename, method, options):
                    if method == 'ignore':
                        return
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    optstr = ''
                    if options:
                        optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
                                                      k, v in options.items()])
                    self.log.info('extracting messages from %s%s', filepath,
                                  optstr)

                extracted = extract_from_dir(dirname, method_map, options_map,
                                             keywords, options.comment_tags,
                                             callback=callback,
                                             strip_comment_tags=
                                                options.strip_comment_tags)
                for filename, lineno, message, comments in extracted:
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    catalog.add(message, None, [(filepath, lineno)],
                                auto_comments=comments)

            if options.output not in (None, '-'):
                self.log.info('writing PO template file to %s' % options.output)
            write_po(outfile, catalog, width=options.width,
                     no_location=options.no_location,
                     omit_header=options.omit_header,
                     sort_output=options.sort_output,
                     sort_by_file=options.sort_by_file)
        finally:
            if options.output:
                outfile.close()
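
The extract subcommand writes the catalog either to the file named by --output or, for None/'-', straight to sys.stdout, and only closes the handle when it opened a real file. A compact sketch of that convention (write_lines is a hypothetical stand-in for babel's write_po):

import sys

def write_lines(outfile, lines):
    # Stand-in for a real writer such as write_po()
    for line in lines:
        outfile.write(line + "\n")

def emit(lines, output=None):
    # '-' and None both mean "write to standard output"
    outfile = open(output, "w") if output not in (None, "-") else sys.stdout
    try:
        write_lines(outfile, lines)
    finally:
        # Never close sys.stdout; only close files we opened ourselves
        if outfile is not sys.stdout:
            outfile.close()

emit(['msgid ""', 'msgstr ""'])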

Example 111

Project: btrfs-sxbackup Source File: __main__.py
Function: main
def main():
    # Parse arguments
    parser = ArgumentParser(prog=_APP_NAME)
    parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=False,
                        help='do not log to stdout')
    parser.add_argument('--version', action='version', version='%s v%s' % (_APP_NAME, __version__))
    parser.add_argument('-v', dest='verbosity', action='count',
                        help='can be specified multiple times to increase verbosity')

    subparsers = parser.add_subparsers()
    subparsers.required = True
    subparsers.dest = 'command'

    # Reusable options
    compress_args = ['-c', '--compress']
    compress_kwargs = {'action': 'store_true',
                       'help': 'enables compression during transmission. Requires lzop to be installed on both source'
                               ' and destination',
                       'default': None}

    source_retention_args = ['-sr', '--source-retention']
    source_retention_kwargs = {'type': str,
                               'default': None,
                               'help': 'expression defining which source snapshots to retain/cleanup.'
                                       ' can be a static number (of backups) or more complex expression like'
                                       ' "1d:4/d, 1w:daily, 2m:none" literally translating to: "1 day from now keep'
                                       ' 4 backups a day, 1 week from now keep daily backups,'
                                       ' 2 months from now keep none"'}

    destination_retention_args = ['-dr', '--destination-retention']
    destination_retention_kwargs = {'type': str,
                                    'default': None,
                                    'help': 'expression defining which destination snapshots to retain/cleanup.'
                                            ' can be a static number (of backups) or more complex'
                                            ' expression (see --source-retention argument)'}

    subvolumes_args = ['subvolumes']
    subvolumes_kwargs = {'type': str,
                         'nargs': '+',
                         'metavar': 'subvolume',
                         'help': 'backup job source or destination subvolume. local path or SSH url'}

    # Initialize command cmdline params
    p_init = subparsers.add_parser(_CMD_INIT, help='initialize backup job')
    p_init.add_argument('source_subvolume', type=str, metavar='source-subvolume',
                        help='source subvolume to backup. local path or ssh url')
    p_init.add_argument('destination_subvolume', type=str, metavar='destination-subvolume', nargs='?', default=None,
                        help='optional destination subvolume receiving backup snapshots. local path or ssh url')
    p_init.add_argument(*source_retention_args, **source_retention_kwargs)
    p_init.add_argument(*destination_retention_args, **destination_retention_kwargs)
    p_init.add_argument(*compress_args, **compress_kwargs)

    p_destroy = subparsers.add_parser(_CMD_DESTROY, help='destroy backup job by removing configuration files from source'
                                                         ' and destination. backup snapshots will be kept on both sides'
                                                         ' by default.')
    p_destroy.add_argument(*subvolumes_args, **subvolumes_kwargs)
    p_destroy.add_argument('--purge', action='store_true', help='removes all backup snapshots from source and destination')

    # Update command cmdline params
    p_update = subparsers.add_parser(_CMD_UPDATE, help='update backup job')
    p_update.add_argument(*subvolumes_args, **subvolumes_kwargs)
    p_update.add_argument(*source_retention_args, **source_retention_kwargs)
    p_update.add_argument(*destination_retention_args, **destination_retention_kwargs)
    p_update.add_argument(*compress_args, **compress_kwargs)
    p_update.add_argument('-nc', '--no-compress', action='store_true', help='disable compression during transmission')

    # Run command cmdline params
    p_run = subparsers.add_parser(_CMD_RUN, help='run backup job')
    p_run.add_argument(*subvolumes_args, **subvolumes_kwargs)
    p_run.add_argument('-m', '--mail', type=str, nargs='?', const='',
                       help='enables email notifications. If an email address is given, it overrides the'
                            ' default email-recipient setting in /etc/btrfs-sxbackup.conf')
    p_run.add_argument('-li', '--log-ident', dest='log_ident', type=str, default=None,
                       help='log ident used for syslog logging, defaults to script name')

    # Info command cmdline params
    p_info = subparsers.add_parser(_CMD_INFO, help='backup job info')
    p_info.add_argument(*subvolumes_args, **subvolumes_kwargs)

    # Purge command cmdline params
    p_purge = subparsers.add_parser(_CMD_PURGE, help="purge backups according to retention expressions")
    p_purge.add_argument(*subvolumes_args, **subvolumes_kwargs)
    purge_source_retention_kwargs = source_retention_kwargs.copy()
    purge_destination_retention_kwargs = destination_retention_kwargs.copy()
    purge_source_retention_kwargs['help'] = 'Optionally override %s' % purge_source_retention_kwargs['help']
    purge_destination_retention_kwargs['help'] = 'Optionally override %s' % purge_destination_retention_kwargs['help']
    p_purge.add_argument(*source_retention_args, **purge_source_retention_kwargs)
    p_purge.add_argument(*destination_retention_args, **purge_destination_retention_kwargs)

    # Transfer
    p_transfer = subparsers.add_parser(_CMD_TRANSFER, help='transfer snapshot')
    p_transfer.add_argument('source_subvolume', type=str, metavar='source-subvolume',
                            help='source subvolume to transfer. local path or ssh url')
    p_transfer.add_argument('destination_subvolume', type=str, metavar='destination-subvolume',
                            help='destination subvolume. local path or ssh url')
    p_transfer.add_argument(*compress_args, **compress_kwargs)

    # Initialize logging
    args = parser.parse_args()

    # Read global configuration
    Configuration.instance().read()

    logger = logging.getLogger()

    if not args.quiet:
        log_std_handler = logging.StreamHandler(sys.stdout)
        log_std_handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
        logger.addHandler(log_std_handler)

    log_memory_handler = None
    log_trace = False
    email_recipient = None

    def handle_exception(ex: Exception):
        """
        Exception handler
        :param ex:
        :return:
        """

        # Log exception message
        if len(str(ex)) > 0:
            logger.error('%s' % str(ex))

        if isinstance(ex, CalledProcessError):
            if ex.output:
                output = ex.output.decode().strip()
                if len(output) > 0:
                    logger.error('%s' % output)

        if log_trace:
            # Log stack trace
            logger.error(traceback.format_exc())

        # Email notification
        if email_recipient:
            try:
                # Format message and send
                msg = '\n'.join(map(lambda log_record: log_memory_handler.formatter.format(log_record),
                                    log_memory_handler.buffer))
                mail.send(email_recipient, '%s FAILED' % _APP_NAME, msg)
            except Exception as ex:
                logger.error(str(ex))

    # Syslog handler
    if args.command == _CMD_RUN:
        log_syslog_handler = logging.handlers.SysLogHandler('/dev/log')
        log_syslog_handler.setFormatter(logging.Formatter(_APP_NAME + '[%(process)d] %(levelname)s %(message)s'))
        logger.addHandler(log_syslog_handler)

        # Log ident support
        if args.log_ident:
            log_ident = args.log_ident if args.log_ident else Configuration.instance().log_ident
            if log_ident:
                log_syslog_handler.ident = log_ident + ' '

        # Mail notification support
        if args.mail is not None:
            email_recipient = args.mail if len(args.mail) > 0 else Configuration.instance().email_recipient

            # Memory handler will buffer output for sending via mail later if needed
            log_memory_handler = logging.handlers.MemoryHandler(capacity=-1)
            log_memory_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
            logger.addHandler(log_memory_handler)

    if args.verbosity and args.verbosity >= 1:
        logger.setLevel(logging.DEBUG)
        log_trace = True
    else:
        logger.setLevel(logging.INFO)
    logger.info('%s v%s' % (_APP_NAME, __version__))

    exitcode = 0

    try:
        if args.command == _CMD_RUN:
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.run()
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_INIT:
            source_retention = RetentionExpression(args.source_retention) if args.source_retention else None
            destination_retention = RetentionExpression(args.destination_retention) if args.destination_retention else None
            job = Job.init(source_url=urllib.parse.urlsplit(args.source_subvolume),
                           source_retention=source_retention,
                           dest_url=urllib.parse.urlsplit(args.destination_subvolume) if args.destination_subvolume
                           else None,
                           dest_retention=destination_retention,
                           compress=args.compress)

        elif args.command == _CMD_UPDATE:
            source_retention = RetentionExpression(args.source_retention) if args.source_retention else None
            dest_retention = RetentionExpression(args.destination_retention) if args.destination_retention else None
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.update(source_retention=source_retention,
                               dest_retention=dest_retention,
                               compress=args.compress if args.compress else
                               not args.no_compress if args.no_compress else
                               None)
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_DESTROY:
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.destroy(purge=args.purge)
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_INFO:
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume), raise_errors=False)
                    job.print_info()
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_PURGE:
            source_retention = RetentionExpression(args.source_retention) if args.source_retention else None
            dest_retention = RetentionExpression(args.destination_retention) if args.destination_retention else None
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.purge(source_retention=source_retention, dest_retention=dest_retention)
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_TRANSFER:
            source = Location(urllib.parse.urlsplit(args.source_subvolume))
            destination = Location(urllib.parse.urlsplit(args.destination_subvolume))
            source.transfer_btrfs_snapshot(destination, compress=args.compress)

    except SystemExit as e:
        if e.code != 0:
            raise

    except KeyboardInterrupt as k:
        exitcode = 1

    except Exception as e:
        handle_exception(e)
        exitcode = 1

    exit(exitcode)
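
btrfs-sxbackup adds a StreamHandler bound to sys.stdout to the root logger unless -q/--quiet is given. The same idea in isolation (the option names mirror the example but the surrounding script is generic):

import logging
import sys
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("-q", "--quiet", action="store_true", help="do not log to stdout")
args = parser.parse_args([])  # parse an empty argv for demonstration

logger = logging.getLogger()
logger.setLevel(logging.INFO)
if not args.quiet:
    # Console output goes to stdout rather than the StreamHandler default (stderr)
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
    logger.addHandler(handler)

logger.info("console logging enabled")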

Example 112

Project: nsec3map Source File: map.py
def main(argv):
    log.logger = log.Logger()
    try:
        (options, nslist, zone) = parse_arguments(argv)
    except N3MapError, e:
        log.fatal_exit(2, e)
    output_rrfile = None
    chain = None
    label_counter = None
    walker = None
    process_pool = None
    hash_queues = None
    if options['progress']:
        loglevel = log.logger.loglevel
        log.logger = log.ProgressLineLogger()
        log.logger.loglevel = loglevel

    try:
        stats = {}
        options['timeout'] /= 1000.0
        qprovider = queryprovider.QueryProvider(nslist,
                timeout=options['timeout'], max_retries=options['max_retries'], 
                query_interval = options['query_interval'], stats=stats)

        if options['soa_check']:
            n3map.walker.check_soa(zone, qprovider)

        if options['zone_type'] == 'auto':
            options['zone_type'] = n3map.walker.detect_dnssec_type(zone,
                    qprovider)

        if options['zone_type'] == 'nsec3':
            (hash_queues, process_pool) = prehash.create_prehash_pool(
                options['processes'], options['queue_element_size'],
                options['use_openssl'])
            if options['predict']:
                proc,pipe = create_zone_predictor()
                predictor = (proc,pipe)
            else:
                predictor = None


        if options['input'] is not None:
            records_file = None
            try:
                records_file = rrfile.open_input_rrfile(options['input'])
                chain = []
                if options['zone_type'] == 'nsec3':
                    for rr in records_file.nsec3_reader():
                        check_part_of_zone(rr, zone)
                        chain.append(rr)
                    label_counter = records_file.label_counter
                elif options['zone_type'] == 'nsec':
                    for rr in records_file.nsec_reader():
                        check_part_of_zone(rr, zone)
                        chain.append(rr)
            except IOError, e:
                log.fatal("unable to read input file: \n", str(e))
            except FileParseError, e:
                log.fatal("unable to parse input file: \n", str(e))
            finally:
                if records_file is not None:
                    records_file.close()
                    records_file = None

        if options['output'] is not None:
            if options['output'] == '-':
                output_rrfile = rrfile.RRFile(sys.stdout)
            else:
                try:
                    output_rrfile =  rrfile.open_output_rrfile(options['output'])
                except IOError, e:
                    log.fatal("unable to open output file: ", str(e))
        

        if options['zone_type'] == 'nsec3':
            if output_rrfile is not None:
                output_rrfile.write_header(zone, "List of NSEC3 RRs")
            if options['label_counter'] is not None:
                label_counter = options['label_counter']
            walker = NSEC3Walker(zone, 
                                 qprovider, 
                                 hash_queues, 
                                 process_pool, 
                                 nsec3_records=[] if chain is None else chain,
                                 ignore_overlapping=options['ignore_overlapping'],
                                 label_counter=label_counter,
                                 output_file=output_rrfile,
                                 stats=stats,
                                 predictor=predictor,
                                 aggressive=options['aggressive'])

        elif options['zone_type'] == 'nsec':
            if output_rrfile is not None:
                output_rrfile.write_header(zone, "List of NSEC RRs")

            if options['query_mode'] == "mixed":
                walker = NSECWalkerMixed(zone,
                                         qprovider,
                                         options['query_chars'] == 'ldh',
                                         nsec_chain=chain,
                                         startname=options['start'],
                                         endname=options['end'],
                                         stats=stats,
                                         output_file=output_rrfile)
            elif options['query_mode'] == "A":
                walker = NSECWalkerA(zone,
                                     qprovider,
                                     options['query_chars'] == 'ldh',
                                     nsec_chain=chain,
                                     startname=options['start'],
                                     endname=options['end'],
                                     stats=stats,
                                     output_file=output_rrfile)
            else:
                walker = NSECWalkerN(zone,
                                     qprovider,
                                     nsec_chain=chain,
                                     startname=options['start'],
                                     endname=options['end'],
                                     stats=stats,
                                     output_file=output_rrfile)

        if walker is not None:
            starttime = time.time()
            walker.walk()
            elapsed = timedelta(seconds=time.time() - starttime)
            log.info("finished mapping of {0:s} in {1:s}".format( str(zone), str(elapsed)))
        

        if output_rrfile is not None:
            output_rrfile.write_stats(stats)
            
    except N3MapError, e:
        log.fatal(e)
    except IOError, e:
        log.fatal(str(e))
    finally:
        if output_rrfile is not None:
            output_rrfile.close()
    
    return 0
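
nsec3map wraps sys.stdout in its RRFile writer when --output is '-', so the rest of the code can treat stdout and regular files uniformly. A sketch of that wrapper idea (the RecordWriter class and open_writer helper are hypothetical):

import sys

class RecordWriter:
    """Thin wrapper so stdout and disk files share one writing interface."""

    def __init__(self, fileobj):
        self.fileobj = fileobj

    def write_record(self, record):
        self.fileobj.write(record + "\n")

    def close(self):
        # Closing stdout would break later output, so skip it
        if self.fileobj is not sys.stdout:
            self.fileobj.close()

def open_writer(path):
    return RecordWriter(sys.stdout if path == "-" else open(path, "w"))

writer = open_writer("-")
writer.write_record("example.org. 3600 IN NSEC ...")
writer.close()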

Example 113

Project: rtiacquire Source File: rtiacquire.py
    def __init__(self):
        gtk.Window.__init__(self)
        self.connect('destroy', self.destroy_cb)

        self.config_window = None
        self.live_hide_timeout = 0
        self.light_hop_timeout = 0
        self.busy = False

        self.leds = ledmap.Ledmap(os.path.join(source_dir, 'data', 
                                               'led-maps.txt'))

        logging.debug('loaded %d maps', len(self.leds.get_names()))
        for name in self.leds.get_names():
            bytes = self.leds.get_bytes(name)
            logging.debug('%s: %d lights', name, len(bytes))

        # where project directories get written, see RTI cap above
        self.outdir = options.outdir

        self.lights = lights.Lights()

        # try to reset the lights ... if this fails, disable dome controls
        try:
            self.dome_controls = True
            name = self.leds.get_names()[0]
            self.lights.set_triple(self.leds.get_bytes(name)[0])
        except lights.Error as e:
            logging.debug('no lights found, disabling dome controls')
            self.dome_controls = False

        self.vbox = gtk.VBox(False, 0)
        self.add(self.vbox)
        self.vbox.show()

        fixed = gtk.Fixed()
        self.vbox.pack_start(fixed, False)
        fixed.show()

        self.camera = camera.Camera()
        self.preview = preview.Preview(self.camera)
        fixed.put(self.preview, 0, 0)
        self.preview.show()
        self.preview.connect('motion_notify_event', self.preview_motion_cb)

        if options.verbose:
            try:
                config = camera.Config(self.camera) 
                config.prettyprint(sys.stdout, config.get_root_widget())
            except:
                logging.debug("No Camera detected: unable to print config")
        eb = gtk.EventBox()
        fixed.put(eb, 0, 0)
        eb.show()

        self.progress = progress.Progress()
        self.progress.set_size_request(preview_width, -1)
        eb.add(self.progress)

        eb = gtk.EventBox()
        fixed.put(eb, 0, 0)
        eb.show()

        self.info = info.Info()
        self.info.set_size_request(preview_width, -1)
        eb.add(self.info)

        eb = gtk.EventBox()
        fixed.put(eb, 20, 380)
        eb.show()

        self.play_image = gtk.image_new_from_stock(gtk.STOCK_MEDIA_PLAY, 
                        gtk.ICON_SIZE_SMALL_TOOLBAR)
        self.pause_image = gtk.image_new_from_stock(gtk.STOCK_MEDIA_PAUSE, 
                        gtk.ICON_SIZE_SMALL_TOOLBAR)
        self.live = gtk.Button()
        self.live.set_image(self.play_image)
        self.live.set_tooltip_text("Start/stop live preview")
        self.live.connect('clicked', self.live_cb, None)
        eb.add(self.live)
        self.live.show()

        self.toolbar = gtk.HBox(False, 5)
        self.toolbar.set_border_width(3)
        self.vbox.pack_end(self.toolbar)
        self.toolbar.show()

        button = gtk.Button()
        quit_image = gtk.image_new_from_stock(gtk.STOCK_QUIT, 
                        gtk.ICON_SIZE_SMALL_TOOLBAR)
        quit_image.show()
        button.set_tooltip_text("Quit RTIAcquire")
        button.connect('clicked', self.destroy_cb, None)
        button.add(quit_image)
        self.toolbar.pack_end(button, False, False)
        button.show()

        if self.dome_controls:
            self.dome_picker = gtk.combo_box_new_text()
            for name in self.leds.get_names():
                self.dome_picker.append_text(name)
            self.dome_picker.set_active(0)
            self.dome_picker.set_tooltip_text("Select lighting system")
            self.dome_picker.connect('changed', self.dome_picker_cb, None)
            self.toolbar.pack_start(self.dome_picker, False, False)
            self.dome_picker.show()

            self.light_picker = gtk.SpinButton(climb_rate = 1)
            self.light_picker.set_numeric(True)
            self.light_picker.set_wrap(True)
            self.light_picker.set_increments(1, 1)
            self.light_picker.set_tooltip_text("Pick light")
            self.light_picker_refresh()
            self.light_picker.connect('value_changed', 
                    self.light_picker_cb, None)
            self.toolbar.pack_start(self.light_picker, False, False)
            self.light_picker.show()

        button = gtk.Button()
        menu_image = gtk.image_new_from_stock(gtk.STOCK_PREFERENCES, 
                        gtk.ICON_SIZE_SMALL_TOOLBAR)
        menu_image.show()
        button.set_tooltip_text("Camera settings")
        button.connect('clicked', self.config_cb, None)
        button.add(menu_image)
        self.toolbar.pack_start(button, False, False)
        button.show()

        button = gtk.Button('Focus')
        button.set_tooltip_text("Focus camera automatically")
        button.connect('clicked', self.focus_cb, None)
        self.toolbar.pack_start(button, False, False)
        button.show()

        photo_image = gtk.image_new_from_file(
                os.path.join(source_dir, 'data', 'camera_24.png'))
        photo = gtk.Button()
        photo.set_image(photo_image)
        photo.set_tooltip_text("Take single photo")
        photo.connect('clicked', self.photo_cb, None)
        self.toolbar.pack_start(photo, False, False)
        photo.show()

        if self.dome_controls:
            photo = gtk.Button('RTI Preview')
            photo.set_tooltip_text("Take preview RTI image")
            photo.connect('clicked', self.rti_preview_cb, None)
            self.toolbar.pack_start(photo, False, False)
            photo.show()

            photo = gtk.Button('RTI Capture ...')
            photo.set_tooltip_text("Start full RTI acquisition")
            photo.connect('clicked', self.rti_capture_cb, None)
            self.toolbar.pack_start(photo, False, False)
            photo.show()

        self.info.msg('Welcome to RTI Acquire', 'v1.3, March 2014')

        self.show()
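
In verbose mode the example dumps the camera configuration by handing sys.stdout to a prettyprint method. The same pass-a-stream pattern using the standard library's pprint (purely illustrative; it does not use the camera module):

import pprint
import sys

config = {"iso": 200, "shutter": "1/125", "aperture": "f/8"}

# pprint accepts any writable stream; passing sys.stdout makes the target explicit
pprint.pprint(config, stream=sys.stdout, indent=2)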

Example 114

Project: wok Source File: engine.py
    def __init__(self, output_lvl=1):
        """
        Set up CLI options, logging levels, and start everything off.
        Afterwards, run a dev server if asked to.
        """

        # CLI options
        # -----------
        parser = OptionParser(version='%prog v{0}'.format(wok.version))

        # Add option to initialize an new project
        init_grp = OptionGroup(parser, "Initialize project",
                "Creates a config file and the required directories. ")
        init_grp.add_option('--init', action='store_true',
                dest='initproject',
                help="create a confg file before generating the site")
        init_grp.add_option('--site_title',
                dest='site_title',
                help="configures the site title to the given value")
        parser.add_option_group(init_grp)

        # Add option to run the development server after generating pages
        devserver_grp = OptionGroup(parser, "Development server",
                "Runs a small development server after site generation. "
                "--address and --port will be ignored if --server is absent.")
        devserver_grp.add_option('--server', action='store_true',
                dest='runserver',
                help="run a development server after generating the site")
        devserver_grp.add_option('--address', action='store', dest='address',
                help="specify ADDRESS on which to run development server")
        devserver_grp.add_option('--port', action='store', dest='port',
                type='int',
                help="specify PORT on which to run development server")
        parser.add_option_group(devserver_grp)

        # Options for noisiness level and logging
        logging_grp = OptionGroup(parser, "Logging",
                "By default, log messages will be sent to standard out, "
                "and report only errors and warnings.")
        parser.set_defaults(loglevel=logging.WARNING)
        logging_grp.add_option('-q', '--quiet', action='store_const',
                const=logging.ERROR, dest='loglevel',
                help="be completely quiet, log nothing")
        logging_grp.add_option('--warnings', action='store_const',
                const=logging.WARNING, dest='loglevel',
                help="log warnings in addition to errors")
        logging_grp.add_option('-v', '--verbose', action='store_const',
                const=logging.INFO, dest='loglevel',
                help="log ALL the things!")
        logging_grp.add_option('--debug', action='store_const',
                const=logging.DEBUG, dest='loglevel',
                help="log debugging info in addition to warnings and errors")
        logging_grp.add_option('--log', '-l', dest='logfile',
                help="log to the specified LOGFILE instead of standard out")
        parser.add_option_group(logging_grp)

        cli_options, args = parser.parse_args()

        # Set up logging
        # --------------
        logging_options = {
            'format': '%(levelname)s: %(message)s',
            'level': cli_options.loglevel,
        }
        if cli_options.logfile:
            logging_options['filename'] = cli_options.logfile
        else:
            logging_options['stream'] = sys.stdout

        logging.basicConfig(**logging_options)

        # Init project
        # ------------

        if cli_options.initproject:
            ''' Create the config file and the required directories if the user said to.
            '''
            orig_dir = os.getcwd()
            os.chdir(self.SITE_ROOT)

            # create config

            options = Engine.default_options.copy()

            # read old config if present
            if os.path.isfile('config'):
                with open('config') as f:
                    yaml_config = yaml.load(f)

                if yaml_config:
                    options.update(yaml_config)

            if cli_options.site_title:
                options['site_title'] = cli_options.site_title

            # save new config
            with open('config', 'w') as f:
                yaml.dump(options, f)

            # create required dirs

            required_dirs = [options['content_dir'], options['template_dir']]
            for required_dir in required_dirs:
                if not os.path.isdir(required_dir):
                    os.mkdir(required_dir)

            os.chdir(orig_dir)

        # Action!
        # -------
        self.generate_site()

        # Dev server
        # ----------
        if cli_options.runserver:
            ''' Run the dev server if the user said to, and watch the specified
            directories for changes. The server will regenerate the entire wok
            site if changes are found after every request.
            '''
            output_dir = os.path.join(self.options['server_root'])
            host = '' if cli_options.address is None else cli_options.address
            port = 8000 if cli_options.port is None else cli_options.port
            server = dev_server(serv_dir=output_dir, host=host, port=port,
                dir_mon=True,
                watch_dirs=[
                    self.options['media_dir'],
                    self.options['template_dir'],
                    self.options['content_dir']
                ],
                change_handler=self.generate_site)
            server.run()
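
wok's engine builds the keyword arguments for logging.basicConfig at runtime: a filename when --log is given, otherwise stream=sys.stdout. Reduced to its essentials (logfile is just a local variable standing in for the CLI option):

import logging
import sys

logfile = None  # e.g. the value of a --log option

options = {"format": "%(levelname)s: %(message)s", "level": logging.WARNING}
if logfile:
    options["filename"] = logfile
else:
    # basicConfig would otherwise default to stderr
    options["stream"] = sys.stdout

logging.basicConfig(**options)
logging.warning("logging to %s", logfile if logfile else "stdout")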

Example 115

Project: Nagstamon Source File: Multisite.py
    def _get_status(self):
        """
            Get status from Check_MK Server
        """

        ret = Result()

        # Create URLs for the configured filters
        url_params = ''

        if self.force_authuser:
            url_params += "&force_authuser=1"

        url_params += '&is_host_acknowledged=-1&is_service_acknowledged=-1'
        url_params += '&is_host_notifications_enabled=-1&is_service_notifications_enabled=-1'
        url_params += '&is_host_active_checks_enabled=-1&is_service_active_checks_enabled=-1'
        url_params += '&host_scheduled_downtime_depth=-1&is_in_downtime=-1'

        try:
            response = []
            try:
                response = self._get_url(self.urls['api_hosts'] + url_params)
            except MultisiteError as e:
                if e.terminate:
                    return e.result

            if response == '':
                return Result(result='',
                              error='Login failed',
                              status_code=401)

            for row in response[1:]:
                host= dict(list(zip(copy.deepcopy(response[0]), copy.deepcopy(row))))
                n = {
                    'host':               host['host'],
                    'status':             self.statemap.get(host['host_state'], host['host_state']),
                    'last_check':         host['host_check_age'],
                    'duration':           host['host_state_age'],
                    'status_information': html.unescape(host['host_plugin_output'].replace('\n', ' ')),
                    'attempt':            host['host_attempt'],
                    'site':               host['sitename_plain'],
                    'address':            host['host_address']
                }

                # host objects contain service objects
                if n['host'] not in self.new_hosts:
                    new_host = n['host']
                    self.new_hosts[new_host] = GenericHost()
                    self.new_hosts[new_host].name = n['host']
                    self.new_hosts[new_host].server = self.name
                    self.new_hosts[new_host].status = n['status']
                    self.new_hosts[new_host].last_check = n['last_check']
                    self.new_hosts[new_host].duration = n['duration']
                    self.new_hosts[new_host].attempt = n['attempt']
                    self.new_hosts[new_host].status_information= html.unescape(n['status_information'].replace('\n', ' '))
                    self.new_hosts[new_host].site = n['site']
                    self.new_hosts[new_host].address = n['address']

                    # transition to Check_MK 1.1.10p2
                    if 'host_in_downtime' in host:
                        if host['host_in_downtime'] == 'yes':
                            self.new_hosts[new_host].scheduled_downtime = True
                    if 'host_acknowledged' in host:
                        if host['host_acknowledged'] == 'yes':
                            self.new_hosts[new_host].acknowledged = True
                    if 'host_notifications_enabled' in host:
                        if host['host_notifications_enabled'] == 'no':
                            self.new_hosts[new_host].notifications_disabled = True

                    # hard/soft state for later filter evaluation
                    real_attempt, max_attempt = self.new_hosts[new_host].attempt.split('/')
                    if real_attempt != max_attempt:
                        self.new_hosts[new_host].status_type = 'soft'
                    else:
                        self.new_hosts[new_host].status_type = 'hard'

            del response

        except:
            import traceback
            traceback.print_exc(file=sys.stdout)

            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            return Result(result=result, error=error)

        # Add filters to the url which should only be applied to the service request
        if conf.filter_services_on_unreachable_hosts == True:
            url_params += '&hst2=0'

        # services
        try:
            response = []
            try:
                response = self._get_url(self.urls['api_services'] + url_params)
            except MultisiteError as e:
                if e.terminate:
                    return e.result
                else:
                    response = copy.deepcopy(e.result.content)
                    ret = copy.deepcopy(e.result)

            for row in response[1:]:
                service = dict(list(zip(copy.deepcopy(response[0]), copy.deepcopy(row))))
                n = {
                    'host':               service['host'],
                    'service':            service['service_description'],
                    'status':             self.statemap.get(service['service_state'], service['service_state']),
                    'last_check':         service['svc_check_age'],
                    'duration':           service['svc_state_age'],
                    'attempt':            service['svc_attempt'],
                    'status_information': html.unescape(service['svc_plugin_output'].replace('\n', ' ')),
                    # Check_MK passive services can be re-scheduled by using the Check_MK service
                    'passiveonly':        service['svc_is_active'] == 'no' and not service['svc_check_command'].startswith('check_mk'),
                    'flapping':           service['svc_flapping'] == 'yes',
                    'site':               service['sitename_plain'],
                    'address':            service['host_address'],
                    'command':            service['svc_check_command'],
                }

                # host objects contain service objects
                if n['host'] not in self.new_hosts:
                    self.new_hosts[n['host']] = GenericHost()
                    self.new_hosts[n['host']].name = n['host']
                    self.new_hosts[n['host']].status = 'UP'
                    self.new_hosts[n['host']].site = n['site']
                    self.new_hosts[n['host']].address = n['address']
                # if a service does not exist create its object
                if n['service'] not in self.new_hosts[n['host']].services:
                    new_service = n['service']
                    self.new_hosts[n['host']].services[new_service] = GenericService()
                    self.new_hosts[n['host']].services[new_service].host = n['host']
                    self.new_hosts[n['host']].services[new_service].server = self.name
                    self.new_hosts[n['host']].services[new_service].name = n['service']
                    self.new_hosts[n['host']].services[new_service].status = n['status']
                    self.new_hosts[n['host']].services[new_service].last_check = n['last_check']
                    self.new_hosts[n['host']].services[new_service].duration = n['duration']
                    self.new_hosts[n['host']].services[new_service].attempt = n['attempt']
                    self.new_hosts[n['host']].services[new_service].status_information = n['status_information'].strip()
                    self.new_hosts[n['host']].services[new_service].passiveonly = n['passiveonly']
                    self.new_hosts[n['host']].services[new_service].flapping = n['flapping']
                    self.new_hosts[n['host']].services[new_service].site = n['site']
                    self.new_hosts[n['host']].services[new_service].address = n['address']
                    self.new_hosts[n['host']].services[new_service].command = n['command']

                    # transition to Check_MK 1.1.10p2
                    if 'svc_in_downtime' in service:
                        if service['svc_in_downtime'] == 'yes':
                            self.new_hosts[n['host']].services[new_service].scheduled_downtime = True
                    if 'svc_acknowledged' in service:
                        if service['svc_acknowledged'] == 'yes':
                            self.new_hosts[n['host']].services[new_service].acknowledged = True
                    if 'svc_flapping' in service:
                        if service['svc_flapping'] == 'yes':
                            self.new_hosts[n['host']].services[new_service].flapping = True
                    if 'svc_notifications_enabled' in service:
                        if service['svc_notifications_enabled'] == 'no':
                            self.new_hosts[n['host']].services[new_service].notifications_disabled = True

                    # hard/soft state for later filter evaluation
                    real_attempt, max_attempt = self.new_hosts[n['host']].services[new_service].attempt.split('/')
                    if real_attempt != max_attempt:
                        self.new_hosts[n['host']].services[new_service].status_type = 'soft'
                    else:
                        self.new_hosts[n['host']].services[new_service].status_type = 'hard'

            del response

        except:
            import traceback
            traceback.print_exc(file=sys.stdout)

            # set checking flag back to False
            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            return Result(result=copy.deepcopy(result), error=copy.deepcopy(error))

        del url_params

        return ret

Example 116

Project: coveragepy Source File: summary.py
    def report(self, morfs, outfile=None):
        """Writes a report summarizing coverage statistics per module.

        `outfile` is a file object to write the summary to. It must be opened
        for native strings (bytes on Python 2, Unicode on Python 3).

        """
        file_reporters = self.find_file_reporters(morfs)

        # Prepare the formatting strings, header, and column sorting.
        max_name = max([len(fr.relative_filename()) for fr in file_reporters] + [5])
        fmt_name = u"%%- %ds  " % max_name
        fmt_err = u"%s   %s: %s"
        fmt_skip_covered = u"\n%s file%s skipped due to complete coverage."

        header = (fmt_name % "Name") + u" Stmts   Miss"
        fmt_coverage = fmt_name + u"%6d %6d"
        if self.branches:
            header += u" Branch BrPart"
            fmt_coverage += u" %6d %6d"
        width100 = Numbers.pc_str_width()
        header += u"%*s" % (width100+4, "Cover")
        fmt_coverage += u"%%%ds%%%%" % (width100+3,)
        if self.config.show_missing:
            header += u"   Missing"
            fmt_coverage += u"   %s"
        rule = u"-" * len(header)

        column_order = dict(name=0, stmts=1, miss=2, cover=-1)
        if self.branches:
            column_order.update(dict(branch=3, brpart=4))

        if outfile is None:
            outfile = sys.stdout

        def writeout(line):
            """Write a line to the output, adding a newline."""
            if env.PY2:
                line = line.encode(output_encoding())
            outfile.write(line.rstrip())
            outfile.write("\n")

        # Write the header
        writeout(header)
        writeout(rule)

        # `lines` is a list of pairs, (line text, line values).  The line text
        # is a string that will be printed, and line values is a tuple of
        # sortable values.
        lines = []

        total = Numbers()
        skipped_count = 0

        for fr in file_reporters:
            try:
                analysis = self.coverage._analyze(fr)
                nums = analysis.numbers
                total += nums

                if self.config.skip_covered:
                    # Don't report on 100% files.
                    no_missing_lines = (nums.n_missing == 0)
                    no_missing_branches = (nums.n_partial_branches == 0)
                    if no_missing_lines and no_missing_branches:
                        skipped_count += 1
                        continue

                args = (fr.relative_filename(), nums.n_statements, nums.n_missing)
                if self.branches:
                    args += (nums.n_branches, nums.n_partial_branches)
                args += (nums.pc_covered_str,)
                if self.config.show_missing:
                    missing_fmtd = analysis.missing_formatted()
                    if self.branches:
                        branches_fmtd = analysis.arcs_missing_formatted()
                        if branches_fmtd:
                            if missing_fmtd:
                                missing_fmtd += ", "
                            missing_fmtd += branches_fmtd
                    args += (missing_fmtd,)
                text = fmt_coverage % args
                # Add numeric percent coverage so that sorting makes sense.
                args += (nums.pc_covered,)
                lines.append((text, args))
            except Exception:
                report_it = not self.config.ignore_errors
                if report_it:
                    typ, msg = sys.exc_info()[:2]
                    # NotPython is only raised by PythonFileReporter, which has a
                    # should_be_python() method.
                    if typ is NotPython and not fr.should_be_python():
                        report_it = False
                if report_it:
                    writeout(fmt_err % (fr.relative_filename(), typ.__name__, msg))

        # Sort the lines and write them out.
        if getattr(self.config, 'sort', None):
            position = column_order.get(self.config.sort.lower())
            if position is None:
                raise CoverageException("Invalid sorting option: {0!r}".format(self.config.sort))
            lines.sort(key=lambda l: (l[1][position], l[0]))

        for line in lines:
            writeout(line[0])

        # Write a TOTAL line if we had more than one file.
        if total.n_files > 1:
            writeout(rule)
            args = ("TOTAL", total.n_statements, total.n_missing)
            if self.branches:
                args += (total.n_branches, total.n_partial_branches)
            args += (total.pc_covered_str,)
            if self.config.show_missing:
                args += ("",)
            writeout(fmt_coverage % args)

        # Write other final lines.
        if not total.n_files and not skipped_count:
            raise CoverageException("No data to report.")

        if self.config.skip_covered and skipped_count:
            writeout(fmt_skip_covered % (skipped_count, 's' if skipped_count > 1 else ''))

        return total.n_statements and total.pc_covered
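
Example 116 shows the common optional-output-stream pattern: report() takes an outfile argument and falls back to sys.stdout when the caller passes None, so the same code can print to the terminal or write into any file object. A minimal sketch of that pattern with hypothetical names:

import sys

def write_report(rows, outfile=None):
    # fall back to standard output when no file object is supplied
    if outfile is None:
        outfile = sys.stdout
    for row in rows:
        outfile.write("%s\n" % (row,))

write_report(["Name    Stmts   Miss", "foo.py     10      2"])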

Example 117

Project: bacpypes Source File: constructeddata.py
Function: arrayof
def ArrayOf(klass):
    """Function to return a class that can encode and decode a list of
    some other type."""
    global _array_of_map
    global _array_of_classes, _sequence_of_classes

    # if this has already been built, return the cached one
    if klass in _array_of_map:
        return _array_of_map[klass]

    # no ArrayOf(ArrayOf(...)) allowed
    if klass in _array_of_classes:
        raise TypeError("nested arrays disallowed")
    # no ArrayOf(SequenceOf(...)) allowed
    if klass in _sequence_of_classes:
        raise TypeError("arrays of SequenceOf disallowed")

    # define a generic class for arrays
    @bacpypes_debugging
    class ArrayOf(Array):

        subtype = None

        def __init__(self, value=None):
            if value is None:
                self.value = [0]
            elif isinstance(value, list):
                self.value = [len(value)]
                self.value.extend(value)
            else:
                raise TypeError("invalid constructor datatype")

        def append(self, value):
            if issubclass(self.subtype, Atomic):
                pass
            elif issubclass(self.subtype, AnyAtomic) and not isinstance(value, Atomic):
                raise TypeError("instance of an atomic type required")
            elif not isinstance(value, self.subtype):
                raise TypeError("%s value required" % (self.subtype.__name__,))
            self.value.append(value)
            self.value[0] = len(self.value) - 1

        def __len__(self):
            return self.value[0]

        def __getitem__(self, item):
            # no wrapping index
            if (item < 0) or (item > self.value[0]):
                raise IndexError("index out of range")

            return self.value[item]

        def __setitem__(self, item, value):
            # no wrapping index
            if (item < 1) or (item > self.value[0]):
                raise IndexError("index out of range")

            # special length handling for index 0
            if item == 0:
                if value < self.value[0]:
                    # trim
                    self.value = self.value[0:value + 1]
                elif value > self.value[0]:
                    # extend
                    self.value.extend( [None] * (value - self.value[0]) )
                else:
                    return
                self.value[0] = value
            else:
                self.value[item] = value

        def __delitem__(self, item):
            # no wrapping index
            if (item < 1) or (item > self.value[0]):
                raise IndexError("index out of range")

            # delete the item and update the length
            del self.value[item]
            self.value[0] -= 1

        def index(self, value):
            # only search through values
            for i in range(1, self.value[0] + 1):
                if value == self.value[i]:
                    return i

            # not found
            raise ValueError("%r not in array" % (value,))

        def encode(self, taglist):
            if _debug: ArrayOf._debug("(%r)encode %r", self.__class__.__name__, taglist)

            for value in self.value[1:]:
                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(value)

                    # build a tag and encode the data into it
                    tag = Tag()
                    helper.encode(tag)

                    # now encode the tag
                    taglist.append(tag)
                elif isinstance(value, self.subtype):
                    # it must have its own encoder
                    value.encode(taglist)
                else:
                    raise TypeError("%s must be a %s" % (value, self.subtype.__name__))

        def decode(self, taglist):
            if _debug: ArrayOf._debug("(%r)decode %r", self.__class__.__name__, taglist)

            # start with an empty array
            self.value = [0]

            while len(taglist) != 0:
                tag = taglist.Peek()
                if tag.tagClass == Tag.closingTagClass:
                    break

                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    if _debug: ArrayOf._debug("    - building helper: %r %r", self.subtype, tag)
                    taglist.Pop()

                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(tag)

                    # save the value
                    self.value.append(helper.value)
                else:
                    if _debug: ArrayOf._debug("    - building value: %r", self.subtype)
                    # build an element
                    value = self.subtype()

                    # let it decode itself
                    value.decode(taglist)

                    # save what was built
                    self.value.append(value)

            # update the length
            self.value[0] = len(self.value) - 1

        def encode_item(self, item, taglist):
            if _debug: ArrayOf._debug("(%r)encode_item %r %r", self.__class__.__name__, item, taglist)

            if item == 0:
                # a helper cooperates between the atomic value and the tag
                helper = Unsigned(self.value[0])

                # build a tag and encode the data into it
                tag = Tag()
                helper.encode(tag)

                # now encode the tag
                taglist.append(tag)
            else:
                value = self.value[item]

                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(self.value[item])

                    # build a tag and encode the data into it
                    tag = Tag()
                    helper.encode(tag)

                    # now encode the tag
                    taglist.append(tag)
                elif isinstance(value, self.subtype):
                    # it must have its own encoder
                    value.encode(taglist)
                else:
                    raise TypeError("%s must be a %s" % (value, self.subtype.__name__))

        def decode_item(self, item, taglist):
            if _debug: ArrayOf._debug("(%r)decode_item %r %r", self.__class__.__name__, item, taglist)

            if item == 0:
                # a helper cooperates between the atomic value and the tag
                helper = Unsigned(taglist.Pop())

                # save the value
                self.value = helper.value
            elif issubclass(self.subtype, (Atomic, AnyAtomic)):
                if _debug: ArrayOf._debug("    - building helper: %r", self.subtype)

                # a helper cooperates between the atomic value and the tag
                helper = self.subtype(taglist.Pop())

                # save the value
                self.value = helper.value
            else:
                if _debug: ArrayOf._debug("    - building value: %r", self.subtype)
                # build an element
                value = self.subtype()

                # let it decode itself
                value.decode(taglist)

                # save what was built
                self.value = value

        def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
            try:
                value_list = enumerate(self.value)
            except TypeError:
                file.write("%s(non-sequence) %r\n" % ("    " * indent, self.value))
                return

            for i, value in value_list:
                if i == 0:
                    file.write("%slength = %d\n" % ("    " * indent, value))
                elif issubclass(self.subtype, (Atomic, AnyAtomic)):
                    file.write("%s[%d] = %r\n" % ("    " * indent, i, value))
                elif isinstance(value, self.subtype):
                    file.write("%s[%d]\n" % ("    " * indent, i))
                    value.debug_contents(indent+1, file, _ids)
                else:
                    file.write("%s%s must be a %s" % ("    " * indent, value, self.subtype.__name__))

        def dict_contents(self, use_dict=None, as_class=dict):
            # return arrays as arrays
            mapped_value = []

            for value in self.value:
                if issubclass(self.subtype, Atomic):
                    mapped_value.append(value)              ### ambiguous
                elif issubclass(self.subtype, AnyAtomic):
                    mapped_value.append(value.value)        ### ambiguous
                elif isinstance(value, self.subtype):
                    mapped_value.append(value.dict_contents(as_class=as_class))

            # return what we built
            return mapped_value

    # constrain it to a list of a specific type of item
    setattr(ArrayOf, 'subtype', klass)
    ArrayOf.__name__ = 'ArrayOf' + klass.__name__

    # cache this type
    _array_of_map[klass] = ArrayOf
    _array_of_classes[ArrayOf] = 1

    # return this new type
    return ArrayOf
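
In Example 117, sys.stdout appears as the default value of the file parameter of debug_contents(), so debugging output goes to the terminal unless another writable stream is passed in. A stripped-down sketch of that signature (the Record class and its field are illustrative, not part of bacpypes):

import sys

class Record(object):
    def __init__(self, values):
        self.values = values

    def debug_contents(self, indent=1, file=sys.stdout):
        # default to standard output, but accept any writable file object
        for i, value in enumerate(self.values):
            file.write("%s[%d] = %r\n" % ("    " * indent, i, value))

Record([10, 20, 30]).debug_contents()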

Example 118

Project: openfisca-france Source File: link_ipp_tax_and_benefit_tables_to_parameters.py
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--source-dir', default = 'yaml-clean',
        help = 'path of source directory containing clean IPP YAML files')
    parser.add_argument('-t', '--target', default = 'ipp-tax-and-benefit-tables-to-openfisca-parameters.yaml',
        help = 'path of generated YAML file containing the association between IPP fields and OpenFisca parameters')
    parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
    args = parser.parse_args()
    logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)

    file_system_encoding = sys.getfilesystemencoding()

    ipp_infos_by_value = {}
    for source_dir_encoded, directories_name_encoded, filenames_encoded in os.walk(args.source_dir):
        directories_name_encoded.sort()
        for filename_encoded in sorted(filenames_encoded):
            if not filename_encoded.endswith('.yaml'):
                continue
            filename = filename_encoded.decode(file_system_encoding)
            sheet_name = os.path.splitext(filename)[0]
            source_file_path_encoded = os.path.join(source_dir_encoded, filename_encoded)
            relative_file_path_encoded = source_file_path_encoded[len(args.source_dir):].lstrip(os.sep)
            relative_file_path = relative_file_path_encoded.decode(file_system_encoding)
            if sheet_name.isupper():
                continue
            assert sheet_name.islower(), sheet_name
            log.info(u'Loading file {}'.format(relative_file_path))
            with open(source_file_path_encoded) as source_file:
                data = yaml.load(source_file)
            rows = data.get(u"Valeurs")
            if rows is None:
                log.info(u'  Skipping file {} without "Valeurs"'.format(relative_file_path))
                continue
            for row in rows:
                start = row.get(u"Date d'effet")
                if start is None:
                    for date_name in date_names:
                        start = row.get(date_name)
                        if start is not None:
                            break
                    else:
                        # No date found. Skip row.
                        continue
                elif not isinstance(start, datetime.date):
                    start = start[u"Année Revenus"]

                for name, child in row.iteritems():
                    if name in date_names:
                        continue
                    for path, value in iter_ipp_values(child):
                        if isinstance(value, basestring):
                            split_value = value.split()
                            if len(split_value) == 2 and split_value[1] in (
                                    u'%',
                                    u'AF',  # anciens francs
                                    u'CFA',  # francs CFA
                                    u'COTISATIONS',
                                    u'EUR',
                                    u'FRF',
                                    ):
                                value = float(split_value[0])
                        if isinstance(value, float) and value == int(value):
                            value = int(value)
                        full_path = tuple(relative_file_path.split(os.sep)[:-1]) + (sheet_name, name) + tuple(path)
                        ipp_infos_by_value.setdefault(value, []).append(dict(
                            path = full_path,
                            start = start,
                            ))

#    print yaml.dump(ipp_infos_by_value, allow_unicode = True, default_flow_style = False, indent = 2, width = 120)

    tax_benefit_system = FranceTaxBenefitSystem()

    # print yaml.dump(tax_benefit_system.legislation_json, allow_unicode = True, default_flow_style = False, indent = 2,
    #     width = 120)

#    openfisca_infos_by_value = {}
#    for path, start, value in iter_openfisca_values(tax_benefit_system.legislation_json):
#        openfisca_infos_by_value.setdefault(value, []).append(dict(
#            path = tuple(path),
#            start = start,
#            ))
#    print yaml.dump(openfisca_infos_by_value, allow_unicode = True, default_flow_style = False, indent = 2, width = 120)

#    ipp_count = {}
#    for path, start, value in iter_openfisca_values(tax_benefit_system.legislation_json):
#        ipp_infos = ipp_infos_by_value.get(value)
#        if ipp_infos is None:
#            # OpenFisca parameter doesn't exit in IPP.
#            continue
#        for ipp_info in ipp_infos:
#            if ipp_info['start'] == start:
#                ipp_child = ipp_count
#                ipp_path = ipp_info['path']
#                for name in path:
#                    ipp_child = ipp_child.setdefault(name, {})
#                    ipp_child_count = ipp_child.setdefault('count_by_path', {})
#                    for ipp_index in range(len(ipp_path)):
#                        ipp_sub_path = ipp_path[:ipp_index + 1]
#                        ipp_child_count[ipp_sub_path] = ipp_child_count.get(ipp_sub_path, 0) + 1
#    print yaml.dump(ipp_count, allow_unicode = True, default_flow_style = False, indent = 2, width = 120)

    starts_by_ipp_path_by_openfisca_path = {}
    starts_by_openfisca_path_by_ipp_path = {}
    for path, start, value in iter_openfisca_values(tax_benefit_system.legislation_json):
        ipp_infos = ipp_infos_by_value.get(value)
        if ipp_infos is None:
            # OpenFisca parameter doesn't exist in IPP.
            continue
        same_start_ipp_paths = [
            ipp_info['path']
            for ipp_info in ipp_infos
            if ipp_info['start'] == start
            ]
        if len(same_start_ipp_paths) == 1:
            ipp_path = same_start_ipp_paths[0]
            starts_by_ipp_path_by_openfisca_path.setdefault(tuple(path), {}).setdefault(ipp_path, set()).add(start)
            starts_by_openfisca_path_by_ipp_path.setdefault(ipp_path, {}).setdefault(tuple(path), set()).add(start)

#    for openfisca_path, starts_by_ipp_path in sorted(starts_by_ipp_path_by_openfisca_path.iteritems()):
##        if len(starts_by_ipp_path) == 1:
##            print u'.'.join(openfisca_path), '->', u' / '.join(starts_by_ipp_path.keys()[0])
#        if len(starts_by_ipp_path) > 1:
#            print u'.'.join(openfisca_path), '->', starts_by_ipp_path

#    for ipp_path, starts_by_openfisca_path in sorted(starts_by_openfisca_path_by_ipp_path.iteritems()):
#        if len(starts_by_openfisca_path) == 1:
#            print u' / '.join(ipp_path), '->', u'.'.join(
#                unicode(fragment)
#                for fragment in starts_by_openfisca_path.keys()[0]
#                )
##        if len(starts_by_openfisca_path) > 1:
##            print u' / '.join(ipp_path), '->', u'.'.join(
##                unicode(fragment)
##                for fragment in starts_by_openfisca_path.keys()[0]
##                )

    openfisca_path_by_ipp_tree = collections.OrderedDict()
    for ipp_path, starts_by_openfisca_path in sorted(starts_by_openfisca_path_by_ipp_path.iteritems()):
        openfisca_path_by_ipp_sub_tree = openfisca_path_by_ipp_tree
        for ipp_name in ipp_path[:-1]:
            openfisca_path_by_ipp_sub_tree = openfisca_path_by_ipp_sub_tree.setdefault(ipp_name,
                collections.OrderedDict())
        ipp_name = ipp_path[-1]
        openfisca_path_by_ipp_sub_tree[ipp_name] = [
            u'.'.join(
                unicode(fragment)
                for fragment in openfisca_name
                )
            for openfisca_name in sorted(starts_by_openfisca_path)
            ]
    with open(args.target, 'w') as target_file:
        yaml.dump(openfisca_path_by_ipp_tree, target_file, allow_unicode = True, default_flow_style = False, indent = 2,
            width = 120)

    return 0
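
Example 118 touches sys.stdout only once, as the stream argument of logging.basicConfig(), which routes log records to standard output instead of the default standard error. A minimal sketch of that call:

import logging
import sys

# send log output to stdout; the level mirrors a --verbose style switch
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.getLogger(__name__).info('messages now go to standard output')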

Example 119

Project: pywebsocket Source File: echo_client.py
def main():
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

    parser = OptionParser()
    # We accept --command_line_flag style flags which is the same as Google
    # gflags in addition to common --command-line-flag style flags.
    parser.add_option('-s', '--server-host', '--server_host',
                      dest='server_host', type='string',
                      default='localhost', help='server host')
    parser.add_option('-p', '--server-port', '--server_port',
                      dest='server_port', type='int',
                      default=_UNDEFINED_PORT, help='server port')
    parser.add_option('-o', '--origin', dest='origin', type='string',
                      default=None, help='origin')
    parser.add_option('-r', '--resource', dest='resource', type='string',
                      default='/echo', help='resource path')
    parser.add_option('-m', '--message', dest='message', type='string',
                      help=('comma-separated messages to send. '
                           '%s will force close the connection from server.' %
                            _GOODBYE_MESSAGE))
    parser.add_option('-q', '--quiet', dest='verbose', action='store_false',
                      default=True, help='suppress messages')
    parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
                      default=False, help='use TLS (wss://). By default, '
                      'it looks for ssl and pyOpenSSL module and uses found '
                      'one. Use --tls-module option to specify which module '
                      'to use')
    parser.add_option('--tls-module', '--tls_module', dest='tls_module',
                      type='choice',
                      choices=[_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
                      help='Use ssl module if "%s" is specified. '
                      'Use pyOpenSSL module if "%s" is specified' %
                      (_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
    parser.add_option('--tls-version', '--tls_version',
                      dest='tls_version',
                      type='string', default=_TLS_VERSION_SSL23,
                      help='TLS/SSL version to use. One of \'' +
                      _TLS_VERSION_SSL23 + '\' (SSL version 2 or 3), \'' +
                      _TLS_VERSION_SSL3 + '\' (SSL version 3), \'' +
                      _TLS_VERSION_TLS1 + '\' (TLS version 1)')
    parser.add_option('--disable-tls-compression', '--disable_tls_compression',
                      dest='disable_tls_compression',
                      action='store_true', default=False,
                      help='Disable TLS compression. Available only when '
                      'pyOpenSSL module is used.')
    parser.add_option('-k', '--socket-timeout', '--socket_timeout',
                      dest='socket_timeout', type='int', default=_TIMEOUT_SEC,
                      help='Timeout(sec) for sockets')
    parser.add_option('--draft75', dest='draft75',
                      action='store_true', default=False,
                      help='Obsolete option. Don\'t use this.')
    parser.add_option('--protocol-version', '--protocol_version',
                      dest='protocol_version',
                      type='string', default=_PROTOCOL_VERSION_HYBI13,
                      help='WebSocket protocol version to use. One of \'' +
                      _PROTOCOL_VERSION_HYBI13 + '\', \'' +
                      _PROTOCOL_VERSION_HYBI08 + '\', \'' +
                      _PROTOCOL_VERSION_HYBI00 + '\'')
    parser.add_option('--version-header', '--version_header',
                      dest='version_header',
                      type='int', default=-1,
                      help='Specify Sec-WebSocket-Version header value')
    parser.add_option('--deflate-frame', '--deflate_frame',
                      dest='deflate_frame',
                      action='store_true', default=False,
                      help='Use the deflate-frame extension.')
    parser.add_option('--use-permessage-deflate', '--use_permessage_deflate',
                      dest='use_permessage_deflate',
                      action='store_true', default=False,
                      help='Use the permessage-deflate extension.')
    parser.add_option('--log-level', '--log_level', type='choice',
                      dest='log_level', default='warn',
                      choices=['debug', 'info', 'warn', 'error', 'critical'],
                      help='Log level.')

    (options, unused_args) = parser.parse_args()

    logging.basicConfig(level=logging.getLevelName(options.log_level.upper()))

    if options.draft75:
        logging.critical('--draft75 option is obsolete.')
        sys.exit(1)

    if options.protocol_version == _PROTOCOL_VERSION_HIXIE75:
        logging.critical(
            'Value %s is obsolete for --protocol_version options' %
            _PROTOCOL_VERSION_HIXIE75)
        sys.exit(1)

    if options.use_tls:
        if options.tls_module is None:
            if _import_ssl():
                options.tls_module = _TLS_BY_STANDARD_MODULE
                logging.debug('Using ssl module')
            elif _import_pyopenssl():
                options.tls_module = _TLS_BY_PYOPENSSL
                logging.debug('Using pyOpenSSL module')
            else:
                logging.critical(
                        'TLS support requires ssl or pyOpenSSL module.')
                sys.exit(1)
        elif options.tls_module == _TLS_BY_STANDARD_MODULE:
            if not _import_ssl():
                logging.critical('ssl module is not available')
                sys.exit(1)
        elif options.tls_module == _TLS_BY_PYOPENSSL:
            if not _import_pyopenssl():
                logging.critical('pyOpenSSL module is not available')
                sys.exit(1)
        else:
            logging.critical('Invalid --tls-module option: %r',
                             options.tls_module)
            sys.exit(1)

        if (options.disable_tls_compression and
            options.tls_module != _TLS_BY_PYOPENSSL):
            logging.critical('You can disable TLS compression only when '
                             'pyOpenSSL module is used.')
            sys.exit(1)
    else:
        if options.tls_module is not None:
            logging.critical('Use --tls-module option only together with '
                             '--use-tls option.')
            sys.exit(1)

        if options.disable_tls_compression:
            logging.critical('Use --disable-tls-compression only together '
                             'with --use-tls option.')
            sys.exit(1)

    # Default port number depends on whether TLS is used.
    if options.server_port == _UNDEFINED_PORT:
        if options.use_tls:
            options.server_port = common.DEFAULT_WEB_SOCKET_SECURE_PORT
        else:
            options.server_port = common.DEFAULT_WEB_SOCKET_PORT

    # optparse doesn't seem to handle non-ascii default values.
    # Set default message here.
    if not options.message:
        options.message = u'Hello,\u65e5\u672c'   # "Japan" in Japanese

    EchoClient(options).run()
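
Example 119 starts by rebinding sys.stdout to codecs.getwriter('utf-8')(sys.stdout), the Python 2 idiom for making sure Unicode messages can be printed regardless of the terminal encoding. On Python 3 the rough counterpart (an assumption, not part of the example) is to rebuild the text layer over the underlying byte stream:

import io
import sys

# Python 3 counterpart of the codecs.getwriter() trick in the example above:
# wrap the raw stdout buffer with an explicit UTF-8 text layer.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', line_buffering=True)
print(u'Hello,\u65e5\u672c')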

Example 120

Project: release-tools Source File: dashboard.py
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--project-list',
        default=governance.PROJECTS_LIST,
        help='a URL pointing to a projects.yaml file, defaults to %(default)s',
    )
    parser.add_argument(
        '--releases-repo',
        default=os.path.expanduser('~/repos/openstack/releases'),
        help='path to local copy of the releases repository',
    )
    parser.add_argument(
        '--format', '-f',
        choices=['csv', 'etherpad'],
        default='csv',
    )
    parser.add_argument(
        'series',
        help='the series name',
    )
    args = parser.parse_args()

    # Load all of the existing deliverable data and determine the most
    # recent version tagged.
    latest_versions = {}
    release_notes = {}
    pat = os.path.join(
        args.releases_repo,
        'deliverables',
        args.series,
        '*.yaml',
    )
    for fn in glob.glob(pat):
        with open(fn, 'r') as f:
            y = yaml.safe_load(f.read())
        deliverable = os.path.basename(fn)[:-5]
        v = y['releases'][-1]['version']
        latest_versions[deliverable] = v
        release_notes[deliverable] = y.get('release-notes')

    team_data = governance.get_team_data()
    teams = {
        n.lower(): governance.Team(n, i)
        for n, i in team_data.items()
    }

    # Organize deliverables by their release model, whether they are
    # managed, and the team that owns them.
    deliverables_by_model = {
        MILESTONE: {},
        INTERMEDIARY: {},
        TRAILING: {},
    }
    for t in teams.values():
        for dn, di in t.deliverables.items():
            for model in deliverables_by_model.keys():
                if model in di.tags:
                    dbm_team = deliverables_by_model[model].setdefault(
                        di.team.name.lower(), [])
                    dbm_team.append(di)
                    break

    # Dump the dashboard data
    if args.format == 'csv':
        writer = csv.writer(sys.stdout)
        writer.writerow(
            ('Release Model',
             'Team',
             'Deliverable Type',
             'Deliverable Name',
             'Pre-RC1',
             'RC1',
             'Branched at',
             'Latest RC',
             'Release Notes',
             'Comments',
             'PTL Nick',
             'PTL Email',
             'IRC Channel')
        )
        for model in [MILESTONE, INTERMEDIARY, TRAILING]:
            short_model = model.rpartition('-')[-1]
            dbm_teams = sorted(deliverables_by_model[model].items())
            for team_name, team_deliverables in dbm_teams:
                team = teams[team_name]
                for d in sorted(team_deliverables, key=lambda d: d.name):
                    writer.writerow(
                        (short_model,
                         team.name.lower(),
                         d.type,
                         d.name,
                         latest_versions.get(d.name, 'not found'),
                         '',  # RC1
                         '',  # Branched at
                         '',  # Latest RC
                         release_notes.get(d.name, ''),  # Release notes
                         '',  # Comments
                         team.data['ptl']['irc'],
                         team.data['ptl']['email'],
                         team.data.get('irc-channel'))
                    )

    else:
        for model in [MILESTONE, INTERMEDIARY, TRAILING]:
            print('{}\n'.format(model))
            dbm_teams = sorted(deliverables_by_model[model].items())
            for team_name, team_deliverables in dbm_teams:
                team = teams[team_name]
                print('  * {}'.format(team_name))
                print('    * PTL: {} - {}'.format(
                    team.data['ptl']['irc'],
                    team.data['ptl']['email'],
                ))
                print('    * IRC: {}'.format(team.data.get('irc-channel', '')))
                print('    * Deliverables')
                for d in sorted(team_deliverables, key=lambda d: d.name):
                    v = latest_versions.get(d.name, 'not found')
                    print('      * {d.name} ({d.type}) [{v}]'.format(d=d, v=v))
                print()
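
When --format csv is selected in Example 120, the script builds csv.writer(sys.stdout) so rows go straight to standard output and can be redirected or piped into a spreadsheet. A minimal sketch (the row values here are invented):

import csv
import sys

writer = csv.writer(sys.stdout)
writer.writerow(('Release Model', 'Team', 'Deliverable Name'))
writer.writerow(('with-milestones', 'nova', 'nova'))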

Example 121

Project: pycopia Source File: test.py
    def test_XHTML(self):
        """Construct an XHTML page. Verify visually."""
        htd = XHTML.new_docuement(dtds.XHTML)
        htd.title = "This is the title."
        htd.add_header(1, 'Main docuement & "stuff"')
        htd.new_para("This is a test. This is text.")
        htd.add_unordered_list(["List line one.", "list line two."])
        BR = htd.get_new_element("Br")
        A = htd.get_new_element("A", href="somelink.html")
        A.add_text("some link")
        p = htd.get_para()
        p.append(A)
        p.add_text(" This is ")
        b = p.bold("bold")
        p.add_text(" text. using ")
        stb = htd.get_new_element("B")
        stb.add_text("bold tags")
        p.text(stb)
        p.add_text(" Dynamic Date: ")
        p.append(XHTML.DynamicNode(thedate))
        rp = str(p)
        htd.append(POM.ASIS(rp))
        # table methods
        t = htd.add_table(border=1)
        t.summary = "This is a test table."
        t.caption("table caption")
        h = t.set_heading(2, "heading col 2")
        h.set_attribute("class", "headerclass")
        t.set_heading(1, "heading col 1")
        t.set_cell(1,1,"row 1, col 1")
        t.set_cell(1,2,"row 2, col 1")
        t.set_cell(2,1,"row 1, col 2")
        t.set_cell(2,2,"row 2, col 2")
        # sections
        div = htd.get_section("section1")
        div.add_header(1, "Div heading.")
        div.new_para("First div para.")
        htd.append(div)
        div2 = div.get_section("section2")
        div2.new_para("Second div para")
        div.append(div2)

        dl = div.add_definition_list()
        dl.add_definitions({"def1":"The definition of 1", 
                        "def2": "The definition of 2"})

        # using the nodemaker object
        NM = htd.nodemaker
        ul = NM("Ul", None, 
                NM("Li", None, "line 1"), 
                NM("Li", None, "line 2"),
                NM("Li", None, "Date: ", NM("code", None, thedate)), # another way to add dynamic node
                )
        htd.append(ul)
        htd.append(NM("JS", None, 'var a = new Array(8);'))
        # using the creator object.
        creator = htd.creator
        parts = creator([("Just", "just/"), "How will this turn out?", ["It is hard to tell.", "Well, not too hard."]])

        htd.add_comment("the name attribute is required for all but submit & reset")
        htd.append(parts)
        f = htd.add_form(action="http://localhost:4001/cgi-bin/testing.py", method="post")

        f.add_textarea("mytextarea", """Default text in the textarea.""") ; f.append(BR)
        f.add_input(type="text", name="mytext", value="mytext text") ; f.append(BR)
        f.add_input(type="button", name="button1", src="button.png", value="Button") ; f.append(BR)
        f.add_input(type="submit", name="submit1", src="submit.png", value="Ok") ; f.append(BR)
        f.add_radiobuttons("radlist", ["one", "two", "three", "four"], vertical=False) ; f.append(BR)
        f.add_checkboxes("checks", ["one", "two", "three", "four"], vertical=True) ; f.append(BR)
        f.add_fileinput(name="myfile", default="/etc/hosts") ; f.append(BR)
        f.add_textinput(name="mytext", label="Enter text") ; f.append(BR)
        f.yes_no("What's it gonna be?")
        f.add_select(["one", "two", ("three", True), "four", 
                       {"agroup": ["group1", "group2"]}], 
                       name="myselect") ; f.append(BR)

        f.add_select({"Group1": Enums("g1one", "g1two", "g1three")+[("g1four", True)],
                      "Group2": Enums("g2one", "g2two", "g2three"),
                      "Group3": Enums("g3one", "g3two", "g3three"),
                    }, name="useenums") ; f.append(BR)

        f.add_select(["mone", "mtwo", ("mthree", True), ("mfour", True)], name="multiselect", multiple=True) ; f.append(BR)

        set = f.add_fieldset("afieldset")
        set.add_textinput(name="settext", label="Enter set text")
        set.add_textinput(name="settext2", label="Enter set text 2", default="Default text.")
        set.append(BR)
        tbl = htd.new_table([1,2,3,4,5], 
                            [NULL, NULL, NULL], 
                            ["col1", "col2", "col3"], width="100%", summary="autogenerated")

        gentable = table.GenericTable(["heading1", "heading2", "heading3"], 
                    title="Sample generic table")
        gentable.append([1,2,3])
        gentable.append([4,5,6])
        tbl2 = htd.new_table_from_GenericTable(gentable)
        # object 
        subdoc = XHTML.new_docuement(dtds.XHTML)
        parts = subdoc.creator(("Add a docuement object.", ["Some data.", "some more data.."]))
        subdoc.append(parts)
        sdfo = open("/tmp/subdoc.html", "w")
        subdoc.emit(sdfo)
        sdfo.close()
        htd.add_object(data="subdoc.html", type=subdoc.MIMETYPE,
                                    width="400px", height="600px")
        htd.emit(sys.stdout)
        print "-----"
        fo = open(XHTMLFILENAME, "w")
        bw = POM.BeautifulWriter(fo, XHTML.INLINE)
        htd.emit(bw)
        fo.close()
        print "----- Form values:"
        print f.fetch_form_values()
        print "----- Form elements:"
        felems = f.fetch_form_elements()
        for name, elemlist in felems.items():
            print repr(name), ": ", repr(elemlist)
            print
        # visually verify the page.
        webbrowser.open("file://%s" % (XHTMLFILENAME,))
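
The test in Example 121 calls htd.emit(sys.stdout) to dump the generated XHTML to the terminal before also writing it to a file. The underlying idiom is an emit()-style method that accepts any writable file object; a small sketch with a hypothetical Page class:

import sys

class Page(object):
    def __init__(self, title, body):
        self.title = title
        self.body = body

    def emit(self, fileobj):
        # write to whatever file-like object the caller supplies
        fileobj.write("<html><head><title>%s</title></head>\n" % self.title)
        fileobj.write("<body><p>%s</p></body></html>\n" % self.body)

page = Page("Demo", "Hello")
page.emit(sys.stdout)                      # to the terminal
with open("/tmp/demo.html", "w") as fo:    # or to a file, as the test does
    page.emit(fo)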

Example 122

Project: opendrift Source File: opendrift_gui.py
    def __init__(self):
        tk.Tk.__init__(self)
        self.title('OpenDrift')
        o = OpenOil()
        try:
            img = ImageTk.PhotoImage(Image.open(o.test_data_folder() +
                                     '../../docs/opendrift_logo.png'))
            panel = tk.Label(self.master, image=img)
            panel.image = img
            panel.grid(row=0, column=0)
        except:
            pass # Could not display logo

        self.top = tk.Frame(self.master,
                            relief=tk.FLAT, pady=25, padx=25)
        self.top.grid(row=0, column=1, rowspan=1)

        tk.Label(self.top, text='Simulation type').grid(row=0, column=0)
        self.model = tk.StringVar()
        models = ['OpenOil', 'Leeway']
        self.model.set(models[0])
        self.modeldrop = tk.OptionMenu(self.top, self.model,
                                       *(models), command=self.set_model)
        self.modeldrop.grid(row=0, column=1)

        self.categoryLabel = tk.Label(self.master, text='Oil type')
        self.categoryLabel.grid(row=1, column=0)
        oljetyper = o.oiltypes
        self.oljetype = tk.StringVar()
        self.oljetype.set(oljetyper[0])
        self.categorydrop = tk.OptionMenu(self.master,
                                          self.oljetype, *oljetyper)
        self.categorydrop.grid(row=1, column=1)

        ##########
        # Release
        ##########
        self.start_t = tk.Frame(self.master, relief=tk.FLAT)
        self.start_t.grid(row=2, column=0, rowspan=1)
        startlabel = tk.Label(self.start_t, text="\n\nStart release\n\n")
        startlabel.grid(row=0, column=0)
        self.start = tk.Frame(self.master, bg='lightgray', bd=2,
                              relief=tk.SUNKEN, pady=5, padx=5)
        self.start.grid(row=2, column=1, rowspan=1)

        tk.Label(self.start, text='Longitude').grid(row=0, column=0)
        tk.Label(self.start, text='Latitude').grid(row=0, column=1)
        tk.Label(self.start, text='Radius [m]').grid(row=0, column=2)
        self.lon = tk.Entry(self.start, width=6, justify=tk.RIGHT)
        self.lat = tk.Entry(self.start, width=6, justify=tk.RIGHT)
        self.radius = tk.Entry(self.start, width=6, justify=tk.RIGHT)
        self.lon.grid(row=1, column=0)
        self.lon.insert(0, '4.5')
        self.lat.grid(row=1, column=1)
        self.lat.insert(0, '60.0')
        self.radius.grid(row=1, column=2)
        self.radius.insert(0, '5000')
        ##########
        # Time
        ##########
        now = datetime.now()
        tk.Label(self.start, text='Day').grid(row=2, column=0)
        tk.Label(self.start, text='Month').grid(row=2, column=1)
        tk.Label(self.start, text='Year').grid(row=2, column=2)
        tk.Label(self.start, text='Hour').grid(row=2, column=3)
        tk.Label(self.start, text='Minutes [UTC]').grid(row=2, column=4)
        self.datevar = tk.StringVar()
        self.dates = range(1, 32)
        self.datevar.set(now.day)
        self.date = tk.OptionMenu(self.start, self.datevar, *self.dates)
        self.date.grid(row=3, column=0)

        self.monthvar = tk.StringVar()
        self.months = ['January', 'February', 'March', 'April', 'May',
                       'June', 'July', 'August', 'September', 'October',
                       'November', 'December']
        self.monthvar.set(self.months[now.month-1])
        self.month = tk.OptionMenu(self.start, self.monthvar,
                                   *self.months)
        self.month.grid(row=3, column=1)

        self.yearvar = tk.StringVar()
        self.years = range(2015, 2017)
        self.yearvar.set(now.year)
        self.year = tk.OptionMenu(self.start, self.yearvar, *self.years)
        self.year.grid(row=3, column=2)

        self.hourvar = tk.StringVar()
        self.hours = range(0, 24)
        self.hourvar.set(now.hour)
        self.hour = tk.OptionMenu(self.start, self.hourvar, *self.hours)
        self.hour.grid(row=3, column=3)

        self.minutevar = tk.StringVar()
        self.minutes = range(0, 60, 5)
        self.minutevar.set(now.minute)
        self.minute = tk.OptionMenu(self.start, self.minutevar,
                                    *self.minutes)
        self.minute.grid(row=3, column=4)

        ###############
        # Release End
        ###############
        self.end_t = tk.Frame(self.master, relief=tk.FLAT)
        endlabel = tk.Label(self.end_t, text="\n\nEnd release\n\n")
        endlabel.grid(row=0, column=0)
        self.end_t.grid(row=3, column=0, rowspan=1)

        self.end = tk.Frame(self.master, bg='gray', bd=2,
                            relief=tk.SUNKEN, padx=5, pady=5)
        self.end.grid(row=3, column=1)

        tk.Label(self.end, text='Longitude').grid(row=0, column=0)
        tk.Label(self.end, text='Latitude').grid(row=0, column=1)
        tk.Label(self.end, text='Radius [m]').grid(row=0, column=2)
        self.elon = tk.Entry(self.end, width=6, justify=tk.RIGHT)
        self.elat = tk.Entry(self.end, width=6, justify=tk.RIGHT)
        self.eradius = tk.Entry(self.end, width=6, justify=tk.RIGHT)
        self.elon.grid(row=1, column=0)
        self.elon.insert(0, '4.5')
        self.elat.grid(row=1, column=1)
        self.elat.insert(0, '60.0')
        self.eradius.grid(row=1, column=2)
        self.eradius.insert(0, '0')
        ##########
        # Time
        ##########
        now = datetime.now()
        tk.Label(self.end, text='Day').grid(row=2, column=0)
        tk.Label(self.end, text='Month').grid(row=2, column=1)
        tk.Label(self.end, text='Year').grid(row=2, column=2)
        tk.Label(self.end, text='Hour').grid(row=2, column=3)
        tk.Label(self.end, text='Minutes [UTC]').grid(row=2, column=4)
        self.edatevar = tk.StringVar()
        self.edates = range(1, 32)
        self.edatevar.set(now.day)
        self.edate = tk.OptionMenu(self.end, self.edatevar, *self.edates)
        self.edate.grid(row=3, column=0)

        self.emonthvar = tk.StringVar()
        self.emonthvar.set(self.months[now.month-1])
        self.emonth = tk.OptionMenu(self.end, self.emonthvar,
                                    *self.months)
        self.emonth.grid(row=3, column=1)

        self.eyearvar = tk.StringVar()
        self.eyears = range(2015, 2017)
        self.eyearvar.set(now.year)
        self.eyear = tk.OptionMenu(self.end, self.eyearvar, *self.eyears)
        self.eyear.grid(row=3, column=2)

        self.ehourvar = tk.StringVar()
        self.ehours = range(0, 24)
        self.ehourvar.set(now.hour)
        self.ehour = tk.OptionMenu(self.end, self.ehourvar, *self.ehours)
        self.ehour.grid(row=3, column=3)

        self.eminutevar = tk.StringVar()
        self.eminutes = range(0, 60, 5)
        self.eminutevar.set(now.minute)
        self.eminute = tk.OptionMenu(self.end, self.eminutevar,
                                     *self.eminutes)
        self.eminute.grid(row=3, column=4)

        #######################
        # Simulation duration
        #######################
        self.duration = tk.Frame(self.master, bd=2,
                                 relief=tk.FLAT, padx=5, pady=15)
        self.duration.grid(row=4, column=1)
        tk.Label(self.duration, text='Run simulation ').grid(row=4, column=0)
        self.durationhours = tk.Entry(self.duration, width=3,
                                      justify=tk.RIGHT)
        self.durationhours.grid(row=4, column=1)
        self.durationhours.insert(0, 50)
        tk.Label(self.duration, text=' hours ').grid(row=4, column=2)

        self.directionvar = tk.StringVar()
        self.directionvar.set('forwards')
        self.direction = tk.OptionMenu(self.duration, self.directionvar,
                                       'forwards', 'backwards')
        self.direction.grid(row=4, column=3)
        tk.Label(self.duration, text=' in time ').grid(row=4, column=4)

        ##############
        # Output box
        ##############
        self.text = tk.Text(self, wrap="word")
        self.text.grid(row=5, columnspan=5, sticky='nsew')
        self.text.tag_configure("stderr", foreground="#b22222")
        sys.stdout = TextRedirector(self.text, "stdout")
        sys.stderr = TextRedirector(self.text, "stderr")
        s = tk.Scrollbar(self)
        s.grid(row=5, column=5, sticky='ns')
        s.config(command=self.text.yview)
        self.text.config(yscrollcommand=s.set)

        ##############
        # Driver data
        ##############
        o = OpenOil()
        self.current = reader_netCDF_CF_generic.Reader('http://thredds.met.no/thredds/dodsC/sea/norkyst800m/1h/aggregate_be')
        #    o.test_data_folder() +
        #    '16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
        self.wind = reader_netCDF_CF_generic.Reader('http://thredds.met.no/thredds/dodsC/arome25/arome_metcoop_default2_5km_latest.nc')
        #self.wind = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
        #    '16Nov2015_NorKyst_z_surface/arome_subset_16Nov2015.nc')

        print '#'*41
        print 'Current data coverage:'
        print str(self.current.start_time) + ' - ' + \
            str(self.current.end_time)
        print '#'*41
        print 'Wind data coverage:'
        print str(self.wind.start_time) + ' - ' + \
            str(self.wind.end_time)
        print '#'*41
        self.start_time = self.current.start_time

        ##########
        # RUN
        ##########
        tk.Button(self.master, text='PEIS PAO', bg='green',
                  command=self.run_opendrift).grid(row=7, column=1,
                                              sticky=tk.W, pady=4)
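
Example 122 replaces sys.stdout and sys.stderr with TextRedirector objects so that everything the simulation prints ends up in the Tk text widget. Any object with a write() method can stand in for sys.stdout; a minimal non-GUI sketch of the same substitution (ListRedirector is invented here, and the real TextRedirector presumably writes into the widget instead of a list):

import sys

class ListRedirector(object):
    """A stand-in for sys.stdout that collects writes in a list."""
    def __init__(self, sink):
        self.sink = sink

    def write(self, text):
        self.sink.append(text)

    def flush(self):
        pass

captured = []
original = sys.stdout
sys.stdout = ListRedirector(captured)
try:
    print("routed into the list instead of the terminal")
finally:
    # always restore the real stream
    sys.stdout = original
sys.stdout.write("".join(captured))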

Example 123

Project: f5vpn-login Source File: f5vpn-login.py
def main(argv):
    global proxy_addr, verbosity
    global ssl_client_certfile, ssl_client_keyfile

    if '--help' in argv:
        usage(argv[0], sys.stdout)
        sys.exit(0)

    if sys.version_info < (2,3,5):
        sys.stderr.write("Python 2.3.5 or later is required.\n")
        sys.exit(1)

    if os.geteuid() != 0:
        sys.stderr.write("ERROR: \n")
        sys.stderr.write(
"  This script must be run as root. Preferably setuid (via companion .c\n"
"  program), but it'll work when invoked as root directly, too.\n")
        sys.exit(1)

    # Set effective uid to userid; will become root as necessary
    os.seteuid(os.getuid())
    user = getpass.getuser()

    try:
        opts,args=getopt.getopt(argv[1:], "", ['verbose', 'http-proxy=', 'socks5-proxy=', 'dont-check-certificates', 'client-certificate=', 'client-certificate-key='])
    except getopt.GetoptError, e:
        sys.stderr.write("Unknown option: %s\n" % e.opt)
        usage(argv[0], sys.stderr)
        sys.exit(1)

    if len(args) > 1:
        usage(argv[0], sys.stderr)
        sys.exit(1)

    prefs = get_prefs()
    old_session = None
    userhost = None
    if prefs is not None:
        path, userhost, old_session = prefs.split('\0')

    if len(args) > 0:
        if args[0] != userhost:
            # Don't attempt to reuse session if switching users or servers.
            old_session = None
        userhost = args[0]

    if userhost is None:
        sys.stderr.write("The host argument must be provided the first time.\n")
        sys.exit(1)

    if '@' in userhost:
        user,host = userhost.rsplit('@', 1)
    else:
        host = userhost

    verbosity = 0
    check_certificates = True

    for opt,val in opts:
        if opt == '--verbose':
            verbosity += 1
        elif opt == '--http-proxy':
            proxy_addr = ('http',) + parse_hostport(val)
            sys.stderr.write("Using proxy: %r\n" % (proxy_addr,))
        elif opt == '--socks5-proxy':
            if socks is None:
                sys.stderr.write("Cannot use a socks5 proxy: you do not seem to have the socks module available.\n")
                sys.stderr.write("Please install SocksiPy: http://socksipy.sourceforge.net/\n")
                sys.exit(1)
            proxy_addr = ('socks5',) + parse_hostport(val)
            sys.stderr.write("Using proxy: %r\n" % (proxy_addr,))
        elif opt == '--dont-check-certificates':
            check_certificates = False
        elif opt == '--client-certificate':
            ssl_client_certfile = val
        elif opt == '--client-certificate-key':
            ssl_client_keyfile = val
    if check_certificates:
        # Updates global ssl_cert_path
        find_certificates_file()

    params = None

    if old_session:
        print "Trying old session..."
        menu_number = get_vpn_menu_number(host, old_session)
        if menu_number is not None:
            params = get_VPN_params(host, old_session, menu_number)
            session = old_session

    if params is None:
        client_data = get_vpn_client_data(host)
        # Loop: keep asking for passwords while the site gives a new prompt
        while True:
            password = getpass.getpass("password for %s@%s? " % (user, host))
            session = do_login(client_data, host, user, password)
            if session is not None:
                print "Session id gotten:", session
                break

        print "Getting params..."
        menu_number = get_vpn_menu_number(host, session)
        if menu_number is None:
            sys.stderr.write("Unable to find the 'Network Access' entry in main menu. Do you have VPN access?\n")
            sys.exit(1)

        params = get_VPN_params(host, session, menu_number)

    if params is None:
        print "Couldn't get embed info. Sorry."
        sys.exit(2)

    write_prefs('\0'.join(['', userhost, session]))

    if verbosity:
        sys.stderr.write("VPN Parameter dump:\n")
        for k,v in params.iteritems():
            sys.stderr.write("   %r: %r\n" % (k,v))

    print "Got plugin params, execing vpn client"

    execPPPd(params)
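
In Example 123 the same usage() helper is called with sys.stdout when --help was requested and with sys.stderr when the arguments are wrong, a common CLI convention: help that was asked for goes to standard output, diagnostics go to standard error. A compact sketch of that convention (usage() and the argument handling are simplified stand-ins):

import sys

def usage(progname, out):
    out.write("usage: %s [--help] HOST\n" % progname)

def main(argv):
    if '--help' in argv:
        usage(argv[0], sys.stdout)   # requested help goes to stdout
        return 0
    if len(argv) != 2:
        usage(argv[0], sys.stderr)   # error path goes to stderr
        return 1
    return 0

sys.exit(main(sys.argv))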

Example 124

Project: mysql-postgresql-converter Source File: db_converter.py
def parse(input_filename, output_filename):
    "Feed it a file, and it'll output a fixed one"

    # State storage
    if input_filename == "-":
        num_lines = -1
    else:
        num_lines = int(subprocess.check_output(["wc", "-l", input_filename]).strip().split()[0])
    tables = {}
    current_table = None
    creation_lines = []
    enum_types = []
    foreign_key_lines = []
    fulltext_key_lines = []
    sequence_lines = []
    cast_lines = []
    num_inserts = 0
    started = time.time()

    # Open output file and write header. Logging file handle will be stdout
    # unless we're writing output to stdout, in which case NO PROGRESS FOR YOU.
    if output_filename == "-":
        output = sys.stdout
        logging = open(os.devnull, "w")
    else:
        output = open(output_filename, "w")
        logging = sys.stdout

    if input_filename == "-":
        input_fh = sys.stdin
    else:
        input_fh = open(input_filename)


    output.write("-- Converted by db_converter\n")
    output.write("START TRANSACTION;\n")
    output.write("SET standard_conforming_strings=off;\n")
    output.write("SET escape_string_warning=off;\n")
    output.write("SET CONSTRAINTS ALL DEFERRED;\n\n")

    for i, line in enumerate(input_fh):
        time_taken = time.time() - started
        percentage_done = (i+1) / float(num_lines)
        secs_left = (time_taken / percentage_done) - time_taken
        logging.write("\rLine %i (of %s: %.2f%%) [%s tables] [%s inserts] [ETA: %i min %i sec]" % (
            i + 1,
            num_lines,
            ((i+1)/float(num_lines))*100,
            len(tables),
            num_inserts,
            secs_left // 60,
            secs_left % 60,
        ))
        logging.flush()
        line = line.decode("utf8").strip().replace(r"\\", "WUBWUBREALSLASHWUB").replace(r"\'", "''").replace("WUBWUBREALSLASHWUB", r"\\")
        # Ignore comment lines
        if line.startswith("--") or line.startswith("/*") or line.startswith("LOCK TABLES") or line.startswith("DROP TABLE") or line.startswith("UNLOCK TABLES") or not line:
            continue

        # Outside of anything handling
        if current_table is None:
            # Start of a table creation statement?
            if line.startswith("CREATE TABLE"):
                current_table = line.split('"')[1]
                tables[current_table] = {"columns": []}
                creation_lines = []
            # Inserting data into a table?
            elif line.startswith("INSERT INTO"):
                output.write(line.encode("utf8").replace("'0000-00-00 00:00:00'", "NULL") + "\n")
                num_inserts += 1
            # ???
            else:
                print "\n ! Unknown line in main body: %s" % line

        # Inside-create-statement handling
        else:
            # Is it a column?
            if line.startswith('"'):
                useless, name, definition = line.strip(",").split('"',2)
                try:
                    type, extra = definition.strip().split(" ", 1)

                    # This must be a tricky enum
                    if ')' in extra:
                        type, extra = definition.strip().split(")")

                except ValueError:
                    type = definition.strip()
                    extra = ""
                extra = re.sub("CHARACTER SET [\w\d]+\s*", "", extra.replace("unsigned", ""))
                extra = re.sub("COLLATE [\w\d]+\s*", "", extra.replace("unsigned", ""))

                # See if it needs type conversion
                final_type = None
                set_sequence = None
                if type.startswith("tinyint("):
                    type = "int4"
                    set_sequence = True
                    final_type = "boolean"
                elif type.startswith("int("):
                    type = "integer"
                    set_sequence = True
                elif type.startswith("bigint("):
                    type = "bigint"
                    set_sequence = True
                elif type == "longtext":
                    type = "text"
                elif type == "mediumtext":
                    type = "text"
                elif type == "tinytext":
                    type = "text"
                elif type.startswith("varchar("):
                    size = int(type.split("(")[1].rstrip(")"))
                    type = "varchar(%s)" % (size * 2)
                elif type.startswith("smallint("):
                    type = "int2"
                    set_sequence = True
                elif type == "datetime":
                    type = "timestamp with time zone"
                elif type == "double":
                    type = "double precision"
                elif type.endswith("blob"):
                    type = "bytea"
                elif type.startswith("enum(") or type.startswith("set("):

                    types_str = type.split("(")[1].rstrip(")").rstrip('"')
                    types_arr = [type_str.strip('\'') for type_str in types_str.split(",")]

                    # Considered using values to make a name, but it's dodgy
                    # enum_name = '_'.join(types_arr)
                    enum_name = "{0}_{1}".format(current_table, name)

                    if enum_name not in enum_types:
                        output.write("CREATE TYPE {0} AS ENUM ({1}); \n".format(enum_name, types_str));
                        enum_types.append(enum_name)

                    type = enum_name

                if final_type:
                    cast_lines.append("ALTER TABLE \"%s\" ALTER COLUMN \"%s\" DROP DEFAULT, ALTER COLUMN \"%s\" TYPE %s USING CAST(\"%s\" as %s)" % (current_table, name, name, final_type, name, final_type))
                # ID fields need sequences [if they are integers?]
                if name == "id" and set_sequence is True:
                    sequence_lines.append("CREATE SEQUENCE %s_id_seq" % (current_table))
                    sequence_lines.append("SELECT setval('%s_id_seq', max(id)) FROM %s" % (current_table, current_table))
                    sequence_lines.append("ALTER TABLE \"%s\" ALTER COLUMN \"id\" SET DEFAULT nextval('%s_id_seq')" % (current_table, current_table))
                # Record it
                creation_lines.append('"%s" %s %s' % (name, type, extra))
                tables[current_table]['columns'].append((name, type, extra))
            # Is it a constraint or something?
            elif line.startswith("PRIMARY KEY"):
                creation_lines.append(line.rstrip(","))
            elif line.startswith("CONSTRAINT"):
                foreign_key_lines.append("ALTER TABLE \"%s\" ADD CONSTRAINT %s DEFERRABLE INITIALLY DEFERRED" % (current_table, line.split("CONSTRAINT")[1].strip().rstrip(",")))
                foreign_key_lines.append("CREATE INDEX ON \"%s\" %s" % (current_table, line.split("FOREIGN KEY")[1].split("REFERENCES")[0].strip().rstrip(",")))
            elif line.startswith("UNIQUE KEY"):
                creation_lines.append("UNIQUE (%s)" % line.split("(")[1].split(")")[0])
            elif line.startswith("FULLTEXT KEY"):

                fulltext_keys = " || ' ' || ".join( line.split('(')[-1].split(')')[0].replace('"', '').split(',') )
                fulltext_key_lines.append("CREATE INDEX ON %s USING gin(to_tsvector('english', %s))" % (current_table, fulltext_keys))

            elif line.startswith("KEY"):
                pass
            # Is it the end of the table?
            elif line == ");":
                output.write("CREATE TABLE \"%s\" (\n" % current_table)
                for i, line in enumerate(creation_lines):
                    output.write("    %s%s\n" % (line, "," if i != (len(creation_lines) - 1) else ""))
                output.write(');\n\n')
                current_table = None
            # ???
            else:
                print "\n ! Unknown line inside table creation: %s" % line


    # Finish file
    output.write("\n-- Post-data save --\n")
    output.write("COMMIT;\n")
    output.write("START TRANSACTION;\n")

    # Write typecasts out
    output.write("\n-- Typecasts --\n")
    for line in cast_lines:
        output.write("%s;\n" % line)

    # Write FK constraints out
    output.write("\n-- Foreign keys --\n")
    for line in foreign_key_lines:
        output.write("%s;\n" % line)

    # Write sequences out
    output.write("\n-- Sequences --\n")
    for line in sequence_lines:
        output.write("%s;\n" % line)

    # Write full-text index keys out
    output.write("\n-- Full Text keys --\n")
    for line in fulltext_key_lines:
        output.write("%s;\n" % line)

    # Finish file
    output.write("\n")
    output.write("COMMIT;\n")
    print ""

Example 125

Project: ilastik-0.5 Source File: classificationMgr.py
Function: run
    def run(self):
        self.ilastik.activeImageLock.acquire()
        F, L = self.classificationMgr.getTrainingMatrix()
        self.ilastik.activeImageLock.release()
        self.dataPending.set()
        while not self.stopped:
            self.dataPending.wait()
            self.dataPending.clear()
            if not self.stopped: # not needed, but speeds up the final thread.join()
                features = None
                self.ilastik.activeImageLock.acquire()
                self.ilastik.project.dataMgr.featureLock.acquire()
                try:
                    activeImageNumber = self.ilastik._activeImageNumber
                    activeImage = self.ilastik._activeImage
                    newLabels = self.ilastik.labelWidget.getPendingLabels()
                    
                    self.classificationMgr.updateTrainingMatrixForImage(newLabels,  activeImage)
                    features,labels = self.classificationMgr.getTrainingMatrix()
                    #if len(newLabels) > 0 or len(self.classifiers) == 0:
                    if features is not None:
                        print "retraining..."
                        interactiveMessagePrint("1>> Pop training Data")
                        if features.shape[0] == labels.shape[0]:
                            #self.classifiers = deque()
                            jobs = []
                            for i in range(self.numberOfClassifiers):
                                job = jobMachine.IlastikJob(ClassifierInteractiveThread.trainClassifier, [self, features, labels, i, self.numberOfClassifiers])
                                jobs.append(job)
                            self.jobMachine.process(jobs)                        
                        else:
                            print "##################### shape mismatch #####################"
                    
                    vs = self.ilastik.labelWidget.getVisibleState()
                    features = self.classificationMgr.getFeatureSlicesForViewStateForImage(vs, activeImage)
                    vs.append(activeImage)
    
                    interactiveMessagePrint("1>> Pop _prediction Data")
                    if len(self.classifiers) > 0:
                        #make a little test _prediction to get the shape and see if it works:
                        tempPred = None
                        if features is not None:
                            tfm = features[0][0,:]
                            tfm.shape = (1,) + tfm.shape 
                            tempPred = self.classifiers[0].predict(tfm)
                                            
                        if tempPred is not None:
                            self._prediction = []
                            jobs= []
                            self.count = 0
                            for i in range(len(features)):
                                self._prediction.append(numpy.zeros((features[i].shape[0],) + (tempPred.shape[1],) , 'float32'))
                                for j in range(0,features[i].shape[0],128**2):
                                    for k in range(len(self.classifiers)):
                                        end = min(j+128**2,features[i].shape[0])
                                        job = jobMachine.IlastikJob(ClassifierInteractiveThread.classifierPredict, [self, i, j, end, k, features])
                                        jobs.append(job)
                                
                            self.jobMachine.process(jobs)

                            shape = activeImage.shape

                            tp = []
                            tp.append(self._prediction[0].reshape((shape[2],shape[3],self._prediction[0].shape[-1])))
                            tp.append(self._prediction[1].reshape((shape[1],shape[3],self._prediction[1].shape[-1])))
                            tp.append(self._prediction[2].reshape((shape[1],shape[2],self._prediction[2].shape[-1])))

                            descriptions =  self.ilastik.project.dataMgr.module["Classification"]["labelDescriptions"]
                            all =  range(len(descriptions))
                            not_predicted = numpy.setdiff1d(all, self.classifiers[0].unique_vals - 1)

                            uncertaintyData = activeImage.overlayMgr["Classification/Uncertainty"]._data

                            #Axis 0
                            tpc = tp[0]
                            ba = DM.BlockAccessor2D(tpc[:,:,:])
                            for i in range(ba._blockCount):
                                b = ba.getBlockBounds(i,0)
                                lb = tpc[b[0]:b[1],b[2]:b[3],:]
                                margin = activeLearning.computeEnsembleMargin2D(lb)
                                
                                uncertaintyData[vs[0], vs[1], b[0]:b[1],b[2]:b[3],0] = margin
#                                seg = segmentationMgr.LocallyDominantSegmentation2D(lb, 1.0)
#                                self.ilastik.project.dataMgr[vs[-1]]._dataVol.segmentation[vs[0], vs[1], b[0]:b[1],b[2]:b[3]] = seg

                                for p_i, p_num in enumerate(self.classifiers[0].unique_vals):
                                    predictionData = activeImage.overlayMgr["Classification/Prediction/" + descriptions[p_num-1].name]._data
                                    predictionData[vs[0],vs[1],b[0]:b[1],b[2]:b[3],0] = tpc[b[0]:b[1],b[2]:b[3],p_i]

                                for p_i, p_num in enumerate(not_predicted):
                                    predictionData = activeImage.overlayMgr["Classification/Prediction/" + descriptions[p_num].name]._data
                                    predictionData[vs[0],vs[1],b[0]:b[1],b[2]:b[3],0] = 0

                            #Axis 1
                            tpc = tp[1]
                            ba = DM.BlockAccessor2D(tpc[:,:,:])
                            for i in range(ba._blockCount):
                                b = ba.getBlockBounds(i,0)
                                lb = tpc[b[0]:b[1],b[2]:b[3],:]
                                margin = activeLearning.computeEnsembleMargin2D(lb)
                                uncertaintyData[vs[0], b[0]:b[1],vs[2],b[2]:b[3],0] = margin

                                for p_i, p_num in enumerate(self.classifiers[0].unique_vals):
                                    predictionData = activeImage.overlayMgr["Classification/Prediction/" + descriptions[p_num-1].name]._data
                                    predictionData[vs[0],b[0]:b[1],vs[2],b[2]:b[3],0] = tpc[b[0]:b[1],b[2]:b[3],p_i]

                                for p_i, p_num in enumerate(not_predicted):
                                    predictionData = activeImage.overlayMgr["Classification/Prediction/" + descriptions[p_num].name]._data
                                    predictionData[vs[0],b[0]:b[1],vs[2],b[2]:b[3],0] = 0


                            #Axis 2
                            tpc = tp[2]
                            ba = DM.BlockAccessor2D(tpc[:,:,:])
                            for i in range(ba._blockCount):
                                b = ba.getBlockBounds(i,0)
                                lb = tpc[b[0]:b[1],b[2]:b[3],:]
                                margin = activeLearning.computeEnsembleMargin2D(lb)
                                uncertaintyData[vs[0], b[0]:b[1],b[2]:b[3], vs[3],0] = margin

                                for p_i, p_num in enumerate(self.classifiers[0].unique_vals):
                                    predictionData = activeImage.overlayMgr["Classification/Prediction/" + descriptions[p_num-1].name]._data
                                    predictionData[vs[0],b[0]:b[1],b[2]:b[3],vs[3],0] = tpc[b[0]:b[1],b[2]:b[3],p_i]

                                for p_i, p_num in enumerate(not_predicted):
                                    predictionData = activeImage.overlayMgr["Classification/Prediction/" + descriptions[p_num].name]._data
                                    predictionData[vs[0],b[0]:b[1],b[2]:b[3],vs[3],0] = 0


                        else:
                            print "##################### _prediction None #########################"
                    else:
                        print "##################### No Classifiers ############################"
                    if have_qt:
                        self.emit(QtCore.SIGNAL("resultsPending()"))
                    else:
                        raise "Need to add code to signal results pending without Qt"
                    self.ilastik.project.dataMgr.featureLock.release()
                    self.ilastik.activeImageLock.release()                     
                except Exception, e:
                    print "########################## exception in Interactivethread ###################"
                    print e
                    traceback.print_exc(file=sys.stdout)

                    self.ilastik.activeImageLock.release() 
                    self.ilastik.project.dataMgr.featureLock.release()
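
The except block above shows the typical debugging use of sys.stdout in threaded GUI code: traceback.print_exc(file=sys.stdout) puts the stack trace in the same stream as the thread's own print statements. A minimal sketch of that pattern (run_safely is illustrative and not part of ilastik):

import sys
import traceback

def run_safely(step):
    # Run one unit of work; on failure, print the full traceback to stdout
    # so it appears alongside the thread's other progress messages.
    try:
        step()
    except Exception:
        print("########## exception in worker thread ##########")
        traceback.print_exc(file=sys.stdout)

run_safely(lambda: 1 / 0)   # prints the ZeroDivisionError traceback to stdout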

Example 126

Project: wsgidav Source File: wsgidav_app.py
Function: call
    def __call__(self, environ, start_response):

        # util.log("SCRIPT_NAME='%s', PATH_INFO='%s'" % (
        #    environ.get("SCRIPT_NAME"), environ.get("PATH_INFO")))

        # We optionally unquote PATH_INFO here, although this should already be
        # done by the server (#8).
        path = environ["PATH_INFO"]
        if self.config.get("unquote_path_info", False):
            path = compat.unquote(environ["PATH_INFO"])
        # GC issue 22: Pylons sends root as u'/'
        # if isinstance(path, unicode):
        if not compat.is_native(path):
            util.log("Got non-native PATH_INFO: %r" % path)
            # path = path.encode("utf8")
            path = compat.to_native(path)

        # Always adding these values to environ:
        environ["wsgidav.config"] = self.config
        environ["wsgidav.provider"] = None
        environ["wsgidav.verbose"] = self._verbose

        # Find DAV provider that matches the share

        # sorting share list by reverse length
#        shareList = self.providerMap.keys()
#        shareList.sort(key=len, reverse=True)
        shareList = sorted(self.providerMap.keys(), key=len, reverse=True)

        share = None
        for r in shareList:
            # @@: Case sensitivity should be an option of some sort here;
            # os.path.normpath might give the preferred case for a filename.
            if r == "/":
                share = r
                break
            elif path.upper() == r.upper() or path.upper().startswith(r.upper() + "/"):
                share = r
                break

        # Note: we call the next app, even if provider is None, because OPTIONS
        #       must still be handled.
        #       All other requests will result in '404 Not Found'
        if share is not None:
            share_data = self.providerMap.get(share)
            environ["wsgidav.provider"] = share_data['provider']
        # TODO: test with multi-level realms: 'aa/bb'
        # TODO: test security: url contains '..'

        # Transform SCRIPT_NAME and PATH_INFO
        # (Since path and share are unquoted, this also fixes quoted values.)
        if share == "/" or not share:
            environ["PATH_INFO"] = path
        else:
            environ["SCRIPT_NAME"] += share
            environ["PATH_INFO"] = path[len(share):]
#        util.log("--> SCRIPT_NAME='%s', PATH_INFO='%s'" % (environ.get("SCRIPT_NAME"), environ.get("PATH_INFO")))

        # assert isinstance(path, str)
        assert compat.is_native(path)
        # See http://mail.python.org/pipermail/web-sig/2007-January/002475.html
        # for some clarification about SCRIPT_NAME/PATH_INFO format
        # SCRIPT_NAME starts with '/' or is empty
        assert environ["SCRIPT_NAME"] == "" or environ[
            "SCRIPT_NAME"].startswith("/")
        # SCRIPT_NAME must not have a trailing '/'
        assert environ["SCRIPT_NAME"] in (
            "", "/") or not environ["SCRIPT_NAME"].endswith("/")
        # PATH_INFO starts with '/'
        assert environ["PATH_INFO"] == "" or environ[
            "PATH_INFO"].startswith("/")

        start_time = time.time()

        def _start_response_wrapper(status, response_headers, exc_info=None):
            # Postprocess response headers
            headerDict = {}
            for header, value in response_headers:
                if header.lower() in headerDict:
                    util.warn("Duplicate header in response: %s" % header)
                headerDict[header.lower()] = value

            # Check if we should close the connection after this request.
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.4
            forceCloseConnection = False
            currentContentLength = headerDict.get("content-length")
            statusCode = int(status.split(" ", 1)[0])
            contentLengthRequired = (environ["REQUEST_METHOD"] != "HEAD"
                                     and statusCode >= 200
                                     and not statusCode in (204, 304))
#            print(environ["REQUEST_METHOD"], statusCode, contentLengthRequired)
            if contentLengthRequired and currentContentLength in (None, ""):
                # A typical case: a GET request on a virtual resource, for which
                # the provider doesn't know the length
                util.warn(
                    "Missing required Content-Length header in %s-response: closing connection" %
                    statusCode)
                forceCloseConnection = True
            elif not type(currentContentLength) is str:
                util.warn("Invalid Content-Length header in response (%r): closing connection" %
                          headerDict.get("content-length"))
                forceCloseConnection = True

            # HOTFIX for Vista and Windows 7 (GC issue 13, issue 23)
            # It seems that we must read *all* of the request body, otherwise
            # clients may miss the response.
            # For example Vista MiniRedir didn't understand a 401 response,
            # when trying an anonymous PUT of big files. As a consequence, it
            # doesn't retry with credentials and the file copy fails.
            # (XP is fine however).
            util.readAndDiscardInput(environ)

            # Make sure the socket is not reused, unless we are 100% sure all
            # current input was consumed
            if(util.getContentLength(environ) != 0
               and not environ.get("wsgidav.all_input_read")):
                util.warn(
                    "Input stream not completely consumed: closing connection")
                forceCloseConnection = True

            if forceCloseConnection and headerDict.get("connection") != "close":
                util.warn("Adding 'Connection: close' header")
                response_headers.append(("Connection", "close"))

            # Log request
            if self._verbose >= 1:
                userInfo = environ.get("http_authenticator.username")
                if not userInfo:
                    userInfo = "(anonymous)"
                threadInfo = ""
                if self._verbose >= 1:
                    threadInfo = "<%s> " % threading.currentThread().ident
                extra = []
                if "HTTP_DESTINATION" in environ:
                    extra.append('dest="%s"' % environ.get("HTTP_DESTINATION"))
                if environ.get("CONTENT_LENGTH", "") != "":
                    extra.append("length=%s" % environ.get("CONTENT_LENGTH"))
                if "HTTP_DEPTH" in environ:
                    extra.append("depth=%s" % environ.get("HTTP_DEPTH"))
                if "HTTP_RANGE" in environ:
                    extra.append("range=%s" % environ.get("HTTP_RANGE"))
                if "HTTP_OVERWRITE" in environ:
                    extra.append("overwrite=%s" %
                                 environ.get("HTTP_OVERWRITE"))
                if self._verbose >= 1 and "HTTP_EXPECT" in environ:
                    extra.append('expect="%s"' % environ.get("HTTP_EXPECT"))
                if self._verbose >= 2 and "HTTP_CONNECTION" in environ:
                    extra.append('connection="%s"' %
                                 environ.get("HTTP_CONNECTION"))
                if self._verbose >= 2 and "HTTP_USER_AGENT" in environ:
                    extra.append('agent="%s"' % environ.get("HTTP_USER_AGENT"))
                if self._verbose >= 2 and "HTTP_TRANSFER_ENCODING" in environ:
                    extra.append('transfer-enc=%s' %
                                 environ.get("HTTP_TRANSFER_ENCODING"))
                if self._verbose >= 1:
                    extra.append('elap=%.3fsec' % (time.time() - start_time))
                extra = ", ".join(extra)

#               This is the CherryPy format:
#                127.0.0.1 - - [08/Jul/2009:17:25:23] "GET /loginPrompt?redirect=/renderActionList%3Frelation%3Dpersonal%26key%3D%26filter%3DprivateSchedule&reason=0 HTTP/1.1" 200 1944 "http://127.0.0.1:8002/command?id=CMD_Schedule" "Mozilla/5.0 (Windows; U; Windows NT 6.0; de; rv:1.9.1) Gecko/20090624 Firefox/3.5"
#                print >>sys.stderr, '%s - %s - [%s] "%s" %s -> %s' % (
                print('%s - %s - [%s] "%s" %s -> %s' % (
                    threadInfo + environ.get("REMOTE_ADDR", ""),
                    userInfo,
                    util.getLogTime(),
                    environ.get("REQUEST_METHOD") + " " +
                    environ.get("PATH_INFO", ""),
                    extra,
                    status,
                    # response_headers.get(""), # response Content-Length
                    # referer
                ), file=sys.stdout)

            return start_response(status, response_headers, exc_info)

        # Call next middleware
        app_iter = self._application(environ, _start_response_wrapper)
        for v in app_iter:
            yield v
        if hasattr(app_iter, "close"):
            app_iter.close()

        return
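
The logging branch at the end of _start_response_wrapper passes file=sys.stdout explicitly, which makes the destination obvious and easy to swap out. A trimmed sketch of that kind of access-log line (log_request and the field layout are simplified for illustration, not wsgidav's actual format):

import sys
import time

def log_request(environ, status, start_time):
    # One access-log line per request, written explicitly to sys.stdout
    # rather than relying on print's implicit default.
    print('%s - [%s] "%s %s" %s elap=%.3fsec' % (
        environ.get("REMOTE_ADDR", "-"),
        time.strftime("%d/%b/%Y:%H:%M:%S"),
        environ.get("REQUEST_METHOD", "-"),
        environ.get("PATH_INFO", ""),
        status,
        time.time() - start_time,
    ), file=sys.stdout)

log_request({"REMOTE_ADDR": "127.0.0.1", "REQUEST_METHOD": "GET",
             "PATH_INFO": "/share/file.txt"}, "200 OK", time.time())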

Example 127

Project: cloudstack-cloudmonkey Source File: cloudmonkey.py
    def print_result(self, result, result_filter=[]):
        if not result or len(result) == 0:
            return

        filtered_result = copy.deepcopy(result)
        if result_filter and isinstance(result_filter, list) \
                and len(result_filter) > 0:
            tfilter = {}  # temp var to hold a dict of the filters
            tresult = filtered_result  # dupe the result to filter
            if result_filter:
                for res in result_filter:
                    tfilter[res] = 1
                for okey, oval in result.iteritems():
                    if isinstance(oval, dict):
                        for tkey in oval:
                            if tkey not in tfilter:
                                try:
                                    del(tresult[okey][oval][tkey])
                                except:
                                    pass
                    elif isinstance(oval, list):
                        for x in range(len(oval)):
                            if isinstance(oval[x], dict):
                                for tkey in oval[x]:
                                    if tkey not in tfilter:
                                        try:
                                            del(tresult[okey][x][tkey])
                                        except:
                                            pass
                            else:
                                try:
                                    del(tresult[okey][x])
                                except:
                                    pass
            filtered_result = tresult

        def print_result_json(result):
            self.monkeyprint(json.dumps(result,
                                        sort_keys=True,
                                        indent=2,
                                        ensure_ascii=False,
                                        separators=(',', ': ')))

        def print_result_xml(result):
            custom_root = "CloudStack-%s" % self.profile.replace(" ", "_")
            xml = dicttoxml(result, attr_type=False, custom_root=custom_root)
            self.monkeyprint(parseString(xml).toprettyxml())

        def print_result_csv(result):
            if "count" in result:
                result.pop("count")

            if len(result.keys()) == 1:
                item = result[result.keys()[0]]
                if isinstance(item, list):
                    result = item
                elif isinstance(item, dict):
                    result = [item]

            if isinstance(result, list) and len(result) > 0:
                if isinstance(result[0], dict):
                    keys = result[0].keys()
                    writer = csv.DictWriter(sys.stdout, keys)
                    print ','.join(keys)
                    for item in result:
                        row = {}
                        for k in keys:
                            if k not in item:
                                row[k] = None
                            else:
                                if type(item[k]) is unicode:
                                    row[k] = item[k].encode('utf8')
                                else:
                                    row[k] = item[k]
                        writer.writerow(row)
            elif isinstance(result, dict):
                keys = result.keys()
                writer = csv.DictWriter(sys.stdout, keys)
                print ','.join(keys)
                writer.writerow(result)

        def print_result_tabular(result):
            def print_table(printer, toprow):
                if printer:
                    self.monkeyprint(printer.get_string())
                return PrettyTable(toprow)
            printer = None
            toprow = []
            if not result:
                return
            toprow = set(reduce(lambda x, y: x + y, map(lambda x: x.keys(),
                         filter(lambda x: isinstance(x, dict), result))))
            printer = print_table(printer, toprow)
            for node in result:
                if not node:
                    continue
                for key in toprow:
                    if key not in node:
                        node[key] = ''
                row = map(lambda x: node[x], toprow)
                if printer and row:
                    printer.add_row(row)
            print_table(printer, toprow)

        def print_result_as_dict(result):
            for key in sorted(result.keys(), key=lambda x:
                              x not in ['id', 'count', 'name'] and x):
                if isinstance(result[key], list):
                    self.monkeyprint(key + ":")
                    print_result_as_list(result[key])
                elif isinstance(result[key], dict):
                    self.monkeyprint(key + ":")
                    print_result_as_dict(result[key])
                else:
                    value = unicode(result[key])
                    self.monkeyprint(key, " = ", value)

        def print_result_as_list(result):
            for idx, node in enumerate(result):
                if isinstance(node, dict):
                    if self.display == 'table':
                        print_result_tabular(result)
                        break
                    print_result_as_dict(node)
                elif isinstance(node, list):
                    print_result_as_list(node)
                else:
                    self.monkeyprint(filtered_result)
                if result and node and (idx+1) < len(result):
                    self.monkeyprint(self.ruler * 80)

        if self.display == "json":
            print_result_json(filtered_result)
            return

        if self.display == "xml":
            print_result_xml(filtered_result)
            return

        if self.display == "csv":
            print_result_csv(filtered_result)
            return

        if isinstance(filtered_result, dict):
            print_result_as_dict(filtered_result)
        elif isinstance(filtered_result, list):
            print_result_as_list(filtered_result)
        else:
            self.monkeyprint(filtered_result)
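
print_result_csv above works because csv.DictWriter only needs a write method, so sys.stdout can be handed to it directly and rows stream straight to the terminal. A Python 3 sketch of the same idea (print_csv and the sample rows are illustrative only; the original also handles Python 2 unicode quirks):

import csv
import sys

def print_csv(rows):
    # Emit a list of dicts as CSV on stdout, header row first.
    if not rows:
        return
    keys = sorted(rows[0])
    writer = csv.DictWriter(sys.stdout, fieldnames=keys)
    writer.writeheader()
    for row in rows:
        writer.writerow({k: row.get(k, "") for k in keys})

print_csv([{"id": "1", "name": "vm-01"}, {"id": "2", "name": "vm-02"}])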

Example 128

Project: PyDA Source File: maininterface.py
    def initUI(self):
        self.locationStack = list()

        self.app.title("PyDA")

        # Set up the Menu Bar
        self.menu_bar = MenuBar(self.app)
        self.menu_bar.addMenu('File')
        self.menu_bar.addMenuItem('File', 'Disassemble File', self.onDisassembleFile)
        self.menu_bar.addMenuItem('File', 'Load PyDA Save', self.onLoad)
        self.menu_bar.addMenuItem('File', 'Save', self.onSave)
        self.menu_bar.addMenuSeparator('File')
        self.menu_bar.addMenuItem('File', 'Exit', self.onExit)

        # Set up the Tool Bar
        # TODO: Add images to buttons with mouseover text
        self.toolbar = ToolBar(self.app, 'top')
        self.toolbar.addButton('Back', self.onBack, 'left')
        self.toolbar.addVertSeperator('left')
        self.toolbar.addButton('Disassemble File', self.onDisassembleFile, 'left')
        self.toolbar.addButton('Load', self.onLoad, 'left')
        self.toolbar.addButton('Save', self.onSave, 'left')
        self.toolbar.addVertSeperator('left')
        self.toolbar.addButton('Share', self.onShare, 'left')

        # Set up the status bar ##
        self.status_bar = ToolBar(self.app, 'bottom', relief='sunken', borderwidth=2)
        self.status_bar.addLabel('Status:', 'left')
        self.status_label = self.status_bar.addLabel('Ready', 'left')
        self.progress_bar = self.status_bar.addProgressBar('right', length=200, mode='indeterminate')

        # Set up the vertical paned window
        self.tl_v_window = PanedWindow(self.app, 'top', borderwidth=1,
                relief="sunken", sashwidth=4, orient="vertical")

        # Set up the horizontal paned window and add to the vertical window
        self.tl_h_window = self.tl_v_window.addElement(
                PanedWindow(self.tl_v_window, borderwidth=1,
                    relief="sunken", sashwidth=4))

        # Set up the two notebooks
        self.left_notebook = self.tl_h_window.addNotebook()
        self.right_notebook = self.tl_h_window.addNotebook()
        self.bottom_notebook = self.tl_v_window.addNotebook()

        # Set up the functions listbox
        self.functions_listbox = self.left_notebook.addListboxWithScrollbar(
                'Functions', background='white', borderwidth=1,
                highlightthickness=1, relief='sunken')

        self.functions_listbox.bind('<Double-Button-1>', self.onFunctionDoubleClick)

        # Set up the strings listbox
        self.strings_listbox = self.left_notebook.addListboxWithScrollbar(
                'Strings', background='white', borderwidth=1,
                highlightthickness=1, relief='sunken')

        self.strings_listbox.bind('<Double-Button-1>', self.onStringDoubleClick)

        # Set up the disassembly textbox
        self.disassembly_textbox = self.right_notebook.addTextboxWithScrollbar(
                'Disassembly', tcl_buffer_size=self.TEXTBOX_BUFFER_SIZE,
                tcl_buffer_low_cutoff=self.TEXTBOX_BUFFER_LOW_CUTOFF,
                tcl_buffer_high_cutoff=self.TEXTBOX_BUFFER_HIGH_CUTOFF,
                tcl_moveto_yview=self.TEXTBOX_MOVETO_YVIEW,
                max_lines_jump=self.TEXTBOX_MAX_LINES_JUMP,
                background="white", borderwidth=1, highlightthickness=1, relief='sunken')

        # Set up the data section textbox
        # self.data_sections_textbox = self.right_notebook.addTextboxWithScrollbar(
        #         'Data Sections', background="white", borderwidth=1,
        #         highlightthickness=1, relief='sunken')
        self.data_sections_textbox = self.right_notebook.addTextboxWithScrollbar(
                'Data Sections', tcl_buffer_size=self.TEXTBOX_BUFFER_SIZE,
                tcl_buffer_low_cutoff=self.TEXTBOX_BUFFER_LOW_CUTOFF,
                tcl_buffer_high_cutoff=self.TEXTBOX_BUFFER_HIGH_CUTOFF,
                tcl_moveto_yview=self.TEXTBOX_MOVETO_YVIEW,
                max_lines_jump=self.TEXTBOX_MAX_LINES_JUMP,
                background="white", borderwidth=1, highlightthickness=1, relief='sunken')

        # Set up the output window
        debug_frame = self.bottom_notebook.addFrame('Debug')
        debug_frame_2 = debug_frame.addFrame('bottom', 'x', False, borderwidth=1)
        debug_frame_1 = debug_frame.addFrame('top', 'both', True, borderwidth=1)
        self.debug_textbox = debug_frame_1.addTextboxWithScrollbar(
                background='white', borderwidth=1, highlightthickness=1,
                relief='sunken')
        self.debug_entry = debug_frame_2.addEntryWithLabel(
                'Command:', 'bottom', 'x', True, background='white',
                borderwidth=1, highlightthickness=1, relief='sunken')

        self.debug_data_model = TextModel()
        self.debug_textbox.setDataModel(self.debug_data_model)

        # Set up the chat window
        chat_frame = self.bottom_notebook.addFrame('Chat')
        chat_frame_2 = chat_frame.addFrame('bottom', 'x', False, borderwidth=1)
        chat_frame_1 = chat_frame.addFrame('top', 'both', True, borderwidth=1)
        self.chat_textbox = chat_frame_1.addTextboxWithScrollbar(
                background='white', borderwidth=1, highlightthickness=1,
                relief='sunken')
        self.chat_entry = chat_frame_2.addEntryWithLabel(
                'Send:', 'bottom', 'x', True, background='white',
                borderwidth=1, highlightthickness=1, relief='sunken')

        self.chat_data_model = TextModel()
        self.chat_textbox.setDataModel(self.chat_data_model)

        # Set up the context menus
        self.section_context_menu = ContextMenu([('Copy', self.onCopyString)])
        self.address_context_menu = ContextMenu([('Copy String', self.onCopyString), ('Copy Value', self.onCopyValue)])
        self.disass_comment_context_menu = ContextMenu([(';  Comment', self.disassComment)])
        self.data_comment_context_menu = ContextMenu([(';  Comment', self.dataComment)])
        self.disass_label_context_menu   = ContextMenu([('n  Rename Label', self.disassRenameLabel)])
        self.data_label_context_menu   = ContextMenu([('n  Rename Label', self.dataRenameLabel)])

        # Force the mouse to always have focus
        self.tk_focusFollowsMouse()

        # Get the appropriate button number based on system
        right_click_button = "<Button-2>" if system() == "Darwin" else "<Button-3>"

        dis_textbox_context_queue = self.app.createCallbackQueue()
        # Create a context manager for the disassembly textbox
        self.disassembly_textbox_context_manager = WidgetContextManager(
                self.app, dis_textbox_context_queue, self.disassembly_textbox, self.PYDA_SEP,
                self.PYDA_BEGL, right_click_button, [
                    (self.PYDA_SECTION, 'darkgreen', self.section_context_menu),
                    (self.PYDA_ADDRESS, 'black', self.address_context_menu),
                    (self.PYDA_MNEMONIC, 'blue', None),
                    (self.PYDA_OP_STR, 'darkblue', None),
                    (self.PYDA_COMMENT, 'darkgreen', self.disass_comment_context_menu),
                    (self.PYDA_LABEL, 'saddle brown', self.disass_label_context_menu),
                    (self.PYDA_BYTES, 'dark gray', None),
                    (self.PYDA_GENERIC, 'black', None),
                    (self.PYDA_ENDL, 'black', self.disass_comment_context_menu)], )

        self.disassembly_textbox.context_manager = self.disassembly_textbox_context_manager

        self.disassembly_textbox.bind('<Key>', self.keyHandler)

        data_textbox_context_queue = self.app.createCallbackQueue()
        # Create a context manager for the data sections textbox
        self.data_textbox_context_manager = WidgetContextManager(
                self.app, data_textbox_context_queue, self.data_sections_textbox, self.PYDA_SEP,
                self.PYDA_BEGL, right_click_button, [
                    (self.PYDA_SECTION, 'darkgreen', None),
                    (self.PYDA_MNEMONIC, 'blue', None),
                    (self.PYDA_OP_STR, 'darkblue', None),
                    (self.PYDA_COMMENT, 'darkgreen', self.data_comment_context_menu),
                    (self.PYDA_LABEL, 'saddle brown', self.data_label_context_menu),
                    (self.PYDA_BYTES, 'dark gray', None),
                    (self.PYDA_GENERIC, 'black', None),
                    (self.PYDA_ENDL, 'black', self.data_comment_context_menu)])

        self.data_sections_textbox.context_manager = self.data_textbox_context_manager

        self.data_sections_textbox.bind('<Key>', self.keyHandler)

        # Redirect stdout to the debug window
        if self.REDIR_STDOUT:
            sys.stdout = StdoutRedirector(self.stdoutMessage)
            print "Stdout is being redirected to here"

Example 129

Project: configman Source File: for_modules.py
    @staticmethod
    def write(source_mapping, output_stream=sys.stdout):
        """This method writes a Python module respresenting all the keys
        and values known to configman.
        """
        # a set of classes, modules and/or functions that are values in
        # configman options.  These values will have to be imported in the
        # module that this method is writing.
        set_of_classes_needing_imports = set()
        # once symbols are imported, they are in the namespace of the module,
        # but that's not where we want them.  We only want them to be values
        # in configman Options.  This set will be used to make a list of
        # these symbols, to forewarn a future configman that reads this
        # module, that it can ignore these symbols. This will prevent that
        # future configman from issuing a "mismatched symbols" warning.
        symbols_to_ignore = set()

        # look ahead to see what sort of imports we're going to have to do
        for key in source_mapping.keys_breadth_first():
            value = source_mapping[key]

            if isinstance(value, Aggregation):
                # Aggregations don't get included, skip on
                continue

            if '.' in key:
                # this indicates that there are things in nested namespaces,
                # we will use the DotDict class to represent namespaces
                set_of_classes_needing_imports.add(DotDict)

            option = None
            if isinstance(value, Option):
                # it's the value inside the option, not the option itself
                # that is of interest to us
                option = value
                value = option.value

            if value is None:
                # we don't need to import anything having to do with None
                continue

            if isclass(value) or ismodule(value) or isfunction(value):
                # we know we need to import any of these types
                set_of_classes_needing_imports.add(value)
            else:
                try:
                    # perhaps the value is an instance of a class?  If so,
                    # we'll likely need to import that class, but only if
                    # we don't have a way to convert a string to that class
                    set_of_classes_needing_imports.add(value.__class__)
                except AttributeError:
                    # it's not a class instance, we can skip on
                    pass

        # for every one of the imports that we're going to have to create
        # we need to know the dotted module pathname and the name of the
        # class/module/function.  This routine makes a list of 3-tuples:
        # class, dotted_module_path, class_name
        class_and_module_path_and_class_name = []
        for a_class in set_of_classes_needing_imports:
            module_path, class_name = get_import_for_type(a_class)
            if (not module_path) and (not class_name):
                continue
            class_and_module_path_and_class_name.append(
                (a_class, module_path, class_name)
            )

        # using the collection of 3-tuples, create a lookup mapping where a
        # class is the key to a 2-tuple of the dotted_module_path & class_name.
        # This is also the appropriate time to detect any name collisions
        # and create a mapping of aliases, so we can resolve name collisions.
        class_name_by_module_path_list = defaultdict(list)
        alias_by_class = {}
        previously_used_names = set()
        for (
            a_class,
            a_module_path,
            class_name
        ) in class_and_module_path_and_class_name:
            if class_name:
                if class_name in previously_used_names:
                    new_class_name_alias = "%s_%s" % (
                        a_module_path.replace('.', '_'),
                        class_name
                    )
                    alias_by_class[a_class] = new_class_name_alias
                    previously_used_names.add(new_class_name_alias)
                else:
                    previously_used_names.add(class_name)
                class_name_by_module_path_list[a_module_path].append(
                    (a_class, class_name)
                )

        # start writing the output module
        print("# generated Python configman file\n", file=output_stream)

        # the first section that we're going to write is imports of the form:
        #     from X import Y
        # and
        #     from X import (
        #         A,
        #         B,
        #     )
        sorted_list = [x.value for x in sorted([OrderableObj(x) for x in
                       class_name_by_module_path_list.keys()])]
        for a_module_path in sorted_list:
            print(a_module_path)
            # if there is no module path, then it is something that we don't
            # need to import.  If the module path begins with underscore then
            # it is private and we ought not step into that mire.  If that
            # causes the output module to fail, it is up to the implementer
            # of the configman option to have created appropriate
            # "from_string" & "to_string" configman Option function references.
            if a_module_path is None or a_module_path.startswith('_'):
                continue
            list_of_class_names = \
                class_name_by_module_path_list[a_module_path]
            if len(list_of_class_names) > 1:
                output_line = "from %s import (\n" % a_module_path
                sorted_list = [x.value for x in sorted([OrderableTuple(x)
                               for x in list_of_class_names])]
                for a_class, a_class_name in sorted_list:
                    if a_class in alias_by_class:
                        output_line = "%s\n    %s as %s," % (
                            output_line,
                            a_class_name,
                            alias_by_class[a_class]
                        )
                        symbols_to_ignore.add(alias_by_class[a_class])
                    else:
                        output_line = "%s    %s,\n" % (
                            output_line,
                            a_class_name
                        )
                        symbols_to_ignore.add(a_class_name)

                output_line = output_line + ')'
                print(output_line.strip(), file=output_stream)
            else:
                a_class, a_class_name = list_of_class_names[0]
                output_line = "from %s import %s" % (
                    a_module_path,
                    a_class_name
                )
                if a_class in alias_by_class:
                    output_line = "%s as %s" % (
                        output_line,
                        alias_by_class[a_class]
                    )
                    symbols_to_ignore.add(alias_by_class[a_class])
                else:
                    symbols_to_ignore.add(a_class_name)
                print(output_line.strip(), file=output_stream)
        print('', file=output_stream)

        # The next section to write will be the imports of the form:
        #     import X
        sorted_list = [x.value for x in sorted([OrderableObj(x) for x in
                       class_name_by_module_path_list.keys()])]
        for a_module_path in sorted_list:
            list_of_class_names = \
                class_name_by_module_path_list[a_module_path]
            a_class, a_class_name = list_of_class_names[0]
            if a_module_path:
                continue
            import_str = ("import %s" % a_class_name).strip()
            symbols_to_ignore.add(a_class_name)
            print(import_str, file=output_stream)

        # See the explanation of 'symbols_to_ignore' above
        if symbols_to_ignore:
            print(
                "\n" \
                "# the following symbols will be ignored by configman when\n" \
                "# this module is used as a value source.  This will\n" \
                "# suppress the mismatch warning since these symbols are\n" \
                "# values for options, not option names themselves.",
                file=output_stream
            )
            print("ignore_symbol_list = [", file=output_stream)
            for a_symbol in sorted(symbols_to_ignore):
                print('    "%s",' % a_symbol, file=output_stream)
            print(']\n', file=output_stream)

        # finally, as the last step, we need to write out the keys and values
        # will be used by a future configman as Options and values.
        sorted_keys = sorted(
            source_mapping.keys_breadth_first(include_dicts=True)
        )
        for key in sorted_keys:
            value = source_mapping[key]
            if isinstance(value, Namespace):
                ValueSource.write_namespace(key, value, output_stream)
            elif isinstance(value, Option):
                ValueSource.write_option(
                    key,
                    value,
                    alias_by_class,
                    output_stream
                )
            elif isinstance(value, Aggregation):
                # skip Aggregations
                continue
            else:
                ValueSource.write_bare_value(key, value, output_stream)
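
ValueSource.write above takes output_stream=sys.stdout as a keyword default, so the same writer can print to the console or into an open file with no branching at the call sites. A small sketch of that signature style (write_settings is illustrative; the comment notes the usual binding-time trade-off of this default):

import sys

def write_settings(settings, output_stream=sys.stdout):
    # Default to stdout but accept any writable stream.  Note the default
    # binds the sys.stdout object at import time, so reassigning sys.stdout
    # later will not affect callers that rely on the default.
    for key in sorted(settings):
        print("%s = %r" % (key, settings[key]), file=output_stream)

write_settings({"db.host": "localhost", "logging.level": "DEBUG"})

with open("settings_snapshot.py", "w") as f:   # same function, redirected to a file
    write_settings({"db.host": "localhost"}, output_stream=f)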

Example 130

Project: Nagstamon Source File: IcingaWeb2.py
    def _get_status(self):
        """
            Get status from Icinga Server - only JSON
        """
        # define CGI URLs for hosts and services
        if self.cgiurl_hosts == self.cgiurl_services == None:
            # services (unknown, warning or critical?)
            self.cgiurl_services = {'hard': self.monitor_cgi_url + '/monitoring/list/services?service_state>0&service_state<=3&service_state_type=1&addColumns=service_last_check&format=json', \
                                    'soft': self.monitor_cgi_url + '/monitoring/list/services?service_state>0&service_state<=3&service_state_type=0&addColumns=service_last_check&format=json'}
            # hosts (up or down or unreachable)
            self.cgiurl_hosts = {'hard': self.monitor_cgi_url + '/monitoring/list/hosts?host_state>0&host_state<=2&host_state_type=1&addColumns=host_last_check&format=json', \
                                 'soft': self.monitor_cgi_url + '/monitoring/list/hosts?host_state>0&host_state<=2&host_state_type=0&addColumns=host_last_check&format=json'}

        # new_hosts dictionary
        self.new_hosts = dict()

        # hosts - mostly the down ones
        # now using JSON output from Icinga
        try:
            for status_type in 'hard', 'soft':   
                # first attempt
                result = self.FetchURL(self.cgiurl_hosts[status_type], giveback='raw')            
                # authentication errors come back with status code 200 too because the
                # returned HTML works fine :-(
                if result.status_code < 400 and\
                   result.result.startswith('<'):
                    # in case of auth error reset HTTP session and try again
                    self.reset_HTTP()
                    result = self.FetchURL(self.cgiurl_hosts[status_type], giveback='raw') 
                    # if it does not work again tell GUI there is a problem
                    if result.status_code < 400 and\
                       result.result.startswith('<'):
                        self.refresh_authentication = True
                        return Result(result=result.result,
                                      error='Authentication error',
                                      status_code=result.status_code)
                
                # purify JSON result of unnecessary control sequence \n
                jsonraw, error, status_code = copy.deepcopy(result.result.replace('\n', '')),\
                                              copy.deepcopy(result.error),\
                                              result.status_code

                if error != '' or status_code >= 400:
                    return Result(result=jsonraw,
                                  error=error,
                                  status_code=status_code)

                # check if any error occurred
                self.check_for_error(jsonraw, error, status_code)

                hosts = json.loads(jsonraw)

                for host in hosts:
                    # make dict of tuples for better reading
                    h = dict(host.items())

                    # host
                    if self.use_display_name_host == False:
                        # according to http://sourceforge.net/p/nagstamon/bugs/83/ it might
                        # be better to use host_name instead of host_display_name
                        # legacy Icinga adjustments
                        if 'host_name' in h: host_name = h['host_name']
                        elif 'host' in h: host_name = h['host']
                    else:
                        # https://github.com/HenriWahl/Nagstamon/issues/46 on the other hand has
                        # problems with that so here we go with extra display_name option
                        host_name = h['host_display_name']

                    # host objects contain service objects
                    if not host_name in self.new_hosts:
                        self.new_hosts[host_name] = GenericHost()
                        self.new_hosts[host_name].name = host_name
                        self.new_hosts[host_name].server = self.name
                        self.new_hosts[host_name].status = self.STATES_MAPPING['hosts'][int(h['host_state'])]
                        self.new_hosts[host_name].last_check = datetime.datetime.fromtimestamp(int(h['host_last_check']))
                        self.new_hosts[host_name].attempt = h['host_attempt']
                        self.new_hosts[host_name].status_information = BeautifulSoup(h['host_output'].replace('\n', ' ').strip(), 'html.parser').text
                        self.new_hosts[host_name].passiveonly = not(int(h['host_active_checks_enabled']))
                        self.new_hosts[host_name].notifications_disabled = not(int(h['host_notifications_enabled']))
                        self.new_hosts[host_name].flapping = int(h['host_is_flapping'])
                        self.new_hosts[host_name].acknowledged = int(h['host_acknowledged'])
                        self.new_hosts[host_name].scheduled_downtime = int(h['host_in_downtime'])
                        self.new_hosts[host_name].status_type = status_type
                        
                        # extra Icinga properties to solve https://github.com/HenriWahl/Nagstamon/issues/192
                        # acknowledge needs host_description and no display name
                        self.new_hosts[host_name].real_name = h['host_name']
       
                        # extra duration needed for calculation
                        duration = datetime.datetime.now() - datetime.datetime.fromtimestamp(int(h['host_last_state_change']))
                        self.new_hosts[host_name].duration = strfdelta(duration, '{days}d {hours}h {minutes}m {seconds}s')
                        
                    del h, host_name
        except:
            import traceback
            traceback.print_exc(file=sys.stdout)

            # set checking flag back to False
            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            return Result(result=result, error=error)

        # services
        try:
            for status_type in 'hard', 'soft':
                result = self.FetchURL(self.cgiurl_services[status_type], giveback='raw')
                # purify JSON result of unnecessary control sequence \n
                jsonraw, error, status_code = copy.deepcopy(result.result.replace('\n', '')),\
                                              copy.deepcopy(result.error),\
                                              result.status_code

                if error != '' or status_code >= 400:
                    return Result(result=jsonraw,
                                  error=error,
                                  status_code=status_code)
                
                # check if any error occurred
                self.check_for_error(jsonraw, error, status_code)

                services = copy.deepcopy(json.loads(jsonraw))

                for service in services:
                    # make dict of tuples for better reading
                    s = dict(service.items())

                    if self.use_display_name_host == False:
                        # according to http://sourceforge.net/p/nagstamon/bugs/83/ it might
                        # be better to use host_name instead of host_display_name
                        # legacy Icinga adjustments
                        if 'host_name' in s: host_name = s['host_name']
                        elif 'host' in s: host_name = s['host']
                    else:
                        # https://github.com/HenriWahl/Nagstamon/issues/46 on the other hand has
                        # problems with that so here we go with extra display_name option
                        host_name = s['host_display_name']

                    # host objects contain service objects
                    # ##if not self.new_hosts.has_key(host_name):
                    if not host_name in self.new_hosts:
                        self.new_hosts[host_name] = GenericHost()
                        self.new_hosts[host_name].name = host_name
                        self.new_hosts[host_name].status = 'UP'
                        # extra Icinga properties to solve https://github.com/HenriWahl/Nagstamon/issues/192
                        # acknowledge needs host_description and no display name
                        self.new_hosts[host_name].real_name = s['host_name']

                    if self.use_display_name_host == False:
                        # legacy Icinga adjustments
                        if 'service_description' in s: service_name = s['service_description']
                        elif 'description' in s: service_name = s['description']
                        elif 'service' in s: service_name = s['service']
                    else:
                        service_name = s['service_display_name']

                    # if a service does not exist create its object
                    if not service_name in self.new_hosts[host_name].services:
                        self.new_hosts[host_name].services[service_name] = GenericService()
                        self.new_hosts[host_name].services[service_name].host = host_name
                        self.new_hosts[host_name].services[service_name].name = service_name
                        self.new_hosts[host_name].services[service_name].server = self.name
                        self.new_hosts[host_name].services[service_name].status = self.STATES_MAPPING['services'][int(s['service_state'])]
                        self.new_hosts[host_name].services[service_name].last_check = datetime.datetime.fromtimestamp(int(s['service_last_check']))                      
                        self.new_hosts[host_name].services[service_name].attempt = s['service_attempt']
                        self.new_hosts[host_name].services[service_name].status_information = BeautifulSoup(s['service_output'].replace('\n', ' ').strip(), 'html.parser').text
                        self.new_hosts[host_name].services[service_name].passiveonly = not(int(s['service_active_checks_enabled']))
                        self.new_hosts[host_name].services[service_name].notifications_disabled = not(int(s['service_notifications_enabled']))
                        self.new_hosts[host_name].services[service_name].flapping = int(s['service_is_flapping'])
                        self.new_hosts[host_name].services[service_name].acknowledged = int(s['service_acknowledged'])
                        self.new_hosts[host_name].services[service_name].scheduled_downtime = int(s['service_in_downtime'])
                        self.new_hosts[host_name].services[service_name].status_type = status_type
                        
                        # extra Icinga properties to solve https://github.com/HenriWahl/Nagstamon/issues/192
                        # acknowledge needs service_description and no display name
                        self.new_hosts[host_name].services[service_name].real_name = s['service_description']
                        
                        # extra duration needed for calculation
                        duration = datetime.datetime.now() - datetime.datetime.fromtimestamp(int(s['service_last_state_change']))
                        self.new_hosts[host_name].services[service_name].duration = strfdelta(duration, '{days}d {hours}h {minutes}m {seconds}s')                      
                        
                    del s, host_name, service_name
        except:

            import traceback
            traceback.print_exc(file=sys.stdout)

            # set checking flag back to False
            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            return Result(result=result, error=error)

        # some cleanup
        del jsonraw, error, hosts, services

        # dummy return in case all is OK
        return Result()
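
A pattern worth noting above is the error path: when status parsing fails, the full traceback is written to standard output with traceback.print_exc(file=sys.stdout) before the error is wrapped and returned. A minimal, self-contained sketch of that idiom (parse_status and safe_parse are invented names, not part of Nagstamon):

import sys
import traceback

def parse_status(raw):
    # stand-in for the real parsing work; raises on bad input
    return int(raw)

def safe_parse(raw):
    try:
        return parse_status(raw)
    except Exception:
        # dump the full traceback to stdout for interactive debugging,
        # then fall back to a sentinel instead of crashing
        traceback.print_exc(file=sys.stdout)
        return None

print(safe_parse("42"))    # 42
print(safe_parse("oops"))  # traceback printed to stdout, then None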

Example 131

Project: mysql-utilities Source File: serverinfo.py
def show_server_info(servers, options):
    """Show server information for a list of servers

    This method will gather information about a running server. If the
    show_defaults option is specified, the method will also read the
    configuration file and return a list of the server default settings.

    If the format option is set, the output will be in the format specified.

    If the no_headers option is set, the output will not have a header row (no
    column names) except for format = vertical.

    If the basedir and start options are set, the method will attempt to start
    the server in read only mode to get the information. Specifying only
    basedir will not start the server. The extra start option is designed to
    make sure the user wants to start the offline server. The user may not wish
    to do this if there are certain error conditions and/or logs in place that
    may be overwritten.

    servers[in]       list of server connections in the form
                      <user>:<password>@<host>:<port>:<socket>
    options[in]       dictionary of options (no_headers, format, basedir,
                      start, show_defaults)

    Returns tuple ((server information), defaults)
    """
    no_headers = options.get("no_headers", False)
    fmt = options.get("format", "grid")
    show_defaults = options.get("show_defaults", False)
    basedir = options.get("basedir", None)
    datadir = options.get("datadir", None)
    start = options.get("start", False)
    show_servers = options.get("show_servers", 0)

    if show_servers:
        if os.name == 'nt':
            ports = options.get("ports", "3306:3333")
            start_p, end_p = ports.split(":")
            _show_running_servers(start_p, end_p)
        else:
            _show_running_servers()

    ssl_dict = {}
    ssl_dict['ssl_cert'] = options.get("ssl_cert", None)
    ssl_dict['ssl_ca'] = options.get("ssl_ca", None)
    ssl_dict['ssl_key'] = options.get("ssl_key", None)
    ssl_dict['ssl'] = options.get("ssl", None)

    row_dict_lst = []
    warnings = []
    server_val = {}
    for server in servers:
        new_server = None
        try:
            test_connect(server, throw_errors=True, ssl_dict=ssl_dict)
        except UtilError as util_error:
            conn_dict = get_connection_dictionary(server, ssl_dict=ssl_dict)
            server1 = Server(options={'conn_info': conn_dict})
            server_is_off = False
            # If we got errno 2002 it means we cannot connect through the
            # given socket.
            if util_error.errno == CR_CONNECTION_ERROR:
                socket = conn_dict.get("unix_socket", "")
                if socket:
                    msg = ("Unable to connect to server using socket "
                           "'{0}'.".format(socket))
                    if os.path.isfile(socket):
                        err_msg = ["{0} Socket file is not valid.".format(msg)]
                    else:
                        err_msg = ["{0} Socket file does not "
                                   "exist.".format(msg)]
            # If we got errno 2003 and we do not have a
            # socket, check instead whether the server is localhost.
            elif (util_error.errno == CR_CONN_HOST_ERROR and
                    server1.is_alias("localhost")):
                server_is_off = True
            # If we got errno 1045 it means Access denied,
            # notify the user if a password was used or not.
            elif util_error.errno == ER_ACCESS_DENIED_ERROR:
                use_pass = 'YES' if conn_dict['passwd'] else 'NO'
                err_msg = ("Access denied for user '{0}'@'{1}' using "
                           "password: {2}".format(conn_dict['user'],
                                                  conn_dict['host'],
                                                  use_pass))
            # Use the error message from the connection attempt.
            else:
                err_msg = [util_error.errmsg]
            # Propose starting a cloned server to extract the info. We cannot
            # predict whether the server is really off, but we can do so in
            # case of a socket error, or if one of the related parameters
            # was given.
            if server_is_off or basedir or datadir or start:
                err_msg = ["Server is offline. To connect, "
                           "you must also provide "]

                opts = ["basedir", "datadir", "start"]
                for opt in tuple(opts):
                    try:
                        if locals()[opt] is not None:
                            opts.remove(opt)
                    except KeyError:
                        pass
                if opts:
                    err_msg.append(", ".join(opts[0:-1]))
                    if len(opts) > 1:
                        err_msg.append(" and the ")
                    err_msg.append(opts[-1])
                    err_msg.append(" option")
                    raise UtilError("".join(err_msg))

            if not start:
                raise UtilError("".join(err_msg))
            else:
                try:
                    server_val = parse_connection(server, None, options)
                except:
                    raise UtilError("Source connection values invalid"
                                    " or cannot be parsed.")
                new_server = _start_server(server_val, basedir,
                                           datadir, options)
        info_dict, defaults = _server_info(server, show_defaults, options)
        warnings.extend(info_dict['warnings'])
        if info_dict:
            row_dict_lst.append(info_dict)
        if new_server:
            # Need to stop the server!
            new_server.disconnect()
            _stop_server(server_val, basedir, options)

    # Get the row values stored in the dictionaries
    rows = [[row_dict[key] for key in _COLUMNS] for row_dict in row_dict_lst]

    print_list(sys.stdout, fmt, _COLUMNS, rows, no_headers)
    if warnings:
        print("\n# List of Warnings: \n")
        for warning in warnings:
            print("WARNING: {0}\n".format(warning))

    # Print the default configurations.
    if show_defaults and len(defaults) > 0:
        for row in defaults:
            print("  {0}".format(row))

Example 132

Project: dita-generator Source File: cli.py
def main():
    """Main method."""
    __topic_type = None
    __parent_topic_type = None
    __remove = {}
    __global_atts = None
    __domains = []
    
    # new arguments
    __parser = OptionParser(usage="usage: %prog [options] type topic id title [root]",
                            description="DITA Generator.")
    __parser.add_option("-d", "--domain", action="append", dest="domains",
                        help="Add domain DOMAIN. Multiple occurrances allowed.", metavar="DOMAIN")
    __parser.add_option("-v", "--version", dest="version", choices=("1.1", "1.2"),
                        help="DITA version. Defaults to 1.1.", metavar="VERSION")
    __parser.set_defaults(version="1.1")
    __parser.add_option("-o", "--owner", dest="owner",
                        help="Owner in FPI.", metavar="OWNER")
    __parser.add_option("-u", "--system-identifier", dest="system_identifier",
                        help="System identifier base URI.", metavar="SYSTEM_IDENTIFIER")
    __parser.add_option("-s", "--stylesheet", action="append",  dest="stylesheet", choices=("docbook", "eclipse.plugin", "fo", "rtf", "xhtml"),
                        help="Stylesheet skeleton. Multiple occurrances allowed.", metavar="STYLE")
    __parser.add_option("--plugin-name", dest="plugin_name",
                        help="Plugin name. Defaults to plugin ID.", metavar="PLUGIN_NAME")
    __parser.add_option("--plugin-version", dest="plugin_version",
                        help="Plugin version", metavar="PLUGIN_VERSION")
    __parser.add_option("-n", "--nested", dest="nested", action="store_true",
                        help="Support nested topics.")
    __parser.set_defaults(nested=False)
    __group = OptionGroup(__parser, "Advanced Options")
    __group.add_option("--format", dest="format", choices=("dtd", "mod", "ent", "plugin"),
                       help="Output format, one of dtd, mod, ent, zip, plugin. Defaults to plugin.", metavar="FORMAT")
    __parser.set_defaults(format="plugin")
    __parser.add_option_group(__group)
    (options, args) = __parser.parse_args()
    # read arguments
    if len(args) >= 1:
        if args[0] in ditagen.OUTPUT_MAP:
            __topic_type_class = ditagen.OUTPUT_MAP[args[0]]
        else:
            __parser.error("output type %s not found, supported types: %s."
                           % (args[0], ", ".join(ditagen.OUTPUT_MAP.keys())))
    else:
        __parser.error("output type not set")
        
    if len(args) >= 2:
        if args[1] in ditagen.TOPIC_MAP[options.version]:
            __parent_topic_type = ditagen.TOPIC_MAP[options.version][args[1]]()
        else:
            __parser.error("topic type %s not found, supported topics: %s."
                           % (args[1], ", ".join(TOPIC_MAP[options.version].keys())))
    else:
        __parser.error("topic not set")
        
    if len(args) >= 3:
        options.id = args[2]
    else:
        __parser.error("id not set")
        
    if len(args) >= 4:
        options.title = args[3]
    else:
        __parser.error("title not set")
        
    if len(args) >= 5:
        options.root = args[4]
    elif (args[0] == "specialization"):
        __parser.error("root not set")

    if options.domains != None:
        for __d in options.domains:
            if __d in ditagen.DOMAIN_MAP[options.version]:
                __domains.append(ditagen.DOMAIN_MAP[options.version][__d]())
            else:
                __parser.error("domain %s not found, supported domains: %s.".format(__d, ", ".join(ditagen.DOMAIN_MAP[options.version].keys())))
    
    #if  hasattr(options, "root") and options.root is not None:
    __topic_type = __topic_type_class(options.id, options.title, __parent_topic_type,
                           options.owner, file=options.id) #options.root
    if type(__topic_type) == ditagen.dita.SpecializationType:
        __topic_type.root = ditagen.dita.create_element(__topic_type, options.root, options.id)
    #elif options.format in ("mod", "ent", "zip"):
    #    __parser.error("cannot generate %s for base topic type.".format(options.format))

    # run generator
    if options.format == u"plugin":
        #__dita_gen = UrnDitaGenerator()
        __dita_gen = ditagen.generator.PluginGenerator()
        __dita_gen.out = sys.stdout
        __dita_gen.topic_type = __topic_type
        __dita_gen.domains = __domains
        __dita_gen.nested = options.nested
        __dita_gen.version = options.version
        #if hasattr(options, "title") and  options.title:
        #    __dita_gen.set_title(options.title)
        if options.stylesheet:
            __dita_gen.set_stylesheet(options.stylesheet)
        if options.plugin_name:
            __dita_gen.plugin_name = options.plugin_name
        if options.plugin_version:
            __dita_gen.plugin_version = options.plugin_version

        #__dita_gen.generate_public_identifier = generate_urn_identifier
        __dita_gen.generate_plugin()
    else:
        __dita_gen = ditagen.generator.DitaGenerator()
        __dita_gen.out = sys.stdout
        __dita_gen.topic_type = __topic_type
        __dita_gen.domains = __domains
        __dita_gen.nested = options.nested
        __dita_gen.version = options.version
        #if hasattr(options, "title") and  options.title:
        #    __dita_gen.set_title(options.title)
        
        if options.format == u"dtd":
            #__file_name = __dita_gen.get_file_name(__topic_type, __root, __format)
            __dita_gen.generate_dtd()
        elif options.format == u"mod":
            #__file_name = __dita_gen.get_file_name(__topic_type, __root, __format)
            __dita_gen.generate_mod()
        elif options.format == u"ent":
            #__file_name = __dita_gen.get_file_name(__topic_type, __root, __format)
            __dita_gen.generate_ent()
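
Both branches above point the generator's out attribute at sys.stdout, so the generated DTD/module/plugin content goes to standard output and can be redirected by the shell. A minimal sketch of that attribute-based pattern (TextGenerator is a stand-in class, not the ditagen API):

import sys

class TextGenerator(object):
    """Toy generator that writes to whatever stream 'out' points at."""
    def __init__(self):
        self.out = sys.stdout  # default destination; caller may override

    def generate(self, title):
        self.out.write("<topic><title>%s</title></topic>\n" % title)

gen = TextGenerator()
gen.generate("Installing")          # goes to the terminal

with open("topic.xml", "w") as f:   # or redirect to a file instead
    gen.out = f
    gen.generate("Installing")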

Example 133

Project: PySnip Source File: run.py
    def __init__(self, interface, config):
        self.config = config
        if config.get('random_rotation', False):
            self.map_rotator_type = random_choice_cycle
        else:
            self.map_rotator_type = itertools.cycle
        self.default_time_limit = config.get('default_time_limit', 20.0)
        self.default_cap_limit = config.get('cap_limit', 10.0)
        self.advance_on_win = int(config.get('advance_on_win', False))
        self.win_count = itertools.count(1)
        self.bans = NetworkDict()
        try:
            self.bans.read_list(json.load(open(os.path.join(RESOURCE_DIR,'bans.txt'), 'rb')))
        except IOError:
            pass
        self.hard_bans = set() # possible DDoS'ers are added here
        self.player_memory = deque(maxlen = 100)
        self.config = config
        if len(self.name) > MAX_SERVER_NAME_SIZE:
            print '(server name too long; it will be truncated to "%s")' % (
                self.name[:MAX_SERVER_NAME_SIZE])
        self.respawn_time = config.get('respawn_time', 8)
        self.respawn_waves = config.get('respawn_waves', False)
        game_mode = config.get('game_mode', 'ctf')
        if game_mode == 'ctf':
            self.game_mode = CTF_MODE
        elif game_mode == 'tc':
            self.game_mode = TC_MODE
        elif self.game_mode is None:
            raise NotImplementedError('invalid game mode: %s' % game_mode)
        self.game_mode_name = game_mode
        team1 = config.get('team1', {})
        team2 = config.get('team2', {})
        self.team1_name = team1.get('name', 'Blue')
        self.team2_name = team2.get('name', 'Green')
        self.team1_color = tuple(team1.get('color', (0, 0, 196)))
        self.team2_color = tuple(team2.get('color', (0, 196, 0)))
        self.friendly_fire = config.get('friendly_fire', True)
        self.friendly_fire_time = config.get('grief_friendly_fire_time', 2.0)
        self.spade_teamkills_on_grief = config.get('spade_teamkills_on_grief',
            False)
        self.fall_damage = config.get('fall_damage', True)
        self.teamswitch_interval = config.get('teamswitch_interval', 0)
        self.max_players = config.get('max_players', 20)
        self.melee_damage = config.get('melee_damage', 100)
        self.max_connections_per_ip = config.get('max_connections_per_ip', 0)
        self.passwords = config.get('passwords', {})
        self.server_prefix = encode(config.get('server_prefix', '[*]'))
        self.time_announcements = config.get('time_announcements',
            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 60, 120, 180, 240, 300, 600,
             900, 1200, 1800, 2400, 3000])
        self.balanced_teams = config.get('balanced_teams', None)
        self.login_retries = config.get('login_retries', 1)
        
        # voting configuration
        self.default_ban_time = config.get('default_ban_duration', 24*60)
        
        self.speedhack_detect = config.get('speedhack_detect', True)
        if config.get('user_blocks_only', False):
            self.user_blocks = set()
        self.set_god_build = config.get('set_god_build', False)
        self.debug_log = config.get('debug_log', False)
        if self.debug_log:
            pyspades.debug.open_debug_log(os.path.join(RESOURCE_DIR,'debug.log'))
        ssh = config.get('ssh', {})
        if ssh.get('enabled', False):
            from ssh import RemoteConsole
            self.remote_console = RemoteConsole(self, ssh)
        irc = config.get('irc', {})
        if irc.get('enabled', False):
            from irc import IRCRelay
            self.irc_relay = IRCRelay(self, irc)
        status = config.get('status_server', {})
        if status.get('enabled', False):
            from statusserver import StatusServerFactory
            self.status_server = StatusServerFactory(self, status)
        publish = config.get('ban_publish', {})
        if publish.get('enabled', False):
            from banpublish import PublishServer
            self.ban_publish = PublishServer(self, publish)
        ban_subscribe = config.get('ban_subscribe', {})
        if ban_subscribe.get('enabled', True):
            import bansubscribe
            self.ban_manager = bansubscribe.BanManager(self, ban_subscribe)
        # logfile location in resource dir if not abs path given
        logfile = choose_path(RESOURCE_DIR,config.get('logfile', None))
        if logfile is not None and logfile.strip():
            if config.get('rotate_daily', False):
                create_filename_path(logfile)
                logging_file = DailyLogFile(logfile, '.')
            else:
                logging_file = open_create(logfile, 'a')
            log.addObserver(log.FileLogObserver(logging_file).emit)
            log.msg('pyspades server started on %s' % time.strftime('%c'))
        log.startLogging(sys.stdout) # force twisted logging
        
        self.start_time = reactor.seconds()
        self.end_calls = []
        self.console = create_console(self)

        # check for default password usage
        for group, passwords in self.passwords.iteritems():
            if group in DEFAULT_PASSWORDS:
                for password in passwords:
                    if password in DEFAULT_PASSWORDS[group]:
                        print ("WARNING: FOUND DEFAULT PASSWORD '%s'" \
                               " IN GROUP '%s'" % (password, group))
        
        for password in self.passwords.get('admin', []):
            if not password:
                self.everyone_is_admin = True
        commands.rights.update(config.get('rights', {}))
        
        port = self.port = config.get('port', 32887)
        ServerProtocol.__init__(self, port, interface)
        self.host.receiveCallback = self.receive_callback
        ret = self.set_map_rotation(config['maps'])
        if not ret:
            print 'Invalid map in map rotation (%s), exiting.' % ret.map
            raise SystemExit

        self.update_format()
        self.tip_frequency = config.get('tip_frequency', 0)
        if self.tips is not None and self.tip_frequency > 0:
            reactor.callLater(self.tip_frequency * 60, self.send_tip)

        self.master = config.get('master', True)
        self.set_master()
        
        get_external_ip(config.get('network_interface', '')).addCallback(
            self.got_external_ip)
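
The server unconditionally routes Twisted's logging to standard output with log.startLogging(sys.stdout), on top of any file observer configured just before it. A minimal standalone sketch of that call (assumes Twisted is installed):

import sys
from twisted.python import log

# route all Twisted log messages to standard output
log.startLogging(sys.stdout)
log.msg("server started")

By default startLogging() also redirects sys.stdout and sys.stderr into the logging system (setStdout=1), which is usually what a long-running server wants.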

Example 134

Project: ganga Source File: dm_util.py
Function: get_pfns
def get_pfns(lfc_host, guids, nthread=10, dummyOnly=False, debug=False):
    '''
    getting pfns and checksum type/value corresponding to the given list of files represented
    by guids.

    @param lfc_host specifies the host of the local file catalogue
    @param guids is a list of GUIDs
    @param nthread is the amount of parallel threads for querying the LFC, 10 by default
    @param dummyOnly indicates if this routine returns only the dummy registries, default is False
    @param debug indicates if debugging messages are printed

    @return a dictionary of PFNs in the following format:

        pfns = { guid_1: [replica_1_pfn, replica_2_pfn, ...],
                 guid_2: [replica_1_pfn, replica_2_pfn, ...],
                 ... }

        and a dictionary of checksum type/value in the following format:

        csum = { guid_1: {'csumtype': checksum_type, 'csumvalue': checksum_value},
                 guid_2: {'csumtype': checksum_type, 'csumvalue': checksum_value},
                 ... }

    It uses the LFC multi-thread library lfcthr; each worker thread works
    on a query of 1000 LFC entries.

    If dummyOnly, only the PFNs that are duplicated on the same SE are
    returned (determined by the SE hostname parsed from the PFNs).
    '''

    print >> sys.stdout, 'resolving physical locations of replicas'

    try:
        import lfcthr
    except ImportError as exp:
        print >> sys.stderr, '%s' % str(exp)
        print >> sys.stderr, 'unable to load LFC python module. Please check LCG UI environment.'
        print >> sys.stderr, 'python path: %s' % repr(sys.path)
        return {}

    pfns = {}
    csum = {}

    # divide guids into chunks if the list is too large
    chunk_size = 1000
    num_chunks = len(guids) / chunk_size
    if len(guids) % chunk_size > 0:
        num_chunks += 1

    chunk_offsets = []
    for i in range(num_chunks):
        chunk_offsets.append(i*chunk_size)

    # backup the current LFC_HOST
    lfc_backup = None
    try:
        lfc_backup = os.environ['LFC_HOST']
    except:
        pass

    # set to use a proper LFC_HOST
    os.putenv('LFC_HOST', lfc_host)

    def _resolveDummy(_pfns):
        '''resolving the dummy PFNs based on SE hostname'''
        _pfns_dummy = {}
        for _guid in _pfns.keys():
            _replicas = _pfns[_guid]
            _replicas.sort()
            seCache  = None
            pfnCache = None
            id = -1
            for _pfn in _replicas:
                id += 1
                _se = urisplit(_pfn)[1]
                if _se != seCache:
                    seCache  = _se
                    pfnCache = _pfn
                else:
                    # keep the dummy PFN
                    if _guid not in _pfns_dummy:
                        _pfns_dummy[_guid] = [pfnCache]
                    _pfns_dummy[_guid].append(_pfn)
        return _pfns_dummy

    ## setup worker queue for LFC queries
    wq = Queue(len(chunk_offsets))
    for offset in chunk_offsets:
        wq.put(offset)

    mylock = Lock()
    def worker(id):
        # try to connect to LFC
        if lfcthr.lfc_startsess('', '') == 0:
            while not wq.empty():
                try:
                    idx_beg = wq.get(block=True, timeout=1)
                    idx_end = idx_beg + chunk_size
                    if idx_end > len(guids):
                        idx_end = len(guids)

                    result, list1 = lfcthr.lfc_getreplicas(guids[idx_beg:idx_end],"")

                    if len(list1) > 0:
                        ## fill up global pfns dictionary and global csum dictionary
                        mylock.acquire()
                        for s in list1:
                            if s != None:
                                if s.sfn:
                                    if s.guid not in pfns:
                                        pfns[s.guid] = []
                                    pfns[s.guid].append(s.sfn)
                                    csum[s.guid] = {'csumtype':'', 'csumvalue':''}
                                if s.csumtype:
                                    csum[s.guid]['csumtype'] = s.csumtype
                                if s.csumvalue:
                                    csum[s.guid]['csumvalue'] = s.csumvalue
                        mylock.release()
                except Empty:
                    pass
            # close the LFC session
            lfcthr.lfc_endsess()
        else:
            print >> sys.stderr, 'cannot connect to LFC'

    # initialize lfcthr
    lfcthr.init()

    # prepare and run the query threads
    threads = []
    for i in range(nthread):
        t = Thread(target=worker, kwargs={'id': i})
        t.setDaemon(False)
        threads.append(t)

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    if dummyOnly:
        pfns = _resolveDummy(pfns)

    # roll back to the original LFC_HOST setup in the environment
    if lfc_backup:
        os.putenv('LFC_HOST', lfc_backup)

    return pfns, csum
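
This example uses the Python 2 form print >> sys.stdout, ... for normal messages and print >> sys.stderr, ... for errors. For comparison, the Python 3 spelling passes the stream through the file keyword:

import sys

# Python 3 equivalent of "print >> sys.stdout, msg" and "print >> sys.stderr, msg"
print("resolving physical locations of replicas", file=sys.stdout)
print("cannot connect to LFC", file=sys.stderr)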

Example 135

Project: fusioncatcher Source File: analyze_splits_sam.py
def merge_local_alignment_sam(psl_in, psl_ou, ids_ou = None, ref_ou = None, min_clip = 10, remove_extra = False):
    #
    psl = []
    fou = None
    if psl_ou == '-':
        fou = sys.stdout
    else:
        fou = open(psl_ou,'w')

    limit_psl = 10**5
    
    xi = 0

    for bucket in chunks(psl_in, min_count = 2, ids_out = ids_ou, ref_out = ref_ou, clip_size = min_clip):
    
        xi = 0

        for box in itertools.combinations(bucket,2):
            xi += 1
            
            if xi > 100000: # too many combinations
                break

            if box[0][psl_strand] == box[1][psl_strand]:

                merged = None

                temp = box[0][:]

                r1_start = int(box[0][psl_qStart])
                r2_start = int(box[1][psl_qStart])
                if r1_start > r2_start:
                    box = (box[1],box[0])

                #r1_start = int(box[0][psl_qStart])# redundant
                r1_end = int(box[0][psl_qEnd])
                #r2_start = int(box[1][psl_qStart])# redundant
                r2_end = int(box[1][psl_qEnd])

                t1_start = int(box[0][psl_tStart])
                t1_end = int(box[0][psl_tEnd])
                t2_start = int(box[1][psl_tStart])
                t2_end = int(box[1][psl_tEnd])

                if t1_start > t2_start:
                    continue

                wiggle_gap = 9
                wiggle_overlap = 17
                if r1_end + wiggle_gap > r2_start and r1_end < r2_start:
                    dif = r2_start - r1_end

                    # extend the first
                    #box[0][psl_matches] = str(int(box[0][psl_matches]))
                    #box[0][psl_misMatches] = str(int(box[0][psl_misMatches]) + dif)

                    box[0][psl_qEnd] = str(int(box[0][psl_qEnd]) + dif)
                    box[0][psl_tEnd] = str(int(box[0][psl_tEnd]) + dif)

                    t = box[0][psl_blockSizes].split(',')
                    t[-2] = str(int(t[-2]) + dif)
                    box[0][psl_blockSizes] = ','.join(t)

                    # recompute 1
                    r1_start = int(box[0][psl_qStart])
                    r1_end = int(box[0][psl_qEnd])

                    t1_start = int(box[0][psl_tStart])
                    t1_end = int(box[0][psl_tEnd])

                elif r1_end > r2_start and r1_end < r2_start + wiggle_overlap:
                    dif = r1_end - r2_start
                    
                    if r2_end - r2_start - dif > min_clip:
                        # cut the second
                        box[1][psl_matches] = str(int(box[1][psl_matches]) - dif)
                        box[1][psl_misMatches] = str(int(box[1][psl_misMatches]) + dif)

                        box[1][psl_qStart] = str(int(box[1][psl_qStart]) + dif)
                        box[1][psl_tStart] = str(int(box[1][psl_tStart]) + dif)

                        t = box[1][psl_blockSizes].split(',')
                        t[0] = str(int(t[0]) - dif)
                        box[1][psl_blockSizes] = ','.join(t)

                        t = box[1][psl_qStarts].split(',')
                        t[0] = str(int(t[0]) + dif)
                        box[1][psl_qStarts] = ','.join(t)

                        t = box[1][psl_tStarts].split(',')
                        t[0] = str(int(t[0]) + dif)
                        box[1][psl_tStarts] = ','.join(t)

                        # recompute 2
                        r2_start = int(box[1][psl_qStart])
                        r2_end = int(box[1][psl_qEnd])

                        t2_start = int(box[1][psl_tStart])
                        t2_end = int(box[1][psl_tEnd])
                    else:

                        box[0][psl_matches] = str(int(box[0][psl_matches]) - dif)
                        box[0][psl_misMatches] = str(int(box[0][psl_misMatches]) + dif)

                        box[0][psl_qEnd] = str(int(box[0][psl_qEnd]) - dif)
                        box[0][psl_tEnd] = str(int(box[0][psl_tEnd]) - dif)

                        t = box[0][psl_blockSizes].split(',')
                        t[-2] = str(int(t[-2]) - dif)
                        box[0][psl_blockSizes] = ','.join(t)

                        # recompute 1
                        r1_start = int(box[0][psl_qStart])
                        r1_end = int(box[0][psl_qEnd])

                        t1_start = int(box[0][psl_tStart])
                        t1_end = int(box[0][psl_tEnd])



                if r1_end <= r2_start and t1_end <= t2_start: #and box[0][psl_strand] == "+" :

                    temp[psl_matches] = int(box[0][psl_matches]) + int(box[1][psl_matches])
                    temp[psl_misMatches] = int(box[0][psl_misMatches]) - int(box[1][psl_matches])

                    temp[psl_qNumInsert] = int(box[0][psl_qNumInsert]) + int(box[1][psl_qNumInsert])
                    temp[psl_qBaseInsert] = int(box[0][psl_qBaseInsert]) + int(box[1][psl_qBaseInsert])
                    temp[psl_tNumInsert] = int(box[0][psl_tNumInsert]) + int(box[1][psl_tNumInsert])
                    temp[psl_tBaseInsert] = int(box[0][psl_tBaseInsert]) + int(box[1][psl_tBaseInsert])

                    temp[psl_qStart] = r1_start
                    temp[psl_qEnd] = r2_end

                    temp[psl_tStart] = t1_start
                    temp[psl_tEnd] = t2_end

                    temp[psl_blockCount] = int(box[0][psl_blockCount]) + int(box[1][psl_blockCount])
                    temp[psl_blockSizes] = box[0][psl_blockSizes] + box[1][psl_blockSizes]

                    temp[psl_qStarts] = box[0][psl_qStarts] + box[1][psl_qStarts]

                    temp[psl_tStarts] = box[0][psl_tStarts] + box[1][psl_tStarts]
                    temp[psl_tNumInsert] = '1'

                    merged = temp

#                elif r1_end <= r2_start and box[0][psl_strand] == "-" and t2_end <= t1_start:
#
#                    temp[psl_matches] = int(box[0][psl_matches]) + int(box[1][psl_matches])
#                    temp[psl_misMatches] = int(box[0][psl_misMatches]) - int(box[1][psl_matches])
#
#                    temp[psl_qNumInsert] = int(box[0][psl_qNumInsert]) + int(box[1][psl_qNumInsert])
#                    temp[psl_qBaseInsert] = int(box[0][psl_qBaseInsert]) + int(box[1][psl_qBaseInsert])
#                    temp[psl_tNumInsert] = int(box[0][psl_tNumInsert]) + int(box[1][psl_tNumInsert])
#                    temp[psl_tBaseInsert] = int(box[0][psl_tBaseInsert]) + int(box[1][psl_tBaseInsert])
#
#                    temp[psl_qStart] = r1_start
#                    temp[psl_qEnd] = r2_end
#
#                    temp[psl_tStart] = t2_start
#                    temp[psl_tEnd] = t1_end
#
#                    temp[psl_blockCount] = int(box[0][psl_blockCount]) + int(box[1][psl_blockCount])
#                    temp[psl_blockSizes] = box[1][psl_blockSizes] + box[0][psl_blockSizes]
#
#                    temp[psl_qStarts] = box[0][psl_qStarts] + box[1][psl_qStarts]
#
#                    temp[psl_tStarts] = box[1][psl_tStarts] + box[0][psl_tStarts]
#                    temp[psl_tNumInsert] = '1'
#
#                    merged = temp

                if merged:
                    gc.disable()
                    psl.append(map(str,merged))
                    gc.enable()

                    if len(psl) >= limit_psl:
                        if remove_extra:
                            for line in psl:
                                line[9] = line[9].partition("__")[0]
                        fou.writelines(['\t'.join(line)+'\n' for line in psl])
                        psl = []
    # output PSL
    if psl:
        for line in psl:
            #pass
            line[9] = line[9].partition("__")[0]
        fou.writelines(['\t'.join(line)+'\n' for line in psl])
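
The function above treats the output path '-' as an alias for standard output, so the script works both in a shell pipeline and when writing to a real file. A compact sketch of that convention, with a hypothetical open_output() helper:

import sys

def open_output(path):
    # '-' means standard output; anything else is opened as a regular file
    return sys.stdout if path == '-' else open(path, 'w')

fou = open_output('-')
fou.writelines(["line1\n", "line2\n"])
if fou is not sys.stdout:
    fou.close()  # never close sys.stdout itself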

Example 136

Project: bacpypes Source File: constructeddata.py
Function: arrayof
def ArrayOf(klass):
    """Function to return a class that can encode and decode a list of
    some other type."""
    global _array_of_map
    global _array_of_classes, _sequence_of_classes

    # if this has already been built, return the cached one
    if klass in _array_of_map:
        return _array_of_map[klass]

    # no ArrayOf(ArrayOf(...)) allowed
    if klass in _array_of_classes:
        raise TypeError("nested arrays disallowed")
    # no ArrayOf(SequenceOf(...)) allowed
    if klass in _sequence_of_classes:
        raise TypeError("arrays of SequenceOf disallowed")

    # define a generic class for arrays
    class ArrayOf(Array):

        subtype = None

        def __init__(self, value=None):
            if value is None:
                self.value = [0]
            elif isinstance(value, list):
                self.value = [len(value)]
                self.value.extend(value)
            else:
                raise TypeError("invalid constructor datatype")

        def append(self, value):
            if issubclass(self.subtype, Atomic):
                pass
            elif issubclass(self.subtype, AnyAtomic) and not isinstance(value, Atomic):
                raise TypeError("instance of an atomic type required")
            elif not isinstance(value, self.subtype):
                raise TypeError("%s value required" % (self.subtype.__name__,))
            self.value.append(value)
            self.value[0] = len(self.value) - 1

        def __len__(self):
            return self.value[0]

        def __getitem__(self, item):
            # no wrapping index
            if (item < 0) or (item > self.value[0]):
                raise IndexError("index out of range")

            return self.value[item]

        def __setitem__(self, item, value):
            # no wrapping index
            if (item < 1) or (item > self.value[0]):
                raise IndexError("index out of range")

            # special length handling for index 0
            if item == 0:
                if value < self.value[0]:
                    # trim
                    self.value = self.value[0:value + 1]
                elif value > self.value[0]:
                    # extend
                    self.value.extend( [None] * (value - self.value[0]) )
                else:
                    return
                self.value[0] = value
            else:
                self.value[item] = value

        def __delitem__(self, item):
            # no wrapping index
            if (item < 1) or (item > self.value[0]):
                raise IndexError("index out of range")

            # delete the item and update the length
            del self.value[item]
            self.value[0] -= 1

        def index(self, value):
            # only search through values
            for i in range(1, self.value[0] + 1):
                if value == self.value[i]:
                    return i

            # not found
            raise ValueError("%r not in array" % (value,))

        def encode(self, taglist):
            if _debug: ArrayOf._debug("(%r)encode %r", self.__class__.__name__, taglist)

            for value in self.value[1:]:
                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(value)

                    # build a tag and encode the data into it
                    tag = Tag()
                    helper.encode(tag)

                    # now encode the tag
                    taglist.append(tag)
                elif isinstance(value, self.subtype):
                    # it must have its own encoder
                    value.encode(taglist)
                else:
                    raise TypeError("%s must be a %s" % (value, self.subtype.__name__))

        def decode(self, taglist):
            if _debug: ArrayOf._debug("(%r)decode %r", self.__class__.__name__, taglist)

            # start with an empty array
            self.value = [0]

            while len(taglist) != 0:
                tag = taglist.Peek()
                if tag.tagClass == Tag.closingTagClass:
                    break

                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    if _debug: ArrayOf._debug("    - building helper: %r %r", self.subtype, tag)
                    taglist.Pop()

                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(tag)

                    # save the value
                    self.value.append(helper.value)
                else:
                    if _debug: ArrayOf._debug("    - building value: %r", self.subtype)
                    # build an element
                    value = self.subtype()

                    # let it decode itself
                    value.decode(taglist)

                    # save what was built
                    self.value.append(value)

            # update the length
            self.value[0] = len(self.value) - 1

        def encode_item(self, item, taglist):
            if _debug: ArrayOf._debug("(%r)encode_item %r %r", self.__class__.__name__, item, taglist)

            if item == 0:
                # a helper cooperates between the atomic value and the tag
                helper = Unsigned(self.value[0])

                # build a tag and encode the data into it
                tag = Tag()
                helper.encode(tag)

                # now encode the tag
                taglist.append(tag)
            else:
                value = self.value[item]

                if issubclass(self.subtype, (Atomic, AnyAtomic)):
                    # a helper cooperates between the atomic value and the tag
                    helper = self.subtype(self.value[item])

                    # build a tag and encode the data into it
                    tag = Tag()
                    helper.encode(tag)

                    # now encode the tag
                    taglist.append(tag)
                elif isinstance(value, self.subtype):
                    # it must have its own encoder
                    value.encode(taglist)
                else:
                    raise TypeError("%s must be a %s" % (value, self.subtype.__name__))

        def decode_item(self, item, taglist):
            if _debug: ArrayOf._debug("(%r)decode_item %r %r", self.__class__.__name__, item, taglist)

            if item == 0:
                # a helper cooperates between the atomic value and the tag
                helper = Unsigned(taglist.Pop())

                # save the value
                self.value = helper.value
            elif issubclass(self.subtype, (Atomic, AnyAtomic)):
                if _debug: ArrayOf._debug("    - building helper: %r", self.subtype)

                # a helper cooperates between the atomic value and the tag
                helper = self.subtype(taglist.Pop())

                # save the value
                self.value = helper.value
            else:
                if _debug: ArrayOf._debug("    - building value: %r", self.subtype)
                # build an element
                value = self.subtype()

                # let it decode itself
                value.decode(taglist)

                # save what was built
                self.value = value

        def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
            try:
                value_list = enumerate(self.value)
            except TypeError:
                file.write("%s(non-sequence) %r\n" % ("    " * indent, self.value))
                return

            for i, value in value_list:
                if i == 0:
                    file.write("%slength = %d\n" % ("    " * indent, value))
                elif issubclass(self.subtype, (Atomic, AnyAtomic)):
                    file.write("%s[%d] = %r\n" % ("    " * indent, i, value))
                elif isinstance(value, self.subtype):
                    file.write("%s[%d]\n" % ("    " * indent, i))
                    value.debug_contents(indent+1, file, _ids)
                else:
                    file.write("%s%s must be a %s" % ("    " * indent, value, self.subtype.__name__))

        def dict_contents(self, use_dict=None, as_class=dict):
            # return arrays as arrays
            mapped_value = []

            for value in self.value:
                if issubclass(self.subtype, Atomic):
                    mapped_value.append(value)              ### ambiguous
                elif issubclass(self.subtype, AnyAtomic):
                    mapped_value.append(value.value)        ### ambiguous
                elif isinstance(value, self.subtype):
                    mapped_value.append(value.dict_contents(as_class=as_class))

            # return what we built
            return mapped_value

    bacpypes_debugging(ArrayOf)

    # constrain it to a list of a specific type of item
    setattr(ArrayOf, 'subtype', klass)
    ArrayOf.__name__ = 'ArrayOf' + klass.__name__

    # cache this type
    _array_of_map[klass] = ArrayOf
    _array_of_classes[ArrayOf] = 1

    # return this new type
    return ArrayOf
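
debug_contents() above takes file=sys.stdout as a default argument, so callers get terminal output unless they pass another stream. Note that the default binds the sys.stdout object that exists when the function is defined, not whatever sys.stdout points to at call time. A minimal sketch of the pattern (dump_items is an invented name):

import sys

def dump_items(items, indent=1, file=sys.stdout):
    # default target is the sys.stdout object captured when the
    # function was defined; pass file=... to write elsewhere
    for i, value in enumerate(items):
        file.write("%s[%d] = %r\n" % ("    " * indent, i, value))

dump_items(["a", "b"])                # to the terminal
with open("dump.txt", "w") as f:
    dump_items(["a", "b"], file=f)    # to a file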

Example 137

Project: ns-3-dev-git Source File: wifi-olsr-flowmon.py
def main(argv):

    cmd = ns.core.CommandLine()

    cmd.NumNodesSide = None
    cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")

    cmd.Results = None
    cmd.AddValue("Results", "Write XML results to file")

    cmd.Plot = None
    cmd.AddValue("Plot", "Plot the results using the matplotlib python module")

    cmd.Parse(argv)

    wifi = ns.wifi.WifiHelper.Default()
    wifiMac = ns.wifi.WifiMacHelper()
    wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
    wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
    wifiPhy.SetChannel(wifiChannel.Create())
    ssid = ns.wifi.Ssid("wifi-default")
    wifi.SetRemoteStationManager("ns3::ArfWifiManager")
    wifiMac.SetType ("ns3::AdhocWifiMac",
                     "Ssid", ns.wifi.SsidValue(ssid))

    internet = ns.internet.InternetStackHelper()
    list_routing = ns.internet.Ipv4ListRoutingHelper()
    olsr_routing = ns.olsr.OlsrHelper()
    static_routing = ns.internet.Ipv4StaticRoutingHelper()
    list_routing.Add(static_routing, 0)
    list_routing.Add(olsr_routing, 100)
    internet.SetRoutingHelper(list_routing)

    ipv4Addresses = ns.internet.Ipv4AddressHelper()
    ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))

    port = 9   # Discard port(RFC 863)
    onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
                                  ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
    onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
    onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
    onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))

    addresses = []
    nodes = []

    if cmd.NumNodesSide is None:
        num_nodes_side = NUM_NODES_SIDE
    else:
        num_nodes_side = int(cmd.NumNodesSide)

    for xi in range(num_nodes_side):
        for yi in range(num_nodes_side):

            node = ns.network.Node()
            nodes.append(node)

            internet.Install(ns.network.NodeContainer(node))

            mobility = ns.mobility.ConstantPositionMobilityModel()
            mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
            node.AggregateObject(mobility)
            
            devices = wifi.Install(wifiPhy, wifiMac, node)
            ipv4_interfaces = ipv4Addresses.Assign(devices)
            addresses.append(ipv4_interfaces.GetAddress(0))

    for i, node in enumerate(nodes):
        destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
        #print i, destaddr
        onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
        app = onOffHelper.Install(ns.network.NodeContainer(node))
        urv = ns.core.UniformRandomVariable()
        app.Start(ns.core.Seconds(urv.GetValue(20, 30)))

    #internet.EnablePcapAll("wifi-olsr")
    flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
    #flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
    monitor = flowmon_helper.InstallAll()
    monitor = flowmon_helper.GetMonitor()
    monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
    monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
    monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))

    ns.core.Simulator.Stop(ns.core.Seconds(44.0))
    ns.core.Simulator.Run()

    def print_stats(os, st):
        print >> os, "  Tx Bytes: ", st.txBytes
        print >> os, "  Rx Bytes: ", st.rxBytes
        print >> os, "  Tx Packets: ", st.txPackets
        print >> os, "  Rx Packets: ", st.rxPackets
        print >> os, "  Lost Packets: ", st.lostPackets
        if st.rxPackets > 0:
            print >> os, "  Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
	    print >> os, "  Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
            print >> os, "  Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1

        if 0:
            print >> os, "Delay Histogram"
            for i in range(st.delayHistogram.GetNBins () ):
              print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
                  st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
            print >> os, "Jitter Histogram"
            for i in range(st.jitterHistogram.GetNBins () ):
              print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
                  st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
            print >> os, "PacketSize Histogram"
            for i in range(st.packetSizeHistogram.GetNBins () ):
              print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
                  st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)

        for reason, drops in enumerate(st.packetsDropped):
            print "  Packets dropped by reason %i: %i" % (reason, drops)
        #for reason, drops in enumerate(st.bytesDropped):
        #    print "Bytes dropped by reason %i: %i" % (reason, drops)

    monitor.CheckForLostPackets()
    classifier = flowmon_helper.GetClassifier()

    if cmd.Results is None:
        for flow_id, flow_stats in monitor.GetFlowStats():
            t = classifier.FindFlow(flow_id)
            proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
            print "FlowID: %i (%s %s/%s --> %s/%i)" % \
                (flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
            print_stats(sys.stdout, flow_stats)
    else:
        print monitor.SerializeToXmlFile(cmd.Results, True, True)


    if cmd.Plot is not None:
        import pylab
        delays = []
        for flow_id, flow_stats in monitor.GetFlowStats():
            tupl = classifier.FindFlow(flow_id)
            if tupl.protocol == 17 and tupl.sourcePort == 698:
                continue
            delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
        pylab.hist(delays, 20)
        pylab.xlabel("Delay (s)")
        pylab.ylabel("Number of Flows")
        pylab.show()

    return 0
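
print_stats() above receives its destination stream as the first argument (named os, which incidentally shadows the os module inside the function) and uses the Python 2 print >> os statement. A hedged Python 3 rendering of the same helper, exercised here with a fake stats object since the real one comes from ns-3's FlowMonitor:

import sys
from types import SimpleNamespace

def print_stats(out, st):
    # st is expected to expose the FlowMonitor statistics attributes used below
    print("  Tx Bytes: ", st.txBytes, file=out)
    print("  Rx Bytes: ", st.rxBytes, file=out)
    print("  Tx Packets: ", st.txPackets, file=out)
    print("  Rx Packets: ", st.rxPackets, file=out)
    if st.rxPackets > 0:
        print("  Mean{Delay}: ", st.delaySum.GetSeconds() / st.rxPackets, file=out)

# fake stats object, for demonstration only
fake = SimpleNamespace(txBytes=1000, rxBytes=900, txPackets=10, rxPackets=9,
                       delaySum=SimpleNamespace(GetSeconds=lambda: 0.18))
print_stats(sys.stdout, fake)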

Example 138

Project: itools Source File: parser.py
Function: run
    def run(self, data, context=None):
        tokenizer = self.grammar.tokenizer
        start_symbol = self.start_symbol
        token_table = self.token_table
        symbol_table = self.symbol_table
        reduce_table = self.reduce_table

        # Initialize the stack, where the stack is a list of tuples:
        #
        #   [...
        #    ([last_nodes], symbol, state, start, value),
        #    ...]
        #
        # The "start" field is a reference to the input stream.
        stack = deque()
        stack.append(([], None, 1, 0, None))
        active_nodes = deque()
        active_nodes.append(0)

        # Debug
#        debug = (start_symbol == 'rulelist')
#        debug = (start_symbol == 'IPv4address')
        if debug:
#            trace = open('/tmp/trace.txt', 'w')
            trace = sys.stdout

        get_token = tokenizer.get_token(data).next
        token, data_idx = get_token()

        result = []
        while token != EOI:
            # Debug
            if debug:
                trace.write('=== shift %s: %s (%s) ===\n'
                            % (token, data_idx, repr(data[data_idx])))

            # Shift on all the parsing paths
            map = {}
            new_active_nodes = deque()
            for node_idx in active_nodes:
                state = stack[node_idx][2]
                next_state = token_table.get((state, token), 0)
                if next_state == 0:
                    continue
                if next_state in map:
                    n = map[next_state]
                    stack[n][0].append(node_idx)
                else:
                    n = len(stack)
                    map[next_state] = n
                    new_active_nodes.append(n)
                    stack.append(
                        ([node_idx], token, next_state, data_idx+1, None))
            active_nodes = new_active_nodes

            # Debug
            if debug:
                pprint_stack(stack, active_nodes, data, trace)
                trace.write('=== reduce ===\n')

            # Next token
            token, data_idx = get_token()

            # Reduce
            new_active_nodes = deque()
            while active_nodes:
                node_idx = active_nodes.pop()
                kk, kk, state, kk, kk = stack[node_idx]
                shift, handles = reduce_table[state]
                # Shift
                if shift:
                    new_active_nodes.append(node_idx)
                # Reduce
                for name, n, look_ahead, method in handles:
                    # Look-Ahead
                    if token not in look_ahead:
                        continue
                    # Fork the stack
                    pointers = [[node_idx, []]]
                    while n > 0:
                        n -= 1
                        new_pointers = []
                        while pointers:
                            node_idx, values = pointers.pop()
                            last_nodes, symbol, kk, kk, value = stack[node_idx]
                            if type(symbol) is str:
                                values.insert(0, value)
                            for last_node in last_nodes:
                                new_pointers.append([last_node, values[:]])
                        pointers = new_pointers

                    for last_node, values in pointers:
                        kk, symbol, state, start, value = stack[last_node]
                        # Semantic action
                        if context is None:
                            value = None
                        elif method is None:
                            aux = [ x for x in values if x is not None ]
                            if len(aux) == 0:
                                value = None
                            else:
                                value = values
                        else:
                            value = method(context, start, data_idx, *values)
                        # Next state
                        next_state = symbol_table.get((state, name), 0)
                        # Stop Condition
                        if last_node==0 and name == start_symbol and token==0:
                            result.append(value)
                        else:
                            active_nodes.append(len(stack))
                            stack.append(
                                ([last_node], name, next_state, data_idx, value))
            active_nodes = new_active_nodes
            # Debug
            if debug:
                pprint_stack(stack, active_nodes, data, trace)
                trace.write('\n')

        if result:
            if len(result) > 1:
                print 'Warning, the grammar "%s" is ambiguous' % start_symbol
            return result[0]

        raise ValueError, 'grammar error'
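
When debugging is enabled, the parser points trace at sys.stdout (the commented-out alternative writes to /tmp/trace.txt) and then calls trace.write() throughout the run. A small sketch of that switchable trace-stream idiom:

import sys

def make_trace(debug, path=None):
    # debug tracing goes to a file when a path is given,
    # otherwise straight to standard output
    if not debug:
        return None
    return open(path, 'w') if path else sys.stdout

trace = make_trace(debug=True)
if trace:
    trace.write("=== shift token ===\n")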

Example 139

Project: openfisca-france Source File: convert_json_to_yaml.py
def main():
    parser = argparse.ArgumentParser(description = __doc__)
    parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
    args = parser.parse_args()
    logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)

    if not os.path.exists(income_taxes_test_cases_dir):
        os.makedirs(income_taxes_test_cases_dir)

    if os.path.exists(tests_dir):
        for filename in os.listdir(tests_dir):
            if filename.endswith(filename):
                os.remove(os.path.join(tests_dir, filename))
    else:
        os.makedirs(tests_dir)

    if os.path.exists(variables_name_file_path):
        with open(variables_name_file_path) as variables_name_file:
            name_by_year_by_code = yaml.load(variables_name_file)
    else:
        name_by_year_by_code = {}

    name_by_year_by_code_changed = False
    for json_filename in sorted(os.listdir(json_dir)):
        if not json_filename.endswith('.json'):
            continue
        log.info(u"Converting file {}...".format(json_filename))
        with open(os.path.join(json_dir, json_filename)) as json_file:
            data = conv.check(input_to_json_data)(json_file.read())
        scenario = data['scenario']
        tax_calculator_inputs = transform_scenario_to_tax_calculator_inputs(scenario)
        tax_year = scenario.period.start.year + 1
        if tax_year <= 2011:
            # Tax calculator is no more available for years before 2011.
            tax_calculator_outputs = collections.OrderedDict()
            tax_calculator_outputs_infos = data['resultat_officiel']
            for code, infos in tax_calculator_outputs_infos.iteritems():
                float_value = infos['value']
                int_value = int(float_value)
                tax_calculator_outputs[code] = int_value if float_value == int_value else float_value
                name = infos['name'].strip().rstrip(u'*').rstrip()
                name = u' '.join(name.split())  # Remove duplicate spaces.
                if name not in (u'', u'?', u'nom inconnu'):
                    name_by_year = name_by_year_by_code.setdefault(code, {})
                    current_name = name_by_year.get(tax_year)
                    if current_name is not None and current_name != name \
                            and not name.lower().endswith(current_name.lower()):
                        log.warning(u'Ignoring rename of variable {} for year {} from:\n  {}\nto:\n  {}'.format(code,
                            tax_year, current_name, name))
                    elif current_name != name:
                        name_by_year[tax_year] = name
                        name_by_year_by_code_changed = True
        else:
            page = call_tax_calculator(tax_year, tax_calculator_inputs)
            page_doc = etree.parse(cStringIO.StringIO(page), html_parser)

            codes_without_name = set()
            tax_calculator_outputs = collections.OrderedDict()
            for element in page_doc.xpath('//input[@type="hidden"][@name]'):
                code = element.get('name')
                name = None
                parent = element.getparent()
                parent_tag = parent.tag.lower()
                if parent_tag == 'table':
                    tr = parent[parent.index(element) - 1]
                    assert tr.tag.lower() == 'tr', tr
                elif parent_tag == 'tr':
                    tr = parent
                elif code == 'NAPCR':
                    name = u'Contributions sociales supplémentaires'
                else:
                    codes_without_name.add(code)
                    continue
                if name is None:
                    while True:
                        name = etree.tostring(tr[1], encoding = unicode, method = 'text').strip().rstrip(u'*').rstrip()
                        if name:
                            name = u' '.join(name.split())  # Remove duplicate spaces.
                            break
                        table = tr.getparent()
                        tr = table[table.index(tr) - 1]
                codes_without_name.discard(code)
                float_value = float(element.get('value').strip())
                int_value = int(float_value)
                tax_calculator_outputs[code] = int_value if float_value == int_value else float_value
                name_by_year = name_by_year_by_code.setdefault(code, {})
                current_name = name_by_year.get(tax_year)
                if current_name is not None and current_name != name \
                        and not name.lower().endswith(current_name.lower()):
                    log.warning(u'Renaming variable {} for year {} from:\n  {}\nto:\n  {}'.format(code, tax_year,
                        current_name, name))
                if current_name != name and (current_name is None or not name.lower().endswith(current_name.lower())):
                    name_by_year[tax_year] = name
                    name_by_year_by_code_changed = True

            assert not codes_without_name, 'Output variables {} have no name in page:\n{}'.format(
                sorted(codes_without_name), page.decode('iso-8859-1').encode('utf-8'))

        # Create or update test for "calculateur impôt".
        sorted_tax_calculator_inputs = collections.OrderedDict(sorted(tax_calculator_inputs.iteritems()))
        income_taxes_test_case_file_path = os.path.join(income_taxes_test_cases_dir, '{}.yaml'.format(
            hashlib.md5(json.dumps(sorted_tax_calculator_inputs)).hexdigest()))
        if os.path.exists(income_taxes_test_case_file_path):
            with open(income_taxes_test_case_file_path) as income_taxes_test_case_file:
                income_taxes_test_case = yaml.load(income_taxes_test_case_file)
                income_taxes_test_case['output_variables'][str(tax_year)] = collections.OrderedDict(sorted(
                    tax_calculator_outputs.iteritems()))
                income_taxes_test_case['output_variables'] = collections.OrderedDict(sorted(
                    income_taxes_test_case['output_variables'].iteritems()))
        else:
            income_taxes_test_case = collections.OrderedDict((
                ('input_variables', sorted_tax_calculator_inputs),
                ('output_variables', collections.OrderedDict((
                    (str(tax_year), collections.OrderedDict(sorted(tax_calculator_outputs.iteritems()))),
                    ))),
                ))
        with open(income_taxes_test_case_file_path, 'w') as income_taxes_test_case_file:
            yaml.dump(income_taxes_test_case, income_taxes_test_case_file, allow_unicode = True,
                default_flow_style = False, indent = 2, width = 120)

        # Create or update YAML file containing the names associated to each result code of "calculateur impôt".
        if name_by_year_by_code_changed:
            variables_name_data = collections.OrderedDict(
                (code, collections.OrderedDict(sorted(name_by_year.iteritems())))
                for code, name_by_year in sorted(name_by_year_by_code.iteritems())
                )
            with open(variables_name_file_path, 'w') as variables_name_file:
                yaml.dump(variables_name_data, variables_name_file, allow_unicode = True, default_flow_style = False,
                    indent = 2, width = 120)
            name_by_year_by_code_changed = False

        # Create or update YAML file containing OpenFisca test.
        main_input_variable_name = json_filename.split('-', 1)[0]
        test = collections.OrderedDict((
            ('name', main_input_variable_name),
            ))
        test.update(scenario.to_json())
        test['period'] = scenario.period.start.year  # Replace period string with an integer.
        test_case = test.pop('test_case', None)
        for entity_name_plural, entity_variables in test_case.iteritems():
            test[entity_name_plural] = entity_variables
        test['output_variables'] = collections.OrderedDict(sorted(
            (variable_name, variable_value)
            for variable_name, variable_value in (
                (openfisca_variable_name_by_tax_calculator_code[code], value)
                for code, value in tax_calculator_outputs.iteritems()
                )
            if variable_name is not None
            ))
        tests_file_path = os.path.join(tests_dir, '{}.yaml'.format(main_input_variable_name))
        if os.path.exists(tests_file_path):
            with open(tests_file_path) as tests_file:
                tests = yaml.load(tests_file)
                tests.append(test)
                tests.sort(key = lambda test: (test['name'], test['period']))
        else:
            tests = [test]
        with open(tests_file_path, 'w') as tests_file:
            yaml.dump(tests, tests_file, allow_unicode = True, default_flow_style = False, indent = 2, width = 120)

    return 0
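
The test-case file name above is derived from the calculator inputs themselves: the inputs are key-sorted, serialised to JSON and hashed with MD5, so the same scenario always maps to the same YAML file. A minimal Python 3 sketch of that naming convention, with a made-up input dict (the real script hashes the Python 2 str directly):

import hashlib
import json

def test_case_filename(inputs):
    """Return a stable YAML file name for a dict of calculator inputs."""
    canonical = json.dumps(inputs, sort_keys=True)   # stable key order
    return '{}.yaml'.format(hashlib.md5(canonical.encode('utf-8')).hexdigest())

print(test_case_filename({'salaire_imposable': 20000, 'annee': 2013}))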

Example 140

Project: biocode Source File: convert_genbank_to_gff3.py
def main():
    parser = argparse.ArgumentParser( description='Convert GenBank flat files to GFF3 format')

    ## output file to be written
    parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input GBK file' )
    parser.add_argument('-o', '--output_file', type=str, required=False, help='Path to an output GFF file to be created' )
    parser.add_argument('--with_fasta', dest='fasta', action='store_true', help='Include the FASTA section with genomic sequence at end of file.  (default)' )
    parser.add_argument('--no_fasta', dest='fasta', action='store_false' )
    parser.set_defaults(fasta=True)
    args = parser.parse_args()

    ## output will either be a file or STDOUT
    ofh = sys.stdout
    if args.output_file is not None:
        ofh = open(args.output_file, 'wt')

    ofh.write("##gff-version 3\n")

    assemblies = dict()
    current_assembly = None
    current_gene = None
    current_RNA = None

    rna_count_by_gene = defaultdict(int)
    exon_count_by_RNA = defaultdict(int)

    seqs_pending_writes = False

    features_skipped_count = 0

    # each gb_record is a SeqRecord object
    for gb_record in SeqIO.parse(open(args.input_file, "r"), "genbank"):
        mol_id = gb_record.name

        if mol_id not in assemblies:
            assemblies[mol_id] = biothings.Assembly( id=mol_id )

        if len(str(gb_record.seq)) > 0:
            seqs_pending_writes = True
            assemblies[mol_id].residues = str(gb_record.seq)
            assemblies[mol_id].length = len(str(gb_record.seq))

        current_assembly = assemblies[mol_id]
            
        # each feat is a SeqFeature object
        for feat in gb_record.features:
            #print(feat)
            fmin = int(feat.location.start)
            fmax = int(feat.location.end)

            if feat.location.strand == 1:
                strand = '+'
            elif feat.location.strand == -1:
                strand = '-'
            else:
                raise Exception("ERROR: unstranded feature encountered: {0}".format(feat))

            #print("{0} located at {1}-{2} strand:{3}".format( locus_tag, fmin, fmax, strand ) )
            if feat.type == 'source':
                continue
            
            if feat.type == 'gene':
                # print the previous gene (if there is one)
                if current_gene is not None:
                    gene.print_as(fh=ofh, source='GenBank', format='gff3')
                
                locus_tag = feat.qualifiers['locus_tag'][0]
                gene = biothings.Gene( id=locus_tag )
                gene.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
                current_gene = gene
                current_RNA = None

            elif feat.type == 'mRNA':
                locus_tag = feat.qualifiers['locus_tag'][0]
                rna_count_by_gene[locus_tag] += 1
                feat_id = "{0}.mRNA.{1}".format( locus_tag, rna_count_by_gene[locus_tag] )
                
                mRNA = biothings.mRNA( id=feat_id, parent=current_gene )
                mRNA.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
                gene.add_mRNA(mRNA)
                current_RNA = mRNA

                if feat_id in exon_count_by_RNA:
                    raise Exception( "ERROR: two different RNAs found with same ID: {0}".format(feat_id) )
                else:
                    exon_count_by_RNA[feat_id] = 0

            elif feat.type == 'tRNA':
                locus_tag = feat.qualifiers['locus_tag'][0]
                rna_count_by_gene[locus_tag] += 1
                feat_id = "{0}.tRNA.{1}".format( locus_tag, rna_count_by_gene[locus_tag] )
                
                tRNA = biothings.tRNA( id=feat_id, parent=current_gene )
                tRNA.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
                gene.add_tRNA(tRNA)
                current_RNA = tRNA

                if feat_id in exon_count_by_RNA:
                    raise Exception( "ERROR: two different RNAs found with same ID: {0}".format(feat_id) )
                else:
                    exon_count_by_RNA[feat_id] = 0

            elif feat.type == 'rRNA':
                locus_tag = feat.qualifiers['locus_tag'][0]
                rna_count_by_gene[locus_tag] += 1
                feat_id = "{0}.rRNA.{1}".format( locus_tag, rna_count_by_gene[locus_tag] )
                
                rRNA = biothings.rRNA( id=feat_id, parent=current_gene )
                rRNA.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
                gene.add_rRNA(rRNA)
                current_RNA = rRNA

                if feat_id in exon_count_by_RNA:
                    raise Exception( "ERROR: two different RNAs found with same ID: {0}".format(feat_id) )
                else:
                    exon_count_by_RNA[feat_id] = 0
            
            elif feat.type == 'CDS':
                locus_tag = feat.qualifiers['locus_tag'][0]
                # If processing a prokaryotic GBK, we'll encounter CDS before mRNA, so we have to
                #  manually make one
                if current_RNA is None:
                    feat_id = "{0}.mRNA.{1}".format( locus_tag, rna_count_by_gene[locus_tag] )
                    mRNA = biothings.mRNA( id=feat_id, parent=current_gene )
                    mRNA.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
                    gene.add_mRNA(mRNA)
                    current_RNA = mRNA
                
                exon_count_by_RNA[current_RNA.id] += 1
                cds_id = "{0}.CDS.{1}".format( current_RNA.id, exon_count_by_RNA[current_RNA.id] )
                current_CDS_phase = 0
                
                for loc in feat.location.parts:
                    subfmin = int(loc.start)
                    subfmax = int(loc.end)
                    
                    CDS = biothings.CDS( id=cds_id, parent=current_RNA )
                    CDS.locate_on( target=current_assembly, fmin=subfmin, fmax=subfmax, strand=strand, phase=current_CDS_phase )
                    current_RNA.add_CDS(CDS)

                    # calculate the starting phase for the next CDS feature (in case there is one)
                    # 0 + 6 = 0     TTGCAT
                    # 0 + 7 = 2     TTGCATG
                    # 1 + 6 = 1     TTGCAT
                    # 2 + 7 = 1     TTGCATG
                    # general: 3 - ((length - previous phase) % 3)
                    current_CDS_phase = 3 - (((subfmax - subfmin) - current_CDS_phase) % 3)
                    if current_CDS_phase == 3:
                        current_CDS_phase = 0

                    exon_id = "{0}.exon.{1}".format( current_RNA.id, exon_count_by_RNA[current_RNA.id] )
                    exon = biothings.Exon( id=exon_id, parent=current_RNA )
                    exon.locate_on( target=current_assembly, fmin=subfmin, fmax=subfmax, strand=strand )
                    current_RNA.add_exon(exon)
                    exon_count_by_RNA[current_RNA.id] += 1
                
            else:
                print("WARNING: The following feature was skipped:\n{0}".format(feat))
                features_skipped_count += 1

        # don't forget to print the last gene, if there was one
        if current_gene is not None:
            gene.print_as(fh=ofh, source='GenBank', format='gff3')

    if args.fasta is True:
        if seqs_pending_writes is True:
            ofh.write("##FASTA\n")
            for assembly_id in assemblies:
                ofh.write(">{0}\n".format(assembly_id))
                ofh.write("{0}\n".format(biocodeutils.wrapped_fasta(assemblies[assembly_id].residues)))

    if features_skipped_count > 0:
        print("Warning: {0} unsupported feature types were skipped".format(features_skipped_count))

Example 141

Project: openfisca-france Source File: measure_performances.py
def main():
    parser = argparse.ArgumentParser(description = __doc__)
    parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
    global args
    args = parser.parse_args()
    logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)

    print 'salaire_imposable'

    test_irpp(2010, -1181, salaire_imposable =  20000)
    test_irpp(2010, -7934, salaire_imposable =  50000)
    test_irpp(2010, -42338, salaire_imposable =  150000)
    test_irpp(2011, -1181, salaire_imposable =  20000)
    test_irpp(2011, -7934, salaire_imposable =  50000)
    test_irpp(2011, -42338, salaire_imposable =  150000)
    test_irpp(2012, -1181, salaire_imposable =  20000)
    test_irpp(2012, -7934, salaire_imposable =  50000)
    test_irpp(2012, -43222, salaire_imposable =  150000)
    test_irpp(2013, -1170, salaire_imposable =  20000)
    test_irpp(2013, -7889, salaire_imposable =  50000)
    test_irpp(2013, -43076, salaire_imposable =  150000)

    print 'retraite_imposable'

    test_irpp(2010, -1181, retraite_imposable = 20000)
    test_irpp(2010, -8336, retraite_imposable = 50000)
    test_irpp(2010, -46642, retraite_imposable = 150000)
    test_irpp(2011, -1181, retraite_imposable = 20000)
    test_irpp(2011, -8336, retraite_imposable = 50000)
    test_irpp(2011, -46642, retraite_imposable = 150000)
    test_irpp(2012, -1181, retraite_imposable = 20000)
    test_irpp(2012, -8336, retraite_imposable = 50000)
    test_irpp(2012, -46642, retraite_imposable = 150000)
    test_irpp(2013, -1170, retraite_imposable = 20000)
    test_irpp(2013, -8283, retraite_imposable = 50000)
    test_irpp(2013, -46523, retraite_imposable = 150000)

    print 'f2da'

    test_irpp(2010, 0, f2da = 20000)
    test_irpp(2010, 0, f2da = 50000)
    test_irpp(2010, 0, f2da = 150000)
    test_irpp(2011, 0, f2da = 20000)
    test_irpp(2011, 0, f2da = 50000)
    test_irpp(2011, 0, f2da = 150000)
    test_irpp(2012, 0, f2da = 20000)
    test_irpp(2012, 0, f2da = 50000)
    test_irpp(2012, 0, f2da = 150000)
    # test_irpp(2013, 0, f2da = 20000)
    # test_irpp(2013, 0, f2da = 50000)
    # test_irpp(2013, 0, f2da = 150000)

    print 'f2dc'

    test_irpp(2010, 0, f2dc = 20000)
    test_irpp(2010, -2976, f2dc = 50000)
    test_irpp(2010, -22917, f2dc = 150000)
    test_irpp(2011, 0, f2dc = 20000)
    test_irpp(2011, -2976, f2dc = 50000)
    test_irpp(2011, -22917, f2dc = 150000)
    test_irpp(2012, 0, f2dc = 20000)
    test_irpp(2012, -3434, f2dc = 50000)
    test_irpp(2012, -23542, f2dc = 150000)
    # test_irpp(2013, 0, f2dc = 20000)
    # test_irpp(2013, 0, f2dc = 50000)
    # test_irpp(2013, 0, f2dc = 150000)

    print 'f2dh'

    test_irpp(2010, 345, f2dh = 20000)
    test_irpp(2010, 345, f2dh = 50000)
    test_irpp(2010, 345, f2dh = 150000)
    test_irpp(2011, 345, f2dh = 20000)
    test_irpp(2011, 345, f2dh = 50000)
    test_irpp(2011, 345, f2dh = 150000)
    test_irpp(2012, 345, f2dh = 20000)
    test_irpp(2012, 345, f2dh = 50000)
    test_irpp(2012, 345, f2dh = 150000)
    test_irpp(2013, 345, f2dh = 20000)
    test_irpp(2013, 345, f2dh = 50000)
    test_irpp(2013, 345, f2dh = 150000)

    print 'f2tr'

    test_irpp(2010, -1461, f2tr = 20000)
    test_irpp(2010, -9434, f2tr = 50000)
    test_irpp(2010, -48142, f2tr = 150000)
    test_irpp(2011, -1461, f2tr = 20000)
    test_irpp(2011, -9434, f2tr = 50000)
    test_irpp(2011, -48142, f2tr = 150000)
    test_irpp(2012, -1461, f2tr = 20000)
    test_irpp(2012, -9434, f2tr = 50000)
    test_irpp(2012, -48142, f2tr = 150000)
    test_irpp(2013, -1450, f2tr = 20000)
    test_irpp(2013, -9389, f2tr = 50000)
    test_irpp(2013, -48036, f2tr = 150000)

    print 'f2ts'

    test_irpp(2010, -1461, f2ts = 20000)
    test_irpp(2010, -9434, f2ts = 50000)
    test_irpp(2010, -48142, f2ts = 150000)
    test_irpp(2011, -1461, f2ts = 20000)
    test_irpp(2011, -9434, f2ts = 50000)
    test_irpp(2011, -48142, f2ts = 150000)
    test_irpp(2012, -1461, f2ts = 20000)
    test_irpp(2012, -9434, f2ts = 50000)
    test_irpp(2012, -48142, f2ts = 150000)
    test_irpp(2013, -1450, f2ts = 20000)
    test_irpp(2013, -9389, f2ts = 50000)
    test_irpp(2013, -48036, f2ts = 150000)

    print 'f3vg'

    test_irpp(2010, -3600, f3vg = 20000)
    test_irpp(2010, -9000, f3vg = 50000)
    test_irpp(2010, -27000, f3vg = 150000)
    test_irpp(2011, -3800, f3vg = 20000)
    test_irpp(2011, -9500, f3vg = 50000)
    test_irpp(2011, -28500, f3vg = 150000)
    test_irpp(2012, -4800, f3vg = 20000)
    test_irpp(2012, -12000, f3vg = 50000)
    test_irpp(2012, -36000, f3vg = 150000)
    test_irpp(2013, -1450, f3vg = 20000)
    test_irpp(2013, -9389, f3vg = 50000)
    test_irpp(2013, -48036, f3vg = 150000)

    print 'f3vz'

    # test_irpp(2010, 0, f3vz = 20000)
    # test_irpp(2010, 0, f3vz = 50000)
    # test_irpp(2010, 0, f3vz = 150000)
    test_irpp(2011, 0, f3vz = 20000)
    test_irpp(2011, 0, f3vz = 50000)
    test_irpp(2011, 0, f3vz = 150000)
    test_irpp(2012, 0, f3vz = 20000)
    test_irpp(2012, 0, f3vz = 50000)
    test_irpp(2012, 0, f3vz = 150000)
    test_irpp(2013, 0, f3vz = 20000)
    test_irpp(2013, 0, f3vz = 50000)
    test_irpp(2013, 0, f3vz = 150000)

    print 'f4ba'

    test_irpp(2010, -1461, f4ba = 20000)
    test_irpp(2010, -9434, f4ba = 50000)
    test_irpp(2010, -48142, f4ba = 150000)
    test_irpp(2011, -1461, f4ba = 20000)
    test_irpp(2011, -9434, f4ba = 50000)
    test_irpp(2011, -48142, f4ba = 150000)
    test_irpp(2012, -1461, f4ba = 20000)
    test_irpp(2012, -9434, f4ba = 50000)
    test_irpp(2012, -48142, f4ba = 150000)
    test_irpp(2013, -1450, f4ba = 20000)
    test_irpp(2013, -9389, f4ba = 50000)
    test_irpp(2013, -48036, f4ba = 150000)
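
Here sys.stdout is used as the logging stream: logging.basicConfig is pointed at stdout and the level is raised to DEBUG only when -v/--verbose is passed. A small self-contained sketch of that setup:

import argparse
import logging
import sys

parser = argparse.ArgumentParser(description='logging-to-stdout demo')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
                    help='increase output verbosity')
args = parser.parse_args()

logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARNING,
                    stream=sys.stdout)
logging.debug('only shown with --verbose')
logging.warning('always shown, and sent to stdout rather than stderr')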

Example 142

Project: portingdb Source File: py3query.py
Function: run
    def run(self, args):
        if self.opts.help_cmd:
            print(self.parser.format_help())
            return

        reponame = self.opts.py3query_repo
        self.base_query = self.base.sack.query()
        self.pkg_query = self.base_query.filter(reponame=reponame)
        self.src_query = self.base_query.filter(reponame=reponame + '-source').filter(arch=['src'])

        # python_versions: {package: set of Python versions}
        python_versions = collections.defaultdict(set)
        # rpm_pydeps: {package: set of dep names}
        rpm_pydeps = collections.defaultdict(set)
        # dep_versions: {dep name: Python version}
        dep_versions = collections.defaultdict(set)
        for n, seeds in SEED_PACKAGES.items():
            provides = sorted(self.all_provides(reponame, seeds), key=str)

            # This effectively includes packages that still need
            # Python 3.4 while Rawhide only provides Python 3.5
            provides += sorted(seeds)

            for dep in progressbar(provides, 'Getting py{} requires'.format(n)):
                dep_versions[str(dep)] = n
                for pkg in self.whatrequires(dep):
                    python_versions[pkg].add(n)
                    rpm_pydeps[pkg].add(str(dep))

        # srpm_names: {package: srpm name}
        # by_srpm_name: {srpm name: set of packages}
        srpm_names = {}
        by_srpm_name = collections.defaultdict(set)
        for pkg in progressbar(python_versions.keys(), 'Getting SRPMs'):
            srpm_name = hawkey.split_nevra(pkg.sourcerpm).name
            srpm_names[pkg] = srpm_name
            by_srpm_name[srpm_name].add(pkg)

        # deps_of_pkg: {package: set of packages}
        deps_of_pkg = collections.defaultdict(set)
        all_provides = {str(r): r for p in python_versions for r in p.provides
                        if not str(r).startswith(PROVIDES_BLACKLIST)}
        for pkg in progressbar(sorted(python_versions.keys()), 'Getting requirements'):
            reqs = set()
            for provide in pkg.provides:
                reqs.update(self.whatrequires(provide))
            for req in reqs:
                if req in python_versions.keys():
                    deps_of_pkg[req].add(pkg)

        # deps_of_pkg: {srpm name: info}
        json_output = dict()
        for name in progressbar(by_srpm_name, 'Generating output'):
            pkgs = sorted(by_srpm_name[name])
            r = json_output[name] = {}
            set_status(r, pkgs, python_versions)
            r['rpms'] = {format_rpm_name(p):
                         {str(d): dep_versions[d] for d in rpm_pydeps[p]}
                        for p in pkgs}
            r['deps'] = sorted(set(srpm_names[d]
                                   for p in pkgs
                                   for d in deps_of_pkg.get(p, '')
                                   if srpm_names[d] != name))

        # add Bugzilla links
        if self.opts.fetch_bugzilla:
            bar = iter(progressbar(['connecting', 'tracker', 'individual'],
                                   'Getting bugs'))

            next(bar)
            bz = bugzilla.RHBugzilla(BUGZILLA_URL)

            next(bar)
            include_fields = ['id', 'depends_on', 'blocks', 'component',
                              'status', 'resolution', 'last_change_time']
            trackers = bz.getbugs(TRACKER_BUG_IDS,
                                  include_fields=include_fields)
            all_ids = [b for t in trackers for b in t.depends_on]

            next(bar)
            bugs = bz.getbugs(all_ids, include_fields=include_fields)
            bar.close()

            def bug_namegetter(bug):
                return '{bug.id} {bug.status} {bug.component}'.format(bug=bug)

            rank = ['NEW', 'ASSIGNED', 'POST', 'MODIFIED', 'ON_QA', 'VERIFIED',
                    'RELEASE_PENDING', 'CLOSED']
            def key(bug):
                return rank.index(bug.status), bug.last_change_time
            bugs = sorted(bugs, key=key)

            for bug in progressbar(bugs, 'Merging bugs',
                                   namegetter=bug_namegetter):
                r = json_output.get(bug.component, {})
                if 'bug' in r.get('links', {}):
                    continue
                url = '{bug.weburl}#{bug.status}'.format(bug=bug)
                status = bug.status
                if bug.resolution:
                    status += ' ' + bug.resolution
                # Let's get the datetime of the last comment and convert to string
                last_change_datetime = time.strftime('%Y-%m-%d %H:%M:%S',
                        bug.last_change_time.timetuple())
                r.setdefault('links', {})['bug'] = [bug.weburl, status,
                        last_change_datetime]

                for tb in bug.blocks:
                    if tb in ADDITIONAL_TRACKER_BUGS:
                        r.setdefault('tracking_bugs', []) \
                                .append(BUGZILLA_BUG_URL.format(tb))

                if any(tb in bug.blocks for tb in MISPACKAGED_TRACKER_BUG_IDS):
                    if r.get('status') == 'idle' and bug.status != 'NEW':
                        r['status'] = 'in-progress'
                    elif r.get('status') == 'idle' and bug.status == 'NEW':
                        r['status'] = "mispackaged"
                        r['note'] = ('There is a problem in Fedora packaging, ' +
                                    'not necessarily with the software itself. ' +
                                    'See the linked Fedora bug.')

        # Print out output

        if self.opts.output:
            with open(self.opts.output, 'w') as f:
                json.dump(json_output, f, indent=2, sort_keys=True)
        else:
            json.dump(json_output, sys.stdout, indent=2, sort_keys=True)
            sys.stdout.flush()
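
The final step above writes the same JSON either to the path given by --output or, when none is given, straight to sys.stdout followed by an explicit flush. A minimal sketch of that output step, with a hypothetical payload:

import json
import sys

def write_report(json_output, output_path=None):
    if output_path:
        with open(output_path, 'w') as f:
            json.dump(json_output, f, indent=2, sort_keys=True)
    else:
        json.dump(json_output, sys.stdout, indent=2, sort_keys=True)
        sys.stdout.flush()   # make sure nothing is left in the buffer

write_report({'example-srpm': {'status': 'idle'}})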

Example 143

Project: elastic-recheck Source File: graph.py
def main():
    parser = argparse.ArgumentParser(description='Generate data for graphs.')
    parser.add_argument(dest='queries',
                        help='path to query file')
    parser.add_argument('-o', dest='output',
                        help='output filename. Omit for stdout')
    parser.add_argument('-q', dest='queue',
                        help='limit results to a build queue regex')
    parser.add_argument('--es-query-suffix',
                        help='further limit results with an '
                             'elastic search query suffix. This will be ANDed '
                             'to all queries. '
                             'For example, to limit all queries to a '
                             'specific branch use: '
                             ' --es-query-suffix "build_branch:\\"stable/'
                             'liberty\\""')
    parser.add_argument('-c', '--conf', help="Elastic Recheck Configuration "
                        "file to use for data_source options such as "
                        "elastic search url, logstash url, and database "
                        "uri.")
    parser.add_argument('-v', dest='verbose',
                        action='store_true', default=False,
                        help='print out details as we go')
    args = parser.parse_args()

    config = er_conf.Config(config_file=args.conf)

    classifier = er.Classifier(args.queries, config=config)

    buglist = []

    # if you don't hate timezones, you don't program enough
    epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
    ts = datetime.utcnow().replace(tzinfo=pytz.utc)
    # rawnow is useful for sending to javascript
    rawnow = int(((ts - epoch).total_seconds()) * 1000)

    ts = datetime(ts.year, ts.month, ts.day, ts.hour).replace(tzinfo=pytz.utc)
    # ms since epoch
    now = int(((ts - epoch).total_seconds()) * 1000)
    # number of days to match to, this should be the same as we are
    # indexing in logstash
    days = 10
    # How far back to start in the graphs
    start = now - (days * 24 * STEP)
    # ER timeframe for search
    timeframe = days * 24 * STEP / 1000

    last_indexed = int(
        ((classifier.most_recent() - epoch).total_seconds()) * 1000)
    behind = now - last_indexed

    # the data we're going to return, including interesting headers
    jsondata = {
        'now': rawnow,
        'last_indexed': last_indexed,
        'behind': behind,
        'buglist': []
    }

    # Get the cluster health for the header
    es = pyelasticsearch.ElasticSearch(config.es_url)
    jsondata['status'] = es.health()['status']

    for query in classifier.queries:
        if args.queue:
            query['query'] += ' AND build_queue:%s' % args.queue
        if args.es_query_suffix:
            query['query'] += ' AND (%s)' % args.es_query_suffix

        if query.get('suppress-graph'):
            continue
        if args.verbose:
            LOG.debug("Starting query for bug %s" % query['bug'])
        logstash_query = qb.encode_logstash_query(query['query'],
                                                  timeframe=timeframe)
        logstash_url = ("%s/#/dashboard/file/logstash.json?%s"
                        % (config.ls_url, logstash_query))
        bug_data = get_launchpad_bug(query['bug'])
        bug = dict(number=query['bug'],
                   query=query['query'],
                   logstash_url=logstash_url,
                   bug_data=bug_data,
                   fails=0,
                   fails24=0,
                   data=[],
                   voting=(False if query.get('allow-nonvoting') else True))
        buglist.append(bug)
        try:
            results = classifier.hits_by_query(query['query'],
                                               args.queue,
                                               size=3000,
                                               days=days)
        except pyelasticsearch.exceptions.InvalidJsonResponseError:
            LOG.exception("Invalid Json while collecting metrics for query %s"
                          % query['query'])
            continue
        except requests.exceptions.ReadTimeout:
            LOG.exception("Timeout while collecting metrics for query %s" %
                          query['query'])
            continue

        facets_for_fail = er_results.FacetSet()
        facets_for_fail.detect_facets(results,
                                      ["build_status", "build_uuid"])
        if "FAILURE" in facets_for_fail:
            bug['fails'] = len(facets_for_fail['FAILURE'])

        facets = er_results.FacetSet()
        facets.detect_facets(results,
                             ["build_status", "timestamp", "build_uuid"])

        for status in facets.keys():
            data = []
            for ts in range(start, now, STEP):
                if ts in facets[status]:
                    fails = len(facets[status][ts])
                    data.append([ts, fails])
                    # get the last 24 hr count as well; can't wait to have
                    # the pandas code and be able to do it that way
                    if status == "FAILURE" and ts > (now - (24 * STEP)):
                        bug['fails24'] += fails
                else:
                    data.append([ts, 0])
            bug["data"].append(dict(label=status, data=data))

    # the sort order is a little odd, but basically sort by failures in
    # the last 24 hours, then with all failures for ones that we haven't
    # seen in the last 24 hours.
    buglist = sorted(buglist,
                     key=lambda bug: -(bug['fails24'] * 100000 + bug['fails']))

    jsondata['buglist'] = buglist
    if args.output:
        out = open(args.output, 'w')
    else:
        out = sys.stdout

    try:
        # indent the json output if we're writing to a file
        indent = 4 if args.output else None
        out.write(json.dumps(jsondata, indent=indent))
    finally:
        out.close()
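
Note that the finally block above calls out.close() even when out is sys.stdout, which closes the interpreter's standard output. A sketch of a variant that only closes handles it opened itself (assuming Python 3.7+ for contextlib.nullcontext; the indent-only-for-files behaviour is kept):

import json
import sys
from contextlib import nullcontext

def write_json(jsondata, output_path=None):
    # Only a file we open ourselves gets closed; sys.stdout is left untouched.
    ctx = open(output_path, 'w') if output_path else nullcontext(sys.stdout)
    with ctx as out:
        indent = 4 if output_path else None
        out.write(json.dumps(jsondata, indent=indent))

write_json({'now': 0, 'buglist': []})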

Example 144

Project: karesansui Source File: delete_bonding.py
    def process(self):
        (opts, args) = getopts()
        chkopts(opts)
        self.up_progress(10)

        exist_bond_list = get_ifconfig_info("regex:^bond")
        if opts.dev not in exist_bond_list:
            raise KssCommandOptException('Target bonding device not found. target=%s' % opts.dev)

        self.up_progress(10)
        dop = DictOp()
        ifcfg_parser = ifcfgParser()
        dop.addconf("ifcfg", ifcfg_parser.read_conf())
        if dop.getconf("ifcfg") == {}:
            raise KssCommandException('Failure read network config file.')

        if dop.get("ifcfg", opts.dev) is False:
            raise KssCommandException('Target device ifcfg file not found.')

        self.up_progress(10)
        restore_dev_list = []
        for dev in dop.getconf("ifcfg").keys():
            if dop.get("ifcfg", [dev, "MASTER"]) == opts.dev:
                restore_dev_list.append(dev)

        self.up_progress(10)
        if opts.succession is True:
            bond_bridge = dop.get("ifcfg", [opts.dev, "BRIDGE"])
            bond_dev = opts.dev
            if bond_bridge:
                bond_dev = bond_bridge

            ipaddr = dop.get("ifcfg",  [bond_dev, "IPADDR"])
            netmask = dop.get("ifcfg", [bond_dev, "NETMASK"])
            gateway = dop.get("ifcfg", [bond_dev, "GATEWAY"])
            bonding_opts = dop.get("ifcfg", [opts.dev, "BONDING_OPTS"])
            bonding_opts = bonding_opts.strip('"')
            primary_dev = None
            for combination in bonding_opts.split(" "):
                if re.match("primary", combination):
                    (key,val) = combination.split("=")
                    val = val.strip()
                    primary_dev = val

        self.up_progress(10)
        for restore_dev in restore_dev_list:
            if move_file("%s/ifcfg-%s" % (VENDOR_DATA_BONDING_EVACUATION_DIR, restore_dev), NETWORK_IFCFG_DIR) is False:
                raise KssCommandException('Failure restore ifcfg file.')
            if os.path.isfile("%s/ifcfg-p%s" % (VENDOR_DATA_BONDING_EVACUATION_DIR, restore_dev)):
                if move_file("%s/ifcfg-p%s" % (VENDOR_DATA_BONDING_EVACUATION_DIR, restore_dev), NETWORK_IFCFG_DIR) is False:
                    raise KssCommandException('Failure restore ifcfg file.')

        self.up_progress(10)
        if opts.succession is True and primary_dev is not None:
            dop = DictOp()
            ifcfg_parser = ifcfgParser()
            dop.addconf("ifcfg", ifcfg_parser.read_conf())
            if dop.getconf("ifcfg") == {}:
                raise KssCommandException('Failure read network config file.')

            if ipaddr:
                dop.set("ifcfg", [primary_dev, "IPADDR"],  ipaddr)
            if netmask:
                dop.set("ifcfg", [primary_dev, "NETMASK"], netmask)
            if gateway:
                dop.set("ifcfg", [primary_dev, "GATEWAY"], gateway)

            if ifcfg_parser.write_conf(dop.getconf("ifcfg")) is False:
                raise KssCommandException('Failure write network config file.')

        self.up_progress(10)
        remove_file("%s/ifcfg-%s" % (NETWORK_IFCFG_DIR, opts.dev))

        self.up_progress(10)
        dop = DictOp()
        modprobe_parser = modprobe_confParser()
        dop.addconf("modprobe_conf", modprobe_parser.read_conf())
        if dop.getconf("modprobe_conf") == {}:
            raise KssCommandException('Failure read modprobe config file.')

        dop.unset("modprobe_conf", ["alias", opts.dev])

        if modprobe_parser.write_conf(dop.getconf("modprobe_conf")) is False:
            raise KssCommandException('Failure write modprobe config file.')

        self.up_progress(10)

        #
        # Delete bridge device
        #
        bridge_list = get_bridge_info()
        bond_bridge = None

        for bridge in bridge_list:
            if opts.dev in bridge_list[bridge]:
                bond_bridge = bridge

        if bond_bridge:
            ifdown_cmd = (NETWORK_IFDOWN_COMMAND,
                          bond_bridge,
                          )
            (ifdown_rc, ifdown_res) = execute_command(ifdown_cmd)
            if ifdown_rc != 0:
                raise KssCommandException('Failure stop interface. interface:%s' % (dev))

            for brif in bridge_list[bond_bridge]:
                brctl_delif_cmd = (NETWORK_BRCTL_COMMAND,
                                   "delif",
                                   bond_bridge,
                                   brif,
                                   )
                (brctl_rc, brctl_res) = execute_command(brctl_delif_cmd)
                if brctl_rc != 0:
                    raise KssCommandException('Failure delete bridge port. bridge:%s port:%s' % (dev, brif))

            brctl_delbr_cmd = (NETWORK_BRCTL_COMMAND,
                               "delbr",
                               bond_bridge,
                               )
            (brctl_rc, brctl_res) = execute_command(brctl_delbr_cmd)
            if brctl_rc != 0:
                raise KssCommandException('Failure delete bridge. bridge:%s' % (bond_bridge,))

            remove_file("%s/ifcfg-%s" % (NETWORK_IFCFG_DIR, bond_bridge))

        #
        # Unload bonding module
        #
        remove_bonding_cmd = (SYSTEM_COMMAND_REMOVE_MODULE,
                              "bonding",
                              )
        (rmmod_rc, rmmod_res) = execute_command(remove_bonding_cmd)
        if rmmod_rc != 0:
            raise KssCommandException('Failure remove bonding module.')

        #
        # Restart network
        #
        network_restart_cmd = (NETWORK_COMMAND,
                               "restart",
                               )
        (net_rc, net_res) = execute_command(network_restart_cmd)
        if net_rc != 0:
            raise KssCommandException('Failure restart network.')

        self.logger.info("Deleted bonding device. - bond=%s dev=%s" % (opts.dev, ','.join(restore_dev_list)))
        print >>sys.stdout, _("Deleted bonding device. - bond=%s dev=%s" % (opts.dev, ','.join(restore_dev_list)))

        return True
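
The final status line uses the Python 2 form print >>sys.stdout, which prints to an explicit file object. A rough Python 3 equivalent of that line, with placeholder values standing in for karesansui's option parsing and gettext wrapper:

import sys

bond = 'bond0'
restored = ['eth0', 'eth1']
print("Deleted bonding device. - bond=%s dev=%s" % (bond, ','.join(restored)),
      file=sys.stdout)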

Example 145

Project: ns-3 Source File: wifi-olsr-flowmon.py
def main(argv):

    cmd = ns.core.CommandLine()

    cmd.NumNodesSide = None
    cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")

    cmd.Results = None
    cmd.AddValue("Results", "Write XML results to file")

    cmd.Plot = None
    cmd.AddValue("Plot", "Plot the results using the matplotlib python module")

    cmd.Parse(argv)

    wifi = ns.wifi.WifiHelper.Default()
    wifiMac = ns.wifi.NqosWifiMacHelper.Default()
    wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
    wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
    wifiPhy.SetChannel(wifiChannel.Create())
    ssid = ns.wifi.Ssid("wifi-default")
    wifi.SetRemoteStationManager("ns3::ArfWifiManager")
    wifiMac.SetType ("ns3::AdhocWifiMac",
                     "Ssid", ns.wifi.SsidValue(ssid))

    internet = ns.internet.InternetStackHelper()
    list_routing = ns.internet.Ipv4ListRoutingHelper()
    olsr_routing = ns.olsr.OlsrHelper()
    static_routing = ns.internet.Ipv4StaticRoutingHelper()
    list_routing.Add(static_routing, 0)
    list_routing.Add(olsr_routing, 100)
    internet.SetRoutingHelper(list_routing)

    ipv4Addresses = ns.internet.Ipv4AddressHelper()
    ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))

    port = 9   # Discard port (RFC 863)
    onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
                                  ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
    onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
    onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
    onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))

    addresses = []
    nodes = []

    if cmd.NumNodesSide is None:
        num_nodes_side = NUM_NODES_SIDE
    else:
        num_nodes_side = int(cmd.NumNodesSide)

    for xi in range(num_nodes_side):
        for yi in range(num_nodes_side):

            node = ns.network.Node()
            nodes.append(node)

            internet.Install(ns.network.NodeContainer(node))

            mobility = ns.mobility.ConstantPositionMobilityModel()
            mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
            node.AggregateObject(mobility)
            
            devices = wifi.Install(wifiPhy, wifiMac, node)
            ipv4_interfaces = ipv4Addresses.Assign(devices)
            addresses.append(ipv4_interfaces.GetAddress(0))

    for i, node in enumerate(nodes):
        destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
        #print i, destaddr
        onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
        app = onOffHelper.Install(ns.network.NodeContainer(node))
        urv = ns.core.UniformRandomVariable()
        app.Start(ns.core.Seconds(urv.GetValue(20, 30)))

    #internet.EnablePcapAll("wifi-olsr")
    flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
    #flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
    monitor = flowmon_helper.InstallAll()
    monitor = flowmon_helper.GetMonitor()
    monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
    monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
    monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))

    ns.core.Simulator.Stop(ns.core.Seconds(44.0))
    ns.core.Simulator.Run()

    def print_stats(os, st):
        print >> os, "  Tx Bytes: ", st.txBytes
        print >> os, "  Rx Bytes: ", st.rxBytes
        print >> os, "  Tx Packets: ", st.txPackets
        print >> os, "  Rx Packets: ", st.rxPackets
        print >> os, "  Lost Packets: ", st.lostPackets
        if st.rxPackets > 0:
            print >> os, "  Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
	    print >> os, "  Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
            print >> os, "  Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1

        if 0:
            print >> os, "Delay Histogram"
            for i in range(st.delayHistogram.GetNBins () ):
              print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
                  st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
            print >> os, "Jitter Histogram"
            for i in range(st.jitterHistogram.GetNBins () ):
              print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
                  st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
            print >> os, "PacketSize Histogram"
            for i in range(st.packetSizeHistogram.GetNBins () ):
              print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
                  st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)

        for reason, drops in enumerate(st.packetsDropped):
            print "  Packets dropped by reason %i: %i" % (reason, drops)
        #for reason, drops in enumerate(st.bytesDropped):
        #    print "Bytes dropped by reason %i: %i" % (reason, drops)

    monitor.CheckForLostPackets()
    classifier = flowmon_helper.GetClassifier()

    if cmd.Results is None:
        for flow_id, flow_stats in monitor.GetFlowStats():
            t = classifier.FindFlow(flow_id)
            proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
            print "FlowID: %i (%s %s/%s --> %s/%i)" % \
                (flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
            print_stats(sys.stdout, flow_stats)
    else:
        print monitor.SerializeToXmlFile(cmd.Results, True, True)


    if cmd.Plot is not None:
        import pylab
        delays = []
        for flow_id, flow_stats in monitor.GetFlowStats():
            tupl = classifier.FindFlow(flow_id)
            if tupl.protocol == 17 and tupl.sourcePort == 698:
                continue
            delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
        pylab.hist(delays, 20)
        pylab.xlabel("Delay (s)")
        pylab.ylabel("Number of Flows")
        pylab.show()

    return 0
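
print_stats above takes any file-like object, so the same reporting code can write to sys.stdout or to a file opened by the caller. A small Python 3 sketch of that pattern with made-up flow statistics:

import sys

def print_stats(os, st):
    print("  Tx Bytes: ", st['txBytes'], file=os)
    print("  Rx Bytes: ", st['rxBytes'], file=os)
    if st['rxPackets'] > 0:
        print("  Mean{Delay}: ", st['delaySum'] / st['rxPackets'], file=os)

stats = {'txBytes': 1024, 'rxBytes': 980, 'rxPackets': 7, 'delaySum': 0.42}
print_stats(sys.stdout, stats)            # report to the terminal
with open('flow-stats.txt', 'w') as fh:   # or to a file
    print_stats(fh, stats)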

Example 146

Project: p2ptv-pi Source File: track.py
    def __init__(self, config, rawserver):
        self.config = config
        self.response_size = config['tracker_response_size']
        self.dfile = config['tracker_dfile']
        self.natcheck = config['tracker_nat_check']
        favicon = config['tracker_favicon']
        self.parse_dir_interval = config['tracker_parse_dir_interval']
        self.favicon = None
        if favicon:
            try:
                h = open(favicon, 'rb')
                self.favicon = h.read()
                h.close()
            except:
                print '**warning** specified favicon file -- %s -- does not exist.' % favicon

        self.rawserver = rawserver
        self.cached = {}
        self.cached_t = {}
        self.times = {}
        self.state = {}
        self.seedcount = {}
        self.allowed_IPs = None
        self.banned_IPs = None
        if config['tracker_allowed_ips'] or config['tracker_banned_ips']:
            self.allowed_ip_mtime = 0
            self.banned_ip_mtime = 0
            self.read_ip_lists()
        self.only_local_override_ip = config['tracker_only_local_override_ip']
        if self.only_local_override_ip == 2:
            self.only_local_override_ip = not config['tracker_nat_check']
        if exists(self.dfile):
            try:
                h = open(self.dfile, 'rb')
                if self.config['tracker_dfile_format'] == ITRACKDBFORMAT_BENCODE:
                    ds = h.read()
                    tempstate = bdecode(ds)
                else:
                    tempstate = pickle.load(h)
                h.close()
                if not tempstate.has_key('peers'):
                    tempstate = {'peers': tempstate}
                statefiletemplate(tempstate)
                self.state = tempstate
            except:
                print '**warning** statefile ' + self.dfile + ' corrupt; resetting'

        self.downloads = self.state.setdefault('peers', {})
        self.completed = self.state.setdefault('completed', {})
        self.becache = {}
        for infohash, ds in self.downloads.items():
            self.seedcount[infohash] = 0
            for x, y in ds.items():
                ip = y['ip']
                if self.allowed_IPs and not self.allowed_IPs.includes(ip) or self.banned_IPs and self.banned_IPs.includes(ip):
                    del ds[x]
                    continue
                if not y['left']:
                    self.seedcount[infohash] += 1
                if y.get('nat', -1):
                    continue
                gip = y.get('given_ip')
                if is_valid_ip(gip) and (not self.only_local_override_ip or local_IPs.includes(ip)):
                    ip = gip
                self.natcheckOK(infohash, x, ip, y['port'], y['left'])

        for x in self.downloads.keys():
            self.times[x] = {}
            for y in self.downloads[x].keys():
                self.times[x][y] = 0

        self.trackerid = createPeerID('-T-')
        seed(self.trackerid)
        self.reannounce_interval = config['tracker_reannounce_interval']
        self.save_dfile_interval = config['tracker_save_dfile_interval']
        self.show_names = config['tracker_show_names']
        rawserver.add_task(self.save_state, self.save_dfile_interval)
        self.prevtime = clock()
        self.timeout_downloaders_interval = config['tracker_timeout_downloaders_interval']
        rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
        self.logfile = None
        self.log = None
        if config['tracker_logfile'] and config['tracker_logfile'] != '-':
            try:
                self.logfile = config['tracker_logfile']
                self.log = open(self.logfile, 'a')
                sys.stdout = self.log
                print '# Log Started: ', isotime()
            except:
                print '**warning** could not redirect stdout to log file: ', sys.exc_info()[0]

        if config['tracker_hupmonitor']:

            def huphandler(signum, frame, self = self):
                try:
                    self.log.close()
                    self.log = open(self.logfile, 'a')
                    sys.stdout = self.log
                    print '# Log reopened: ', isotime()
                except:
                    print '**warning** could not reopen logfile'

            signal.signal(signal.SIGHUP, huphandler)
        self.allow_get = config['tracker_allow_get']
        self.t2tlist = T2TList(config['tracker_multitracker_enabled'], self.trackerid, config['tracker_multitracker_reannounce_interval'], config['tracker_multitracker_maxpeers'], config['tracker_multitracker_http_timeout'], self.rawserver)
        if config['tracker_allowed_list']:
            if config['tracker_allowed_dir']:
                print '**warning** allowed_dir and allowed_list options cannot be used together'
                print '**warning** disregarding allowed_dir'
                config['tracker_allowed_dir'] = ''
            self.allowed = self.state.setdefault('allowed_list', {})
            self.allowed_list_mtime = 0
            self.parse_allowed()
            self.remove_from_state('allowed', 'allowed_dir_files')
            if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT:
                config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE
            config['tracker_allowed_controls'] = 0
        elif config['tracker_allowed_dir']:
            self.allowed = self.state.setdefault('allowed', {})
            self.allowed_dir_files = self.state.setdefault('allowed_dir_files', {})
            self.allowed_dir_blocked = {}
            self.parse_allowed()
            self.remove_from_state('allowed_list')
        else:
            self.allowed = None
            self.remove_from_state('allowed', 'allowed_dir_files', 'allowed_list')
            if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT:
                config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE
            config['tracker_allowed_controls'] = 0
        self.uq_broken = unquote('+') != ' '
        self.keep_dead = config['tracker_keep_dead']
        self.Filter = Filter(rawserver.add_task)
        aggregator = config['tracker_aggregator']
        if aggregator == 0:
            self.is_aggregator = False
            self.aggregator_key = None
        else:
            self.is_aggregator = True
            if aggregator == 1:
                self.aggregator_key = None
            else:
                self.aggregator_key = aggregator
            self.natcheck = False
        send = config['tracker_aggregate_forward']
        if not send:
            self.aggregate_forward = None
        else:
            try:
                self.aggregate_forward, self.aggregate_password = send
            except:
                self.aggregate_forward = send
                self.aggregate_password = None

        self.cachetime = 0
        self.track_cachetimeupdate()
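
The tracker rebinds sys.stdout to its log file so every bare print statement ends up in the log, and the SIGHUP handler repeats the trick after reopening the file. A small Python 3 sketch of the same idea that also restores the original stream automatically (the log file name is illustrative):

import sys
from contextlib import redirect_stdout

with open('tracker.log', 'a') as logfile:
    with redirect_stdout(logfile):
        print('# Log Started')      # written to tracker.log
print('back on the real stdout')    # written to the terminal again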

Example 147

Project: BLEHeartRateLogger Source File: BLEHeartRateLogger.py
def main(addr=None, sqlfile=None, gatttool="gatttool", check_battery=False, hr_handle=None, debug_gatttool=False):
    """
    main routine which orchestrates everything
    """

    if sqlfile is not None:
        # Init database connection
        sq = sqlite3.connect(sqlfile)
        with sq:
            sq.execute("CREATE TABLE IF NOT EXISTS hrm (tstamp INTEGER, hr INTEGER, rr INTEGER)")
            sq.execute("CREATE TABLE IF NOT EXISTS sql (tstamp INTEGER, commit_time REAL, commit_every INTEGER)")

    if addr is None:
        # In case no address has been provided, we scan to find any BLE devices
        addr = get_ble_hr_mac()
        if addr == None:
            sq.close()
            return

    retry = True
    while retry:

        while 1:
            log.info("Establishing connection to " + addr)
            gt = pexpect.spawn(gatttool + " -b " + addr + " --interactive")
            if debug_gatttool:
                gt.logfile = sys.stdout

            gt.expect(r"\[LE\]>")
            gt.sendline("connect")

            try:
                i = gt.expect(["Connection successful.", r"\[CON\]"], timeout=30)
                if i == 0:
                    gt.expect(r"\[LE\]>", timeout=30)

            except pexpect.TIMEOUT:
                log.info("Connection timeout. Retrying.")
                continue

            except KeyboardInterrupt:
                log.info("Received keyboard interrupt. Quitting cleanly.")
                retry = False
                break
            break

        if not retry:
            break

        log.info("Connected to " + addr)

        if check_battery:
            gt.sendline("char-read-uuid 00002a19-0000-1000-8000-00805f9b34fb")
            try:
                gt.expect("value: ([0-9]+)")
                battery_level = gt.match.group(1)
                log.info("Battery level: " + str(int(battery_level, 16)))

            except pexpect.TIMEOUT:
                log.error("Couldn't read battery level.")

        if hr_handle == None:
            # We determine which handle we should read for getting the heart rate
            # measurement characteristic.
            gt.sendline("characteristics")

            while 1:
                try:
                    gt.expect(r"handle: ([x0-9]+), uuid: ([0-9a-f]{8})", timeout=10)
                except pexpect.TIMEOUT:
                    break
                handle = gt.match.group(1)
                uuid = gt.match.group(2)

                if uuid == "00002902":
                    # We send the request to get HRM notifications
                    gt.sendline("char-write-req " + handle + " 0100")

                elif uuid == "00002a37":
                    hr_handle = handle

            if hr_handle == None:
                log.error("Couldn't find the heart rate measurement handle?!")
                return

        # Time period between two measures. This will be updated automatically.
        period = 1.
        last_measure = time.time() - period
        hr_expect = "Notification handle = " + hr_handle + " value: ([0-9a-f ]+)"

        while 1:
            try:
                gt.expect(hr_expect, timeout=10)

            except pexpect.TIMEOUT:
                # If the timer expires, it means that we have lost the
                # connection with the HR monitor
                log.warn("Connection lost with " + addr + ". Reconnecting.")
                sq.commit()
                gt.sendline("quit")
                try:
                    gt.wait()
                except:
                    pass
                time.sleep(1)
                break

            except KeyboardInterrupt:
                log.info("Received keyboard interrupt. Quitting cleanly.")
                retry = False
                break

            # We measure here the time between two measures. As the sensor
            # sometimes sends a small burst, we have a simple low-pass filter
            # to smooth the measure.
            tmeasure = time.time()
            period = period + 1 / 16. * ((tmeasure - last_measure) - period)
            last_measure = tmeasure

            # Get data from gatttool
            datahex = gt.match.group(1).strip()
            data = map(lambda x: int(x, 16), datahex.split(' '))
            res = interpret(data)


            if sqlfile is None:
                log.info("Heart rate: " + str(res["hr"]))
                continue

            log.debug(res)

            # Push the data to the database
            insert_db(sq, res, period)



    if sqlfile is not None:
        # We close the database properly
        sq.commit()
        sq.close()

    # We quit and close the BLE connection properly
    gt.sendline("quit")
    try:
        gt.wait()
    except:
        pass
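
Setting gt.logfile = sys.stdout above makes pexpect mirror the whole gatttool session to the terminal when debugging is enabled. A minimal sketch of that hook, assuming pexpect is installed and (on Python 3) that the child is spawned with an encoding so a text stream like sys.stdout is accepted:

import sys
import pexpect

child = pexpect.spawn('echo hello', encoding='utf-8')
child.logfile = sys.stdout      # everything read from the child is echoed here
child.expect(pexpect.EOF)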

Example 148

Project: svtools Source File: vcf_group_multiline.py
def sv_genotype(vcf_file):
    in_header = True
    header = []
    breakend_dict = {} # cache to hold unmatched generic breakends for genotyping
    vcf = Vcf()
    vcf_out = sys.stdout

    # read input VCF
    for line in vcf_file:
        if in_header:
            if line[0] == '#':
                header.append(line) 
                if line[1] != '#':
                    vcf_samples = line.rstrip().split('\t')[9:]
                continue
            else:
                in_header = False
                vcf.add_header(header)
                # if detailed:
                vcf.add_format('GQ', 1, 'Float', 'Genotype quality')
                vcf.add_format('SQ', 1, 'Float', 'Phred-scaled probability that this site is variant (non-reference in this sample)')
                vcf.add_format('GL', 'G', 'Float', 'Genotype Likelihood, log10-scaled likelihoods of the data given the called genotype for each possible genotype generated from the reference and alternate alleles given the sample ploidy')
                vcf.add_format('DP', 1, 'Integer', 'Read depth')
                vcf.add_format('RO', 1, 'Integer', 'Reference allele observation count, with partial observations recorded fractionally')
                vcf.add_format('AO', 'A', 'Integer', 'Alternate allele observations, with partial observations recorded fractionally')
                vcf.add_format('QR', 1, 'Integer', 'Sum of quality of reference observations')
                vcf.add_format('QA', 'A', 'Integer', 'Sum of quality of alternate observations')
                vcf.add_format('RS', 1, 'Integer', 'Reference allele split-read observation count, with partial observations recorded fractionally')
                vcf.add_format('AS', 'A', 'Integer', 'Alternate allele split-read observation count, with partial observations recorded fractionally')
                vcf.add_format('RP', 1, 'Integer', 'Reference allele paired-end observation count, with partial observations recorded fractionally')
                vcf.add_format('AP', 'A', 'Integer', 'Alternate allele paired-end observation count, with partial observations recorded fractionally')
                vcf.add_format('AB', 'A', 'Float', 'Allele balance, fraction of observations from alternate allele, QA/(QR+QA)')

                # write the output header
                if len(vcf_samples) > 0:
                    vcf_out.write(vcf.get_header(include_samples=True) + '\n')
                else:
                    vcf_out.write(vcf.get_header(include_samples=False) + '\n')

        v = line.rstrip().split('\t')
        var = Variant(v, vcf)

        # genotype generic breakends
        if var.info['SVTYPE']=='BND':
            if var.info['MATEID'] in breakend_dict:
                var2 = var
                var = breakend_dict[var.info['MATEID']]
                chromA = var.chrom
                chromB = var2.chrom
                posA = var.pos
                posB = var2.pos
                # confidence intervals
                ciA = [posA + ci for ci in map(int, var.info['CIPOS'].split(','))]
                ciB = [posB + ci for ci in map(int, var2.info['CIPOS'].split(','))]

                # infer the strands from the alt allele
                if var.alt[-1] == '[' or var.alt[-1] == ']':
                    o1 = '+'
                else: o1 = '-'
                if var2.alt[-1] == '[' or var2.alt[-1] == ']':
                    o2 = '+'
                else: o2 = '-'
            else:
                breakend_dict[var.var_id] = var
                continue
        else:
            chromA = var.chrom
            chromB = var.chrom
            posA = var.pos
            posB = int(var.get_info('END'))
            # confidence intervals
            ciA = [posA + ci for ci in map(int, var.info['CIPOS'].split(','))]
            ciB = [posB + ci for ci in map(int, var.info['CIEND'].split(','))]
            if var.get_info('SVTYPE') == 'DEL':
                o1, o2 =  '+', '-'
            elif var.get_info('SVTYPE') == 'DUP':
                o1, o2 =  '-', '+'
            elif var.get_info('SVTYPE') == 'INV':
                o1, o2 =  '+', '+'

        # # increment the negative strand values (note position in VCF should be the base immediately left of the breakpoint junction)
        # if o1 == '-': posA += 1
        # if o2 == '-': posB += 1
        # # if debug: print posA, posB

        # # for i in xrange(len(bam_list)):
        # for sample in sample_list:
        #     '''
        #     Breakend A
        #     '''
        #     # Count splitters
        #     ref_counter_a = Counter()
        #     spl_counter_a = Counter()
        #     ref_scaled_counter_a = Counter()
        #     spl_scaled_counter_a = Counter()

        #     for ref_read in sample.bam.fetch(chromA, max(posA - padding, 0), posA + padding + 1):
        #         if not ref_read.is_duplicate and not ref_read.is_unmapped:
        #             for p in xrange(ref_read.pos + 1, ref_read.aend + 1):
        #                 if p - ref_read.pos >= splflank and ref_read.aend - p >= splflank:
        #                     ref_counter_a[p] += 1
        #                     ref_scaled_counter_a[p] += (1-10**(-ref_read.mapq/10.0))
        #     for spl_read in sample.spl_bam.fetch(chromA, max(posA - padding, 0), posA + padding + 1):
        #         if not spl_read.is_duplicate and not spl_read.is_unmapped:
        #             if o1 == '+' and spl_read.cigar[0][0] == 0:
        #                 # if debug: print 'o1+', spl_read.aend
        #                 spl_counter_a[spl_read.aend] += 1
        #                 spl_scaled_counter_a[spl_read.aend] += (1-10**(-spl_read.mapq/10.0))
        #             elif o1 == '-' and spl_read.cigar[-1][0] == 0:
        #                 # if debug: print 'o1-', spl_read.pos + 1
        #                 spl_counter_a[spl_read.pos + 1] += 1
        #                 spl_scaled_counter_a[spl_read.pos + 1] += (1-10**(-spl_read.mapq/10.0))

        #     # Count paired-end discordant and concordants
        #     (conc_counter_a,
        #      disc_counter_a,
        #      conc_scaled_counter_a,
        #      disc_scaled_counter_a) = count_pairedend(chromA, posA, ciA,
        #                                               chromB, posB, ciB,
        #                                               o1, o2,
        #                                               var.info['SVTYPE'],
        #                                               sample,
        #                                               z, discflank)
        #     '''
        #     Breakend B
        #     '''
        #     # Count splitters
        #     ref_counter_b = Counter()
        #     spl_counter_b = Counter()
        #     ref_scaled_counter_b = Counter()
        #     spl_scaled_counter_b = Counter()

        #     for ref_read in sample.bam.fetch(chromB, max(posB - padding, 0), posB + padding + 1):
        #         if not ref_read.is_duplicate and not ref_read.is_unmapped:
        #             for p in xrange(ref_read.pos + 1, ref_read.aend + 1):
        #                 if p - ref_read.pos >= splflank and ref_read.aend - p >= splflank:
        #                     ref_counter_b[p] += 1
        #                     ref_scaled_counter_b[p] += (1-10**(-ref_read.mapq/10.0))
        #     for spl_read in sample.spl_bam.fetch(chromB, max(posB - padding, 0), posB + padding + 1):
        #         if not spl_read.is_duplicate and not spl_read.is_unmapped:
        #             if o2 == '+' and spl_read.cigar[0][0] == 0:
        #                 spl_counter_b[spl_read.aend] += 1
        #                 # if debug: print 'o2+', spl_read.aend
        #                 spl_scaled_counter_b[spl_read.aend] += (1-10**(-spl_read.mapq/10.0))
        #             elif o2 == '-' and spl_read.cigar[-1][0] == 0:
        #                 # if debug: print 'o2-', spl_read.pos + 1
        #                 spl_counter_b[spl_read.pos + 1] += 1
        #                 spl_scaled_counter_b[spl_read.pos + 1] += (1-10**(-spl_read.mapq/10.0))
            
        #     # tally up the splitters
        #     sr_ref_a = int(round(sum(ref_counter_a[p] for p in xrange(posA - split_slop, posA + split_slop + 1)) / float(2 * split_slop + 1)))
        #     sr_spl_a = sum(spl_counter_a[p] for p in xrange(posA-split_slop, posA+split_slop + 1))
        #     sr_ref_b = int(round(sum(ref_counter_b[p] for p in xrange(posB - split_slop, posB + split_slop + 1)) / float(2 * split_slop + 1)))
        #     sr_spl_b = sum(spl_counter_b[p] for p in xrange(posB - split_slop, posB + split_slop + 1))

        #     sr_ref_scaled_a = sum(ref_scaled_counter_a[p] for p in xrange(posA - split_slop, posA + split_slop + 1)) / float(2 * split_slop + 1)
        #     sr_spl_scaled_a = sum(spl_scaled_counter_a[p] for p in xrange(posA-split_slop, posA+split_slop + 1))
        #     sr_ref_scaled_b = sum(ref_scaled_counter_b[p] for p in xrange(posB - split_slop, posB + split_slop + 1)) / float(2 * split_slop + 1)
        #     sr_spl_scaled_b = sum(spl_scaled_counter_b[p] for p in xrange(posB - split_slop, posB + split_slop + 1))

        #     # Count paired-end discordants and concordants
        #     (conc_counter_b,
        #      disc_counter_b,
        #      conc_scaled_counter_b,
        #      disc_scaled_counter_b) = count_pairedend(chromB, posB, ciB,
        #                                               chromA, posA, ciA,
        #                                               o2, o1,
        #                                               var.info['SVTYPE'],
        #                                               sample,
        #                                               z, discflank)
        #     if debug:
        #         print '--------------------'
        #         print sample.name
        #         print 'sr_a', '(ref, alt)', sr_ref_a, sr_spl_a
        #         print 'pe_a', '(ref, alt)', conc_counter_a, disc_counter_a
        #         print 'sr_b', '(ref, alt)', sr_ref_b, sr_spl_b
        #         print 'pe_b', '(ref, alt)', conc_counter_b, disc_counter_b
        #         print 'sr_a_scaled', '(ref, alt)', sr_ref_scaled_a, sr_spl_scaled_a
        #         print 'pe_a_scaled', '(ref, alt)', conc_scaled_counter_a, disc_scaled_counter_a
        #         print 'sr_b_scaled', '(ref, alt)', sr_ref_scaled_b, sr_spl_scaled_b
        #         print 'pe_b_scaled', '(ref, alt)', conc_scaled_counter_b, disc_scaled_counter_b

        #     # merge the breakend support
        #     split_ref = 0 # set these to zero unless there are informative alt bases for the ev type
        #     disc_ref = 0
        #     split_alt = sr_spl_a + sr_spl_b
        #     if split_alt > 0:
        #         split_ref = sr_ref_a + sr_ref_b
        #     disc_alt = disc_counter_a + disc_counter_b
        #     if disc_alt > 0:
        #         disc_ref = conc_counter_a + conc_counter_b
        #     if split_alt == 0 and disc_alt == 0:
        #         split_ref = sr_ref_a + sr_ref_b
        #         disc_ref = conc_counter_a + conc_counter_b

        #     split_scaled_ref = 0 # set these to zero unless there are informative alt bases for the ev type
        #     disc_scaled_ref = 0
        #     split_scaled_alt = sr_spl_scaled_a + sr_spl_scaled_b
        #     if int(split_scaled_alt) > 0:
        #         split_scaled_ref = sr_ref_scaled_a + sr_ref_scaled_b
        #     disc_scaled_alt = disc_scaled_counter_a + disc_scaled_counter_b
        #     if int(disc_scaled_alt) > 0:
        #         disc_scaled_ref = conc_scaled_counter_a + conc_scaled_counter_b
        #     if int(split_scaled_alt) == 0 and int(disc_scaled_alt) == 0: # if no alt alleles, set reference
        #         split_scaled_ref = sr_ref_scaled_a + sr_ref_scaled_b
        #         disc_scaled_ref = conc_scaled_counter_a + conc_scaled_counter_b

        #     if split_scaled_alt + split_scaled_ref + disc_scaled_alt + disc_scaled_ref > 0:
        #         # get bayesian classifier
        #         if var.info['SVTYPE'] == "DUP": is_dup = True
        #         else: is_dup = False
        #         gt_lplist = bayes_gt(int(split_weight * split_scaled_ref) + int(disc_weight * disc_scaled_ref), int(split_weight * split_scaled_alt) + int(disc_weight * disc_scaled_alt), is_dup)
        #         gt_idx = gt_lplist.index(max(gt_lplist))

        #         # print log probabilities of homref, het, homalt
        #         if debug:
        #             print gt_lplist

        #         # set the overall variant QUAL score and sample specific fields
        #         var.genotype(sample.name).set_format('GL', ','.join(['%.0f' % x for x in gt_lplist]))
        #         var.genotype(sample.name).set_format('DP', int(split_scaled_ref + split_scaled_alt + disc_scaled_ref + disc_scaled_alt))
        #         var.genotype(sample.name).set_format('AO', int(split_scaled_alt + disc_scaled_alt))
        #         var.genotype(sample.name).set_format('RO', int(split_scaled_ref + disc_scaled_ref))
        #         # if detailed:
        #         var.genotype(sample.name).set_format('AS', int(split_scaled_alt))
        #         var.genotype(sample.name).set_format('RS', int(split_scaled_ref))
        #         var.genotype(sample.name).set_format('AP', int(disc_scaled_alt))
        #         var.genotype(sample.name).set_format('RP', int(disc_scaled_ref))

        #         # assign genotypes
        #         gt_sum = 0
        #         for gt in gt_lplist:
        #             try:
        #                 gt_sum += 10**gt
        #             except OverflowError:
        #                 gt_sum += 0
        #         if gt_sum > 0:
        #             gt_sum_log = math.log(gt_sum, 10)
        #             sample_qual = abs(-10 * (gt_lplist[0] - gt_sum_log)) # phred-scaled probability site is non-reference in this sample
        #             if 1 - (10**gt_lplist[gt_idx] / 10**gt_sum_log) == 0:
        #                 phred_gq = 200                    
        #             else:
        #                 phred_gq = abs(-10 * math.log(1 - (10**gt_lplist[gt_idx] / 10**gt_sum_log), 10))
        #             var.genotype(sample.name).set_format('GQ', phred_gq)
        #             var.genotype(sample.name).set_format('SQ', sample_qual)
        #             var.qual += sample_qual
        #             if gt_idx == 1:
        #                 var.genotype(sample.name).set_format('GT', '0/1')
        #             elif gt_idx == 2:
        #                 var.genotype(sample.name).set_format('GT', '1/1')
        #             elif gt_idx == 0:
        #                 var.genotype(sample.name).set_format('GT', '0/0')
        #         else:
        #             var.genotype(sample.name).set_format('GQ', '.')
        #             var.genotype(sample.name).set_format('SQ', '.')
        #             var.genotype(sample.name).set_format('GT', './.')
        #     else:
            # var.genotype(sample.name).set_format('GT', './.')
            # var.qual = 0
            # var.genotype(sample.name).set_format('GQ', '.')
            # var.genotype(sample.name).set_format('GL', '.')
            # var.genotype(sample.name).set_format('DP', 0)
            # var.genotype(sample.name).set_format('AO', 0)
            # var.genotype(sample.name).set_format('RO', 0)
            # # if detailed:
            # var.genotype(sample.name).set_format('AS', 0)
            # var.genotype(sample.name).set_format('RS', 0)
            # var.genotype(sample.name).set_format('AP', 0)
            # var.genotype(sample.name).set_format('RP', 0)

        # after all samples have been processed, write
        vcf_out.write(var.get_var_string() + '\n')
        if var.info['SVTYPE'] == 'BND':
            var2.qual = var.qual
            var2.active_formats = var.active_formats
            var2.genotype = var.genotype
            vcf_out.write(var2.get_var_string() + '\n')
    vcf_out.close()
    
    return
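
This function writes the genotyped VCF records straight to sys.stdout and then calls close() on it. A minimal sketch of the same output pattern is shown below, with the close guarded so it does not shut down the interpreter's standard output if vcf_out is later pointed at a file; the header and record strings are placeholders, not real svtools output:

import sys

def write_vcf(records, vcf_out=sys.stdout):
    # Hypothetical sketch: emit a header line followed by one record per line.
    vcf_out.write('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n')
    for rec in records:
        vcf_out.write(rec + '\n')
    if vcf_out is not sys.stdout:
        vcf_out.close()

write_vcf(['1\t100\t.\tN\t<DEL>\t.\t.\tSVTYPE=DEL;END=500'])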

Example 149

Project: glow Source File: frontend.py
    def extract(self, argv):
        """Subcommand for extracting messages from source files and generating
        a POT file.

        :param argv: the command arguments
        """
        parser = OptionParser(usage=self.usage % ('extract', 'dir1 <dir2> ...'),
                              description=self.commands['extract'])
        parser.add_option('--charset', dest='charset',
                          help='charset to use in the output (default '
                               '"%default")')
        parser.add_option('-k', '--keyword', dest='keywords', action='append',
                          help='keywords to look for in addition to the '
                               'defaults. You can specify multiple -k flags on '
                               'the command line.')
        parser.add_option('--no-default-keywords', dest='no_default_keywords',
                          action='store_true',
                          help="do not include the default keywords")
        parser.add_option('--mapping', '-F', dest='mapping_file',
                          help='path to the extraction mapping file')
        parser.add_option('--no-location', dest='no_location',
                          action='store_true',
                          help='do not include location comments with filename '
                               'and line number')
        parser.add_option('--omit-header', dest='omit_header',
                          action='store_true',
                          help='do not include msgid "" entry in header')
        parser.add_option('-o', '--output', dest='output',
                          help='path to the output POT file')
        parser.add_option('-w', '--width', dest='width', type='int',
                          help="set output line width (default %default)")
        parser.add_option('--no-wrap', dest='no_wrap', action = 'store_true',
                          help='do not break long message lines, longer than '
                               'the output line width, into several lines')
        parser.add_option('--sort-output', dest='sort_output',
                          action='store_true',
                          help='generate sorted output (default False)')
        parser.add_option('--sort-by-file', dest='sort_by_file',
                          action='store_true',
                          help='sort output by file location (default False)')
        parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address',
                          metavar='EMAIL@ADDRESS',
                          help='set report address for msgid')
        parser.add_option('--copyright-holder', dest='copyright_holder',
                          help='set copyright holder in output')
        parser.add_option('--add-comments', '-c', dest='comment_tags',
                          metavar='TAG', action='append',
                          help='place comment block with TAG (or those '
                               'preceding keyword lines) in output file. One '
                               'TAG per argument call')
        parser.add_option('--strip-comment-tags', '-s',
                          dest='strip_comment_tags', action='store_true',
                          help='Strip the comment tags from the comments.')

        parser.set_defaults(charset='utf-8', keywords=[],
                            no_default_keywords=False, no_location=False,
                            omit_header = False, width=76, no_wrap=False,
                            sort_output=False, sort_by_file=False,
                            comment_tags=[], strip_comment_tags=False)
        options, args = parser.parse_args(argv)
        if not args:
            parser.error('incorrect number of arguments')

        if options.output not in (None, '-'):
            outfile = open(options.output, 'w')
        else:
            outfile = sys.stdout

        keywords = DEFAULT_KEYWORDS.copy()
        if options.no_default_keywords:
            if not options.keywords:
                parser.error('you must specify new keywords if you disable the '
                             'default ones')
            keywords = {}
        if options.keywords:
            keywords.update(parse_keywords(options.keywords))

        if options.mapping_file:
            fileobj = open(options.mapping_file, 'U')
            try:
                method_map, options_map = parse_mapping(fileobj)
            finally:
                fileobj.close()
        else:
            method_map = DEFAULT_MAPPING
            options_map = {}

        if options.width and options.no_wrap:
            parser.error("'--no-wrap' and '--width' are mutually exclusive.")
        elif not options.width and not options.no_wrap:
            options.width = 76
        elif not options.width and options.no_wrap:
            options.width = 0

        if options.sort_output and options.sort_by_file:
            parser.error("'--sort-output' and '--sort-by-file' are mutually "
                         "exclusive")

        try:
            catalog = Catalog(msgid_bugs_address=options.msgid_bugs_address,
                              copyright_holder=options.copyright_holder,
                              charset=options.charset)

            for dirname in args:
                if not os.path.isdir(dirname):
                    parser.error('%r is not a directory' % dirname)

                def callback(filename, method, options):
                    if method == 'ignore':
                        return
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    optstr = ''
                    if options:
                        optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
                                                      k, v in options.items()])
                    self.log.info('extracting messages from %s%s', filepath,
                                  optstr)

                extracted = extract_from_dir(dirname, method_map, options_map,
                                             keywords, options.comment_tags,
                                             callback=callback,
                                             strip_comment_tags=
                                                options.strip_comment_tags)
                for filename, lineno, message, comments in extracted:
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    catalog.add(message, None, [(filepath, lineno)],
                                auto_comments=comments)

            if options.output not in (None, '-'):
                self.log.info('writing PO template file to %s' % options.output)
            write_po(outfile, catalog, width=options.width,
                     no_location=options.no_location,
                     omit_header=options.omit_header,
                     sort_output=options.sort_output,
                     sort_by_file=options.sort_by_file)
        finally:
            if options.output:
                outfile.close()
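
The extract subcommand above follows the usual command-line convention: an output of None or '-' means write to sys.stdout, anything else is opened as a regular file. A stripped-down sketch of that branch is shown here, using argparse instead of the deprecated optparse and an illustrative flag name; the guarded close avoids closing standard output by mistake:

import sys
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', default=None,
                    help="output file, or '-' for standard output")
options = parser.parse_args()

if options.output not in (None, '-'):
    outfile = open(options.output, 'w')
else:
    outfile = sys.stdout

try:
    outfile.write('extracted messages would go here\n')
finally:
    if outfile is not sys.stdout:
        outfile.close()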

Example 150

Project: ocitysmap Source File: render.py
def main():
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

    # Paper sizes, sorted in increasing widths
    KNOWN_PAPER_SIZE_NAMES = \
        map(lambda p: p[0],
            sorted(ocitysmap.layoutlib.PAPER_SIZES,
                   key=lambda p: p[1]))

    # Known renderer names
    KNOWN_RENDERERS_NAMES = \
        map(lambda r: "%s (%s)" % (r.name, r.description),
            ocitysmap.layoutlib.renderers.get_renderers())

    # Known paper orientations
    KNOWN_PAPER_ORIENTATIONS = ['portrait', 'landscape']

    usage = '%prog [options] [-b <lat1,long1 lat2,long2>|--osmid <osmid>]'
    parser = optparse.OptionParser(usage=usage,
                                   version='%%prog %s' % __version__)
    parser.add_option('-C', '--config', dest='config_file', metavar='FILE',
                      help='specify the location of the config file.')
    parser.add_option('-p', '--prefix', dest='output_prefix', metavar='PREFIX',
                      help='set a prefix to the generated file names. '
                           'Defaults to "citymap".',
                      default='citymap')
    parser.add_option('-f', '--format', dest='output_formats', metavar='FMT',
                      help='specify the output formats. Supported file '
                           'formats: svg, svgz, pdf, ps, ps.gz, png, and csv. '
                           'Defaults to PDF. May be specified multiple times.',
                      action='append')
    parser.add_option('-t', '--title', dest='output_title', metavar='TITLE',
                      help='specify the title displayed in the output files.',
                      default="My Map")
    parser.add_option('--osmid', dest='osmid', metavar='OSMID',
                      help='OSM ID representing the polygon of the city '
                      'to render.', type="int")
    parser.add_option('-b', '--bounding-box', dest='bbox',  nargs=2,
                      metavar='LAT1,LON1 LAT2,LON2',
                      help='bounding box (EPSG: 4326).')
    parser.add_option('-L', '--language', dest='language',
                      metavar='LANGUAGE_CODE',
                      help='language to use when generating the index '
                           '(default=fr_FR.UTF-8). The map language is '
                            'driven by the system\'s locale setting.',
                      default='fr_FR.UTF-8')
    parser.add_option('-s', '--stylesheet', dest='stylesheet',
                      metavar='NAME',
                      help='specify which stylesheet to use. Defaults to the '
                      'first specified in the configuration file.')
    parser.add_option('-l', '--layout', dest='layout',
                      metavar='NAME',
                      default=KNOWN_RENDERERS_NAMES[0].split()[0],
                      help=('specify which layout to use. Available layouts '
                            'are: %s. Defaults to %s.' %
                            (', '.join(KNOWN_RENDERERS_NAMES),
                             KNOWN_RENDERERS_NAMES[0].split()[0])))
    parser.add_option('--paper-format', metavar='FMT',
                      help='set the output paper format. Either "default", '
                           'or one of %s.' % ', '.join(KNOWN_PAPER_SIZE_NAMES),
                      default='default')
    parser.add_option('--orientation', metavar='ORIENTATION',
                      help='set the output paper orientation. Either '
                            '"portrait" or "landscape". Defaults to portrait.',
                      default='portrait')

    (options, args) = parser.parse_args()
    if len(args):
        parser.print_help()
        return 1

    # Make sure exactly one of --bounding-box or --osmid is given
    optcnt = 0
    for var in options.bbox, options.osmid:
        if var:
            optcnt += 1

    if optcnt == 0:
        parser.error("One of --bounding-box "
                     "or --osmid is mandatory")

    if optcnt > 1:
        parser.error("Options --bounding-box "
                     "or --osmid are exclusive")

    # Parse the config file and instantiate the main object
    mapper = ocitysmap.OCitySMap(
        [options.config_file or os.path.join(os.environ["HOME"], '.ocitysmap.conf')])

    # Parse bounding box arguments when given
    bbox = None
    if options.bbox:
        try:
            bbox = BoundingBox.parse_latlon_strtuple(options.bbox)
        except ValueError:
            parser.error('Invalid bounding box!')
        # Check that the latitudes and longitudes of the two corners differ
        lat1, lon1 = bbox.get_top_left()
        lat2, lon2 = bbox.get_bottom_right()
        if lat1 == lat2:
            parser.error('Same latitude in bounding box corners')
        if lon1 == lon2:
            parser.error('Same longitude in bounding box corners')

    # Parse OSM id when given
    if options.osmid:
        try:
            bbox  = BoundingBox.parse_wkt(
                mapper.get_geographic_info(options.osmid)[0])
        except LookupError:
            parser.error('No such OSM id: %d' % options.osmid)

    # Parse stylesheet (defaults to 1st one)
    if options.stylesheet is None:
        stylesheet = mapper.get_all_style_configurations()[0]
    else:
        try:
            stylesheet = mapper.get_stylesheet_by_name(options.stylesheet)
        except LookupError, ex:
            parser.error("%s. Available stylesheets: %s."
                 % (ex, ', '.join(map(lambda s: s.name,
                      mapper.STYLESHEET_REGISTRY))))

    # Parse rendering layout
    if options.layout is None:
        cls_renderer = ocitysmap.layoutlib.renderers.get_renderers()[0]
    else:
        try:
            cls_renderer = ocitysmap.layoutlib.renderers.get_renderer_class_by_name(options.layout)
        except LookupError, ex:
            parser.error("%s\nAvailable layouts: %s."
                 % (ex, ', '.join(map(lambda lo: "%s (%s)"
                          % (lo.name, lo.description),
                          ocitysmap.layoutlib.renderers.get_renderers()))))

    # Output file formats
    if not options.output_formats:
        options.output_formats = ['pdf']
    options.output_formats = set(options.output_formats)

    # Reject output formats that are not supported by the renderer
    compatible_output_formats = cls_renderer.get_compatible_output_formats()
    for format in options.output_formats:
        if format not in compatible_output_formats:
            parser.error("Output format %s not supported by layout %s" %
                         (format, cls_renderer.name))

    # Parse paper size
    if (options.paper_format != 'default') \
            and options.paper_format not in KNOWN_PAPER_SIZE_NAMES:
        parser.error("Invalid paper format. Allowed formats = default, %s"
                     % ', '.join(KNOWN_PAPER_SIZE_NAMES))

    # Determine actual paper size
    compat_papers = cls_renderer.get_compatible_paper_sizes(bbox)
    if not compat_papers:
        parser.error("No paper size compatible with this rendering.")

    paper_descr = None
    if options.paper_format == 'default':
        for p in compat_papers:
            if p[5]:
                paper_descr = p
                break
    else:
        # Make sure the requested paper size is in list
        for p in compat_papers:
            if p[0] == options.paper_format:
                paper_descr = p
                break
    if not paper_descr:
        parser.error("Requested paper format not compatible with rendering. Compatible paper formats are: %s."
             % ', '.join(map(lambda p: "%s (%.1fx%.1fcm²)"
                % (p[0], p[1]/10., p[2]/10.),
                compat_papers)))
    assert paper_descr[3] or paper_descr[4] # Portrait or Landscape accepted

    # Validate requested orientation
    if options.orientation not in KNOWN_PAPER_ORIENTATIONS:
        parser.error("Invalid paper orientation. Allowed orientations: %s"
                     % KNOWN_PAPER_ORIENTATIONS)

    if (options.orientation == 'portrait' and not paper_descr[3]) or \
        (options.orientation == 'landscape' and not paper_descr[4]):
        parser.error("Requested paper orientation %s not compatible with this rendering at this paper size." % options.orientation)

    # Prepare the rendering config
    rc              = ocitysmap.RenderingConfiguration()
    rc.title        = options.output_title
    rc.osmid        = options.osmid or None # Force to None if absent
    rc.bounding_box = bbox
    rc.language     = options.language
    rc.stylesheet   = stylesheet
    if options.orientation == 'portrait':
        rc.paper_width_mm  = paper_descr[1]
        rc.paper_height_mm = paper_descr[2]
    else:
        rc.paper_width_mm  = paper_descr[2]
        rc.paper_height_mm = paper_descr[1]

    # Go !...
    mapper.render(rc, cls_renderer.name, options.output_formats,
                  options.output_prefix)

    return 0
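
render.py routes its log output to the console with logging.basicConfig(stream=sys.stdout, ...). The short sketch below shows that setup in isolation, useful when a tool's progress messages should be capturable with a plain shell redirect; the logger name and format string are illustrative:

import sys
import logging

# Send all log records at DEBUG and above to standard output.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,
                    format='%(levelname)s: %(message)s')

log = logging.getLogger('render')
log.info('rendering started')   # appears on stdout, not stderr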