io.StringIO

Here are examples of the Python API io.StringIO taken from open source projects.

154 Examples

Example 101

Project: openmc Source File: angle_distribution.py
    @classmethod
    def from_endf(cls, ev, mt):
        """Generate an angular distribution from an ENDF evaluation

        Parameters
        ----------
        ev : openmc.data.endf.Evaluation
            ENDF evaluation
        mt : int
            The MT value of the reaction to get angular distributions for

        Returns
        -------
        openmc.data.AngleDistribution
            Angular distribution

        """
        file_obj = StringIO(ev.section[4, mt])

        # Read HEAD record
        items = get_head_record(file_obj)
        lvt = items[2]
        ltt = items[3]

        # Read CONT record
        items = get_cont_record(file_obj)
        li = items[2]
        nk = items[4]
        center_of_mass = (items[3] == 2)

        # Check for obsolete energy transformation matrix. If present, just skip
        # it and keep reading
        if lvt > 0:
            warn('Obsolete energy transformation matrix in MF=4 angular '
                 'distribution.')
            for _ in range((nk + 5)//6):
                file_obj.readline()

        if ltt == 0 and li == 1:
            # Purely isotropic
            energy = np.array([0., ev.info['energy_max']])
            mu = [Uniform(-1., 1.), Uniform(-1., 1.)]

        elif ltt == 1 and li == 0:
            # Legendre polynomial coefficients
            params, tab2 = get_tab2_record(file_obj)
            n_energy = params[5]

            energy = np.zeros(n_energy)
            mu = []
            for i in range(n_energy):
                items, al = get_list_record(file_obj)
                temperature = items[0]
                energy[i] = items[1]
                coefficients = np.asarray([1.0] + al)
                mu.append(Legendre(coefficients))

        elif ltt == 2 and li == 0:
            # Tabulated probability distribution
            params, tab2 = get_tab2_record(file_obj)
            n_energy = params[5]

            energy = np.zeros(n_energy)
            mu = []
            for i in range(n_energy):
                params, f = get_tab1_record(file_obj)
                temperature = params[0]
                energy[i] = params[1]
                if f.n_regions > 1:
                    raise NotImplementedError('Angular distribution with multiple '
                                              'interpolation regions not supported.')
                mu.append(Tabular(f.x, f.y, INTERPOLATION_SCHEME[f.interpolation[0]]))

        elif ltt == 3 and li == 0:
            # Legendre for low energies / tabulated for high energies
            params, tab2 = get_tab2_record(file_obj)
            n_energy_legendre = params[5]

            energy_legendre = np.zeros(n_energy_legendre)
            mu = []
            for i in range(n_energy_legendre):
                items, al = get_list_record(file_obj)
                temperature = items[0]
                energy_legendre[i] = items[1]
                coefficients = np.asarray([1.0] + al)
                mu.append(Legendre(coefficients))

            params, tab2 = get_tab2_record(file_obj)
            n_energy_tabulated = params[5]

            energy_tabulated = np.zeros(n_energy_tabulated)
            for i in range(n_energy_tabulated):
                params, f = get_tab1_record(file_obj)
                temperature = params[0]
                energy_tabulated[i] = params[1]
                if f.n_regions > 1:
                    raise NotImplementedError('Angular distribution with multiple '
                                              'interpolation regions not supported.')
                mu.append(Tabular(f.x, f.y, INTERPOLATION_SCHEME[f.interpolation[0]]))

            energy = np.concatenate((energy_legendre, energy_tabulated))

        return AngleDistribution(energy, mu)
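
This is the most common use of io.StringIO: wrapping an already-loaded string so that record-oriented readers can consume it through the file interface (readline() and friends). A minimal sketch of the same idea, using an invented three-record format rather than real ENDF data:

import io

text = "HEAD 1 2\nCONT 3 4\nLIST 5 6\n"
file_obj = io.StringIO(text)
while True:
    line = file_obj.readline()
    if not line:              # readline() returns '' at end of buffer
        break
    record, *fields = line.split()
    print(record, fields)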

Example 102

Project: yournextrepresentative Source File: st_paul_load_candidates.py
    def handle(self, **options):
        from slumber.exceptions import HttpClientError
        from candidates.cache import get_post_cached, UnknownPostException
        from candidates.election_specific import PARTY_DATA, shorten_post_label
        from candidates.models import PopItPerson
        from candidates.popit import create_popit_api_object

        spreadsheet_url = 'https://docs.google.com/spreadsheets/d/{0}/pub?output=csv'\
                              .format(GOOGLE_DOC_ID)

        candidate_list = requests.get(spreadsheet_url)

        content = StringIO(candidate_list.text)
        reader = csv.DictReader(content)

        api = create_popit_api_object()

        for row in reader:

            try:
                election_data = Election.objects.get_by_slug('council-member-2015')
                post_id_format = 'ocd-division/country:us/state:mn/place:st_paul/ward:{area_id}'
                ocd_division = post_id_format.format(area_id=row['Ward'])
                post_data = get_post_cached(api, ocd_division)['result']
            except (UnknownPostException, memcache.Client.MemcachedKeyCharacterError):
                election_data = Election.objects.get_by_slug('school-board-2015')
                post_id = 'ocd-division/country:us/state:mn/place:st_paul'
                post_data = get_post_cached(api, post_id)['result']

            person_id = slugify(row['Name'])

            person = get_existing_popit_person(person_id)

            if person:
                print("Found an existing person:", row['Name'])
            else:
                print("No existing person, creating a new one:", row['Name'])
                person = PopItPerson()

            person.name = row['Name']

            # TODO: Get these attributes in the spreadsheet
            # person.gender = gender
            # if birth_date:
            #     person.birth_date = str(birth_date)
            # else:
            #     person.birth_date = None

            person.email = row['Campaign Email']
            person.facebook_personal_url = row["Candidate's Personal Facebook Profile"]
            person.facebook_page_url = row['Campaign Facebook Page']


            person.twitter_username = row['Campaign Twitter']\
                                          .replace('N/A', '')\
                                          .replace('N', '')\
                                          .replace('http://twitter.com/', '')\
                                          .replace('https://twitter.com/', '')

            person.linkedin_url = row['LinkedIn']
            person.homepage_url = row['Campaign Website\n']

            standing_in_election = {
                'post_id': post_data['id'],
                'name': shorten_post_label(post_data['label']),
            }
            if 'area' in post_data:
                standing_in_election['mapit_url'] = post_data['area']['identifier']
            person.standing_in = {
                election_data.slug: standing_in_election
            }

            if 'dfl' in row['Party'].lower():
                party_id = 'party:101'
            elif 'green' in row['Party'].lower():
                party_id = 'party:201'
            elif 'independence' in row['Party'].lower():
                party_id = 'party:301'
            else:
                party_id = 'party:401'


            party_name = PARTY_DATA.party_id_to_name[party_id]

            person.party_memberships = {
                election_data.slug: {
                    'id': party_id,
                    'name': party_name,
                }
            }

            person.set_identifier('import-id', person_id)
            change_metadata = get_change_metadata(
                None,
                'Imported candidate from Google Spreadsheet',
            )

            person.record_version(change_metadata)
            try:
                person.save_to_popit(api)

                # TODO: Get candidate Images
                # if image_url:
                #     enqueue_image(person, user, image_url)
            except HttpClientError as hce:
                print("Got an HttpClientError:", hce.content)
                raise
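
Here StringIO bridges requests and the csv module: csv.DictReader wants a file-like object, but the downloaded spreadsheet is only available as a string. A minimal sketch of the same hookup, with inline data standing in for candidate_list.text:

import csv
import io

csv_text = "Name,Ward\nAlice Example,1\nBob Example,2\n"  # stands in for the download
for row in csv.DictReader(io.StringIO(csv_text)):
    print(row['Name'], row['Ward'])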

Example 103

Project: pydub Source File: audio_segment.py
    def __init__(self, data=None, *args, **kwargs):
        self.sample_width = kwargs.pop("sample_width", None)
        self.frame_rate = kwargs.pop("frame_rate", None)
        self.channels = kwargs.pop("channels", None)

        audio_params = (self.sample_width, self.frame_rate, self.channels)

        # prevent partial specification of arguments
        if any(audio_params) and None in audio_params:
            raise MissingAudioParameter("Either all audio parameters or no parameter must be specified")

        # all arguments are given
        elif self.sample_width is not None:
            if len(data) % (self.sample_width * self.channels) != 0:
                raise ValueError("data length must be a multiple of '(sample_width * channels)'")

            self.frame_width = self.channels * self.sample_width
            self._data = data

        # keep support for 'metadata' until audio params are used everywhere
        elif kwargs.get('metadata', False):
            # internal use only
            self._data = data
            for attr, val in kwargs.pop('metadata').items():
                setattr(self, attr, val)
        else:
            # normal construction
            try:
                data = data if isinstance(data, (basestring, bytes)) else data.read()
            except OSError:
                d = b''
                reader = data.read(2**31-1)
                while reader:
                    d += reader
                    reader = data.read(2**31-1)
                data = d

            raw = wave.open(StringIO(data), 'rb')

            raw.rewind()
            self.channels = raw.getnchannels()
            self.sample_width = raw.getsampwidth()
            self.frame_rate = raw.getframerate()
            self.frame_width = self.channels * self.sample_width

            raw.rewind()

            # the "or b''" base case is a work-around for a python 3.4
            # see https://github.com/jiaaro/pydub/pull/107
            self._data = raw.readframes(float('inf')) or b''

        # Convert 24-bit audio to 32-bit audio.
        # (stdlib audioop and array modules do not support 24-bit data)
        if self.sample_width == 3:
            byte_buffer = BytesIO()

            # Workaround for Python 2 vs Python 3: indexing _data yields
            # length-1 strings in 2.x and ints in 3.x.
            pack_fmt = 'BBB' if isinstance(self._data[0], int) else 'ccc'

            # This conversion maintains the 24 bit values.  The values are
            # not scaled up to the 32 bit range.  Other conversions could be
            # implemented.
            i = iter(self._data)
            padding = {False: b'\x00', True: b'\xFF'}
            for b0, b1, b2 in izip(i, i, i):
                byte_buffer.write(padding[b2 > b'\x7f'[0]])
                old_bytes = struct.pack(pack_fmt, b0, b1, b2)
                byte_buffer.write(old_bytes)


            self._data = byte_buffer.getvalue()
            self.sample_width = 4
            self.frame_width = self.channels * self.sample_width

        super(AudioSegment, self).__init__(*args, **kwargs)
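
Note that wave.open(StringIO(data), 'rb') only works on Python 2, where StringIO holds byte strings; on Python 3 the in-memory equivalent for binary data is io.BytesIO. A minimal Python 3 sketch of round-tripping WAV data through memory:

import io
import wave

buf = io.BytesIO()
with wave.open(buf, 'wb') as w:       # write a short silent mono clip
    w.setnchannels(1)
    w.setsampwidth(2)
    w.setframerate(8000)
    w.writeframes(b'\x00\x00' * 8000)

buf.seek(0)                           # rewind before reading it back
with wave.open(buf, 'rb') as r:
    print(r.getnchannels(), r.getframerate(), r.getnframes())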

Example 104

Project: Nurevam Source File: repl.py
    async def repl(self, ctx):
        msg = ctx.message

        variables = {
            'ctx': ctx,
            'bot': self.bot,
            'message': msg,
            'server': msg.server,
            'channel': msg.channel,
            'author': msg.author,
            '_': None,
            'redis':self.redis
        }

        if msg.channel.id in self.sessions:
            await self.bot.say('Already running a REPL session in this channel. Exit it with `quit`.')
            return

        self.sessions.add(msg.channel.id)
        await self.bot.say('Enter code to execute or evaluate. `exit()` or `quit` to exit.')
        while True:
            response = await self.bot.wait_for_message(author=msg.author, channel=msg.channel,
                                                       check=lambda m: m.content.startswith('`'))

            cleaned = self.cleanup_code(response.content)

            if cleaned in ('quit', 'exit', 'exit()'):
                await self.bot.say('Exiting.')
                self.sessions.remove(msg.channel.id)
                return

            executor = exec
            if cleaned.count('\n') == 0:
                # single statement, potentially 'eval'
                try:
                    code = compile(cleaned, '<repl session>', 'eval')
                except SyntaxError:
                    pass
                else:
                    executor = eval

            if executor is exec:
                try:
                    code = compile(cleaned, '<repl session>', 'exec')
                except SyntaxError as e:
                    await self.bot.say(self.get_syntax_error(e))
                    continue

            variables['message'] = response

            fmt = None
            stdout = io.StringIO()

            try:
                with redirect_stdout(stdout):
                    result = executor(code, variables)
                    if inspect.isawaitable(result):
                        result = await result
            except Exception as e:
                value = stdout.getvalue()
                fmt = '```py\n{}{}\n```'.format(value, traceback.format_exc())
            else:
                value = stdout.getvalue()
                if result is not None:
                    fmt = '```py\n{}{}\n```'.format(value, result)
                    variables['last'] = result
                elif value:
                    fmt = '```py\n{}\n```'.format(value)

            try:
                if fmt is not None:
                    if len(fmt) > 2000:
                        await self.bot.send_message(msg.channel, 'Content too big to be printed.')
                    else:
                        await self.bot.send_message(msg.channel, fmt)
            except discord.Forbidden:
                pass
            except discord.HTTPException as e:
                await self.bot.send_message(msg.channel, 'Unexpected error: `{}`'.format(e))
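
The REPL captures anything the evaluated code prints by pointing sys.stdout at an in-memory buffer. The core of that trick, in isolation from discord.py:

import io
from contextlib import redirect_stdout

stdout = io.StringIO()
with redirect_stdout(stdout):
    print("hello from captured code")   # lands in the buffer, not the terminal
print(repr(stdout.getvalue()))          # 'hello from captured code\n'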

Example 105

Project: furion Source File: config.py
    @classmethod
    def init(cls, path):

        default_cfg = StringIO(default_config)

        cls.config = ConfigParser()
        cls.config.readfp(default_cfg)
        cls.config.read(path)
        cls.config_dir = dirname(abspath(path))

        auth_plugin = cls.config.get('plugin', 'auth_plugin')

        cls.authority = None
        if auth_plugin == 'simpleauth':
            cls.password_path = cls.get_path(cls.config.get('simpleauth', 'password_path', 'simpleauth.passwd'))
            cls.authority = SimpleAuth(cls.password_path)

        cls.local_ip = cls.config.get('main', 'local_ip')
        cls.local_port = cls.config.getint('main', 'local_port')
        cls.rpc_port = cls.config.getint('main', 'rpc_port')

        cls.local_ssl = cls.config.getboolean('main', 'local_ssl')
        cls.local_auth = cls.config.getboolean('main', 'local_auth')
        cls.pem_path = cls.get_path(cls.config.get('main', 'pem_path'))
        if cls.pem_path and not exists(cls.pem_path):
            print('Fatal error: pem "%s" cannot be found.' % cls.pem_path)
            time.sleep(3)
            sys.exit(-1)
        ports = cls.config.get('main', 'allowed_ports').strip()
        if ports == 'all' or ports == '':
            cls.allowed_ports = []
        else:
            cls.allowed_ports = [int(port) for port in ports.split(',')]
        cls.ping_server = cls.config.getboolean('main', 'ping_server')
        cls.dns_server = cls.config.getboolean('main', 'dns_server')
        cls.dns_server_port = cls.config.getint('main', 'dns_server_port')
        cls.dns_proxy = cls.config.getboolean('main', 'dns_proxy')
        cls.dns_proxy_port = cls.config.getint('main', 'dns_proxy_port')
        cls.remote_tcp_dns = cls.config.get('main', 'remote_tcp_dns')
        cls.log_level = cls.config.getint('main', 'log_level')
        cls.log_path = cls.get_path(cls.config.get('main', 'log_path'))

        cls.central_url = cls.config.get('upstream', 'central_url')
        cls.autoupdate_upstream_list = cls.config.getboolean('upstream', 'autoupdate_upstream_list')
        cls.update_frequency = cls.config.get('upstream', 'update_frequency')
        cls.upstream_list_path = cls.get_path(cls.config.get('upstream', 'upstream_list_path'))

        cls.upstream_list = None
        if exists(cls.upstream_list_path):
            cls.upstream_list = json.loads(open(cls.upstream_list_path).read())['upstream_list']
        elif cls.autoupdate_upstream_list:
            get_upstream_from_central(cls)
        # cls.upstream_ip = cls.config.get('upstream', 'upstream_ip')
        # cls.upstream_port = cls.config.getint('upstream', 'upstream_port')
        # cls.upstream_ssl = cls.config.getboolean('upstream', 'upstream_ssl')
        # cls.upstream_auth = cls.config.getboolean('upstream', 'upstream_auth')
        # cls.upstream_username = cls.config.get('upstream', 'upstream_username')
        # cls.upstream_password = cls.config.get('upstream', 'upstream_password')

        cls.local_addr = (cls.local_ip, cls.local_port)
        cls.upstream_addr = None
        cls.upstream_ping = None
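
Note that readfp() is the old spelling; Python 3 deprecates it in favor of read_file(), which takes the same file-like argument. A minimal sketch of the defaults-from-a-string pattern (the section and option names here are invented, and 'furion.cfg' is a hypothetical path):

import io
from configparser import ConfigParser

default_config = "[main]\nlocal_ip = 127.0.0.1\nlocal_port = 8080\n"

config = ConfigParser()
config.read_file(io.StringIO(default_config))  # built-in defaults first
config.read('furion.cfg')                      # then overrides from disk, if the file exists
print(config.getint('main', 'local_port'))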

Example 106

Project: ironpython3 Source File: pyclbr.py
def _readmodule(module, path, inpackage=None):
    '''Do the hard work for readmodule[_ex].

    If INPACKAGE is given, it must be the dotted name of the package in
    which we are searching for a submodule, and then PATH must be the
    package search path; otherwise, we are searching for a top-level
    module, and PATH is combined with sys.path.
    '''
    # Compute the full module name (prepending inpackage if set)
    if inpackage is not None:
        fullmodule = "%s.%s" % (inpackage, module)
    else:
        fullmodule = module

    # Check in the cache
    if fullmodule in _modules:
        return _modules[fullmodule]

    # Initialize the dict for this module's contents
    dict = {}

    # Check if it is a built-in module; we don't do much for these
    if module in sys.builtin_module_names and inpackage is None:
        _modules[module] = dict
        return dict

    # Check for a dotted module name
    i = module.rfind('.')
    if i >= 0:
        package = module[:i]
        submodule = module[i+1:]
        parent = _readmodule(package, path, inpackage)
        if inpackage is not None:
            package = "%s.%s" % (inpackage, package)
        if not '__path__' in parent:
            raise ImportError('No package named {}'.format(package))
        return _readmodule(submodule, parent['__path__'], package)

    # Search the path for the module
    f = None
    if inpackage is not None:
        search_path = path
    else:
        search_path = path + sys.path
    # XXX This will change once issue19944 lands.
    spec = importlib.util._find_spec_from_path(fullmodule, search_path)
    fname = spec.loader.get_filename(fullmodule)
    _modules[fullmodule] = dict
    if spec.loader.is_package(fullmodule):
        dict['__path__'] = [os.path.dirname(fname)]
    try:
        source = spec.loader.get_source(fullmodule)
        if source is None:
            return dict
    except (AttributeError, ImportError):
        # not Python source, can't do anything with this module
        return dict

    f = io.StringIO(source)

    stack = [] # stack of (class, indent) pairs

    g = tokenize.generate_tokens(f.readline)
    try:
        for tokentype, token, start, _end, _line in g:
            if tokentype == DEDENT:
                lineno, thisindent = start
                # close nested classes and defs
                while stack and stack[-1][1] >= thisindent:
                    del stack[-1]
            elif token == 'def':
                lineno, thisindent = start
                # close previous nested classes and defs
                while stack and stack[-1][1] >= thisindent:
                    del stack[-1]
                tokentype, meth_name, start = next(g)[0:3]
                if tokentype != NAME:
                    continue # Syntax error
                if stack:
                    cur_class = stack[-1][0]
                    if isinstance(cur_class, Class):
                        # it's a method
                        cur_class._addmethod(meth_name, lineno)
                    # else it's a nested def
                else:
                    # it's a function
                    dict[meth_name] = Function(fullmodule, meth_name,
                                               fname, lineno)
                stack.append((None, thisindent)) # Marker for nested fns
            elif token == 'class':
                lineno, thisindent = start
                # close previous nested classes and defs
                while stack and stack[-1][1] >= thisindent:
                    del stack[-1]
                tokentype, class_name, start = next(g)[0:3]
                if tokentype != NAME:
                    continue # Syntax error
                # parse what follows the class name
                tokentype, token, start = next(g)[0:3]
                inherit = None
                if token == '(':
                    names = [] # List of superclasses
                    # there's a list of superclasses
                    level = 1
                    super = [] # Tokens making up current superclass
                    while True:
                        tokentype, token, start = next(g)[0:3]
                        if token in (')', ',') and level == 1:
                            n = "".join(super)
                            if n in dict:
                                # we know this super class
                                n = dict[n]
                            else:
                                c = n.split('.')
                                if len(c) > 1:
                                    # super class is of the form
                                    # module.class: look in module for
                                    # class
                                    m = c[-2]
                                    c = c[-1]
                                    if m in _modules:
                                        d = _modules[m]
                                        if c in d:
                                            n = d[c]
                            names.append(n)
                            super = []
                        if token == '(':
                            level += 1
                        elif token == ')':
                            level -= 1
                            if level == 0:
                                break
                        elif token == ',' and level == 1:
                            pass
                        # only use NAME and OP (== dot) tokens for type name
                        elif tokentype in (NAME, OP) and level == 1:
                            super.append(token)
                        # expressions in the base list are not supported
                    inherit = names
                cur_class = Class(fullmodule, class_name, inherit,
                                  fname, lineno)
                if not stack:
                    dict[class_name] = cur_class
                stack.append((cur_class, thisindent))
            elif token == 'import' and start[1] == 0:
                modules = _getnamelist(g)
                for mod, _mod2 in modules:
                    try:
                        # Recursively read the imported module
                        if inpackage is None:
                            _readmodule(mod, path)
                        else:
                            try:
                                _readmodule(mod, path, inpackage)
                            except ImportError:
                                _readmodule(mod, [])
                    except:
                        # If we can't find or parse the imported module,
                        # too bad -- don't die here.
                        pass
            elif token == 'from' and start[1] == 0:
                mod, token = _getname(g)
                if not mod or token != "import":
                    continue
                names = _getnamelist(g)
                try:
                    # Recursively read the imported module
                    d = _readmodule(mod, path, inpackage)
                except:
                    # If we can't find or parse the imported module,
                    # too bad -- don't die here.
                    continue
                # add any classes that were defined in the imported module
                # to our name space if they were mentioned in the list
                for n, n2 in names:
                    if n in d:
                        dict[n2 or n] = d[n]
                    elif n == '*':
                        # don't add names that start with _
                        for n in d:
                            if n[0] != '_':
                                dict[n] = d[n]
    except StopIteration:
        pass

    f.close()
    return dict
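
tokenize.generate_tokens() wants a readline callable rather than a string, which is exactly what a StringIO provides. The essential hookup, reduced to finding class and def statements:

import io
import tokenize

source = "class Spam:\n    def eggs(self):\n        pass\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.NAME and tok.string in ('class', 'def'):
        print(tok.string, 'at line', tok.start[0])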

Example 107

Project: Arelle Source File: updateTableLB.py
def generateUpdatedTableLB(dts, updatedTableLinkbaseFile):
    import os, io
    from arelle import XmlUtil, XbrlConst
    from arelle.ViewUtil import viewReferences, referenceURI
    from arelle.ModelRenderingObject import ModelEuAxisCoord
    
    if dts.fileSource.isArchive:
        dts.error("genTblLB:outFileIsArchive",
                 _("Updated Table Linkbase file cannot be an archive: %(tableLBOutputFile)s."),
                 modelObject=dts, tableLBOutputFile=updatedTableLinkbaseFile)
        return
    tblAxisRelSet = dts.relationshipSet(XbrlConst.euTableAxis)
    axisMbrRelSet = dts.relationshipSet(XbrlConst.euAxisMember)
    if len(tblAxisRelSet.modelRelationships) == 0:
        dts.error("genTblLB:noInputTables",
                 _("DTS does not contain Eurofiling 2010 tables and axes: %(entryFile)s."),
                 modelObject=dts, entryFile=dts.uri)
        return

    file = io.StringIO('''
<nsmap>
<link:linkbase 
xmlns:label="http://xbrl.org/2008/label"
xmlns:gen="http://xbrl.org/2008/generic"
xmlns:df="http://xbrl.org/2008/filter/dimension"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:reference="http://xbrl.org/2008/reference"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:link="http://www.xbrl.org/2003/linkbase"
xmlns:table="http://xbrl.org/2011/table"
xmlns:formula="http://xbrl.org/2008/formula"
xsi:schemaLocation="             
http://www.xbrl.org/2003/linkbase http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
http://xbrl.org/2008/reference http://www.xbrl.org/2008/generic-reference.xsd
http://xbrl.org/2008/label http://www.xbrl.org/2008/generic-label.xsd
http://xbrl.org/2011/table http://www.xbrl.org/2011/table.xsd
http://xbrl.org/2008/filter/dimension http://www.xbrl.org/2008/dimension-filter.xsd">
  <link:arcroleRef arcroleURI="http://xbrl.org/arcrole/2011/table-filter" xlink:type="simple" 
   xlink:href="http://www.xbrl.org/2011/table.xsd#table-filter"/>
  <link:arcroleRef arcroleURI="http://xbrl.org/arcrole/2011/table-axis" xlink:type="simple"
   xlink:href="http://www.xbrl.org/2011/table.xsd#table-axis"/>
  <link:arcroleRef arcroleURI="http://xbrl.org/arcrole/2011/axis-subtree" xlink:type="simple"
   xlink:href="http://www.xbrl.org/2011/table.xsd#axis-subtree"/>
  <link:arcroleRef arcroleURI="http://xbrl.org/arcrole/2011/axis-filter" xlink:type="simple"
   xlink:href="http://www.xbrl.org/2011/filter-axis.xsd#axis-filter"/>
</link:linkbase>
</nsmap>
<!--  Generated by Arelle(r) http://arelle.org --> 
'''
     )
    from arelle.ModelObjectFactory import parser
    parser, parserLookupName, parserLookupClass = parser(dts,None)
    from lxml import etree
    xmlDocument = etree.parse(file, parser=parser, base_url=updatedTableLinkbaseFile)
    file.close()
    nsmapElt = xmlDocument.getroot()
    #xmlDocument.getroot().init(self)  ## is this needed ??
    for lbElement in xmlDocument.iter(tag="{http://www.xbrl.org/2003/linkbase}linkbase"):
        break

    class DocObj:  # fake ModelDocument for namespaces
        def __init__(self):
            self.xmlRootElement = lbElement
            self.xmlDocument = xmlDocument
    docObj = DocObj()
    
    numELRs = 0
    numTables = 0
    
    def copyAttrs(fromElt, toElt, attrTags):
        for attr in attrTags:
            if fromElt.get(attr):
                toElt.set(attr, fromElt.get(attr))

    def generateTable(newLinkElt, newTblElt, srcTblElt, tblAxisRelSet, axisMbrRelSet, visited):
        if srcTblElt is not None:
            for rel in tblAxisRelSet.fromModelObject(srcTblElt):
                srcAxisElt = rel.toModelObject
                if isinstance(srcAxisElt, ModelEuAxisCoord):
                    visited.add(srcAxisElt)
                    newAxisElt = etree.SubElement(newLinkElt, "{http://xbrl.org/2011/table}ruleAxis")
                    copyAttrs(srcAxisElt, newAxisElt, ("id", 
                                                       "{http://www.w3.org/1999/xlink}type",
                                                       "{http://www.w3.org/1999/xlink}label"))
                    newAxisElt.set("abstract", "true") # always true on root element
                    newArcElt = etree.SubElement(newLinkElt, "{http://xbrl.org/2011/table}axisArc")
                    copyAttrs(rel, newArcElt, ("id", 
                                               "{http://www.w3.org/1999/xlink}type",
                                               "{http://www.w3.org/1999/xlink}from",
                                               "{http://www.w3.org/1999/xlink}to",
                                               "order"))
                    newArcElt.set("{http://www.w3.org/1999/xlink}arcrole", XbrlConst.tableBreakdown)
                    newArcElt.set("axisDisposition", rel.axisDisposition)
                    generateAxis(newLinkElt, newAxisElt, srcAxisElt, axisMbrRelSet, visited)
                    visited.discard(srcAxisElt)

    def generateAxis(newLinkElt, newAxisParentElt, srcAxisElt, axisMbrRelSet, visited):
        for rel in axisMbrRelSet.fromModelObject(srcAxisElt):
            tgtAxisElt = rel.toModelObject
            if isinstance(tgtAxisElt, ModelEuAxisCoord) and tgtAxisElt not in visited:
                visited.add(tgtAxisElt)
                newAxisElt = etree.SubElement(newLinkElt, "{http://xbrl.org/2011/table}ruleAxis")
                copyAttrs(tgtAxisElt, newAxisElt, ("id", 
                                                   "abstract",
                                                   "{http://www.w3.org/1999/xlink}type",
                                                   "{http://www.w3.org/1999/xlink}label"))
                if tgtAxisElt.primaryItemQname:
                    newRuleElt = etree.SubElement(newAxisElt, "{http://xbrl.org/2008/formula}concept")
                    newQnameElt = etree.SubElement(newRuleElt, "{http://xbrl.org/2008/formula}qname")
                    newQnameElt.text = XmlUtil.addQnameValue(docObj, tgtAxisElt.primaryItemQname)
                for dimQname, memQname in tgtAxisElt.explicitDims:
                    newRuleElt = etree.SubElement(newAxisElt, "{http://xbrl.org/2008/formula}explicitDimension")
                    newRuleElt.set("dimension", XmlUtil.addQnameValue(docObj, dimQname))
                    newMbrElt = etree.SubElement(newRuleElt, "{http://xbrl.org/2008/formula}member")
                    newQnameElt = etree.SubElement(newMbrElt, "{http://xbrl.org/2008/formula}qname")
                    newQnameElt.text = XmlUtil.addQnameValue(docObj, memQname)
                newArcElt = etree.SubElement(newLinkElt, "{http://xbrl.org/2011/table}axisArc")
                copyAttrs(rel, newArcElt, ("id", 
                                           "{http://www.w3.org/1999/xlink}type",
                                           "{http://www.w3.org/1999/xlink}from",
                                           "{http://www.w3.org/1999/xlink}to",
                                           "order"))
                newArcElt.set("{http://www.w3.org/1999/xlink}arcrole", XbrlConst.tableAxisSubtree)
                generateAxis(newLinkElt, newAxisElt, tgtAxisElt, axisMbrRelSet, visited)
                visited.discard(tgtAxisElt)
        
    # sort URIs
    linkroleUris = sorted([linkroleUri
                           for linkroleUri in tblAxisRelSet.linkRoleUris])
    
    firstNewLinkElt = None
    roleRefUris = set()
    for linkroleUri in linkroleUris:
        numELRs += 1
        newLinkElt = etree.SubElement(lbElement, "{http://xbrl.org/2008/generic}link")
        newLinkElt.set("{http://www.w3.org/1999/xlink}type", "extended")
        newLinkElt.set("{http://www.w3.org/1999/xlink}role", linkroleUri)
        if firstNewLinkElt is None: firstNewLinkElt = newLinkElt
        # To do: add roleRef if needed
        tblAxisRelSet = dts.relationshipSet(XbrlConst.euTableAxis, linkroleUri)
        axisMbrRelSet = dts.relationshipSet(XbrlConst.euAxisMember, linkroleUri)
        for srcTblElt in tblAxisRelSet.rootConcepts:
            if srcTblElt.tag == "{http://www.eurofiling.info/2010/rendering}table":
                numTables += 1
                newTblElt = etree.SubElement(newLinkElt, "{http://xbrl.org/2011/table}table")
                newTblElt.set("aspectModel", "dimensional")
                copyAttrs(srcTblElt, newTblElt, ("id", 
                                                 "{http://www.w3.org/1999/xlink}type",
                                                 "{http://www.w3.org/1999/xlink}label"))
                generateTable(newLinkElt, newTblElt, srcTblElt, tblAxisRelSet, axisMbrRelSet, set())
                
                if linkroleUri not in roleRefUris:
                    srcRoleRefElt = XmlUtil.descendant(srcTblElt.getroottree(), XbrlConst.link, "roleRef", "roleURI", linkroleUri)
                    if srcRoleRefElt is not None:
                        roleRefUris.add(linkroleUri)
                        newRoleRefElt = etree.Element("{http://www.xbrl.org/2003/linkbase}roleRef")
                        copyAttrs(srcRoleRefElt, newRoleRefElt, ("roleURI", 
                                                                 "{http://www.w3.org/1999/xlink}type",
                                                                 "{http://www.w3.org/1999/xlink}href"))
                        firstNewLinkElt.addprevious(newRoleRefElt)
            
    fh = open(updatedTableLinkbaseFile, "w", encoding="utf-8")
    XmlUtil.writexml(fh, xmlDocument, encoding="utf-8")
    fh.close()
    
    dts.info("info:updateTableLinkbase",
             _("Updated Table Linkbase of %(entryFile)s has %(numberOfLinkroles)s linkroles, %(numberOfTables)s tables in file %(tableLBOutputFile)s."),
             modelObject=dts,
             entryFile=dts.uri, numberOfLinkroles=numELRs, numberOfTables=numTables, tableLBOutputFile=updatedTableLinkbaseFile)
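
The StringIO-to-etree.parse() hookup above works because lxml accepts any file-like object; the standard library's ElementTree does the same. A minimal sketch with stdlib only (the namespace is borrowed from the template above, the element names are invented):

import io
import xml.etree.ElementTree as ET

xml_text = '<linkbase xmlns:link="http://www.xbrl.org/2003/linkbase"><link:roleRef/></linkbase>'
tree = ET.parse(io.StringIO(xml_text))
root = tree.getroot()
print(root.tag, [child.tag for child in root])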

Example 108

Project: PerfKitBenchmarker Source File: netperf_benchmark.py
def _ParseNetperfOutput(stdout, metadata, benchmark_name,
                        enable_latency_histograms):
  """Parses the stdout of a single netperf process.

  Args:
    stdout: the stdout of the netperf process
    metadata: metadata for any sample.Sample objects we create

  Returns:
    A tuple containing (throughput_sample, latency_samples, latency_histogram)
  """
  # Don't modify the metadata dict that was passed in
  metadata = metadata.copy()

  # Extract stats from stdout
  # Sample output:
  #
  # "MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 20001
  # AF_INET to 104.154.50.86 () port 20001 AF_INET : +/-2.500% @ 99% conf.
  # : first burst 0",\n
  # Throughput,Throughput Units,Throughput Confidence Width (%),
  # Confidence Iterations Run,Stddev Latency Microseconds,
  # 50th Percentile Latency Microseconds,90th Percentile Latency Microseconds,
  # 99th Percentile Latency Microseconds,Minimum Latency Microseconds,
  # Maximum Latency Microseconds\n
  # 1405.50,Trans/s,2.522,4,783.80,683,735,841,600,900\n
  try:
    fp = io.StringIO(stdout)
    # "-o" flag above specifies CSV output, but there is one extra header line:
    banner = next(fp)
    assert banner.startswith('MIGRATED'), stdout
    r = csv.DictReader(fp)
    results = next(r)
    logging.info('Netperf Results: %s', results)
    assert 'Throughput' in results
  except:
    raise Exception('Netperf ERROR: Failed to parse stdout. STDOUT: %s' %
                    stdout)

  # Update the metadata with some additional info
  meta_keys = [('Confidence Iterations Run', 'confidence_iter'),
               ('Throughput Confidence Width (%)', 'confidence_width_percent')]
  metadata.update({meta_key: results[netperf_key]
                   for netperf_key, meta_key in meta_keys})

  # Create the throughput sample
  throughput = float(results['Throughput'])
  throughput_units = results['Throughput Units']
  if throughput_units == '10^6bits/s':
    # TCP_STREAM benchmark
    unit = MBPS
    metric = '%s_Throughput' % benchmark_name
  elif throughput_units == 'Trans/s':
    # *RR benchmarks
    unit = TRANSACTIONS_PER_SECOND
    metric = '%s_Transaction_Rate' % benchmark_name
  else:
    raise ValueError('Netperf output specifies unrecognized throughput units %s'
                     % throughput_units)
  throughput_sample = sample.Sample(metric, throughput, unit, metadata)

  latency_hist = None
  latency_samples = []
  if enable_latency_histograms:
    # Parse the latency histogram. {latency: count} where "latency" is the
    # latency in microseconds with only 2 significant figures and "count" is the
    # number of response times that fell in that latency range.
    latency_hist = netperf.ParseHistogram(stdout)
    hist_metadata = {'histogram': json.dumps(latency_hist)}
    hist_metadata.update(metadata)
    latency_samples.append(sample.Sample(
        '%s_Latency_Histogram' % benchmark_name, 0, 'us', hist_metadata))
  if unit != MBPS:
    for metric_key, metric_name in [
        ('50th Percentile Latency Microseconds', 'p50'),
        ('90th Percentile Latency Microseconds', 'p90'),
        ('99th Percentile Latency Microseconds', 'p99'),
        ('Minimum Latency Microseconds', 'min'),
        ('Maximum Latency Microseconds', 'max'),
        ('Stddev Latency Microseconds', 'stddev')]:
      if metric_key in results:
        latency_samples.append(
            sample.Sample('%s_Latency_%s' % (benchmark_name, metric_name),
                          float(results[metric_key]), 'us', metadata))

  return (throughput_sample, latency_samples, latency_hist)
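
Because a StringIO is also an iterator over lines, next(fp) consumes the banner and leaves the stream positioned at the CSV header for DictReader. In isolation, with invented netperf-like output:

import csv
import io

stdout = ("MIGRATED TCP REQUEST/RESPONSE TEST banner line\n"
          "Throughput,Throughput Units\n"
          "1405.50,Trans/s\n")
fp = io.StringIO(stdout)
banner = next(fp)                 # pull off the non-CSV header line
assert banner.startswith('MIGRATED')
results = next(csv.DictReader(fp))
print(results['Throughput'], results['Throughput Units'])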

Example 109

Project: sverchok Source File: text.py
    def load_csv_data(self):
        n_id = node_id(self)

        csv_data = collections.OrderedDict()

        if n_id in self.csv_data:
            del self.csv_data[n_id]

        f = io.StringIO(bpy.data.texts[self.text].as_string())

        # setup CSV options

        if self.csv_dialect == 'user':
            if self.csv_delimiter == 'CUSTOM':
                d = self.csv_custom_delimiter
            else:
                d = self.csv_delimiter

            reader = csv.reader(f, delimiter=d)
        elif self.csv_dialect == 'semicolon':
            self.csv_decimalmark = ','
            reader = csv.reader(f, delimiter=';')
        else:
            reader = csv.reader(f, dialect=self.csv_dialect)
            self.csv_decimalmark = '.'

        # setup parse decimalmark

        if self.csv_decimalmark == ',':
            get_number = lambda s: float(s.replace(',', '.'))
        elif self.csv_decimalmark == 'LOCALE':
            get_number = lambda s: locale.atof(s)
        elif self.csv_decimalmark == 'CUSTOM':
            if self.csv_custom_decimalmark:
                get_number = lambda s: float(s.replace(self.csv_custom_decimalmark, '.'))
        else:  # . default
            get_number = float

        # load data
        for i, row in enumerate(reader):
            if i == 0:  # setup names
                if self.csv_header:
                    for name in row:
                        tmp = name
                        c = 1
                        while tmp in csv_data:
                            tmp = name+str(c)
                            c += 1
                        csv_data[str(tmp)] = []
                    continue  # first row is names
                else:
                    for j in range(len(row)):
                        csv_data["Col "+str(j)] = []
            # load data

            for j, name in enumerate(csv_data):
                try:
                    n = get_number(row[j])
                    csv_data[name].append(n)
                except (ValueError, IndexError):
                    pass  # discard strings other than first row

        if csv_data:
            # check for actual data otherwise fail.
            if not csv_data[list(csv_data.keys())[0]]:
                return
            self.current_text = self.text
            self.csv_data[n_id] = csv_data
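
The same StringIO feeds csv.reader here, with the delimiter chosen at run time. A minimal sketch of the semicolon dialect with comma decimal marks, as handled above (the data is invented):

import csv
import io

f = io.StringIO("1,5;2,5\n4,0;5,0\n")        # semicolon-separated, ',' as decimal mark
get_number = lambda s: float(s.replace(',', '.'))
for row in csv.reader(f, delimiter=';'):
    print([get_number(v) for v in row])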

Example 110

Project: sedge Source File: engine.py
    def parse(self, fd):
        "very simple parser - but why would we want it to be complex?"

        def resolve_args(args):
            # FIXME break this out, it's in common with the templating stuff elsewhere
            root = self.sections[0]
            val_dict = dict(('<' + t + '>', u) for (t, u) in root.get_variables().items())
            resolved_args = []
            for arg in args:
                for subst, value in val_dict.items():
                    arg = arg.replace(subst, value)
                resolved_args.append(arg)
            return resolved_args

        def handle_section_defn(keyword, parts):
            if keyword == '@HostAttrs':
                if len(parts) != 1:
                    raise ParserException(
                        'usage: @HostAttrs <hostname>')
                if self.sections[0].has_pending_with():
                    raise ParserException(
                        '@with not supported with @HostAttrs')
                self.sections.append(HostAttrs(parts[0]))
                return True
            if keyword == 'Host':
                if len(parts) != 1:
                    raise ParserException('usage: Host <hostname>')
                self.sections.append(
                    Host(parts[0], self.sections[0].pop_pending_with()))
                return True

        def handle_vardef(root, keyword, parts):
            if keyword == '@with':
                root.add_pending_with(parts)
                return True

        def handle_set_args(section, parts):
            if len(parts) == 0:
                raise ParserException('usage: @args arg-name ...')
            if not self.is_include():
                return
            if self._args is None or len(self._args) != len(parts):
                raise ParserException('required arguments not passed to include %s (%s)' % (self._url, ', '.join(parts)))
            root = self.sections[0]
            for key, value in zip(parts, self._args):
                root.set_value(key, value)

        def handle_set_value(section, parts):
            if len(parts) != 2:
                raise ParserException('usage: @set <key> <value>')
            root = self.sections[0]
            root.set_value(*resolve_args(parts))

        def handle_add_type(section, parts):
            if len(parts) != 1:
                raise ParserException('usage: @is <HostAttrName>')
            section.add_type(parts[0])

        def handle_via(section, parts):
            if len(parts) != 1:
                raise ParserException('usage: @via <Hostname>')
            section.add_line(
                'ProxyCommand',
                ('ssh %s nc %%h %%p 2> /dev/null' %
                    (pipes.quote(resolve_args(parts)[0])),))

        def handle_identity(section, parts):
            if len(parts) != 1:
                raise ParserException('usage: @identity <name>')
            section.add_identity(resolve_args(parts)[0])

        def handle_include(section, parts):
            if len(parts) == 0:
                raise ParserException('usage: @include <https://...|/path/to/file.sedge> [arg ...]')
            url = parts[0]
            parsed_url = urllib.parse.urlparse(url)
            if parsed_url.scheme == 'https':
                req = requests.get(url, verify=self._verify_ssl)
                text = req.text
            elif parsed_url.scheme == 'file':
                with open(parsed_url.path) as fd:
                    text = fd.read()
            elif parsed_url.scheme == '':
                path = os.path.expanduser(url)
                with open(path) as fd:
                    text = fd.read()
            else:
                raise SecurityException('error: @includes may only use paths or https:// or file:// URLs')
            subconfig = SedgeEngine(
                self._key_library,
                StringIO(text),
                self._verify_ssl,
                url=url,
                args=resolve_args(parts[1:]),
                parent_keydefs=self.keydefs,
                via_include=True)
            self.includes.append((url, subconfig))

        def handle_keydef(section, parts):
            if len(parts) < 2:
                raise ParserException('usage: @key <name> [fingerprint]...')
            name = parts[0]
            fingerprints = parts[1:]
            self.keydefs[name] = fingerprints

        def handle_keyword(section, keyword, parts):
            handlers = {
                '@set': handle_set_value,
                '@args': handle_set_args,
                '@is': handle_add_type,
                '@via': handle_via,
                '@include': handle_include,
                '@key': handle_keydef,
                '@identity': handle_identity
            }
            if keyword in handlers:
                handlers[keyword](section, parts)
                return True

        for line in (t.strip() for t in fd):
            if line.startswith('#') or line == '':
                continue
            keyword, parts = SedgeEngine.parse_config_line(line)
            if handle_section_defn(keyword, parts):
                continue
            if handle_vardef(self.sections[0], keyword, parts):
                continue
            current_section = self.sections[-1]
            if handle_keyword(current_section, keyword, parts):
                continue
            if keyword.startswith('@'):
                raise ParserException(
                    "unknown expansion keyword '%s'" % (keyword))
            # use other rather than parts to avoid messing up user
            # whitespace; we don't handle quotes in here as we don't
            # need to
            current_section.add_line(keyword, parts)
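
parse() never needs to know whether fd is a real file or a StringIO; both support line iteration, which is what lets handle_include() re-enter SedgeEngine with downloaded text. The shared interface in miniature, with made-up config lines:

import io

fd = io.StringIO("# a comment\n\nHost example\n@set user admin\n")
for line in (t.strip() for t in fd):
    if line.startswith('#') or line == '':
        continue
    keyword, _, rest = line.partition(' ')
    print(keyword, rest.split())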

Example 111

Project: Red9_StudioPack Source File: audio_segment.py
Function: overlay
    def overlay(self, seg, position=0, loop=False, times=None):
        """
        Overlay the provided segment on to this segment starting at the
        specified position and using the specified looping behavior.

        seg (AudioSegment):
            The audio segment to overlay on to this one.

        position (optional int):
            The position to start overlaying the provided segment in to this
            one.

        loop (optional bool):
            Loop seg as many times as necessary to match this segment's length.
            Overrides loops param.

        times (optional int):
            Loop seg the specified number of times or until it matches this
            segment's length. 1 means once, 2 means twice, ... 0 would make the
            call a no-op
        """

        if loop:
            # match loop=True's behavior with new times (count) mechanism.
            times = -1
        elif times is None:
            # no times specified, just once through
            times = 1
        elif times == 0:
            # it's a no-op, make a copy since we never mutate
            return self._spawn(self._data)

        output = StringIO()

        seg1, seg2 = AudioSegment._sync(self, seg)
        sample_width = seg1.sample_width
        spawn = seg1._spawn

        output.write(seg1[:position]._data)

        # drop down to the raw data
        seg1 = seg1[position:]._data
        seg2 = seg2._data
        pos = 0
        seg1_len = len(seg1)
        seg2_len = len(seg2)
        while times:
            remaining = max(0, seg1_len - pos)
            if seg2_len >= remaining:
                seg2 = seg2[:remaining]
                seg2_len = remaining
                # we've hit the end, we're done looping (if we were) and this
                # is our last go-around
                times = 1

            output.write(audioop.add(seg1[pos:pos + seg2_len], seg2,
                                     sample_width))
            pos += seg2_len

            # dec times to break our while loop (eventually)
            times -= 1

        output.write(seg1[pos:])

        return spawn(data=output)

Example 112

Project: pyminifier Source File: minification.py
Function: reduce_operators
def reduce_operators(source):
    """
    Remove spaces between operators in *source* and returns the result.
    Example::

        def foo(foo, bar, blah):
            test = "This is a %s" % foo

    Will become::

        def foo(foo,bar,blah):
            test="This is a %s"%foo

    ..  note::

        Also removes trailing commas and joins disjointed strings like
        ``("foo" "bar")``.
    """
    io_obj = io.StringIO(source)
    prev_tok = None
    out_tokens = []
    out = ""
    last_lineno = -1
    last_col = 0
    nl_types = (tokenize.NL, tokenize.NEWLINE)
    joining_strings = False
    new_string = ""
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        token_string = tok[1]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        if start_line > last_lineno:
            last_col = 0
        if token_type != tokenize.OP:
            if start_col > last_col and token_type not in nl_types:
                if prev_tok[0] != tokenize.OP:
                    out += (" " * (start_col - last_col))
            if token_type == tokenize.STRING:
                if prev_tok[0] == tokenize.STRING:
                    # Join the strings into one
                    string_type = token_string[0] # '' or ""
                    prev_string_type = prev_tok[1][0]
                    out = out.rstrip(" ") # Remove any spaces we inserted prev
                    if not joining_strings:
                        # Remove prev token and start the new combined string
                        out = out[:(len(out)-len(prev_tok[1]))]
                        prev_string = prev_tok[1].strip(prev_string_type)
                        new_string = (
                            prev_string + token_string.strip(string_type))
                        joining_strings = True
                    else:
                        new_string += token_string.strip(string_type)
        else:
            if token_string in ('}', ')', ']'):
                if prev_tok[1] == ',':
                    out = out.rstrip(',')
            if joining_strings:
                # NOTE: Using triple quotes so that this logic works with
                # mixed strings using both single quotes and double quotes.
                out += "'''" + new_string + "'''"
                joining_strings = False
            if token_string == '@': # Decorators need special handling
                if prev_tok[0] == tokenize.NEWLINE:
                    # Ensure it gets indented properly
                    out += (" " * (start_col - last_col))
        if not joining_strings:
            out += token_string
        last_col = end_col
        last_lineno = end_line
        prev_tok = tok
    return out
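
pyminifier reassembles the output with its own spacing logic above, but for comparison the standard library can also reverse tokenization: tokenize.untokenize() rebuilds source text from the 5-tuples that generate_tokens() yields. A round-trip sketch:

import io
import tokenize

source = 'x = ("foo"  "bar")\n'
tokens = tokenize.generate_tokens(io.StringIO(source).readline)
print(tokenize.untokenize(tokens))   # prints the source reconstructed from its tokens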

Example 113

Project: groper Source File: groper.py
def OptionsMeta(print_func=None):
    '''Creates a private scope for the option manipulation functions and returns them.

    This function is used to create a module-wide global options object and its
    manipulation functions. It may also be used to generate local options objects,
    for example for unit testing.
    '''

    print_func = print_func or print # Pass in a custom print function to use, e.g. stderr

    option_definitions = {}
    cp = RawConfigParser()
    adapters = {
        bool: cp.getboolean,
        float: cp.getfloat,
        int: cp.getint,
    }

    config_file_def = {
        'section': None,
        'optname': None,
        'filename': None,
    }

    # Variables we will return
    options = OptionObject()
    cmdargs = []
    cmdarg_defs = {
        'count': None,
        'args': None,
    }
    _type = type

    try:
        _basestring = basestring
    except NameError:
        _basestring = str

    try:
        _long = long
    except NameError:
        _long = int

    def generate_sample_config():
        '''Returns a string containing a sample configuration file based on the defined options.'''
        
        f = StringIO()
        try:
            for section in option_definitions:
                f.write('[{0}]\n'.format(section))

                for name, opt in option_definitions[section].items():
                    if opt.cmd_only:
                        continue

                    opt_name = name if hasattr(opt, 'default') else '#{0}'.format(name)
                    opt_val = '{0}'.format(opt.default) if hasattr(opt, 'default') else '<{0}>'.format(name.upper())
                    
                    f.write('{0} = {1}\n'.format(opt_name, opt_val))

                f.write("\n")

            return f.getvalue()
        finally:
            f.close()
        
    def _option_usage(option):
        '''Create an option usage line part based on option definition.
        
            Returns a tuple of (short_str, long_str) to be added.
        '''
        s, l = None, None

        wrap_optional = lambda option, s: s if option.required else ('[{0}]'.format(s))

        if option.cmd_short_name:
            if option.type != bool:
                s = wrap_optional(option, '-{0} <{1}>'.format(option.cmd_short_name, option.cmd_name or option.name))
            else:
                s = wrap_optional(option, '-{0}'.format(option.cmd_short_name))
        elif option.cmd_name and option.required:
            if option.type != bool:
                s = wrap_optional(option, '--{0}=<{1}>'.format(option.cmd_name, option.cmd_name or option.name))
            else:
                s = wrap_optional(option, '--{0}'.format(option.cmd_name))
       
        
        if option.cmd_name:
            if option.type != bool:
                l = wrap_optional(option, '--{0}=<{1}>'.format(option.cmd_name, option.cmd_name or option.name))
            else:
                l = wrap_optional(option, '--{0}'.format(option.cmd_name))
        elif option.cmd_short_name and option.required:
            if option.type != bool:
                l = wrap_optional(option, '-{0} <{1}>'.format((option.cmd_short_name, option.cmd_name or option.name)))
            else:
                l = wrap_optional(option, '-{0}'.format(option.cmd_short_name))

        return s, l

    def _args_usage(cmdargs_def):
        if cmdarg_defs['count'] == -1:
            return '[{0}] ...'.format(cmdarg_defs['args'][0])
        elif cmdarg_defs['count'] == -2:
            return '<{0}> [{1}] ...'.format(cmdarg_defs['args'][0], cmdarg_defs['args'][0])
        elif cmdarg_defs['args']:
            return ' '.join(['<{0}>'.format(s) for s in cmdarg_defs['args']])

    def usage(cmd_name=None):
        '''Returns usage/help string based on defined options.'''

        cmd_name = cmd_name or os.path.basename(sys.argv[0])
        
        lines = ['Usage:', '',]

        # Group all options
        cmd_options = {}
        for section in option_definitions:
            for name, opt in option_definitions[section].items():
                if opt.cmd_name or opt.cmd_short_name:
                    if opt.cmd_group not in cmd_options:
                        cmd_options[opt.cmd_group] = []
                    cmd_options[opt.cmd_group].append(opt)

        if not cmd_options and cmdarg_defs['count']:
            arg_line = _args_usage(cmdarg_defs)
            lines.append('{0} {1}'.format(cmd_name, arg_line))

        # Create lines
        for group in cmd_options.values():
            short_line = []
            long_line = []

            group.sort(key=lambda a: a.name) # Sort alphabetically
            group.sort(key=lambda a: not a.required) # Then required options first (stable sort)
            
            for option in group:
                s, l = _option_usage(option)
                if s:
                    short_line.append(s)
                if l:
                    long_line.append(l)

            arg_line = _args_usage(cmdarg_defs)

            if arg_line:
                short_line.append(arg_line)
                long_line.append(arg_line)

            if short_line:
                lines.append('{0} {1}'.format(cmd_name, ' '.join(short_line)))
            if long_line:
                lines.append('{0} {1}'.format(cmd_name, ' '.join(long_line)))

        return '\n'.join(lines)
 
    def define_args(args=None):
        '''Defines required/optional arguments.

        The args parameter can be in the following forms:
          - (num, name): num is the number of arguments expected, and name is the name
            to be printed when program usage is being shown.
            NOTE: num can be -1 for "zero or more arguments" and -2 for "one or more arguments"
          - (arg1, arg2, arg3): Require three arguments, each with a different name.
        '''

        if len(args) == 2 and type(args[0]) in set((int, _long)) and isinstance(args[1], _basestring):
            cmdarg_defs['count'] = args[0]
            cmdarg_defs['args'] = [args[1]] * abs(args[0])
            return
        elif hasattr(args, '__iter__'):
            cmdarg_defs['count'] = len(args)
            cmdarg_defs['args'] = tuple(args)
            return

        raise OptionsError('Define either (count, argname) (use -1 for zero or more, -2 for one or more) or a list of argument names.')

    def define_opt(section, name, cmd_name=None, cmd_short_name=None, cmd_only=False, type=_type(''), is_config_file=False, is_help=False, help=None, cmd_group='default', **kwargs):
        '''Defines an option. Should be run before init_options().
        
           Note that you may pass in one additional kwarg: default.
           If this argument is not specified, the option is required, and
           will have to be set from either a config file or the command line.
        '''

        if not isinstance(section, _basestring):
            raise OptionsError('Section name {0} must be a string, not a {1}'.format(section, _type(section)))

        if not isinstance(name, _basestring):
            raise OptionsError('Option name {0} must be a string, not a {1}'.format(name, _type(name)))

        if cmd_name and not isinstance(cmd_name, _basestring):
            raise OptionsError('cmd_name {0} must be a string, not a {1}'.format(cmd_name, _type(cmd_name)))

        if cmd_short_name and not isinstance(cmd_short_name, _basestring):
            raise OptionsError('cmd_short_name {0} must be a string, not a {1}'.format(cmd_short_name, _type(cmd_short_name)))

        section = section.lower().strip()
        name = name.lower().strip()
        if cmd_name:
            cmd_name = cmd_name.lower().strip()

        if not re.match('^[a-z_]+[a-z0-9_]*$', section):
            raise OptionsError('{0} is not a valid section name. It must contain only letters, numbers and underscores.'.format(section))
        
        if not re.match('^[a-z_]+[a-z0-9_]*$', name):
            raise OptionsError('{0} is not a valid name. It must contain only letters, numbers and underscores.'.format(name))

        if cmd_name and not re.match('^[a-z0-9]+[a-z0-9-]*$', cmd_name):
            raise OptionsError('{0} is not a valid cmd_name. It must contain only letters, numbers and dashes.'.format(cmd_name))

        if cmd_short_name and (len(cmd_short_name) != 1 or not re.match('^[a-zA-Z0-9]{1}$', cmd_short_name)):
            raise OptionsError('{0} is not a valid cmd_short_name. It must contain only letters or numbers and be of length 1.'.format(cmd_short_name))

        if not hasattr(options, section):
            setattr(options, section, OptionObject())
            option_definitions[section] = {}

        if name in option_definitions[section]:
            raise OptionsError('Option {0}.{1} is already defined.'.format(section, name))

        if cmd_only and not (cmd_name or cmd_short_name):
            raise OptionsError('Option {0}.{1} is defined as cmd_only, but neither cmd_name nor cmd_short_name are set.'.format(section, name))

        if is_config_file and not isinstance(type(), _basestring):
            raise OptionsError('Option {0}.{1} is defined as is_config_file, but with {2} instead of {3}.'.format(section, name, type, _type('')))

        if is_config_file and config_file_def['section']:
            raise OptionsError('Duplicate is_config_file options {0}.{1} and {2}.{3}.'.format(section, name, config_file_def['section'], config_file_def['optname']))

        if is_config_file and not (cmd_name or cmd_short_name):
            raise OptionsError('Option {0}.{1} is defined as is_config_file, but cmd_name and cmd_short_name are not specified.'.format(section, name))

        if is_help and not isinstance(type(), bool):
            raise OptionsError('Option {0}.{1} is defined as is_help, but with {2} instead of {3}.'.format(section, name, type, bool))

        option_definitions[section][name] = OptionObject(
            section=section,
            name=name,
            cmd_name=cmd_name,
            cmd_short_name=cmd_short_name,
            required=False,
            type=type,
            is_config_file=is_config_file,
            is_help=is_help,
            cmd_group=cmd_group,
            cmd_only=cmd_only or is_config_file or is_help,
            set_by=None,
        )

        if 'default' in kwargs:
            option_definitions[section][name].default = kwargs['default']
        elif type == bool:
            option_definitions[section][name].default = False
        else:
            option_definitions[section][name].required = True
            

        if is_config_file:
            config_file_def['section'] = section
            config_file_def['optname'] = name
            
            if 'default' in kwargs:
                config_file_def['filename'] = kwargs['default']

    def parse_config(config_file=None):
        '''Parses a configuration file.
        
        This function sets option values if not already set by the parse_args() function.'''

        if not config_file:
            if not config_file_def['filename']:
                raise OptionsError('You must pass a config_file path to parse_config() or define a command line option is_config_file=True with an optional default.')
            config_file = config_file_def['filename']

        config_file = os.path.abspath(config_file)
        if not os.path.exists(config_file):
            raise OptionsUserError('Configuration file {0} does not exist.'.format(config_file))

        cp.readfp(codecs.open(config_file, 'r', 'utf-8'))

        for section in option_definitions:
            if cp.has_section(section):
                for name in option_definitions[section]:
                    if option_definitions[section][name].set_by is not None:
                        continue

                    if option_definitions[section][name].cmd_only:
                        continue

                    if name in option_definitions[section]:
                        opt = option_definitions[section][name]

                        try:
                            if opt.type in adapters:
                                setattr(getattr(options, section), name, adapters[opt.type](section, name))
                            else:
                                value = cp.get(section, name)
                                setattr(getattr(options, section), name, opt.type(value))
                        except ValueError as e:
                            print(e)
                            raise OptionsUserError('Could not parse configuration file {0}: section {1} option {2} must be of type {3}, not {4}'.format(config_file, section, name, opt.type.__name__, type(getattr(getattr(options, section), name))))
                        except NoOptionError:
                            if option_definitions[section][name].set_by or hasattr(option_definitions[section][name], 'default'):
                                continue
                            raise OptionsUserError('Could not parse configuration file {0}: section {1} option {2} was not found'.format(config_file, section, name))
                        option_definitions[section][name].set_by = parse_config

    def parse_args(argv):
        '''Parses command line arguments and sets option values as well as the cmdargs list.'''

        short_args = []
        long_args = []
        cmd_options = {}

        for section in option_definitions:
            for name, opt in option_definitions[section].items():
                if not opt.cmd_name and not opt.cmd_short_name:
                    continue

                if opt.cmd_name:
                    if opt.type == bool:
                        long_args.append(opt.cmd_name)
                    else:
                        long_args.append('{0}='.format(opt.cmd_name))
                    cmd_options['--{0}'.format(opt.cmd_name)] = opt

                if opt.cmd_short_name:
                    if opt.type == bool:
                        short_args.append(opt.cmd_short_name)
                    else:
                        short_args.append('{0}:'.format(opt.cmd_short_name))
                    cmd_options['-{0}'.format(opt.cmd_short_name)] = opt

        try:
            opts, args = getopt.getopt(argv, ''.join(short_args), long_args)
        except getopt.GetoptError as err:
            raise OptionsUserError(err)

        # Empty the non-local cmdargs list, in case parse_args is called twice
        if cmdargs:
            del cmdargs[:]
        
        for arg in args:
            cmdargs.append(arg)

        for key, val in opts:
            if key in cmd_options:
                opt = cmd_options[key]
                if opt.is_help:
                    print_func(usage())
                    sys.exit(0)

                if opt.type == bool:
                    setattr(getattr(options, opt.section), opt.name, True)
                else:
                    try:
                        setattr(getattr(options, opt.section), opt.name, opt.type(val))
                    except ValueError:
                        raise OptionsUserError('Could not parse command line option {0}: it must be of type {1}.'.format(opt.name, opt.type.__name__))
                option_definitions[opt.section][opt.name].set_by = parse_args
            else:
                raise OptionsUserError('Unknown command line parameter {0}.'.format(key))

        if config_file_def['section'] and hasattr(getattr(options, config_file_def['section']), config_file_def['optname']):
            config_file_def['filename'] = getattr(getattr(options, config_file_def['section']), config_file_def['optname'])

    def init_options(argv=None, config_file=None):
        """Shortcut method for initializing all the options.

        Uses no configuration file unless a command line option has been defined 
        as is_config_file=True.
        """
        
        if argv is None:
            argv = sys.argv[1:]

        try:
            parse_args(argv)
            if config_file or config_file_def['filename']:
                parse_config(config_file)

            set_defaults()
            verify_all_options()

        except OptionsUserError as e:
            print_func(e)
            print_func('')
            print_func(usage())
            sys.exit(os.EX_USAGE)

    def set_defaults():
        '''Sets the default option values if they have not already been specified.'''

        for section in option_definitions:
            for name, opt in option_definitions[section].items():
                if not hasattr(option_definitions[section][name], 'default'):
                    continue

                if option_definitions[section][name].set_by is not None:
                    continue

                default = getattr(option_definitions[section][name], 'default')
                setattr(getattr(options, section), name, default)

    def verify_all_options():
        '''Raises an error if required options have not been specified by the user.'''

        if config_file_def['section'] and not config_file_def['filename']:
            option = option_definitions[config_file_def['section']][config_file_def['optname']]

            if option.cmd_name:
                error = 'Required command line option --{0} was not specified.'.format(option.cmd_name)
            elif option.cmd_short_name:
                error = 'Required command line option -{0} was not specified.'.format(option.cmd_short_name)
            raise OptionsUserError(error)

        errors = []
        for section in option_definitions:
            for name, opt in option_definitions[section].items():
                if option_definitions[section][name].required:
                    if not hasattr(getattr(options, section), name):

                        if not option_definitions[section][name].cmd_only:
                            final_words = ', and {0}.{1} could not be found in the config file.'.format(section, name)
                        else:
                            final_words = '.'

                        if option_definitions[section][name].cmd_name:
                            error = 'Required command line option --{0} was not specified{1}'.format(option_definitions[section][name].cmd_name, final_words)
                        elif option_definitions[section][name].cmd_short_name:
                            error = 'Required command line option -{0} was not specified{1}'.format(option_definitions[section][name].cmd_short_name, final_words)
                        else:
                            error = 'Required option {0}.{1} was not specified in the config file.'.format(section, name,)

                        errors.append(error)

        if cmdarg_defs['count'] == -1:
            pass # zero args required
        elif cmdarg_defs['count'] == -2:
            if len(cmdargs) < 1:
                errors.append('At least one <{0}> argument required.'.format(cmdarg_defs['args'][0]))
        elif cmdarg_defs['args'] is not None:
            if len(cmdargs) != cmdarg_defs['count']:
                errors.append('Required arguments were not specified: {0}.'.format(' '.join(['<{0}>'.format(s) for s in cmdarg_defs['args']])))

        if len(errors) > 0:
            raise OptionsUserError('\n'.join(errors))

    return options, cmdargs, define_opt, define_args, parse_config, parse_args, set_defaults, verify_all_options, init_options, generate_sample_config, usage
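
generate_sample_config() above shows the common write-then-getvalue idiom: accumulate text in an in-memory buffer, return the whole string, and close the buffer. A minimal standalone sketch of the same idiom (section and option names are made up for illustration):

import io

def render_ini(sections):
    # Accumulate the file text in memory instead of writing to disk.
    buf = io.StringIO()
    try:
        for section, options in sections.items():
            buf.write('[{0}]\n'.format(section))
            for name, value in options.items():
                buf.write('{0} = {1}\n'.format(name, value))
            buf.write('\n')
        return buf.getvalue()
    finally:
        buf.close()

print(render_ini({'server': {'host': 'localhost', 'port': 8080}}))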

Example 114

Project: python-mediawiki-utilities Source File: test_iterator.py
def test_complete():
    f = io.StringIO(SAMPLE_XML)

    dump = Iterator.from_file(f)
    eq_([0, 1], list(ns.id for ns in dump.namespaces))

    page = next(dump)
    eq_(page.title, "Foo")
    eq_(page.namespace, 0)
    eq_(page.id, 1)
    eq_(page.redirect, None)
    eq_(page.restrictions, [])

    revision = next(page)
    eq_(revision.id, 1)
    eq_(revision.timestamp, Timestamp("2004-08-09T09:04:08Z"))
    eq_(revision.contributor.id, 92182)
    eq_(revision.contributor.user_text, "Gen0cide")
    assert_is_instance(revision.text, Text)
    eq_(revision.text, "Revision 1 text")
    eq_(revision.text.bytes, 234)
    eq_(revision.text.id, 55)
    eq_(revision.text, "Revision 1 text")
    eq_(revision.sha1, "g9chqqg94myzq11c56ixvq7o1yg75n9")
    eq_(revision.comment, None)
    eq_(revision.model, "wikitext")
    eq_(revision.format, "text/x-wiki")
    eq_(revision.beginningofpage, True)

    revision = next(page)
    eq_(revision.id, 2)
    eq_(revision.timestamp, Timestamp("2004-08-10T09:04:08Z"))
    eq_(revision.contributor.id, None)
    eq_(revision.contributor.user_text, "222.152.210.109")
    eq_(revision.text, "Revision 2 text")
    eq_(revision.text.bytes, 235)
    eq_(revision.text.id, 56)
    eq_(revision.sha1, "g9chqqg94myzq11c56ixvq7o1yg75n9")
    assert_is_instance(revision.comment, Comment)
    eq_(revision.comment, "Comment 2")
    eq_(revision.model, "wikitext")
    eq_(revision.format, "text/x-wiki")
    eq_(revision.beginningofpage, False)

    page = next(dump)
    assert_is_instance(page, Page)
    eq_(page.title, "Bar")
    eq_(page.namespace, 1)
    eq_(page.id, 2)
    eq_(page.redirect.title, "Computer accessibility")
    eq_(page.restrictions, ["edit=sysop:move=sysop"])

    revision = next(page)
    assert_is_instance(revision, Revision)
    eq_(revision.id, 3)
    eq_(revision.timestamp, Timestamp("2004-08-11T09:04:08Z"))
    eq_(revision.contributor.id, None)
    eq_(revision.contributor.user_text, "222.152.210.22")
    assert_is_instance(revision.text, Text)
    eq_(revision.text.bytes, 236)
    eq_(revision.text.id, 57)
    eq_(revision.text, "Revision 3 text")
    eq_(revision.sha1, "g9chqqg94myzq11c56ixvq7o1yg75n9")
    eq_(revision.comment, None)
    eq_(revision.model, "wikitext")
    eq_(revision.format, "text/x-wiki")
    assert_is_instance(str(page), str)

    revision = next(page)
    assert_is_instance(revision, Revision)
    eq_(revision.id, 4)
    eq_(revision.timestamp, Timestamp("2004-08-12T09:04:08Z"))
    eq_(revision.contributor, None)
    assert_is_instance(revision.text, Text)
    eq_(revision.text.bytes, 237)
    eq_(revision.text.id, 58)
    eq_(revision.text, "")
    eq_(revision.sha1, "6ixvq7o1yg75n9g9chqqg94myzq11c5")
    eq_(revision.comment, None)
    eq_(revision.model, "wikitext")
    eq_(revision.format, "text/x-wiki")
    assert_is_instance(str(revision), str)
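
Iterator.from_file() is specific to python-mediawiki-utilities, but the underlying trick is simply that io.StringIO makes an XML string usable wherever a file object is expected. A minimal sketch with the standard library (the XML snippet is a made-up stand-in for SAMPLE_XML):

import io
import xml.etree.ElementTree as ET

SAMPLE_XML = "<mediawiki><page><title>Foo</title></page></mediawiki>"

# ET.parse() accepts any file object, including an in-memory one.
tree = ET.parse(io.StringIO(SAMPLE_XML))
for page in tree.getroot().iter('page'):
    print(page.find('title').text)   # Foo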

Example 115

Project: Pixie Source File: owner.py
    async def repl(self, ctx):
        msg = ctx.message

        variables = {
            'ctx': ctx,
            'bot': self.bot,
            'message': msg,
            'server': msg.server,
            'channel': msg.channel,
            'author': msg.author,
            'last': None,
        }

        if msg.channel.id in self.sessions:
            await self.bot.say('Already running a REPL session in this channel. Exit it with `quit`.')
            return

        self.sessions.add(msg.channel.id)
        await self.bot.say('Enter code to execute or evaluate. `exit()` or `quit` to exit.')
        while True:
            response = await self.bot.wait_for_message(author=msg.author, channel=msg.channel,
                                                       check=lambda m: m.content.startswith('`'))

            cleaned = self.cleanup_code(response.content)

            if cleaned in ('quit', 'exit', 'exit()'):
                await self.bot.say('Exiting.')
                self.sessions.remove(msg.channel.id)
                return

            executor = exec
            if cleaned.count('\n') == 0:
                # single statement, potentially 'eval'
                try:
                    code = compile(cleaned, '<repl session>', 'eval')
                except SyntaxError:
                    pass
                else:
                    executor = eval

            if executor is exec:
                try:
                    code = compile(cleaned, '<repl session>', 'exec')
                except SyntaxError as e:
                    await self.bot.say(self.get_syntax_error(e))
                    continue

            variables['message'] = response

            fmt = None
            stdout = io.StringIO()

            try:
                with redirect_stdout(stdout):
                    result = executor(code, variables)
                    if inspect.isawaitable(result):
                        result = await result
            except Exception as e:
                value = stdout.getvalue()
                fmt = '```py\n{}{}\n```'.format(value, traceback.format_exc())
            else:
                value = stdout.getvalue()
                if result is not None:
                    fmt = '```py\n{}{}\n```'.format(value, result)
                    variables['last'] = result
                elif value:
                    fmt = '```py\n{}\n```'.format(value)

            try:
                if fmt is not None:
                    if len(fmt) > 2000:
                        await self.bot.send_message(msg.channel, 'Content too big to be printed.')
                    else:
                        await self.bot.send_message(msg.channel, fmt)
            except discord.Forbidden:
                pass
            except discord.HTTPException as e:
                await self.bot.send_message(msg.channel, 'Unexpected error: `{}`'.format(e))
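
The interesting io.StringIO usage here is the capture of stdout around exec/eval via contextlib.redirect_stdout. A self-contained sketch of just that part:

import io
from contextlib import redirect_stdout

code = compile("print('hello from the sandbox')", '<repl session>', 'exec')
buf = io.StringIO()

# Anything the executed code prints is collected in the buffer
# instead of going to the terminal.
with redirect_stdout(buf):
    exec(code, {})

print(repr(buf.getvalue()))   # 'hello from the sandbox\n'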

Example 116

Project: openmc Source File: reaction.py
def _get_fission_products_endf(ev):
    """Generate fission products from an ENDF evaluation

    Parameters
    ----------
    ev : openmc.data.endf.Evaluation

    Returns
    -------
    products : list of openmc.data.Product
        Prompt and delayed fission neutrons
    derived_products : list of openmc.data.Product
        "Total" fission neutron

    """
    products = []
    derived_products = []

    if (1, 456) in ev.section:
        prompt_neutron = Product('neutron')
        prompt_neutron.emission_mode = 'prompt'

        # Prompt nu values
        file_obj = StringIO(ev.section[1, 456])
        lnu = get_head_record(file_obj)[3]
        if lnu == 1:
            # Polynomial representation
            items, coefficients = get_list_record(file_obj)
            prompt_neutron.yield_ = Polynomial(coefficients)
        elif lnu == 2:
            # Tabulated representation
            params, prompt_neutron.yield_ = get_tab1_record(file_obj)

        products.append(prompt_neutron)

    if (1, 452) in ev.section:
        total_neutron = Product('neutron')
        total_neutron.emission_mode = 'total'

        # Total nu values
        file_obj = StringIO(ev.section[1, 452])
        lnu = get_head_record(file_obj)[3]
        if lnu == 1:
            # Polynomial representation
            items, coefficients = get_list_record(file_obj)
            total_neutron.yield_ = Polynomial(coefficients)
        elif lnu == 2:
            # Tabulated representation
            params, total_neutron.yield_ = get_tab1_record(file_obj)

        if (1, 456) in ev.section:
            derived_products.append(total_neutron)
        else:
            products.append(total_neutron)

    if (1, 455) in ev.section:
        file_obj = StringIO(ev.section[1, 455])

        # Determine representation of delayed nu data
        items = get_head_record(file_obj)
        ldg = items[2]
        lnu = items[3]

        if ldg == 0:
            # Delayed-group constants energy independent
            items, decay_constants = get_list_record(file_obj)
            for constant in decay_constants:
                delayed_neutron = Product('neutron')
                delayed_neutron.emission_mode = 'delayed'
                delayed_neutron.decay_rate = constant
                products.append(delayed_neutron)
        elif ldg == 1:
            # Delayed-group constants energy dependent
            raise NotImplementedError('Delayed neutron with energy-dependent '
                                      'group constants.')

        # In MF=1, MT=455, the delayed-group abundances are actually not
        # specified if the group constants are energy-independent. In this case,
        # the abundances must be inferred from MF=5, MT=455 where multiple
        # energy distributions are given.
        if lnu == 1:
            # Nu represented as polynomial
            items, coefficients = get_list_record(file_obj)
            yield_ = Polynomial(coefficients)
            for neutron in products[-6:]:
                neutron.yield_ = deepcopy(yield_)
        elif lnu == 2:
            # Nu represented by tabulation
            params, yield_ = get_tab1_record(file_obj)
            for neutron in products[-6:]:
                neutron.yield_ = deepcopy(yield_)

        if (5, 455) in ev.section:
            file_obj = StringIO(ev.section[5, 455])
            items = get_head_record(file_obj)
            nk = items[4]
            if nk != len(decay_constants):
                raise ValueError(
                    'Number of delayed neutron fission spectra ({}) does not '
                    'match number of delayed neutron precursors ({}).'.format(
                        nk, len(decay_constants)))
            for i in range(nk):
                params, applicability = get_tab1_record(file_obj)
                dist = UncorrelatedAngleEnergy()
                dist.energy = EnergyDistribution.from_endf(file_obj, params)

                delayed_neutron = products[1 + i]
                yield_ = delayed_neutron.yield_

                # Here we handle the fact that the delayed neutron yield is the
                # product of the total delayed neutron yield and the
                # "applicability" of the energy distribution law in file 5.
                if isinstance(yield_, Tabulated1D):
                    if np.all(applicability.y == applicability.y[0]):
                        yield_.y *= applicability.y[0]
                    else:
                        # Get union energy grid and ensure energies are within
                        # interpolable range of both functions
                        max_energy = min(yield_.x[-1], applicability.x[-1])
                        energy = np.union1d(yield_.x, applicability.x)
                        energy = energy[energy <= max_energy]

                        # Calculate group yield
                        group_yield = yield_(energy) * applicability(energy)
                        delayed_neutron.yield_ = Tabulated1D(energy, group_yield)
                elif isinstance(yield_, Polynomial):
                    if len(yield_) == 1:
                        delayed_neutron.yield_ = deepcopy(applicability)
                        delayed_neutron.yield_.y *= yield_.coef[0]
                    else:
                        if np.all(applicability.y == applicability.y[0]):
                            yield_.coef[0] *= applicability.y[0]
                        else:
                            raise NotImplementedError(
                                'Total delayed neutron yield and delayed group '
                                'probability are both energy-dependent.')

                delayed_neutron.distribution.append(dist)

    return products, derived_products
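
get_head_record(), get_list_record() and friends are openmc helpers, but they all rely on the same property: StringIO(ev.section[...]) turns a stored text section into a file-like object with a read cursor. A stripped-down sketch of that record-by-record consumption (the two-line section text is illustrative):

import io

section = "1.0 2.0\n3.0 4.0\n"
file_obj = io.StringIO(section)

# Each readline() advances the cursor through the in-memory "file",
# exactly as it would through a file on disk.
head = file_obj.readline().split()     # ['1.0', '2.0']
body = file_obj.readline().split()     # ['3.0', '4.0']
assert file_obj.readline() == ''       # end of section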

Example 117

Project: python-jamo Source File: test_jamo.py
    def test_jamo_to_hangul(self):
        """jamo_to_hangul tests
        Arguments may be jamo characters including HCJ. Throws an
        InvalidJamoError if there is no corresponding Hangul character to the
        inputs.

        Outputs a single Hangul character.
        """

        # Support jamo -> Hangul conversion.
        chr_cases = ((chr(0x110c), chr(0x1161), chr(0)),
                     (chr(0x1106), chr(0x1169), chr(0)),
                     (chr(0x1112), chr(0x1161), chr(0x11ab)),
                     (chr(0x1100), chr(0x1173), chr(0x11af)),
                     (chr(0x1109), chr(0x1165), chr(0)),
                     (chr(0x110b), chr(0x116e), chr(0x11af)),
                     (chr(0x1111), chr(0x1167), chr(0x11bc)),
                     (chr(0x110b), chr(0x1163), chr(0x11bc)))
        # Support HCJ -> Hangul conversion.
        hcj_cases = (('ㅈ', 'ㅏ', ''),
                     ('ㅁ', 'ㅗ', ''),
                     ('ㅎ', 'ㅏ', 'ㄴ'),
                     ('ㄱ', 'ㅡ', 'ㄹ'),
                     ('ㅅ', 'ㅓ', ''),
                     ('ㅇ', 'ㅜ', 'ㄹ'),
                     ('ㅍ', 'ㅕ', 'ㅇ'),
                     ('ㅇ', 'ㅑ', 'ㅇ'))
        desired_hangul1 = ("자",
                           "모",
                           "한",
                           "글",
                           "서",
                           "울",
                           "평",
                           "양")
        # Test the arity 2 version.
        arity2_cases = (('ㅎ', 'ㅏ'),)
        desired_hangul2 = ("하",)
        # Support mixed jamo and hcj conversion.
        mixed_cases = (('ᄒ', 'ㅏ', 'ㄴ'),)
        desired_hangul3 = ("한",)

        invalid_cases = [('a', 'b', 'c'), ('a', 'b'),
                         ('ㄴ', 'ㄴ', 'ㄴ'), ('ㅏ', 'ㄴ')]

        all_tests = itertools.chain(zip(chr_cases, desired_hangul1),
                                    zip(hcj_cases, desired_hangul1),
                                    zip(arity2_cases, desired_hangul2),
                                    zip(mixed_cases, desired_hangul3))

        for args, hangul in all_tests:
            trial = jamo.jamo_to_hangul(*args)
            assert hangul == trial,\
                ("Conversion from hcj to Hangul failed. "
                 "Incorrect conversion from "
                 "{args} to "
                 "({hangul}). "
                 "Got {failure}.").format(args=args,
                                          hangul=hangul,
                                          failure=trial)

        # Negative tests
        _stderr = jamo.jamo.stderr
        jamo.jamo.stderr = io.StringIO()
        for _ in invalid_cases:
            try:
                print(_)
                jamo.jamo_to_hangul(*_)
                assert False, "Accepted bad input without throwing exception."
            except jamo.InvalidJamoError:
                pass
        jamo.jamo.stderr = _stderr
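
The negative tests above silence warnings by swapping a module's stderr attribute for a StringIO and restoring it afterwards. The same swap works for sys.stderr itself; a minimal sketch:

import io
import sys

_stderr = sys.stderr
sys.stderr = io.StringIO()    # capture instead of printing
try:
    print('noisy diagnostic', file=sys.stderr)
finally:
    buf, sys.stderr = sys.stderr, _stderr

print(repr(buf.getvalue()))   # 'noisy diagnostic\n'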

Example 118

Project: Arelle Source File: FormulaGenerator.py
def generateFormulaLB(cntlr, sphinxFiles, generatedSphinxFormulasDirectory):

    if sys.version[0] >= '3':
        from arelle.pyparsing.pyparsing_py3 import lineno
    else: 
        from pyparsing import lineno
        
    from .SphinxContext import SphinxContext
    from .SphinxValidator import validate

    msgFile = None
    sourceString = None

    # logMessage operates without an instance document or ModelXbrl, so it simulates the log function
    
    def logMessage(severity, code, text, **kwargs):
        if "sourceFileLines" in kwargs:  # use pairs of file and line number
            fileLines = ", ".join((file + (" " + str(line)) if line else "")
                                  for file, line in kwargs["sourceFileLines"])
        elif "sourceFileLine" in kwargs:
            file, line = kwargs["sourceFileLine"]
            fileLines = file + ((" " + str(line)) if line else "")
        else:
            fileLines = ""
        if fileLines:
            fileLines = ", " + fileLines
        try:
            cntlr.addToLog("[{0}] {1}{2}".format(
                                      code,
                                      text % kwargs,
                                      fileLines))
        except KeyError as err:
            cntlr.addToLog("[{0}] {1}: Missing message parameter: {2}; {3}".format(
                                      code,
                                      text,
                                      err,
                                      fileLines))

    from arelle import XmlUtil

    sc = SphinxContext( parse(cntlr, logMessage, sphinxFiles) )
    sc.logMessage = logMessage
    validate(logMessage, sc)
    assertionIDs = set()
        
    for prog in sc.sphinxProgs:
        sphinxFile = prog[0].fileName
        sphinxXmlns = { "xlink": 'http://www.w3.org/1999/xlink',
                        "link": 'http://www.xbrl.org/2003/linkbase',
                        "xbrli": 'http://www.xbrl.org/2003/instance',
                        "generic": 'http://xbrl.org/2008/generic',
                        "formula": 'http://xbrl.org/2008/formula',
                        "validation": 'http://xbrl.org/2008/validation',
                        "variable": 'http://xbrl.org/2008/variable',
                        "label": 'http://xbrl.org/2008/label',
                        "ca": 'http://xbrl.org/2008/assertion/consistency',
                        "ea": 'http://xbrl.org/2008/assertion/existence',
                        "va": 'http://xbrl.org/2008/assertion/value',
                        "msg": 'http://xbrl.org/2010/message',
                        "bf": 'http://xbrl.org/2008/filter/boolean',
                        "cf": 'http://xbrl.org/2008/filter/concept',
                        "df": 'http://xbrl.org/2008/filter/dimension',
                        "gf": 'http://xbrl.org/2008/filter/general',
                        "pf": 'http://xbrl.org/2008/filter/period',
                        "uf": 'http://xbrl.org/2008/filter/unit',
                        "xfi": 'http://www.xbrl.org/2008/function/instance',
                        "xsi": 'http://www.w3.org/2001/XMLSchema-instance',
                        "xs": 'http://www.w3.org/2001/XMLSchema',
                        }
        for node in prog:
            if isinstance(node, astNamespaceDeclaration):
                sphinxXmlns[node.prefix] = node.namespace
    
        formulaFile = sphinxFile.rpartition(".")[0] + "-formula.xml"
        
        # save in generatedSphinxFormulasDirectory if specified and valid (exists)
        if generatedSphinxFormulasDirectory and os.path.isdir(generatedSphinxFormulasDirectory):
            formulaFile = os.path.join(generatedSphinxFormulasDirectory, 
                                       os.path.basename(formulaFile))
        
        xbrlLBfile = io.StringIO('''
<nsmap>
<link:linkbase

{0}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd"
>
<link:arcroleRef arcroleURI='http://xbrl.org/arcrole/2008/element-label'
    xlink:href='http://www.xbrl.org/2008/generic-label.xsd#element-label'
    xlink:type='simple'/>
<link:arcroleRef arcroleURI='http://xbrl.org/arcrole/2008/variable-set'
    xlink:href='http://www.xbrl.org/2008/variable.xsd#variable-set'
    xlink:type='simple'/>
<link:arcroleRef arcroleURI='http://xbrl.org/arcrole/2008/variable-filter'
    xlink:href='http://www.xbrl.org/2008/variable.xsd#variable-filter'
    xlink:type='simple'/>
<link:arcroleRef arcroleURI='http://xbrl.org/arcrole/2008/variable-set-precondition'
    xlink:href='http://www.xbrl.org/2008/variable.xsd#variable-set-precondition'
    xlink:type='simple'/>
<link:arcroleRef arcroleURI='http://xbrl.org/arcrole/2008/consistency-assertion-formula'
    xlink:href='http://www.xbrl.org/2008/consistency-assertion.xsd#consistency-assertion-formula'
    xlink:type='simple'/>
<link:roleRef roleURI='http://www.xbrl.org/2008/role/link'
    xlink:href='http://www.xbrl.org/2008/generic-link.xsd#standard-link-role'
    xlink:type='simple'/>
<link:roleRef roleURI='http://www.xbrl.org/2008/role/label'
    xlink:href='http://www.xbrl.org/2008/generic-label.xsd#standard-label'
    xlink:type='simple'/>
<link:roleRef roleURI="http://www.xbrl.org/2010/role/message"
    xlink:type="simple"
    xlink:href="http://www.xbrl.org/2010/generic-message.xsd#standard-message"/>
<link:arcroleRef arcroleURI="http://xbrl.org/arcrole/2010/assertion-unsatisfied-message"
     xlink:type="simple"
     xlink:href="http://www.xbrl.org/2010/validation-message.xsd#assertion-unsatisfied-message"/>
<link:arcroleRef arcroleURI="http://xbrl.org/arcrole/2010/assertion-satisfied-message"
     xlink:type="simple"
     xlink:href="http://www.xbrl.org/2010/validation-message.xsd#assertion-satisfied-message"/>
<link:arcroleRef arcroleURI='http://xbrl.org/arcrole/2008/boolean-filter'
     xlink:href='http://www.xbrl.org/2008/boolean-filter.xsd#boolean-filter'
     xlink:type='simple'/>

<generic:link xlink:type="extended" xlink:role="http://www.xbrl.org/2003/role/link"/>
</link:linkbase>
</nsmap>
<!--  Generated by Arelle(r) http://arelle.org --> 
'''.format('\n'.join("xmlns{0}='{1}'".format((":" + prefix) if prefix else "",
                                             namespace)           
                     for prefix, namespace in sphinxXmlns.items())
           )
        )
        msgFile = os.path.basename(formulaFile)
        xmlDocument = etree.parse(xbrlLBfile, base_url=formulaFile)
        xbrlLBfile.close()
        nsmapElt = xmlDocument.getroot()
        #xmlDocument.getroot().init(self)  ## is this needed ??
        for lbElement in xmlDocument.iter(tag="{http://www.xbrl.org/2003/linkbase}linkbase"):
            break
        for e in xmlDocument.iter(tag="{http://xbrl.org/2008/generic}link"):
            sc.genLinkElement = e
            break

        class DocObj:  # fake ModelDocument for namespaces
            def __init__(self):
                self.xmlRootElement = lbElement
                self.xmlDocument = xmlDocument
        docObj = DocObj()
        
        numRules = 0
        sc.generatedVarNbr = 1
        sc.xpathCode = None
        sc.bindAsSequence = False
        sc.nodeXpathVarBindings = {}
                
        for node in prog:
            if isinstance(node, (astFormulaRule, astReportRule, astValidationRule)):
                # form unique ID
                sc.assertionID = node.name
                if sc.assertionID in assertionIDs:
                    for suffixNumber in range(1,10000):
                        if sc.assertionID + str(suffixNumber) not in assertionIDs:
                            sc.assertionID += str(suffixNumber)
                            break
                assertionIDs.add(sc.assertionID)
                sc.assertionElt = etree.SubElement(sc.genLinkElement,
                                                   "{http://xbrl.org/2008/assertion/value}valueAssertion",
                                                   attrib={"{http://www.w3.org/1999/xlink}type": "resource",
                                                           "{http://www.w3.org/1999/xlink}label": sc.assertionID,
                                                           "id": sc.assertionID,
                                                           "aspectModel": "dimensional",
                                                           "implicitFiltering": "true"})
                sc.assertionVarNames = {"factVar_"}
                sc.generalVarNames = {}
                if isinstance(node, astFormulaRule):
                    sc.assertionElt.set("test", xpathCode(node.expr, sc))
                    msgType = "assertion-unsatisfied-message"
                elif isinstance(node, astReportRule):
                    sc.tags["value"] = node.expr  # no test expression needed
                    msgType = "assertion-unsatisfied-message"
                elif isinstance(node, astValidationRule):
                    sc.assertionElt.set("test", "not( " + xpathCode(node.expr, sc) + " )")
                    msgType = "assertion-satisfied-message"
                genMessage(node.message, sc, msgType)

        sc.nodeXpathVarBindings.clear()
        
        with open(formulaFile, "w", encoding="utf-8") as fh:
            XmlUtil.writexml(fh, xmlDocument, encoding="utf-8")
    
        logMessage("INFO", "info:compileSphinx",
                 _("Compiled Sphinx of %(sphinxFile)s has %(numberRules)s tables in file %(formulaFile)s."),
                 sphinxFile=sphinxFiles, 
                 numberRules=numRules,
                 formulaFile=formulaFile)
    
    logMessage = None
    sc.close()

    cntlr.showStatus("Finshed sphinx files {0}".format(", ".join(os.path.basename(f)
                                                                 for f in sphinxFiles)),
                     5000)
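
Here io.StringIO lets a linkbase skeleton be built with str.format() and handed straight to etree.parse() without a temporary file; only the finished formula file ever touches disk. A reduced sketch of that build-in-memory-then-parse flow using the standard library (template and element names are invented):

import io
import xml.etree.ElementTree as ET

template = "<root><greeting>{0}</greeting></root>"

# Fill in the template, then parse it as if it were a file on disk.
doc = ET.parse(io.StringIO(template.format('hello')))
print(ET.tostring(doc.getroot(), encoding='unicode'))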

Example 119

Project: portage-funtoo Source File: EbuildPhase.py
	def _ebuild_exit(self, ebuild_process):

		if self._ebuild_lock is not None:
			self._ebuild_lock.unlock()
			self._ebuild_lock = None

		fail = False
		if self._default_exit(ebuild_process) != os.EX_OK:
			if self.phase == "test" and \
				"test-fail-continue" in self.settings.features:
				# mark test phase as complete (bug #452030)
				try:
					open(_unicode_encode(os.path.join(
						self.settings["PORTAGE_BUILDDIR"], ".tested"),
						encoding=_encodings['fs'], errors='strict'),
						'wb').close()
				except OSError:
					pass
			else:
				fail = True

		if not fail:
			self.returncode = None

		logfile = None
		if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
			logfile = self.settings.get("PORTAGE_LOG_FILE")

		if self.phase == "install":
			out = io.StringIO()
			_check_build_log(self.settings, out=out)
			msg = out.getvalue()
			self.scheduler.output(msg, log_path=logfile)

		if fail:
			self._die_hooks()
			return

		settings = self.settings
		_post_phase_userpriv_perms(settings)

		if self.phase == "unpack":
			# Bump WORKDIR timestamp, in case tar gave it a timestamp
			# that will interfere with distfiles / WORKDIR timestamp
			# comparisons as reported in bug #332217. Also, fix
			# ownership since tar can change that too.
			os.utime(settings["WORKDIR"], None)
			_prepare_workdir(settings)
		elif self.phase == "install":
			out = io.StringIO()
			_post_src_install_write_metadata(settings)
			_post_src_install_uid_fix(settings, out)
			msg = out.getvalue()
			if msg:
				self.scheduler.output(msg, log_path=logfile)
		elif self.phase == "preinst":
			_preinst_bsdflags(settings)
		elif self.phase == "postinst":
			_postinst_bsdflags(settings)

		post_phase_cmds = _post_phase_cmds.get(self.phase)
		if post_phase_cmds is not None:
			if logfile is not None and self.phase in ("install",):
				# Log to a temporary file, since the code we are running
				# reads PORTAGE_LOG_FILE for QA checks, and we want to
				# avoid annoying "gzip: unexpected end of file" messages
				# when FEATURES=compress-build-logs is enabled.
				fd, logfile = tempfile.mkstemp()
				os.close(fd)
			post_phase = MiscFunctionsProcess(background=self.background,
				commands=post_phase_cmds, logfile=logfile, phase=self.phase,
				scheduler=self.scheduler, settings=settings)
			self._start_task(post_phase, self._post_phase_exit)
			return

		# this point is not reachable if there was a failure and
		# we returned for die_hooks above, so returncode must
		# indicate success (especially if ebuild_process.returncode
		# is unsuccessful and test-fail-continue came into play)
		self.returncode = os.EX_OK
		self._current_task = None
		self.wait()
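
_check_build_log() and _post_src_install_uid_fix() only require an object with a write() method, so a StringIO can stand in for a log file and the collected text is forwarded to the scheduler afterwards. A sketch of the same pass-a-buffer convention (check_warnings is a made-up stand-in, not a portage API):

import io

def check_warnings(lines, out):
    # The callee never knows whether it writes to a real file or to memory.
    for line in lines:
        if 'WARNING' in line:
            out.write(line + '\n')

out = io.StringIO()
check_warnings(['compiling foo.c', 'WARNING: implicit declaration'], out)
msg = out.getvalue()
if msg:
    print(msg, end='')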

Example 120

Project: networkx Source File: gml.py
Function: literal_stringizer
def literal_stringizer(value):
    """Convert a value to a Python literal in GML representation.

    Parameters
    ----------
    value : object
        The value to be converted to GML representation.

    Returns
    -------
    rep : string
        A double-quoted Python literal representing value. Unprintable
        characters are replaced by XML character references.

    Raises
    ------
    ValueError
        If `value` cannot be converted to GML.

    Notes
    -----
    `literal_stringizer` is largely the same as `repr` in terms of
    functionality but attempts to prefix `unicode` and `bytes` literals with
    `u` and `b` to provide better interoperability of data generated by
    Python 2 and Python 3.

    The original value can be recovered using the
    :func:`networkx.readwrite.gml.literal_destringizer` function.
    """
    def stringize(value):
        if isinstance(value, (int, long, bool)) or value is None:
            buf.write(str(value))
        elif isinstance(value, unicode):
            text = repr(value)
            if text[0] != 'u':
                try:
                    value.encode('latin1')
                except UnicodeEncodeError:
                    text = 'u' + text
            buf.write(text)
        elif isinstance(value, (float, complex, str, bytes)):
            buf.write(repr(value))
        elif isinstance(value, list):
            buf.write('[')
            first = True
            for item in value:
                if not first:
                    buf.write(',')
                else:
                    first = False
                stringize(item)
            buf.write(']')
        elif isinstance(value, tuple):
            if len(value) > 1:
                buf.write('(')
                first = True
                for item in value:
                    if not first:
                        buf.write(',')
                    else:
                        first = False
                    stringize(item)
                buf.write(')')
            elif value:
                buf.write('(')
                stringize(value[0])
                buf.write(',)')
            else:
                buf.write('()')
        elif isinstance(value, dict):
            buf.write('{')
            first = True
            for key, value in value.items():
                if not first:
                    buf.write(',')
                else:
                    first = False
                stringize(key)
                buf.write(':')
                stringize(value)
            buf.write('}')
        elif isinstance(value, set):
            buf.write('{')
            first = True
            for item in value:
                if not first:
                    buf.write(',')
                else:
                    first = False
                stringize(item)
            buf.write('}')
        else:
            raise ValueError(
                '%r cannot be converted into a Python literal' % (value,))

    buf = StringIO()
    stringize(value)
    return buf.getvalue()
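
The recursive stringize() above appends every fragment to one shared StringIO, which is usually cheaper and clearer than concatenating strings at each level of recursion. A minimal sketch of the same buffer-threading shape (handling only lists and atoms):

import io

def stringize(value, buf):
    # All recursive calls append to the same in-memory buffer.
    if isinstance(value, list):
        buf.write('[')
        for i, item in enumerate(value):
            if i:
                buf.write(',')
            stringize(item, buf)
        buf.write(']')
    else:
        buf.write(repr(value))

buf = io.StringIO()
stringize([1, 'a', [2, 3]], buf)
print(buf.getvalue())   # [1,'a',[2,3]]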

Example 121

Project: xml_diff Source File: __init__.py
Function: perform_diff
def perform_diff(differ, text1, text2, word_separator_regex):
	# Do a word-by-word comparison, which produces more semantically sensible
	# results. Then turn the comparison back to characters and yield diff
	# operations (+/-/=) over real character ranges, such that the ranges
	# never cross an element.

	word_map = { }
	word_map[node_end_sentinel] = unichr(254) # ensure it's in there

	def text_to_words(text):
		# Split on non-word characters and the node_end_sentinel, so that words
		# do not cross elements.
		words = re.split(u'(' + re.escape(node_end_sentinel) + u'|' + word_separator_regex + u')', text)
		encoded_text = StringIO()
		for wd in words:
			if wd == "": continue # when there are multiple delimiters in a row, we may get blanks from re.split
			if wd != node_end_sentinel and node_end_sentinel in wd: raise ValueError(wd)
			wd_code = word_map.setdefault(wd, unichr(255 + len(word_map)) )
			encoded_text.write(wd_code)
		return encoded_text.getvalue()

	# Map the text strings to a hacky Unicode character array where characters
	# map to words in the original, using the same word_map for both documents.
	text1 = text_to_words(text1)
	text2 = text_to_words(text2)

	# Perform the diff on the hacky Unicode string.
	wdiff = differ(text1, text2)

	# Map everything back to real characters.
	# Yield to the caller diff operations over the real strings we were
	# asked to compare.
	word_map_inv = dict((v, k) for (k, v) in word_map.items())
	diff = []
	i1 = 0
	i2 = 0
	for op, oplen in wdiff:
		# The diff is an array of (operation, text_length) tuples
		# that we can go back to the original hacky word-coded
		# "text" arrays for.
		if op == "-":
			# deleted words
			text = text1[i1:i1+oplen]
			i1 += oplen
		elif op == "+":
			# inserted words
			text = text2[i2:i2+oplen]
			i2 += oplen
		elif op == "=":
			# same content in both strings
			text = text2[i2:i2+oplen]
			i1 += oplen
			i2 += oplen
		else:
			raise ValueError("differ returned an invalid op code: " + repr(op))

		# Convert back to real characters by mapping hacky codes to word strings.
		text = "".join(word_map_inv[c] for c in text)

		# Yield a separate operation between each node_end_sentinel so that no
		# operation involves text that crosses into more than one element.
		for t in re.split("(" + re.escape(node_end_sentinel) + ")", text):
			if t != "":
				yield (op, t)
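
text_to_words() compresses each distinct word to a single code character, with a StringIO accumulating the encoded text, so a character-level differ ends up comparing whole words. A simplified sketch of just the encoding step (this uses the Unicode private-use area rather than the unichr(255 + n) scheme above):

import io
import re

word_map = {}

def encode(text):
    # Map each distinct token to one private-use character.
    encoded = io.StringIO()
    for wd in re.split(r'(\W+)', text):
        if wd:
            encoded.write(word_map.setdefault(wd, chr(0xE000 + len(word_map))))
    return encoded.getvalue()

a, b = encode('the quick fox'), encode('the slow fox')
print(len(a), len(b))   # 5 5 -- three words plus two separators each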

Example 122

Project: kbengine Source File: config.py
def listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.

    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().

    Use the ``verify`` argument to verify any bytes received across the wire
    from a client. If specified, it should be a callable which receives a
    single argument - the bytes of configuration data received across the
    network - and it should return either ``None``, to indicate that the
    passed in bytes could not be verified and should be discarded, or a
    byte string which is then passed to the configuration machinery as
    normal. Note that you can return transformed bytes, e.g. by decrypting
    the bytes passed in.
    """
    if not thread: #pragma: no cover
        raise NotImplementedError("listen() needs threading to work")

    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.

        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.

            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    if self.server.verify is not None:
                        chunk = self.server.verify(chunk)
                    if chunk is not None:   # verified, can process
                        chunk = chunk.decode("utf-8")
                        try:
                            import json
                            d = json.loads(chunk)
                            assert isinstance(d, dict)
                            dictConfig(d)
                        except Exception:
                            #Apply new configuration.

                            file = io.StringIO(chunk)
                            try:
                                fileConfig(file)
                            except Exception:
                                traceback.print_exc()
                    if self.server.ready:
                        self.server.ready.set()
            except OSError as e:
                if e.errno != RESET_ERROR:
                    raise

    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """

        allow_reuse_address = 1

        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None, ready=None, verify=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1
            self.ready = ready
            self.verify = verify

        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()
            self.socket.close()

    class Server(threading.Thread):

        def __init__(self, rcvr, hdlr, port, verify):
            super(Server, self).__init__()
            self.rcvr = rcvr
            self.hdlr = hdlr
            self.port = port
            self.verify = verify
            self.ready = threading.Event()

        def run(self):
            server = self.rcvr(port=self.port, handler=self.hdlr,
                               ready=self.ready,
                               verify=self.verify)
            if self.port == 0:
                self.port = server.server_address[1]
            self.ready.set()
            global _listener
            logging._acquireLock()
            _listener = server
            logging._releaseLock()
            server.serve_until_stopped()

    return Server(ConfigSocketReceiver, ConfigStreamHandler, port, verify)
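
The docstring spells out the wire format the handler expects: a 4-byte big-endian length packed with struct.pack(">L", n), followed by the configuration text. A sketch of a matching client (host is an assumption; 9030 is logging.config.DEFAULT_LOGGING_CONFIG_PORT):

import socket
import struct

def send_config(conf_text, host='localhost', port=9030):
    data = conf_text.encode('utf-8')
    with socket.create_connection((host, port)) as sock:
        # Length prefix first, then the payload, mirroring
        # what ConfigStreamHandler.handle() reads back.
        sock.sendall(struct.pack('>L', len(data)) + data)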

Example 123

Project: portage Source File: EbuildPhase.py
	def _ebuild_exit(self, ebuild_process):

		if self._ebuild_lock is not None:
			self._ebuild_lock.unlock()
			self._ebuild_lock = None

		fail = False
		if self._default_exit(ebuild_process) != os.EX_OK:
			if self.phase == "test" and \
				"test-fail-continue" in self.settings.features:
				# mark test phase as complete (bug #452030)
				try:
					open(_unicode_encode(os.path.join(
						self.settings["PORTAGE_BUILDDIR"], ".tested"),
						encoding=_encodings['fs'], errors='strict'),
						'wb').close()
				except OSError:
					pass
			else:
				fail = True

		if not fail:
			self.returncode = None

		logfile = self._get_log_path()

		if self.phase == "install":
			out = io.StringIO()
			_check_build_log(self.settings, out=out)
			msg = out.getvalue()
			self.scheduler.output(msg, log_path=logfile)

		if fail:
			self._die_hooks()
			return

		settings = self.settings
		_post_phase_userpriv_perms(settings)

		if self.phase == "unpack":
			# Bump WORKDIR timestamp, in case tar gave it a timestamp
			# that will interfere with distfiles / WORKDIR timestamp
			# comparisons as reported in bug #332217. Also, fix
			# ownership since tar can change that too.
			os.utime(settings["WORKDIR"], None)
			_prepare_workdir(settings)
		elif self.phase == "install":
			out = io.StringIO()
			_post_src_install_write_metadata(settings)
			_post_src_install_uid_fix(settings, out)
			msg = out.getvalue()
			if msg:
				self.scheduler.output(msg, log_path=logfile)
		elif self.phase == "preinst":
			_preinst_bsdflags(settings)
		elif self.phase == "postinst":
			_postinst_bsdflags(settings)

		post_phase_cmds = _post_phase_cmds.get(self.phase)
		if post_phase_cmds is not None:
			if logfile is not None and self.phase in ("install",):
				# Log to a temporary file, since the code we are running
				# reads PORTAGE_LOG_FILE for QA checks, and we want to
				# avoid annoying "gzip: unexpected end of file" messages
				# when FEATURES=compress-build-logs is enabled.
				fd, logfile = tempfile.mkstemp()
				os.close(fd)
			post_phase = MiscFunctionsProcess(background=self.background,
				commands=post_phase_cmds, fd_pipes=self.fd_pipes,
				logfile=logfile, phase=self.phase, scheduler=self.scheduler,
				settings=settings)
			self._start_task(post_phase, self._post_phase_exit)
			return

		# this point is not reachable if there was a failure and
		# we returned for die_hooks above, so returncode must
		# indicate success (especially if ebuild_process.returncode
		# is unsuccessful and test-fail-continue came into play)
		self.returncode = os.EX_OK
		self._current_task = None
		self.wait()

Example 124

Project: pywcsgrid2 Source File: plot_directive_v3.py
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.
    """

    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.

    pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in'
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Redirect stdout
    stdout = sys.stdout
    sys.stdout = io.StringIO()

    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]

    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                if setup.config.plot_pre_code is None:
                    exec("import numpy as np\nfrom matplotlib import pyplot as plt\n", ns)
                else:
                    exec(setup.config.plot_pre_code, ns)
            if "__main__" in code:
                exec("__name__ = '__main__'", ns)
            exec(code, ns)
            if function_name is not None:
                exec(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            raise PlotError(traceback.format_exc())
    finally:
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
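
run_code() swaps sys.stdout for an io.StringIO by hand and restores it in the finally block. Since Python 3.4, contextlib.redirect_stdout performs the same save-and-restore; a self-contained equivalent of the capture step:

import io
from contextlib import redirect_stdout

buf = io.StringIO()
with redirect_stdout(buf):
    print("captured, not written to the terminal")
print("captured text: %r" % buf.getvalue())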

Example 125

Project: dms Source File: zone_text_util.py
Function: call
    def __call__(self, zi_data, name=None, reference=None, for_bind=False,
            file=None, no_info_header=False):
        """
        Construct a bind file as a multi-line string, from
        zi_data
        """
        # if zi_data is blank, get out of here...
        if not zi_data:
            return ''
        # Do file/IO house keeping first
        if not file:
            self.file = StringIO()
            return_string = True
        else:
            self.file = file
            return_string = False
        
        # Save bind_p
        self.for_bind = for_bind

        # Set $TTL and $ORIGIN if given
        print('$TTL %s' % zi_data['zone_ttl'], file=self.file)
        if name:
            print('$ORIGIN %s' % name, file=self.file)
        print(file=self.file)

        # Add reference comment if reference given
        zi_id = zi_data.get('zi_id')
        zi_change_by = zi_data.get('change_by')
        zi_ctime = zi_data.get('ctime')
        zi_mtime = zi_data.get('mtime')
        zi_ptime = zi_data.get('ptime')
        if not no_info_header and (reference or name or zi_id):
            # Trailing double line feed for readability
            out = ";\n"
            if name:
                out += "; Zone:       %s\n" % name
            if reference:
                out += "; Reference:  %s\n" % reference
            if zi_change_by:
                out += "; change_by:  %s\n" % zi_change_by
            if zi_id:
                out += "; zi_id:      %s\n" % zi_id
            if zi_ctime:
                out += "; zi_ctime:   %s\n" % zi_ctime
            if zi_mtime:
                out += "; zi_mtime:   %s\n" % zi_mtime
            if zi_ptime:
                out += "; zi_ptime:   %s\n" % zi_ptime
            out += ";\n\n"
            print(out, file=self.file)

        # Index rr_groups, for printing
        rr_groups = {}
        flimflam_gid = '' 
        for rr_group in zi_data['rr_groups']:
            group_tag = rr_group.get('tag')
            group_comment = rr_group.get('comment')
            if group_tag == settings['apex_rr_tag']:
                group_id = group_tag
            elif group_comment:
                group_id = group_comment
            elif group_tag:
                group_id = group_tag
            else:
                group_id = str(flimflam_gid)
                if flimflam_gid == '':
                    flimflam_gid = 0
                flimflam_gid += 1
            rr_groups[group_id] = rr_group
        
        # Print Apex Records if there are any
        rr_group = rr_groups.get(settings['apex_rr_tag'])
        if rr_group:
            self.print_rr_group(rr_group, sort_reverse=True, 
                    reference=reference)
            del rr_groups[settings['apex_rr_tag']]

        # Print the rest, followed by default group
        default_group = rr_groups.pop('', None)
        for rr_group in sorted(rr_groups):
            self.print_rr_group(rr_groups[rr_group])
        if default_group:
            self.print_rr_group(default_group)

        # clean up
        if return_string:
            result = self.file.getvalue()
            self.file.close()
            return result
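
The builder above relies on print()'s file= argument: printing into an io.StringIO assembles a multi-line string without manual '\n' bookkeeping. The technique in isolation:

import io

buf = io.StringIO()
print('$TTL %s' % 86400, file=buf)
print('$ORIGIN %s' % 'example.org.', file=buf)
print(file=buf)  # blank separator line
text = buf.getvalue()  # '$TTL 86400\n$ORIGIN example.org.\n\n'
buf.close()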

Example 126

Project: BiliDan Source File: bilidan.py
def biligrab(url, *, debug=False, verbose=False, media=None, comment=None, cookie=None, quality=None, source=None, keep_fps=False, mpvflags=[], d2aflags={}, fakeip=None):

    url_get_metadata = 'http://api.bilibili.com/view?'
    url_get_comment = 'http://comment.bilibili.com/%(cid)s.xml'
    if source == 'overseas':
        url_get_media = 'http://interface.bilibili.com/v_cdn_play?'
    else:
        url_get_media = 'http://interface.bilibili.com/playurl?'

    def parse_url(url):
        '''Parse a bilibili.com URL

        Return value: (aid, pid)
        '''
        if url.startswith('cid:'):
            try:
                return int(url[4:]), 'cid'
            except ValueError:
                raise ValueError('Invalid CID: %s' % url[4:])
        regex = re.compile('(?:http:/*[^/]+/(?:video/)?)?av(\\d+)(?:/|/index.html|/index_(\\d+).html)?(?:\\?|#|$)')
        regex_match = regex.match(url)
        if not regex_match:
            raise ValueError('Invalid URL: %s' % url)
        aid = regex_match.group(1)
        pid = regex_match.group(2) or '1'
        return aid, pid

    def fetch_video_metadata(aid, pid):
        '''Fetch video metadata

        Arguments: aid, pid

        Return value: {'cid': cid, 'title': title}
        '''
        req_args = {'type': 'json', 'appkey': codecs.decode(APPKEY,'rot13'), 'id': aid, 'page': pid}
        req_args['sign'] = bilibili_hash(req_args)
        _, response = fetch_url(url_get_metadata+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_API, cookie=cookie)
        # A naive fix (judge if it is -404, I choose '-' :)
        if(response[8] == 45):
            req_args = {'type': 'json', 'appkey': codecs.decode(APPKEY,'rot13'), 'id': aid, 'page': 1}
            req_args['sign'] = bilibili_hash(req_args)
            _, response = fetch_url(url_get_metadata+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_API, cookie=cookie)
        try:
            response = dict(json.loads(response.decode('utf-8', 'replace')))
        except (TypeError, ValueError):
            raise ValueError('Can not get \'cid\' from %s' % url)
        if 'error' in response:
            logging.error('Error message: %s' % response.get('error'))
        if 'cid' not in response:
            raise ValueError('Can not get \'cid\' from %s' % url)
        return response

    def get_media_urls(cid, *, feck_you_bishi_mode=False):
        '''Request the URLs of the video

        Arguments: cid

        Return value: [media_urls]
        '''
        if source in {None, 'overseas'}:
            user_agent = USER_AGENT_API if not feck_you_bishi_mode else USER_AGENT_PLAYER
            req_args = {'cid': cid}
            if quality is not None:
                req_args['quality'] = quality
            else:
                req_args['quality'] = None
            _, response = fetch_url(url_get_media+andro_mock(req_args), user_agent=user_agent, cookie=cookie, fakeip=fakeip)
            '''
            media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(response.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
            '''
            json_obj = json.loads(response.decode('utf-8'))
            if json_obj['result'] != 'suee':  # => Not Success
                raise ValueError('Server returned an error: %s (%s)' % (json_obj['result'], json_obj['code']))
            media_urls = [str(i['url']).strip() for i in json_obj['durl']]
            if not feck_you_bishi_mode and media_urls == ['http://static.hdslb.com/error.mp4']:
                logging.error('Detected User-Agent block. Switching to feck-you-bishi mode.')
                return get_media_urls(cid, feck_you_bishi_mode=True)
        elif source == 'html5':
            req_args = {'aid': aid, 'page': pid}
            logging.warning('HTML5 video source is experimental and may not always work.')
            _, response = fetch_url('http://www.bilibili.com/m/html5?'+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_PLAYER)
            response = json.loads(response.decode('utf-8', 'replace'))
            media_urls = [dict.get(response, 'src')]
            if not media_urls[0]:
                media_urls = []
            if not feck_you_bishi_mode and media_urls == ['http://static.hdslb.com/error.mp4']:
                logging.error('Failed to request HTML5 video source. Retrying.')
                return get_media_urls(cid, feck_you_bishi_mode=True)
        elif source == 'flvcd':
            req_args = {'kw': url}
            if quality is not None:
                if quality == 3:
                    req_args['quality'] = 'high'
                elif quality >= 4:
                    req_args['quality'] = 'super'
            _, response = fetch_url('http://www.flvcd.com/parse.php?'+urllib.parse.urlencode(req_args), user_agent=USER_AGENT_PLAYER)
            resp_match = re.search('<input type="hidden" name="inf" value="([^"]+)"', response.decode('gbk', 'replace'))
            if resp_match:
                media_urls = resp_match.group(1).rstrip('|').split('|')
            else:
                media_urls = []
        elif source == 'bilipr':
            req_args = {'cid': cid}
            quality_arg = '1080' if quality is not None and quality >= 4 else '720'
            logging.warning('BilibiliPr video source is experimental and may not always work.')
            resp_obj, response = fetch_url('http://pr.lolly.cc/P%s?%s' % (quality_arg, urllib.parse.urlencode(req_args)), user_agent=USER_AGENT_PLAYER)
            if resp_obj.getheader('Content-Type', '').startswith('text/xml'):
                media_urls = [str(k.wholeText).strip() for i in xml.dom.minidom.parseString(response.decode('utf-8', 'replace')).getElementsByTagName('durl') for j in i.getElementsByTagName('url')[:1] for k in j.childNodes if k.nodeType == 4]
            else:
                media_urls = []
        else:
            assert source in {None, 'overseas', 'html5', 'flvcd', 'bilipr'}
        if len(media_urls) == 0 or media_urls == ['http://static.hdslb.com/error.mp4']:
            raise ValueError('Can not get valid media URLs.')
        return media_urls

    def get_video_size(media_urls):
        '''Determine the resolution of the video

        Arguments: [media_urls]

        Return value: (width, height)
        '''
        try:
            if media_urls[0].startswith('http:') or media_urls[0].startswith('https:'):
                ffprobe_command = ['ffprobe', '-icy', '0', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '-timeout', '60000000', '-user-agent', USER_AGENT_PLAYER, '--', media_urls[0]]
            else:
                ffprobe_command = ['ffprobe', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '--', media_urls[0]]
            log_command(ffprobe_command)
            ffprobe_process = subprocess.Popen(ffprobe_command, stdout=subprocess.PIPE)
            try:
                ffprobe_output = json.loads(ffprobe_process.communicate()[0].decode('utf-8', 'replace'))
            except KeyboardInterrupt:
                logging.warning('Cancelling getting video size, press Ctrl-C again to terminate.')
                ffprobe_process.terminate()
                return 0, 0
            width, height, widthxheight = 0, 0, 0
            for stream in dict.get(ffprobe_output, 'streams') or []:
                if dict.get(stream, 'width')*dict.get(stream, 'height') > widthxheight:
                    width, height = dict.get(stream, 'width'), dict.get(stream, 'height')
            return width, height
        except Exception as e:
            log_or_raise(e, debug=debug)
            return 0, 0

    def convert_comments(cid, video_size):
        '''Convert comments to ASS subtitle format

        Arguments: cid

        Return value: comment_out -> file
        '''
        _, resp_comment = fetch_url(url_get_comment % {'cid': cid}, cookie=cookie)
        comment_in = io.StringIO(resp_comment.decode('utf-8', 'replace'))
        comment_out = tempfile.NamedTemporaryFile(mode='w', encoding='utf-8-sig', newline='\r\n', prefix='tmp-danmaku2ass-', suffix='.ass', delete=False)
        logging.info('Invoking Danmaku2ASS, converting to %s' % comment_out.name)
        d2a_args = dict({'stage_width': video_size[0], 'stage_height': video_size[1], 'font_face': 'SimHei', 'font_size': math.ceil(video_size[1]/21.6), 'text_opacity': 0.8, 'duration_marquee': min(max(6.75*video_size[0]/video_size[1]-4, 3.0), 8.0), 'duration_still': 5.0}, **d2aflags)
        for i, j in ((('stage_width', 'stage_height', 'reserve_blank'), int), (('font_size', 'text_opacity', 'comment_duration', 'duration_still', 'duration_marquee'), float)):
            for k in i:
                if k in d2aflags:
                    d2a_args[k] = j(d2aflags[k])
        try:
            danmaku2ass.Danmaku2ASS(input_files=[comment_in], input_format='Bilibili', output_file=comment_out, **d2a_args)
        except Exception as e:
            log_or_raise(e, debug=debug)
            logging.error('Danmaku2ASS failed, comments are disabled.')
        comment_out.flush()
        comment_out.close()  # Close the temporary file early to fix an issue related to Windows NT file sharing
        return comment_out

    def launch_player(video_metadata, media_urls, comment_out, is_playlist=False, increase_fps=True):
        '''Launch MPV media player

        Arguments: video_metadata, media_urls, comment_out

        Return value: player_exit_code -> int
        '''
        mpv_version_master = tuple(int(i) if i.isdigit() else float('inf') for i in check_env.mpv_version.split('-', 1)[0].split('.'))
        mpv_version_gte_0_10 = mpv_version_master >= (0, 10)
        mpv_version_gte_0_6 = mpv_version_gte_0_10 or mpv_version_master >= (0, 6)
        mpv_version_gte_0_4 = mpv_version_gte_0_6 or mpv_version_master >= (0, 4)
        logging.debug('Compare mpv version: %s %s 0.10' % (check_env.mpv_version, '>=' if mpv_version_gte_0_10 else '<'))
        logging.debug('Compare mpv version: %s %s 0.6' % (check_env.mpv_version, '>=' if mpv_version_gte_0_6 else '<'))
        logging.debug('Compare mpv version: %s %s 0.4' % (check_env.mpv_version, '>=' if mpv_version_gte_0_4 else '<'))
        if increase_fps:  # If hardware decoding (without -copy suffix) is used, do not increase fps
            for i in mpvflags:
                i = i.split('=', 1)
                if 'vdpau' in i or 'vaapi' in i or 'vda' in i:
                    increase_fps = False
                    break
        command_line = ['mpv', '--autofit', '950x540']
        if mpv_version_gte_0_6:
            command_line += ['--cache-file', 'TMP']
        if increase_fps and mpv_version_gte_0_6:  # Drop frames at vo side but not at decoder side to prevent A/V sync issues
            command_line += ['--framedrop', 'vo']
        command_line += ['--http-header-fields', 'User-Agent: '+USER_AGENT_PLAYER.replace(',', '\\,')]
        if mpv_version_gte_0_6:
            if mpv_version_gte_0_10:
                command_line += ['--force-media-title', video_metadata.get('title', url)]
            else:
                command_line += ['--media-title', video_metadata.get('title', url)]
        if is_playlist or len(media_urls) > 1:
            command_line += ['--merge-files']
        if mpv_version_gte_0_4:
            command_line += ['--no-video-aspect', '--sub-ass', '--sub-file', comment_out.name]
        else:
            command_line += ['--no-aspect', '--ass', '--sub', comment_out.name]
        if increase_fps:
            if mpv_version_gte_0_6:
                command_line += ['--vf', 'lavfi="fps=fps=60:round=down"']
            else:  # Versions < 0.6 have an A/V sync related issue
                command_line += ['--vf', 'lavfi="fps=fps=50:round=down"']
        command_line += mpvflags
        if is_playlist:
            command_line += ['--playlist']
        else:
            command_line += ['--']
        command_line += media_urls
        log_command(command_line)
        player_process = subprocess.Popen(command_line)
        try:
            player_process.wait()
        except KeyboardInterrupt:
            logging.info('Terminating media player...')
            try:
                player_process.terminate()
                try:
                    player_process.wait(timeout=2)
                except subprocess.TimeoutExpired:
                    logging.info('Killing media player by force...')
                    player_process.kill()
            except Exception:
                pass
            raise
        return player_process.returncode

    aid, pid = parse_url(url)

    logging.info('Loading video info...')
    if pid != 'cid':
        video_metadata = fetch_video_metadata(aid, pid)
    else:
        video_metadata = {'cid': aid, 'title': url}
    logging.info('Got video cid: %s' % video_metadata['cid'])

    logging.info('Loading video content...')
    if media is None:
        media_urls = get_media_urls(video_metadata['cid'])
    else:
        media_urls = [media]
    logging.info('Got media URLs:'+''.join(('\n      %d: %s' % (i+1, j) for i, j in enumerate(media_urls))))

    logging.info('Determining video resolution...')
    video_size = get_video_size(media_urls)
    logging.info('Video resolution: %sx%s' % video_size)
    if video_size[0] > 0 and video_size[1] > 0:
        video_size = (video_size[0]*1080/video_size[1], 1080)  # Simply fix ASS resolution to 1080p
    else:
        log_or_raise(ValueError('Can not get video size. Comments may be wrongly positioned.'), debug=debug)
        video_size = (1920, 1080)

    logging.info('Loading comments...')
    if comment is None:
        comment_out = convert_comments(video_metadata['cid'], video_size)
    else:
        comment_out = open(comment, 'r')
        comment_out.close()

    logging.info('Launching media player...')
    player_exit_code = launch_player(video_metadata, media_urls, comment_out, increase_fps=not keep_fps)

    if comment is None and player_exit_code == 0:
        os.remove(comment_out.name)

    return player_exit_code
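
In convert_comments() above, io.StringIO wraps a decoded HTTP response so that danmaku2ass, which expects readable text files, can consume an in-memory payload. The same adapter idea reduced to a sketch with a hard-coded payload:

import io

raw = b'<i><d p="0,1">hello</d></i>'  # stand-in for resp_comment
comment_in = io.StringIO(raw.decode('utf-8', 'replace'))
first_line = comment_in.readline()    # reads like an open text file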

Example 127

Project: ansible Source File: test_connection_ssh.py
    def test_plugins_connection_ssh__examine_output(self):
        pc = PlayContext()
        new_stdin = StringIO()

        conn = ssh.Connection(pc, new_stdin)

        conn.check_password_prompt    = MagicMock()
        conn.check_become_success     = MagicMock()
        conn.check_incorrect_password = MagicMock()
        conn.check_missing_password   = MagicMock()

        def _check_password_prompt(line):
            if b'foo' in line:
                return True
            return False

        def _check_become_success(line):
            if b'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' in line:
                return True
            return False

        def _check_incorrect_password(line):
            if b'incorrect password' in line:
                return True
            return False

        def _check_missing_password(line):
            if b'bad password' in line:
                return True
            return False

        conn.check_password_prompt.side_effect    = _check_password_prompt
        conn.check_become_success.side_effect     = _check_become_success
        conn.check_incorrect_password.side_effect = _check_incorrect_password
        conn.check_missing_password.side_effect   = _check_missing_password

        # test examining output for prompt
        conn._flags = dict(
            become_prompt = False,
            become_success = False,
            become_error = False,
            become_nopasswd_error = False,
        )

        pc.prompt = True
        output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nfoo\nline 3\nthis should be the remainder', False)
        self.assertEqual(output, b'line 1\nline 2\nline 3\n')
        self.assertEqual(unprocessed, b'this should be the remainder')
        self.assertTrue(conn._flags['become_prompt'])
        self.assertFalse(conn._flags['become_success'])
        self.assertFalse(conn._flags['become_error'])
        self.assertFalse(conn._flags['become_nopasswd_error'])

        # test examining output for become prompt
        conn._flags = dict(
            become_prompt = False,
            become_success = False,
            become_error = False,
            become_nopasswd_error = False,
        )

        pc.prompt = False
        pc.success_key = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz'
        output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nBECOME-SUCCESS-abcdefghijklmnopqrstuvxyz\nline 3\n', False)
        self.assertEqual(output, b'line 1\nline 2\nline 3\n')
        self.assertEqual(unprocessed, b'')
        self.assertFalse(conn._flags['become_prompt'])
        self.assertTrue(conn._flags['become_success'])
        self.assertFalse(conn._flags['become_error'])
        self.assertFalse(conn._flags['become_nopasswd_error'])

        # test examining output for become failure
        conn._flags = dict(
            become_prompt = False,
            become_success = False,
            become_error = False,
            become_nopasswd_error = False,
        )

        pc.prompt = False
        pc.success_key = None
        output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nincorrect password\n', True)
        self.assertEqual(output, b'line 1\nline 2\nincorrect password\n')
        self.assertEqual(unprocessed, b'')
        self.assertFalse(conn._flags['become_prompt'])
        self.assertFalse(conn._flags['become_success'])
        self.assertTrue(conn._flags['become_error'])
        self.assertFalse(conn._flags['become_nopasswd_error'])

        # test examining output for missing password
        conn._flags = dict(
            become_prompt = False,
            become_success = False,
            become_error = False,
            become_nopasswd_error = False,
        )

        pc.prompt = False
        pc.success_key = None
        output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nbad password\n', True)
        self.assertEqual(output, b'line 1\nbad password\n')
        self.assertEqual(unprocessed, b'')
        self.assertFalse(conn._flags['become_prompt'])
        self.assertFalse(conn._flags['become_success'])
        self.assertFalse(conn._flags['become_error'])
        self.assertTrue(conn._flags['become_nopasswd_error'])
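
Here StringIO() is a throwaway stand-in for stdin, letting the Connection object be constructed in a test without a real terminal. The injection pattern in general form, with a hypothetical Reader class:

import io

class Reader:
    # Hypothetical class that consumes a file-like stdin.
    def __init__(self, stdin):
        self.stdin = stdin

    def first_word(self):
        return self.stdin.readline().split()[0]

fake_stdin = io.StringIO("hello world\n")
assert Reader(fake_stdin).first_word() == "hello"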

Example 128

Project: html5lib-python Source File: test_stream.py
@pytest.mark.parametrize("inp,num",
                         [("\u0000", 0),
                          ("\u0001", 1),
                          ("\u0008", 1),
                          ("\u0009", 0),
                          ("\u000A", 0),
                          ("\u000B", 1),
                          ("\u000C", 0),
                          ("\u000D", 0),
                          ("\u000E", 1),
                          ("\u001F", 1),
                          ("\u0020", 0),
                          ("\u007E", 0),
                          ("\u007F", 1),
                          ("\u009F", 1),
                          ("\u00A0", 0),
                          ("\uFDCF", 0),
                          ("\uFDD0", 1),
                          ("\uFDEF", 1),
                          ("\uFDF0", 0),
                          ("\uFFFD", 0),
                          ("\uFFFE", 1),
                          ("\uFFFF", 1),
                          ("\U0001FFFD", 0),
                          ("\U0001FFFE", 1),
                          ("\U0001FFFF", 1),
                          ("\U0002FFFD", 0),
                          ("\U0002FFFE", 1),
                          ("\U0002FFFF", 1),
                          ("\U0003FFFD", 0),
                          ("\U0003FFFE", 1),
                          ("\U0003FFFF", 1),
                          ("\U0004FFFD", 0),
                          ("\U0004FFFE", 1),
                          ("\U0004FFFF", 1),
                          ("\U0005FFFD", 0),
                          ("\U0005FFFE", 1),
                          ("\U0005FFFF", 1),
                          ("\U0006FFFD", 0),
                          ("\U0006FFFE", 1),
                          ("\U0006FFFF", 1),
                          ("\U0007FFFD", 0),
                          ("\U0007FFFE", 1),
                          ("\U0007FFFF", 1),
                          ("\U0008FFFD", 0),
                          ("\U0008FFFE", 1),
                          ("\U0008FFFF", 1),
                          ("\U0009FFFD", 0),
                          ("\U0009FFFE", 1),
                          ("\U0009FFFF", 1),
                          ("\U000AFFFD", 0),
                          ("\U000AFFFE", 1),
                          ("\U000AFFFF", 1),
                          ("\U000BFFFD", 0),
                          ("\U000BFFFE", 1),
                          ("\U000BFFFF", 1),
                          ("\U000CFFFD", 0),
                          ("\U000CFFFE", 1),
                          ("\U000CFFFF", 1),
                          ("\U000DFFFD", 0),
                          ("\U000DFFFE", 1),
                          ("\U000DFFFF", 1),
                          ("\U000EFFFD", 0),
                          ("\U000EFFFE", 1),
                          ("\U000EFFFF", 1),
                          ("\U000FFFFD", 0),
                          ("\U000FFFFE", 1),
                          ("\U000FFFFF", 1),
                          ("\U0010FFFD", 0),
                          ("\U0010FFFE", 1),
                          ("\U0010FFFF", 1),
                          ("\x01\x01\x01", 3),
                          ("a\x01a\x01a\x01a", 3)])
def test_invalid_codepoints(inp, num):
    stream = HTMLUnicodeInputStream(StringIO(inp))
    for _i in range(len(inp)):
        stream.char()
    assert len(stream.errors) == num
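
The parametrized test feeds StringIO(inp) to html5lib's input stream and pulls one character per iteration. The underlying io.StringIO behaviour the stream class relies on:

import io

stream = io.StringIO("a\x01b")
chars = [stream.read(1) for _ in range(3)]  # ['a', '\x01', 'b']
assert stream.read(1) == ''                 # EOF yields the empty string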

Example 129

Project: btrfs-sxbackup Source File: core.py
    def write_configuration(self, corresponding_location: 'JobLocation'):
        """ Write configuration file to container subvolume 
        :type corresponding_location: JobLocation
        """
        if not self.location_type:
            raise ValueError('missing location type')

        if corresponding_location:
            if not corresponding_location.location_type:
                raise ValueError('missing corresponding location type')

            if self.location_type == corresponding_location.location_type:
                raise ValueError('invalid corresponding location type [%s] for this location [%s]'
                                 % (corresponding_location, self.location_type))

            if self.uuid != corresponding_location.uuid:
                raise ValueError('corresponding location has different uuid [%s != %s]'
                                 % (self.uuid, corresponding_location.uuid))

        location_uuid = self.uuid
        source = None
        source_container = None
        destination = None
        retention = self.retention.expression_text if self.retention else None
        compress = self.compress

        # Set configuration fields to write
        both_remote_or_local = not (
            self.is_remote() ^ (corresponding_location is not None and corresponding_location.is_remote()))

        if self.location_type == JobLocation.TYPE_SOURCE:
            if both_remote_or_local:
                source = self.url.geturl()
                source_container = self.container_subvolume_relpath

            if corresponding_location and (both_remote_or_local or corresponding_location.is_remote()):
                destination = corresponding_location.url.geturl()

        elif self.location_type == JobLocation.TYPE_DESTINATION:
            if both_remote_or_local:
                destination = self.url.geturl()

            if corresponding_location and (both_remote_or_local or corresponding_location.is_remote()):
                source = corresponding_location.url.geturl()
                source_container = corresponding_location.container_subvolume_relpath

        # Configuration to string
        fileobject = io.StringIO()

        parser = ConfigParser()

        section = self.location_type
        parser.add_section(section)

        if location_uuid:
            parser.set(section, self.__KEY_UUID, str(location_uuid))
        if source:
            parser.set(section, self.__KEY_SOURCE, str(source))
        if source_container:
            parser.set(section, self.__KEY_SOURCE_CONTAINER, source_container)
        if destination:
            parser.set(section, self.__KEY_DESTINATION, str(destination))
        if retention:
            parser.set(section, self.__KEY_RETENTION, str(retention))
        if compress:
            parser.set(section, self.__KEY_COMPRESS, str(compress))
        parser.write(fileobject)

        config_str = fileobject.getvalue()

        # Write config file to location directory
        p = subprocess.Popen(self.build_subprocess_args('cat > "%s"' % self.configuration_filename),
                             stdin=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        (out, err) = p.communicate(input=bytes(config_str, 'utf-8'))
        if p.wait():
            raise subprocess.CalledProcessError(returncode=p.returncode, cmd=p.args, output=out)
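
write_configuration() serialises a ConfigParser by writing it to an io.StringIO rather than a real file, then ships the resulting string to a subprocess. The serialisation step on its own:

import io
from configparser import ConfigParser

parser = ConfigParser()
parser.add_section('source')
parser.set('source', 'uuid', '1234')

buf = io.StringIO()
parser.write(buf)
config_str = buf.getvalue()  # '[source]\nuuid = 1234\n\n'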

Example 130

Project: nikola Source File: serve.py
Function: send_head
    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.

        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                # begin no-cache patch
                # For redirects.  With redirects, caching is even worse and can
                # break more.  Especially with 301 Moved Permanently redirects,
                # like this one.
                self.send_header("Cache-Control", "no-cache, no-store, "
                                 "must-revalidate")
                self.send_header("Pragma", "no-cache")
                self.send_header("Expires", "0")
                # end no-cache patch
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None

        filtered_bytes = None
        if ctype == 'text/html':
            # Comment out any <base> to allow local resolution of relative URLs.
            data = f.read().decode('utf8')
            f.close()
            data = re.sub(r'<base\s([^>]*)>', r'<!--base \g<1>-->', data, flags=re.IGNORECASE)
            data = data.encode('utf8')
            f = BytesIO()  # data is bytes after .encode('utf8'); a binary buffer is required here
            f.write(data)
            filtered_bytes = len(data)
            f.seek(0)

        self.send_response(200)
        if ctype.startswith('text/') or ctype.endswith('+xml'):
            self.send_header("Content-Type", "{0}; charset=UTF-8".format(ctype))
        else:
            self.send_header("Content-Type", ctype)
        if os.path.splitext(path)[1] == '.svgz':
            # Special handling for svgz to make it work nice with browsers.
            self.send_header("Content-Encoding", 'gzip')

        if filtered_bytes is None:
            fs = os.fstat(f.fileno())
            self.send_header('Content-Length', str(fs[6]))
        else:
            self.send_header('Content-Length', filtered_bytes)

        # begin no-cache patch
        # For standard requests.
        self.send_header("Cache-Control", "no-cache, no-store, "
                         "must-revalidate")
        self.send_header("Pragma", "no-cache")
        self.send_header("Expires", "0")
        # end no-cache patch
        self.end_headers()
        return f
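
As the in-place fix above indicates, once the filtered HTML is re-encoded the handler is holding bytes, and bytes belong in io.BytesIO; io.StringIO accepts str only. The two buffer types side by side:

import io

data = '<html><body>hi</body></html>'.encode('utf8')
f = io.BytesIO()   # binary buffer for the encoded payload
f.write(data)
f.seek(0)
assert f.read() == data

s = io.StringIO()  # text buffer: s.write(data) would raise TypeError
s.write(data.decode('utf8'))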

Example 131

Project: django-simple-captcha Source File: views.py
def captcha_image(request, key, scale=1):
    try:
        store = CaptchaStore.objects.get(hashkey=key)
    except CaptchaStore.DoesNotExist:
        # HTTP 410 Gone status so that crawlers don't index these expired urls.
        return HttpResponse(status=410)

    text = store.challenge

    if isinstance(settings.CAPTCHA_FONT_PATH, six.string_types):
        fontpath = settings.CAPTCHA_FONT_PATH
    elif isinstance(settings.CAPTCHA_FONT_PATH, (list, tuple)):
        fontpath = random.choice(settings.CAPTCHA_FONT_PATH)
    else:
        raise ImproperlyConfigured('settings.CAPTCHA_FONT_PATH needs to be a path to a font or list of paths to fonts')

    if fontpath.lower().strip().endswith('ttf'):
        font = ImageFont.truetype(fontpath, settings.CAPTCHA_FONT_SIZE * scale)
    else:
        font = ImageFont.load(fontpath)

    if settings.CAPTCHA_IMAGE_SIZE:
        size = settings.CAPTCHA_IMAGE_SIZE
    else:
        size = getsize(font, text)
        size = (size[0] * 2, int(size[1] * 1.4))

    image = makeimg(size)

    try:
        PIL_VERSION = int(NON_DIGITS_RX.sub('', Image.VERSION))
    except Exception:
        PIL_VERSION = 116
    xpos = 2

    charlist = []
    for char in text:
        if char in settings.CAPTCHA_PUNCTUATION and len(charlist) >= 1:
            charlist[-1] += char
        else:
            charlist.append(char)
    for char in charlist:
        fgimage = Image.new('RGB', size, settings.CAPTCHA_FOREGROUND_COLOR)
        charimage = Image.new('L', getsize(font, ' %s ' % char), '#000000')
        chardraw = ImageDraw.Draw(charimage)
        chardraw.text((0, 0), ' %s ' % char, font=font, fill='#ffffff')
        if settings.CAPTCHA_LETTER_ROTATION:
            if PIL_VERSION >= 116:
                charimage = charimage.rotate(random.randrange(*settings.CAPTCHA_LETTER_ROTATION), expand=0, resample=Image.BICUBIC)
            else:
                charimage = charimage.rotate(random.randrange(*settings.CAPTCHA_LETTER_ROTATION), resample=Image.BICUBIC)
        charimage = charimage.crop(charimage.getbbox())
        maskimage = Image.new('L', size)

        maskimage.paste(charimage, (xpos, from_top, xpos + charimage.size[0], from_top + charimage.size[1]))
        size = maskimage.size
        image = Image.composite(fgimage, image, maskimage)
        xpos = xpos + 2 + charimage.size[0]

    if settings.CAPTCHA_IMAGE_SIZE:
        # centering captcha on the image
        tmpimg = makeimg(size)
        tmpimg.paste(image, (int((size[0] - xpos) / 2), int((size[1] - charimage.size[1]) / 2 - from_top)))
        image = tmpimg.crop((0, 0, size[0], size[1]))
    else:
        image = image.crop((0, 0, xpos + 1, size[1]))
    draw = ImageDraw.Draw(image)

    for f in settings.noise_functions():
        draw = f(draw, image)
    for f in settings.filter_functions():
        image = f(image)

    out = StringIO()  # Python 2 idiom; on Python 3, image.save() needs a binary buffer such as io.BytesIO
    image.save(out, "PNG")
    out.seek(0)

    response = HttpResponse(content_type='image/png')
    response.write(out.read())
    response['Content-length'] = out.tell()

    return response

Example 132

Project: ironpython3 Source File: config.py
def listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.

    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().

    Use the ``verify`` argument to verify any bytes received across the wire
    from a client. If specified, it should be a callable which receives a
    single argument - the bytes of configuration data received across the
    network - and it should return either ``None``, to indicate that the
    passed in bytes could not be verified and should be discarded, or a
    byte string which is then passed to the configuration machinery as
    normal. Note that you can return transformed bytes, e.g. by decrypting
    the bytes passed in.
    """
    if not thread: #pragma: no cover
        raise NotImplementedError("listen() needs threading to work")

    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.

        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.

            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    if self.server.verify is not None:
                        chunk = self.server.verify(chunk)
                    if chunk is not None:   # verified, can process
                        chunk = chunk.decode("utf-8")
                        try:
                            import json
                            d = json.loads(chunk)
                            assert isinstance(d, dict)
                            dictConfig(d)
                        except Exception:
                            #Apply new configuration.

                            file = io.StringIO(chunk)
                            try:
                                fileConfig(file)
                            except Exception:
                                traceback.print_exc()
                    if self.server.ready:
                        self.server.ready.set()
            except OSError as e:
                if not isinstance(e.args, tuple):
                    raise
                else:
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise

    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """

        allow_reuse_address = 1

        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None, ready=None, verify=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1
            self.ready = ready
            self.verify = verify

        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()
            self.socket.close()

    class Server(threading.Thread):

        def __init__(self, rcvr, hdlr, port, verify):
            super(Server, self).__init__()
            self.rcvr = rcvr
            self.hdlr = hdlr
            self.port = port
            self.verify = verify
            self.ready = threading.Event()

        def run(self):
            server = self.rcvr(port=self.port, handler=self.hdlr,
                               ready=self.ready,
                               verify=self.verify)
            if self.port == 0:
                self.port = server.server_address[1]
            self.ready.set()
            global _listener
            logging._acquireLock()
            _listener = server
            logging._releaseLock()
            server.serve_until_stopped()

    return Server(ConfigSocketReceiver, ConfigStreamHandler, port, verify)
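
Inside the handler, io.StringIO(chunk) lets fileConfig() parse configuration text received over a socket without touching disk; fileConfig() accepts any file-like object with readline(). A standalone call with a hard-coded config:

import io
import logging.config

config_text = """\
[loggers]
keys=root

[handlers]
keys=console

[formatters]
keys=plain

[logger_root]
level=INFO
handlers=console

[handler_console]
class=StreamHandler
level=INFO
formatter=plain
args=(sys.stderr,)

[formatter_plain]
format=%(levelname)s %(message)s
"""
logging.config.fileConfig(io.StringIO(config_text))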

Example 133

Project: android2po Source File: helpers.py
Function: program
    def program(self, command=None, kwargs={}, expect=None):
        """Run android2po in this project's working directory.

        Return the program output.
        """
        args = ['a2po-test']
        if command:
            args.append(command)
        for k, v in kwargs.items():
            if v is True or not v:
                args.append(k)
            else:
                if not isinstance(v, (list, tuple)):
                    # A tuple may be used to pass the same argument multiple
                    # times with different values.
                    v = [v]
                for w in v:
                    if isinstance(w, (list, tuple)):
                        # This is starting to get messy, but this allows the
                        # caller to generate "--arg val1 val2" by passing as
                        # the dict: {'arg': [['val1', 'val2']]}
                        args.append('%s' % k)
                        args.extend(w)
                    else:
                        # Otherwise, we set a single value, and we use "=",
                        # so that arguments that are defined as nargs='+'
                        # will not capture more than the value "w".
                        args.append("%s=%s" % (k, w))

        try:
            old_cwd = os.getcwd()
        except OSError as exc:
            old_cwd = None
        os.chdir(self.dir)
        # Sometimes we might want to check a certain message was printed
        # out, so in addition to having nose capture the output, we
        # want to as well.
        old_stdout = sys.stdout
        stdout_capture = io.StringIO()
        sys.stdout = Tee(sys.stdout, stdout_capture)
        # argparse likes to write to stderr, let it be handled like
        # normal stdout (i.e. captured by nose as well as us).
        old_stderr = sys.stderr
        sys.stderr = sys.stdout
        try:
            try:
                print("Running: %s" % " ".join(args))
                ret = a2po.main(args)
            except SystemExit as e:
                # argparse likes to raise this if arguments are invalid.
                raise SystemExitCaught('SystemExit raised by program: %s' % e)
            else:
                if expect is not None:
                    if ret != expect:
                        raise ValueError(
                            'Program returned code %d, expected %d' % (
                                ret, expect))
                elif ret:
                    raise NonZeroReturned('Program returned non-zero: %d' % ret)
                return stdout_capture.getvalue()
        finally:
            if old_cwd is not None:
                os.chdir(old_cwd)
            sys.stdout = old_stdout
            sys.stderr = old_stderr
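
The Tee object used above is not shown in the snippet; a minimal version that duplicates writes to both the real stream and an io.StringIO capture might look like this (an illustrative sketch, not android2po's actual class):

import io
import sys

class Tee:
    def __init__(self, *streams):
        self.streams = streams

    def write(self, text):
        for s in self.streams:
            s.write(text)

    def flush(self):
        for s in self.streams:
            s.flush()

capture = io.StringIO()
sys.stdout = Tee(sys.__stdout__, capture)
try:
    print("goes to the terminal and the buffer")
finally:
    sys.stdout = sys.__stdout__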

Example 134

Project: portage-funtoo Source File: EbuildFetcher.py
	def already_fetched(self, settings):
		"""
		Returns True if all files already exist locally and have correct
		digests, otherwise return False. When returning True, appropriate
		digest checking messages are produced for display and/or logging.
		When returning False, no messages are produced, since we assume
		that a fetcher process will later be executed in order to produce
		such messages. This will raise InvalidDependString if SRC_URI is
		invalid.
		"""

		uri_map = self._get_uri_map()
		if not uri_map:
			return True

		digests = self._get_digests()
		distdir = settings["DISTDIR"]
		allow_missing = self._get_manifest().allow_missing

		for filename in uri_map:
			# Use stat rather than lstat since fetch() creates
			# symlinks when PORTAGE_RO_DISTDIRS is used.
			try:
				st = os.stat(os.path.join(distdir, filename))
			except OSError:
				return False
			if st.st_size == 0:
				return False
			expected_size = digests.get(filename, {}).get('size')
			if expected_size is None:
				continue
			if st.st_size != expected_size:
				return False

		hash_filter = _hash_filter(settings.get("PORTAGE_CHECKSUM_FILTER", ""))
		if hash_filter.transparent:
			hash_filter = None
		stdout_orig = sys.stdout
		stderr_orig = sys.stderr
		global_havecolor = portage.output.havecolor
		out = io.StringIO()
		eout = portage.output.EOutput()
		eout.quiet = settings.get("PORTAGE_QUIET") == "1"
		success = True
		try:
			sys.stdout = out
			sys.stderr = out
			if portage.output.havecolor:
				portage.output.havecolor = not self.background

			for filename in uri_map:
				mydigests = digests.get(filename)
				if mydigests is None:
					if not allow_missing:
						success = False
						break
					continue
				ok, st = _check_distfile(os.path.join(distdir, filename),
					mydigests, eout, show_errors=False, hash_filter=hash_filter)
				if not ok:
					success = False
					break
		except portage.exception.FileNotFound:
			# A file disappeared unexpectedly.
			return False
		finally:
			sys.stdout = stdout_orig
			sys.stderr = stderr_orig
			portage.output.havecolor = global_havecolor

		if success:
			# Messages are only produced when returning successfully; on
			# failure we assume a fetcher process will later be executed
			# and produce such messages itself.
			msg = out.getvalue()
			if msg:
				self.scheduler.output(msg, log_path=self.logfile)

		return success
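
already_fetched() routes both sys.stdout and sys.stderr into one io.StringIO so digest-check output can be withheld until the result is known. contextlib expresses the same redirection declaratively:

import io
import sys
from contextlib import redirect_stdout, redirect_stderr

out = io.StringIO()
with redirect_stdout(out), redirect_stderr(out):
    print("to stdout")
    print("to stderr", file=sys.stderr)
msg = out.getvalue()  # both lines, in write order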

Example 135

Project: openmc Source File: reaction.py
Function: get_products
def _get_products(ev, mt):
    """Generate products from MF=6 in an ENDF evaluation

    Parameters
    ----------
    ev : openmc.data.endf.Evaluation
        ENDF evaluation to read from
    mt : int
        The MT value of the reaction to get products for

    Returns
    -------
    products : list of openmc.data.Product
        Products of the reaction

    """
    file_obj = StringIO(ev.section[6, mt])

    # Read HEAD record
    items = get_head_record(file_obj)
    reference_frame = {1: 'laboratory', 2: 'center-of-mass',
                       3: 'light-heavy'}[items[3]]
    n_products = items[4]

    products = []
    for i in range(n_products):
        # Get yield for this product
        params, yield_ = get_tab1_record(file_obj)

        za = params[0]
        awr = params[1]
        lip = params[2]
        law = params[3]

        if za == 0:
            p = Product('photon')
        elif za == 1:
            p = Product('neutron')
        elif za == 1000:
            p = Product('electron')
        else:
            z = za // 1000
            a = za % 1000
            p = Product('{}{}'.format(ATOMIC_SYMBOL[z], a))

        p.yield_ = yield_

        """
        # Set reference frame
        if reference_frame == 'laboratory':
            p.center_of_mass = False
        elif reference_frame == 'center-of-mass':
            p.center_of_mass = True
        elif reference_frame == 'light-heavy':
            p.center_of_mass = (awr <= 4.0)
        """

        if law == 0:
            # No distribution given
            pass
        elif law == 1:
            # Continuum energy-angle distribution

            # Peek ahead to determine type of distribution
            position = file_obj.tell()
            params = get_cont_record(file_obj)
            file_obj.seek(position)

            lang = params[2]
            if lang == 1:
                p.distribution = [CorrelatedAngleEnergy.from_endf(file_obj)]
            elif lang == 2:
                p.distribution = [KalbachMann.from_endf(file_obj)]

        elif law == 2:
            # Discrete two-body scattering
            params, tab2 = get_tab2_record(file_obj)
            ne = params[5]
            energy = np.zeros(ne)
            mu = []
            for i in range(ne):
                items, values = get_list_record(file_obj)
                energy[i] = items[1]
                lang = items[2]
                if lang == 0:
                    mu.append(Legendre(values))
                elif lang == 12:
                    mu.append(Tabular(values[::2], values[1::2]))
                elif lang == 14:
                    mu.append(Tabular(values[::2], values[1::2],
                                      'log-linear'))

            angle_dist = AngleDistribution(energy, mu)
            dist = UncorrelatedAngleEnergy(angle_dist)
            p.distribution = [dist]
            # TODO: Add level-inelastic info?

        elif law == 3:
            # Isotropic discrete emission
            p.distribution = [UncorrelatedAngleEnergy()]
            # TODO: Add level-inelastic info?

        elif law == 4:
            # Discrete two-body recoil
            pass

        elif law == 5:
            # Charged particle elastic scattering
            pass

        elif law == 6:
            # N-body phase-space distribution
            p.distribution = [NBodyPhaseSpace.from_endf(file_obj)]

        elif law == 7:
            # Laboratory energy-angle distribution
            p.distribution = [LaboratoryAngleEnergy.from_endf(file_obj)]

        products.append(p)

    return products
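
The law == 1 branch peeks at the next record by saving the position with tell() and rewinding with seek(); io.StringIO supports the full seek/tell protocol of a regular text file:

import io

f = io.StringIO("HEAD record\nCONT record\n")
position = f.tell()
peeked = f.readline()  # 'HEAD record\n'
f.seek(position)       # rewind: the next read sees the same line again
assert f.readline() == peeked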

Example 136

Project: anaconda-client Source File: requests_ext.py
def encode_multipart_formdata_stream(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, value) or (key, value, MIME type)
        field tuples.  The key is treated as the field name, and the value as
        the body of the form-data bytes. If the value is a tuple of two
        elements, then the first element is treated as the filename of the
        form-data section and a suitable MIME type is guessed based on the
        filename. If the value is a tuple of three elements, then the third
        element is treated as an explicit MIME type of the form-data section.

        Field names and filenames must be unicode.

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.
    """
    body = []
    def body_write(item):
        if isinstance(item, bytes):
            item = BytesIO(item)
        elif isinstance(item, (str, unicode)):
            item = StringIO(item)
        body.append(item)

    body_write_encode = lambda item: body.append(BytesIO(item.encode('utf-8')))

    if boundary is None:
        boundary = choose_boundary()

    for fieldname, value in iter_fields(fields):
        body_write_encode('--%s\r\n' % (boundary))

        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                filename, data = value
                from mimetypes import guess_type
                content_type, _ = guess_type(filename)
                if content_type is None:
                    content_type = 'application/octet-stream'
            body_write_encode('Content-Disposition: form-data; name="%s"; '
                              'filename="%s"\r\n' % (fieldname, filename))
            body_write_encode('Content-Type: %s\r\n\r\n' %
                              (content_type,))
        else:
            data = value
            body_write_encode('Content-Disposition: form-data; name="%s"\r\n'
                               % (fieldname))
            body_write(b'\r\n')

        if isinstance(data, (int, long)):
            data = str(data)  # Backwards compatibility

        if isinstance(data, six.text_type):
            body_write_encode(data)
        else:
            body_write(data)

        body_write(b'\r\n')

    body_write_encode('--%s--\r\n' % (boundary))

    content_type = 'multipart/form-data; boundary=%s' % (boundary)

    return body, content_type
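
The encoder builds the multipart body as a list of small in-memory streams rather than one big string, so callers can drain it chunk by chunk. Note the snippet is Python 2 era (unicode, long); under Python 3 every segment must be bytes. Draining such a list, sketched with io.BytesIO only:

import io

parts = [io.BytesIO(b'--boundary\r\n'),
         io.BytesIO('name=value'.encode('utf-8')),
         io.BytesIO(b'\r\n--boundary--\r\n')]
payload = b''.join(part.read() for part in parts)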

Example 137

Project: Arelle Source File: CntlrQuickBooks.py
def processQbResponse(qbRequest, responseXml):
    from arelle import ModelXbrl, XbrlConst
    from arelle.ModelValue import qname
    ticket = qbRequest["ticket"]
    qbRequestStatus[ticket] = _("Generating XBRL-GL from QuickBooks response")
    qbReport = qbRequest["request"]
    xbrlFile = qbRequest["xbrlFile"]
    fromDate = qbRequest["fromDate"]
    toDate = qbRequest["toDate"]
    strHCPResponse = qbRequest.get("strHCPResponse", "")
    
    # uncomment to dump out QB responses
    '''
    with open("c:/temp/test.xml", "w") as fh:
        fh.write(responseXml)
    with open("c:/temp/testC.xml", "w") as fh:
        fh.write(strHCPResponse)
    # qb responses dump
    '''
    
    companyQbDoc = etree.parse(io.StringIO(initial_value=strHCPResponse))
    responseQbDoc = etree.parse(io.StringIO(initial_value=responseXml))
    # columns table
    colTypeId = {}
    colIdType = {}
    for colDescElt in responseQbDoc.iter("ColDesc"):
        colTypeElt = colDescElt.find("ColType")
        if colTypeElt is not None:
            colID = colDescElt.get("colID")
            colType = colTypeElt.text
            if colType == "Amount": # check if there's a credit or debit colTitle
                for colTitleElt in colDescElt.iter("ColTitle"):
                    title = colTitleElt.get("value")
                    if title in ("Credit", "Debit"):
                        colType = title
                        break
            colTypeId[colType] = colID
            colIdType[colID] = colType
    
    # open new result instance document

    # load GL palette file (no instance)
    instance = cntlr.modelManager.load("http://www.xbrl.org/taxonomy/int/gl/2006-10-25/plt/case-c-b-m-u-t/gl-plt-2006-10-25.xsd")
    if xbrlFile is None:
        xbrlFile = "sampleInstance.xbrl"
        saveInstance = False
    else:
        saveInstance = True
    instance.createInstance(xbrlFile) # creates an instance as this modelXbrl's entry point
    newCntx = instance.createContext("http://www.xbrl.org/xbrlgl/sample", "SAMPLE", 
                  "instant", None, datetime.date.today() + datetime.timedelta(1), # today midnight
                  None, {}, [], [], afterSibling=ModelXbrl.AUTO_LOCATE_ELEMENT)
    
    monetaryUnit = qname(XbrlConst.iso4217, "iso4217:USD")
    newUnit = instance.createUnit([monetaryUnit],[], afterSibling=ModelXbrl.AUTO_LOCATE_ELEMENT)
    
    nonNumAttr = [("contextRef", newCntx.id)]
    monetaryAttr = [("contextRef", newCntx.id), ("unitRef", newUnit.id), ("decimals", "2")]

    isoLanguage = qname("{http://www.xbrl.org/2005/iso639}iso639:en")
    
    # root of GL is accounting entries tuple
    xbrlElt = instance.modelDocument.xmlRootElement
    
    '''The container for XBRL GL, accountingEntries, is not the root of an XBRL GL file - the root, 
    as with all XBRL files, is xbrl. This means that a single XBRL GL file can store one or more 
    virtual XBRL GL files, through one or more accountingEntries structures with data inside. 
    The primary key to understanding an XBRL GL file is the entriesType. A single physical XBRL GL 
    file can have multiple accountingEntries structures to represent both transactions and 
    master files; the differences are signified by the appropriate entriesType enumerated values.'''
    accountingEntries = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:accountingEntries"))
    
    # Because entriesType is strongly suggested, documentInfo will be required
    docInfo = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:documentInfo"), parent=accountingEntries)
    # This field, entriesType, provides the automated guidance on the purpose of the XBRL GL information.
    instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entriesType"), parent=docInfo, attributes=nonNumAttr, 
                        text=glEntriesType[qbReport])
    '''Like a serial number, this field, uniqueID, provides a place to uniquely identify/track 
    a series of entries. It is less relevant for ad-hoc reports. XBRL GL provides for later 
    correction through replacement or augmentation of transferred information.'''
    instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:uniqueID"), parent=docInfo, attributes=nonNumAttr, 
                        text="001")
    instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:language"), parent=docInfo, attributes=nonNumAttr, 
                        text=XmlUtil.addQnameValue(xbrlElt, isoLanguage))
    '''The date associated with the creation of the data reflected within the associated 
    accountingEntries section. Somewhat like a "printed date" on a paper report'''
    instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:creationDate"), parent=docInfo, attributes=nonNumAttr, 
                        text=str(datetime.date.today()))
    instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:periodCoveredStart"), parent=docInfo, attributes=nonNumAttr, 
                        text=fromDate)
    instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:periodCoveredEnd"), parent=docInfo, attributes=nonNumAttr, 
                        text=toDate)
    instance.createFact(qname("{http://www.xbrl.org/int/gl/bus/2006-10-25}gl-bus:sourceApplication"), parent=docInfo, attributes=nonNumAttr, 
                        text=docEltText(companyQbDoc, "ProductName","QuickBooks (version not known)"))
    instance.createFact(qname("{http://www.xbrl.org/int/gl/muc/2006-10-25}gl-muc:defaultCurrency"), parent=docInfo, attributes=nonNumAttr, 
                        text=XmlUtil.addQnameValue(xbrlElt, monetaryUnit))
    
    '''Typically, an export from an accounting system does not carry with it information 
    specifically about the company. However, the name of the company would be a very good 
    thing to include with the file, making the entityInformation tuple necessary.'''
    entityInfo = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entityInformation"), parent=accountingEntries)
    '''The name of the company would be a very good thing to include with the file; 
    this structure and its content are where that would be stored.'''
    orgIds = instance.createFact(qname("{http://www.xbrl.org/int/gl/bus/2006-10-25}gl-bus:organizationIdentifiers"), parent=entityInfo)
    instance.createFact(qname("{http://www.xbrl.org/int/gl/bus/2006-10-25}gl-bus:organizationIdentifier"), parent=orgIds, attributes=nonNumAttr, 
                        text=docEltText(companyQbDoc, "CompanyName"))
    instance.createFact(qname("{http://www.xbrl.org/int/gl/bus/2006-10-25}gl-bus:organizationDescription"), parent=orgIds, attributes=nonNumAttr, 
                        text=docEltText(companyQbDoc, "LegalCompanyName"))
    
    if qbReport == "trialBalance":
        qbTxnType = "trialbalance"
    else:
        qbTxnType = None
    qbTxnNumber = None
    qbDate = None
    qbRefNumber = None
    isFirst = True
    entryNumber = 1
    lineNumber = 1

    for dataRowElt in responseQbDoc.iter("DataRow"):
        cols = dict((colIdType[colElt.get("colID")], colElt.get("value")) for colElt in dataRowElt.iter("ColData"))
        if qbReport == "trialBalance" and "Label" in cols:
            cols["SplitAccount"] = cols["Label"]
            
        hasRowDataAccount = False
        for rowDataElt in dataRowElt.iter("RowData"):
            rowType = rowDataElt.get("rowType")
            if rowType == "account":
                hasRowDataAccount = True
                if "SplitAccount" not in cols:
                    cols["SplitAccount"] = rowDataElt.get("value")
        if qbReport == "trialBalance" and not hasRowDataAccount:
            continue  # skip total lines or others without account information
        elif qbReport in ("generalLedger", "journal"):
            if "TxnType" not in cols:
                continue  # not a reportable entry

        # entry header fields only on new item that generates an entry header
        if "TxnType" in cols:
            qbTxnType = cols["TxnType"]
        if "TxnNumber" in cols:
            qbTxnNumber = cols["TxnNumber"]
        if "Date" in cols:
            qbDate = cols["Date"]
        if "RefNumber" in cols:
            qbRefNumber = cols["RefNumber"]
        # entry details provided on every entry
        qbName = cols.get("Name")
        qbMemo = cols.get("Memo")
        qbAccount = cols.get("SplitAccount")
        qbAmount = cols.get("Amount")
        qbDebitAmount = cols.get("Debit")
        qbCreditAmount = cols.get("Credit")
        runningBalance = cols.get("RunningBalance")
        
        if qbAmount is not None:
            drCrCode = None
            amt = qbAmount
        elif qbDebitAmount is not None:
            drCrCode = "D"
            amt = qbDebitAmount
        elif qbCreditAmount is not None:
            drCrCode = "C"
            amt = qbCreditAmount
        else:
            # no amount, skip this transaction
            continue
        
        if isFirst or qbTxnNumber:
            '''Journal entries require entry in entryHeader and entryDetail. 
            Few files can be represented using only documentInfo and entityInformation sections, 
            but it is certainly possible.'''
            entryHdr = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entryHeader"), parent=accountingEntries)
            #instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:enteredBy"), parent=entryHdr, attributes=nonNumAttr, text="")
            instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:enteredDate"), parent=entryHdr, attributes=nonNumAttr, 
                                text=str(datetime.date.today()))
            '''This is an enumerated entry that ties the source journal from the reporting 
            organization to a fixed list that helps in data interchange.'''
            instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:sourceJournalID"), parent=entryHdr, attributes=nonNumAttr, 
                                text="gj")
            '''Since sourceJournalID is enumerated (you must pick one of the entries already 
            identified within XBRL GL), sourceJournalDescription lets you capture the actual 
            code or term used to describe the source journal by the organization.'''
            # instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:sourceJournalDescription"), parent=entryHdr, attributes=nonNumAttr, text="JE")
            '''An enumerated field to differentiate between details that represent actual accounting 
            entries - as opposed to entries for budget purposes, planning purposes, or other entries 
            that may not contribute to the financial statements.'''
            instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entryType"), parent=entryHdr, attributes=nonNumAttr, 
                                text="standard")
            '''When capturing journal entries, you have a series of debits and credits that (normally) 
            add up to zero. The hierarchical nature of XBRL GL keeps the entry detail lines associated 
            with the entry header by a parent-child relationship. The unique identifier of each entry 
            is entered here.'''
            instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entryNumber"), parent=entryHdr, attributes=nonNumAttr, 
                                text=str(entryNumber))
            entryNumber += 1
            # The reason for making an entry goes here.
            if qbRefNumber:
                instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entryComment"), parent=entryHdr, attributes=nonNumAttr, 
                                    text=qbRefNumber)
                
        '''Individual lines of journal entries will normally require their own entryDetail section - 
        one primary amount per entryDetail line. However, you can list different accounts within 
        the same entryDetail line that are associated with that amount. For example, if you 
        capitalize for US GAAP and expense for IFRS'''
        entryDetail = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:entryDetail"), parent=entryHdr)
        # A unique identifier for each entry detail line within an entry header, this should at the least be a counter.
        instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:lineNumber"), parent=entryDetail, attributes=nonNumAttr, 
                            text=str(lineNumber))
        lineNumber += 1
    
        '''If account information is represented elsewhere or as a master file, some of the 
        fields below would not need to be here (signified by *)'''
        account = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:account"), parent=entryDetail)
        '''The account number is the basis for posting journal entries. In some cases, 
        accounting systems used by small organizations do not use account numbers/codes, 
        but only use a descriptive name for the account.'''
        # QB does not have account numbers
        # instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:accountMainID"), parent=account, attributes=nonNumAttr, text="10100")
        '''In most cases, the description is given to help a human reader; the accountMainID would 
        be sufficient for data exchange purposes. As noted previously, some implementations use the 
        description as the primary identifier of the account.'''
        if qbAccount:
            instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:accountMainDescription"), parent=account, attributes=nonNumAttr, 
                                text=qbAccount)
        '''Accounts serve many purposes, and in a large company using more sophisticated software, 
        the company may wish to record the account used for the original entry and a separate 
        consolidating account. The Japanese system may require a counterbalancing account for 
        each line item. And an entry may be recorded differently for US GAAP, IFRS and other purposes. 
        This code is an enumerated code to help identify accounts for those purposes.'''
        instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:accountPurposeCode"), parent=account, attributes=nonNumAttr, 
                            text="usgaap")
        '''In an international environment, the "chart of accounts" will include not only 
        traditional accounts, like Cash, Accounts Payable/Due to Creditors or Retained Earnings, 
        but also extensions to some of the accounts. Accounts Payable may be extended to 
        include the creditors/vendors themselves. Therefore, in XBRL GL, accounts can be 
        specifically identified as the "traditional" account, or to identify a customer, 
        vendor, employee, bank, job or fixed asset. While this may overlap with the customers, 
        vendors and employees of the identifier structure, fixed-assets in the measurable
        structure, jobs in the jobInfo structure and other representations, they can also be 
        represented here as appropriate to the jurisdiction.'''
        instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:accountType"), parent=account, attributes=nonNumAttr, text="account")
    
        '''What is a journal entry without a (monetary) amount? While XBRL GL may usher in journal 
        entries that also incorporate quantities, to reflect the detail of business metrics, the 
        (monetary) amount is another key and obvious field. XBRL GL has been designed to reflect 
        how popular accounting systems store amounts - some combination of a signed amount (e.g., 5, -10), 
        a separate sign (entered into signOfAmount) and a separate place to indicate the number is 
        associated with a debit or credit (debitCreditCode).'''
        instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:amount"), parent=entryDetail, attributes=monetaryAttr, 
                            text=amt)
        '''Depending on the originating system, this field may contain whether the amount is 
        associated with a debit or credit. Interpreting the number correctly for import requires 
        an understanding of the three related amount fields - amount, debitCreditCode and sign of amount.'''
        if drCrCode:
            instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:debitCreditCode"), parent=entryDetail, attributes=nonNumAttr, 
                                text=drCrCode)
        '''Depending on the originating system, this field may contain whether the amount is 
        signed (+ or -) separately from the amount field itself. Interpreting the number correctly 
        for import requires an understanding of the three related amount fields - amount, 
        debitCreditCode and sign of amount.'''
        # instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:signOfAmount"), parent=entryDetail, attributes=nonNumAttr, text="+")
        # This date is the accounting significance date, not the date that entries were actually entered or posted to the system.
        if qbDate:
            instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:postingDate"), parent=entryDetail, attributes=nonNumAttr, 
                                text=qbDate)

        if qbName or qbMemo:    
            identRef = instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:identifierReference"), parent=entryDetail)
            if qbMemo:
                instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:identifierCode"), parent=identRef, attributes=nonNumAttr, 
                                    text=qbMemo)
            if qbName:
                instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:identifierDescription"), parent=identRef, attributes=nonNumAttr, 
                                    text=qbName)
            #instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:identifierType"), parent=identRef, attributes=nonNumAttr, 
            #                    text="V")
    
        if qbReport != "trialBalance":
            if qbTxnType: # not exactly same enumerations as expected by QB
                cleanedQbTxnType = qbTxnType.replace(" ","").lower()
                glDocType = qbTxnTypeToGL.get(cleanedQbTxnType) # try table lookup
                if glDocType is None: # not in table
                    if cleanedQbTxnType.endswith("check"): # didn't convert, probably should be a check
                        glDocType = "check"
                    # TBD add more QB transactions here as they are discovered and not in table
                    else:
                        glDocType = qbTxnType # if all else fails pass through QB TxnType, it will fail GL validation and be noticed!
                instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:documentType"), parent=entryDetail, attributes=nonNumAttr, 
                                    text=glDocType)
        
            '''This enumerated field is used to specifically state whether the entries have been 
            posted to the originating system or not.'''
            instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:postingStatus"), parent=entryDetail, attributes=nonNumAttr, 
                                text="posted")
            # A comment at the individual entry detail level.
            # instance.createFact(qname("{http://www.xbrl.org/int/gl/cor/2006-10-25}gl-cor:detailComment"), parent=entryDetail, attributes=nonNumAttr, text="Comment...")
        
        isFirst = False
    
    if saveInstance:
        qbRequestStatus[ticket] = _("Saving XBRL-GL instance")
        instance.saveInstance()
    qbRequestStatus[ticket] = _("Done")
    # TBD resolve errors
    instance.errors = []  # TBD fix this
    xbrlInstances[ticket] = instance.uuid
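
A minimal sketch of the io.StringIO pattern above: the QuickBooks response arrives as a string, and wrapping it in io.StringIO lets an XML parser consume it as if it were a file. The sample XML below is fabricated, and stdlib ElementTree stands in for the lxml.etree used by the example.

import io
import xml.etree.ElementTree as ET

response_xml = "<QBXML><ColDesc colID='1'><ColType>Amount</ColType></ColDesc></QBXML>"
doc = ET.parse(io.StringIO(response_xml))  # parse straight from memory, no temp file
for col_desc in doc.iter("ColDesc"):
    print(col_desc.get("colID"), col_desc.findtext("ColType"))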

Example 138

Project: meld Source File: meldapp.py
    def parse_args(self, command_line):
        usages = [
            ("", _("Start with an empty window")),
            ("<%s|%s>" % (_("file"), _("folder")),
             _("Start a version control comparison")),
            ("<%s> <%s> [<%s>]" % ((_("file"),) * 3),
             _("Start a 2- or 3-way file comparison")),
            ("<%s> <%s> [<%s>]" % ((_("folder"),) * 3),
             _("Start a 2- or 3-way folder comparison")),
        ]
        pad_args_fmt = "%-" + str(max([len(s[0]) for s in usages])) + "s %s"
        usage_lines = ["  %prog " + pad_args_fmt % u for u in usages]
        usage = "\n" + "\n".join(usage_lines)

        class GLibFriendlyOptionParser(optparse.OptionParser):

            def __init__(self, command_line, *args, **kwargs):
                self.command_line = command_line
                self.should_exit = False
                self.output = io.StringIO()
                self.exit_status = 0
                optparse.OptionParser.__init__(self, *args, **kwargs)

            def exit(self, status=0, msg=None):
                self.should_exit = True
                # FIXME: This is... let's say... an unsupported method. Let's
                # be circumspect about the likelihood of this working.
                try:
                    self.command_line.do_print_literal(
                        self.command_line, self.output.getvalue())
                except:
                    print(self.output.getvalue())
                self.exit_status = status

            def print_usage(self, file=None):
                if self.usage:
                    print(self.get_usage(), file=self.output)

            def print_version(self, file=None):
                if self.version:
                    print(self.get_version(), file=self.output)

            def print_help(self, file=None):
                print(self.format_help(), file=self.output)

            def error(self, msg):
                self.local_error(msg)
                raise ValueError()

            def local_error(self, msg):
                self.print_usage()
                error_string = _("Error: %s\n") % msg
                print(error_string, file=self.output)
                self.exit(2)

        parser = GLibFriendlyOptionParser(
            command_line=command_line,
            usage=usage,
            description=_("Meld is a file and directory comparison tool."),
            version="%prog " + meld.conf.__version__)
        parser.add_option(
            "-L", "--label", action="append", default=[],
            help=_("Set label to use instead of file name"))
        parser.add_option(
            "-n", "--newtab", action="store_true", default=False,
            help=_("Open a new tab in an already running instance"))
        parser.add_option(
            "-a", "--auto-compare", action="store_true", default=False,
            help=_("Automatically compare all differing files on startup"))
        parser.add_option(
            "-u", "--unified", action="store_true",
            help=_("Ignored for compatibility"))
        parser.add_option(
            "-o", "--output", action="store", type="string",
            dest="outfile", default=None,
            help=_("Set the target file for saving a merge result"))
        parser.add_option(
            "--auto-merge", None, action="store_true", default=False,
            help=_("Automatically merge files"))
        parser.add_option(
            "", "--comparison-file", action="store", type="string",
            dest="comparison_file", default=None,
            help=_("Load a saved comparison from a Meld comparison file"))
        parser.add_option(
            "", "--diff", action="callback", callback=self.diff_files_callback,
            dest="diff", default=[],
            help=_("Create a diff tab for the supplied files or folders"))

        def cleanup():
            if not command_line.get_is_remote():
                self.quit()
            parser.command_line = None

        rawargs = command_line.get_arguments()[1:]
        try:
            options, args = parser.parse_args(rawargs)
        except ValueError:
            # Thrown to avert further parsing when we've hit an error, because
            # of our weird when-to-exit issues.
            pass

        if parser.should_exit:
            cleanup()
            return parser.exit_status

        if len(args) > 3:
            parser.local_error(_("too many arguments (wanted 0-3, got %d)") %
                               len(args))
        elif options.auto_merge and len(args) < 3:
            parser.local_error(_("can't auto-merge less than 3 files"))
        elif options.auto_merge and any([os.path.isdir(f) for f in args]):
            parser.local_error(_("can't auto-merge directories"))

        if parser.should_exit:
            cleanup()
            return parser.exit_status

        if options.comparison_file or (len(args) == 1 and
                                       args[0].endswith(".meldcmp")):
            path = options.comparison_file or args[0]
            comparison_file_path = os.path.expanduser(path)
            gio_file = Gio.File.new_for_path(comparison_file_path)
            try:
                tab = self.get_meld_window().append_recent(gio_file.get_uri())
            except (IOError, ValueError):
                parser.local_error(_("Error reading saved comparison file"))
            if parser.should_exit:
                cleanup()
                return parser.exit_status
            return tab

        def make_file_from_command_line(arg):
            f = command_line.create_file_for_arg(arg)
            if not f.query_exists(cancellable=None):
                # May be a relative path with ':', misinterpreted as a URI
                cwd = Gio.File.new_for_path(command_line.get_cwd())
                relative = Gio.File.resolve_relative_path(cwd, arg)
                if relative.query_exists(cancellable=None):
                    return relative
                # Return the original arg for a better error message
            return f

        tab = None
        error = None
        comparisons = [args] + options.diff
        options.newtab = options.newtab or not command_line.get_is_remote()
        for i, paths in enumerate(comparisons):
            files = [make_file_from_command_line(p) for p in paths]
            auto_merge = options.auto_merge and i == 0
            try:
                for p, f in zip(paths, files):
                    if f.get_path() is None:
                        raise ValueError(_("invalid path or URI \"%s\"") % p)
                tab = self.open_files(
                    files, auto_compare=options.auto_compare,
                    auto_merge=auto_merge, new_tab=options.newtab,
                    focus=i == 0)
            except ValueError as err:
                error = err
            else:
                if i > 0:
                    continue

                if options.label:
                    tab.set_labels(options.label)

                if options.outfile and isinstance(tab, filediff.FileDiff):
                    outfile = make_file_from_command_line(options.outfile)
                    tab.set_merge_output_file(outfile.get_path())

        if error:
            log.debug("Couldn't open comparison: %s", error)
            if not tab:
                parser.local_error(error)
            else:
                print(error)

        if parser.should_exit:
            cleanup()
            return parser.exit_status

        parser.command_line = None
        return tab if len(comparisons) == 1 else None
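
The io.StringIO trick here is redirecting optparse output (usage, help, errors) into an in-memory buffer so it can be forwarded over the GLib command line instead of printed directly. A minimal sketch of that redirection, with a made-up program name and a single option borrowed from the example:

import io
import optparse

buffer = io.StringIO()
parser = optparse.OptionParser(prog="demo")
parser.add_option("-n", "--newtab", action="store_true", default=False,
                  help="Open a new tab in an already running instance")
parser.print_help(file=buffer)  # optparse writes to any file-like object
print(buffer.getvalue())        # forward the captured text wherever needed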

Example 139

Project: Arelle Source File: saveHtmlEBAtables.py
def generateHtmlEbaTablesetFiles(dts, indexFile, lang="en"):
    try:
        import os, io
        from arelle import Version, XbrlConst, XmlUtil
        from arelle.ViewFileRenderedGrid import viewRenderedGrid
        from arelle.ModelRenderingObject import ModelEuTable, ModelTable
        
        numTableFiles = 0

        file = io.StringIO('''
<html xmlns="http://www.w3.org/1999/xhtml">
<head id="Left">
  <link type="text/css" rel="stylesheet" href="http://arelle.org/files/EBA/style20121210/eba.css" /> 
</head>
<body class="LTR IE7 ENGB">
    <ul class="CMSListMenuUL" id="Vertical2"/>
</body>
</html>
'''
         )
        from arelle.ModelObjectFactory import parser
        parser, parserLookupName, parserLookupClass = parser(dts,None)
        from lxml import etree
        indexDocument = etree.parse(file, parser=parser, base_url=indexFile)
        file.close()
        #xmlDocument.getroot().init(self)  ## is this needed ??
        for listElt in indexDocument.iter(tag="{http://www.w3.org/1999/xhtml}ul"):
            break
    
        class nonTkBooleanVar():
            def __init__(self, value=True):
                self.value = value
            def set(self, value):
                self.value = value
            def get(self):
                return self.value
    
        class View():
            def __init__(self, tableOrELR, ignoreDimValidity, xAxisChildrenFirst, yAxisChildrenFirst):
                self.tblELR = tableOrELR
                # context menu boolean vars (non-tkinter boolean
                self.ignoreDimValidity = nonTkBooleanVar(value=ignoreDimValidity)
                self.xAxisChildrenFirst = nonTkBooleanVar(value=xAxisChildrenFirst)
                self.yAxisChildrenFirst = nonTkBooleanVar(value=yAxisChildrenFirst)
    
        indexBase = indexFile.rpartition(".")[0]
        groupTableRels = dts.modelXbrl.relationshipSet(XbrlConst.euGroupTable)
        modelTables = []
        tblCssExtras='''
body {background-image:url('http://arelle.org/files/EBA/style20121210/lhsbackground.jpg')}
table {background:#fff}
'''
        # order number is missing
        def viewTable(modelTable):
            if modelTable is None:
                return
            if isinstance(modelTable, (ModelEuTable, ModelTable)):
                # status
                dts.modelManager.cntlr.addToLog("viewing: " + modelTable.id)
                # for table file name, use table ELR
                tblFile = os.path.join(os.path.dirname(indexFile), modelTable.id + ".html")
                viewRenderedGrid(dts, 
                                 tblFile, 
                                 lang=lang, 
                                 sourceView=View(modelTable, False, False, True),
                                 cssExtras=tblCssExtras)
                
                # generate menu entry
                elt = etree.SubElement(listElt, "{http://www.w3.org/1999/xhtml}li")
                elt.set("class", "CMSListMenuLI")
                elt.set("id", modelTable.id)
                elt = etree.SubElement(elt, "{http://www.w3.org/1999/xhtml}a")
                elt.text = modelTable.genLabel(lang=lang, strip=True)
                elt.set("class", "CMSListMenuLink")
                elt.set("href", "javascript:void(0)")
                elt.set("onClick", "javascript:parent.body.location.href='{0}';".format(modelTable.id + ".html"))
                elt.text = modelTable.genLabel(lang=lang, strip=True)
                
            else:  # just a header
                # generate menu entry
                elt = etree.SubElement(listElt, "{http://www.w3.org/1999/xhtml}li")
                elt.set("class", "CMSListMenuLink")
                elt.set("id", modelTable.id)
                elt.text = modelTable.label(lang=lang, strip=True)

            for rel in groupTableRels.fromModelObject(modelTable):
                viewTable(rel.toModelObject)

    
        for rootConcept in groupTableRels.rootConcepts:
            sourceline = 0
            for rel in dts.modelXbrl.relationshipSet(XbrlConst.euGroupTable).fromModelObject(rootConcept):
                sourceline = rel.sourceline
                break
            modelTables.append((rootConcept, sourceline))
            
        for modelTable, order in sorted(modelTables, key=lambda x: x[1]):
            viewTable(modelTable)
        
        with open(indexBase + "FormsFrame.html", "wt", encoding="utf-8") as fh:
            XmlUtil.writexml(fh, indexDocument, encoding="utf-8")
            
        with open(indexFile, "wt", encoding="utf-8") as fh:
            fh.write(
'''
<html xmlns="http://www.w3.org/1999/xhtml">
<head id="Head1">
  <title>European Banking Authority - EBA  - FINREP Taxonomy</title>
  <meta name="generator" content="Arelle(r) {0}" /> 
  <meta name="provider" content="Aguilonius(r)" />
  <meta http-equiv="content-type" content="text/html; charset=UTF-8" /> 
  <meta http-equiv="pragma" content="no-cache" /> 
  <meta http-equiv="content-style-type" content="text/css" /> 
  <meta http-equiv="content-script-type" content="text/javascript" /> 
  <link type="text/css" rel="stylesheet" href="http://arelle.org/files/EBA/style20121210/eba.css" /> 
</head>
<frameset border="0" frameborder="0" rows="90,*">
   <frame name="head" src="{1}" scrolling="no" marginwidth="0" marginheight="10"/>
   <frameset  bordercolor="#0000cc" border="10" frameborder="no" framespacing="0" cols="360, *">
      <frame src="{2}" name="menu" bordercolor="#0000cc"/>
      <frame src="{3}" name="body" bordercolor="#0000cc"/>
   </frameset>
</frameset>
'''.format(Version.version,
           os.path.basename(indexBase) + "TopFrame.html",
           os.path.basename(indexBase) + "FormsFrame.html",
           os.path.basename(indexBase) + "CenterLanding.html",
           ))
        
        with open(indexBase + "TopFrame.html", "wt", encoding="utf-8") as fh:
            fh.write(
'''
<html xmlns="http://www.w3.org/1999/xhtml">
<head id="Top">
  <link type="text/css" rel="stylesheet" href="http://arelle.org/files/EBA/style20121210/eba.css" /> 
</head>
  <body class="LTR IE7 ENGB">
   <div id="topsection">
      <div id="topsectionLeft" style="cursor:pointer;" onclick="location.href='http://www.eba.europa.eu/home.aspx';"></div>
      <div id="topsectionRight"></div>
      <div id="topnavigation">
      <ul id="menuElem">
        <li><a href="http://www.eba.europa.eu/topnav/Contacts.aspx">Contacts</a></li>
        <li><a href="http://www.eba.europa.eu/topnav/Links.aspx">Links</a></li>
        <li><a href="http://www.eba.europa.eu/topnav/Sitemap.aspx">Sitemap</a></li>
        <li><a href="http://www.eba.europa.eu/topnav/Legal-Notice.aspx">Legal Notice</a></li>
      </ul>
    </div>
  </body>
</html>
''')
        
        with open(indexBase + "CenterLanding.html", "wt", encoding="utf-8") as fh:
            fh.write(
'''
<html xmlns="http://www.w3.org/1999/xhtml">
<head id="Center">
  <link type="text/css" rel="stylesheet" href="http://arelle.org/files/EBA/style20121210/eba.css" /> 
</head>
<body class="LTR IE7 ENGB">
  <div id="plc_lt_zoneContent_usercontrol_userControlElem_ContentPanel">
    <div id="plc_lt_zoneContent_usercontrol_userControlElem_PanelTitle">
      <div id="pagetitle" style="float:left;width:500px;">
        <h1>Taxonomy Tables Viewer</h1>
      </div>
    </div>
  </div>
  <div style="clear:both;"></div>
  <div id="contentcenter">
    <p style="text-align: justify; margin-top: 0pt; margin-bottom: 0pt">Please select tables to view by clicking in the left column.</p>
  </div>
</body>
</html>
''')
        
        # to merge gif's and style sheets, use a zipfile sibling of the python plug-in file.
        #import zipfile
        #zf = zipfile.ZipFile(__file__.rpartition('.')[0] + "Files.zip", mode="r")
        #zf.extractall(path=os.path.dirname(indexBase))
        #zf.close()
        
        dts.info("info:saveEBAtables",
                 _("Tables index file of %(entryFile)s has %(numberTableFiles)s table files with index file %(indexFile)s."),
                 modelObject=dts,
                 entryFile=dts.uri, numberTableFiles=numTableFiles, indexFile=indexFile)
    
        dts.modelManager.showStatus(_("Saved EBA HTML Table Files"), 5000)
    except Exception as ex:
        dts.error("exception",
            _("HTML EBA Tableset files generation exception: %(error)s"), error=ex,
            modelXbrl=dts,
            exc_info=True)
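
The plug-in seeds its menu frame from an in-memory XHTML skeleton: io.StringIO makes the literal string parseable, after which menu entries are appended to the live tree. A minimal sketch of the same idea with stdlib ElementTree (the table label is a sample value):

import io
import xml.etree.ElementTree as ET

XHTML = "{http://www.w3.org/1999/xhtml}"
skeleton = io.StringIO(
    '<html xmlns="http://www.w3.org/1999/xhtml">'
    '<body><ul id="Vertical2"/></body></html>')
doc = ET.parse(skeleton)
for listElt in doc.iter(XHTML + "ul"):
    li = ET.SubElement(listElt, XHTML + "li")  # one menu entry per table
    li.text = "t-01.01"
print(ET.tostring(doc.getroot(), encoding="unicode"))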

Example 140

Project: sondra Source File: html.py
    def __call__(self, reference, results, **kwargs):

        # handle indent the same way python's json library does
        if 'indent' in kwargs:
            kwargs['indent'] = int(kwargs['indent'])

        if 'ordered' in kwargs:
            ordered = bool(kwargs.get('ordered', False))
            del kwargs['ordered']
        else:
            ordered = False

        # fetch a foreign key reference and append it as if it were part of the document.
        if 'fetch' in kwargs:
            fetch = kwargs['fetch'].split(',')
            del kwargs['fetch']
        else:
            fetch = []

        if 'bare_keys' in kwargs:
            bare_keys = bool(kwargs.get('bare_keys', False))
            del kwargs['bare_keys']
        else:
            bare_keys = False

        # note this is a closure around the fetch parameter. Consider before refactoring out of the method.
        def serialize(doc):
            if isinstance(doc, document.Document):
                ret = doc.json_repr(ordered=ordered, bare_keys=bare_keys)
                for f in fetch:
                    if f in ret:
                        if isinstance(doc[f], list):
                            ret[f] = [d.json_repr(ordered=ordered, bare_keys=bare_keys) for d in doc[f]]
                        elif isinstance(doc[f], dict):
                            ret[f] = {k: v.json_repr(ordered=ordered, bare_keys=bare_keys) for k, v in doc[f].items()}
                        else:
                            ret[f] = doc[f].json_repr(ordered=ordered, bare_keys=bare_keys)
                return ret
            else:
                return doc

        result = mapjson(serialize, results)  # make sure to serialize a full Document structure if we have one.

        if not (isinstance(result, dict) or isinstance(result, list)):
            result = {"_": result}

        from json2html import json2html
        rsp = StringIO()
        rsp.write("""<!doctype html>
<html>
<head>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">

<!-- Optional theme -->
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap-theme.min.css">

</head><body>
<!-- Latest compiled and minified JavaScript -->
<script src="//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
""")
        rsp.write(json2html.convert(json=result, table_attributes="class=\"table table-bordered table-hover\""))
        rsp.write('</body></html>')
        return 'text/html',rsp.getvalue()
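
Here io.StringIO acts as a string builder: HTML fragments are written piecewise and the full page is materialized once with getvalue(). A minimal sketch, with fabricated content standing in for the json2html output:

import io

rsp = io.StringIO()
rsp.write("<!doctype html>\n<html><body>\n")
rsp.write("<table><tr><td>42</td></tr></table>\n")  # payload would come from json2html
rsp.write("</body></html>")
print(rsp.getvalue())  # one string, built without repeated concatenation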

Example 141

Project: Arelle Source File: CntlrGenVersReports.py
    def runFromExcel(self, options):
        #testGenFileName = options.excelfilename
        testGenFileName = r"C:\Users\Herm Fischer\Documents\mvsl\projects\XBRL.org\conformance-versioning\trunk\versioningReport\conf\creation-index.xls"
        testGenDir = os.path.dirname(testGenFileName)
        schemaDir = os.path.dirname(testGenDir) + os.sep + "schema"
        timeNow = XmlUtil.dateunionValue(datetime.datetime.now())
        if options.testfiledate:
            today = options.testfiledate
        else:
            today = XmlUtil.dateunionValue(datetime.date.today())
        startedAt = time.time()
        
        LogHandler(self) # start logger

        self.logMessages = []
        logMessagesFile = testGenDir + os.sep + 'log-generation-messages.txt'

        modelTestcases = ModelXbrl.create(self.modelManager, url=testGenFileName, isEntry=True)
        testcaseIndexBook = xlrd.open_workbook(testGenFileName)
        testcaseIndexSheet = testcaseIndexBook.sheet_by_index(0)
        self.addToLog(_("[info] xls loaded in {0:.2} secs at {1}").format(time.time() - startedAt, timeNow))
        
        # start index file
        indexFiles = [testGenDir + os.sep + 'creation-testcases-index.xml',
                      testGenDir + os.sep + 'consumption-testcases-index.xml']
        indexDocs = []
        testcasesElements = []
        for purpose in ("Creation","Consumption"):
            file = io.StringIO(
                #'<?xml version="1.0" encoding="UTF-8"?>'
                '<!-- XBRL Versioning 1.0 {0} Tests -->'
                '<!-- Copyright 2011 XBRL International.  All Rights Reserved. -->'
                '<?xml-stylesheet type="text/xsl" href="infrastructure/testcases-index.xsl"?>'
                '<testcases name="XBRL Versioning 1.0 {0} Tests" '
                ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
                ' xsi:noNamespaceSchemaLocation="infrastructure/testcases-index.xsd">'
                '</testcases>'.format(purpose, today)
                )
            doc = etree.parse(file)
            file.close()
            indexDocs.append(doc)
            testcasesElements.append(doc.getroot())
        priorTestcasesDir = None
        testcaseFiles = None
        testcaseDocs = None
        for iRow in range(1, testcaseIndexSheet.nrows):
            try:
                row = testcaseIndexSheet.row(iRow)
                if (row[0].ctype == xlrd.XL_CELL_EMPTY or # must have directory
                    row[1].ctype == xlrd.XL_CELL_EMPTY or # from
                    row[2].ctype == xlrd.XL_CELL_EMPTY):  # to
                    continue
                testDir = row[0].value
                uriFrom = row[1].value
                uriTo = row[2].value
                overrideReport = row[3].value
                description = row[4].value
                if description is None or len(description) == 0:
                    continue # test not ready to run
                assignment = row[5].value
                expectedEvents = row[6].value # comma space separated if multiple
                note = row[7].value
                useCase = row[8].value
                base = os.path.join(os.path.dirname(testGenFileName),testDir) + os.sep
                self.addToLog(_("[info] testcase uriFrom {0}").format(uriFrom))
                if uriFrom and uriTo and assignment.lower() not in ("n.a.", "error") and expectedEvents != "N.A.":
                    modelDTSfrom = modelDTSto = None
                    for URIs, msg, isFrom in ((uriFrom, _("loading from DTS"), True), (uriTo, _("loading to DTS"), False)):
                        if ',' not in URIs:
                            modelDTS = ModelXbrl.load(self.modelManager, URIs, msg, base=base)
                        else:
                            modelDTS = ModelXbrl.create(self.modelManager, 
                                         ModelDocument.Type.DTSENTRIES,
                                         self.webCache.normalizeUrl(URIs.replace(", ","_") + ".dts", 
                                                                    base),
                                         isEntry=True)
                            DTSdoc = modelDTS.modelDocument
                            DTSdoc.inDTS = True
                            for uri in URIs.split(','):
                                doc = ModelDocument.load(modelDTS, uri.strip(), base=base)
                                if doc is not None:
                                    DTSdoc.referencesDocument[doc] = "import"  #fake import
                                    doc.inDTS = True
                        if isFrom: modelDTSfrom = modelDTS
                        else: modelDTSto = modelDTS
                    if modelDTSfrom is not None and modelDTSto is not None:
                        # generate differences report
                        reportUri = uriFrom.partition(',')[0]  # first file
                        reportDir = os.path.dirname(reportUri)
                        if reportDir: reportDir += os.sep
                        reportName = os.path.basename(reportUri).replace("from.xsd","report.xml")
                        reportFile = reportDir + "out" + os.sep + reportName
                        #reportFile = reportDir + "report" + os.sep + reportName
                        reportFullPath = self.webCache.normalizeUrl(
                                            reportFile, 
                                            base)
                        testcasesDir = os.path.dirname(os.path.dirname(reportFullPath))
                        if testcasesDir != priorTestcasesDir:
                            # close prior report
                            if priorTestcasesDir:
                                for i,testcaseFile in enumerate(testcaseFiles):
                                    with open(testcaseFile, "w", encoding="utf-8") as fh:
                                        XmlUtil.writexml(fh, testcaseDocs[i], encoding="utf-8")
                            testcaseName = os.path.basename(testcasesDir)
                            testcaseFiles = [testcasesDir + os.sep + testcaseName + "-creation-testcase.xml",
                                             testcasesDir + os.sep + testcaseName + "-consumption-testcase.xml"]
                            for i,testcaseFile in enumerate(testcaseFiles):
                                etree.SubElement(testcasesElements[i], "testcase", 
                                                 attrib={"uri": 
                                                         testcaseFile[len(testGenDir)+1:].replace("\\","/")} )
                            
                            # start testcase file
                            testcaseDocs = []
                            testcaseElements = []
                            testcaseNumber = testcaseName[0:4]
                            if testcaseNumber.isnumeric():
                                testcaseNumberElement = "<number>{0}</number>".format(testcaseNumber)
                                testcaseName = testcaseName[5:]
                            else:
                                testcaseNumberElement = ""
                            testDirSegments = testDir.split('/')
                            if len(testDirSegments) >= 2 and '-' in testDirSegments[1]:
                                testedModule = testDirSegments[1][testDirSegments[1].index('-') + 1:]
                            else:
                                testedModule = ''
                            for purpose in ("Creation","Consumption"):
                                file = io.StringIO(
                                    #'<?xml version="1.0" encoding="UTF-8"?>'
                                    '<!-- Copyright 2011 XBRL International.  All Rights Reserved. -->'
                                    '<?xml-stylesheet type="text/xsl" href="../../../infrastructure/test.xsl"?>'
                                    '<testcase '
                                    ' xmlns="http://xbrl.org/2008/conformance"'
                                    ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
                                    ' xsi:schemaLocation="http://xbrl.org/2008/conformance ../../../infrastructure/test.xsd">'
                                    '<creator>'
                                    '<name>Roland Hommes</name>'
                                    '<email>[email protected]</email>'
                                    '</creator>'
                                    '{0}'
                                    '<name>{1}</name>'
                                    # '<description>{0}</description>'
                                    '<reference>'
                                    '{2}'
                                    '{3}'
                                    '</reference>'
                                    '</testcase>'.format(testcaseNumberElement,
                                                         testcaseName,
                                                         '<name>{0}</name>'.format(testedModule) if testedModule else '',
                                                         '<id>{0}</id>'.format(useCase) if useCase else '')
                                    
                                    )
                                doc = etree.parse(file)
                                file.close()
                                testcaseDocs.append(doc)
                                testcaseElements.append(doc.getroot())
                            priorTestcasesDir = testcasesDir
                            variationSeq = 1
                        try:
                            os.makedirs(os.path.dirname(reportFullPath))
                        except WindowsError:
                            pass # dir already exists
                        modelVersReport = ModelVersReport.ModelVersReport(modelTestcases)
                        modelVersReport.diffDTSes(reportFullPath,modelDTSfrom, modelDTSto, 
                                                  assignment=assignment,
                                                  schemaDir=schemaDir)
                        
                        # check for expected elements
                        if expectedEvents:
                            for expectedEvent in expectedEvents.split(","):
                                if expectedEvent not in ("No change", "N.A."):
                                    prefix, sep, localName = expectedEvent.partition(':')
                                    if sep and len(modelVersReport.xmlDocument.findall(
                                                        '//{{{0}}}{1}'.format(
                                                            XbrlConst.verPrefixNS.get(prefix),
                                                            localName))) == 0:
                                        modelTestcases.warning("warning",
                                            "Generated test case %(reportName)s missing expected event %(event)s",
                                            reportName=reportName, 
                                            event=expectedEvent)
                        
                        modelVersReport.close()
                        uriFromParts = uriFrom.split('_')
                        if len(uriFromParts) >= 2:
                            variationId = uriFromParts[1]
                        else:
                            variationId = "_{0:02n}".format(variationSeq)
                        for i,testcaseElt in enumerate(testcaseElements):
                            variationElement = etree.SubElement(testcaseElt, "{http://xbrl.org/2008/conformance}variation", 
                                                                attrib={"id": variationId})
                            nameElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}description")
                            nameElement.text = description
                            ''' (removed per RH 2011/10/04
                            if note:
                                paramElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}description")
                                paramElement.text = "Note: " + note
                            if useCase:
                                paramElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}reference")
                                paramElement.set("specification", "versioning-requirements")
                                paramElement.set("useCase", useCase)
                            '''
                            dataElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}data")
                            if i == 0:  # result is report
                                if expectedEvents:
                                    paramElement = etree.SubElement(dataElement, "{http://xbrl.org/2008/conformance}parameter",
                                                                    attrib={"name":"expectedEvent",
                                                                            "value":expectedEvents.replace(',',' ')},
                                                                    nsmap={"conf":"http://xbrl.org/2008/conformance",
                                                                           None:""})
                                if assignment:
                                    paramElement = etree.SubElement(dataElement, "{http://xbrl.org/2008/conformance}parameter",
                                                                    attrib={"name":"assignment",
                                                                            "value":assignment},
                                                                    nsmap={"conf":"http://xbrl.org/2008/conformance",
                                                                           None:""})
                            for schemaURIs, dtsAttr in ((uriFrom,"from"), (uriTo,"to")):
                                for schemaURI in schemaURIs.split(","): 
                                    schemaElement = etree.SubElement(dataElement, "{http://xbrl.org/2008/conformance}schema")
                                    schemaElement.set("dts",dtsAttr)
                                    if i == 0:
                                        schemaElement.set("readMeFirst","true")
                                    schemaElement.text=os.path.basename(schemaURI.strip())
                            resultElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}result")
                            reportElement = etree.SubElement(resultElement if i == 0 else dataElement, 
                                             "{http://xbrl.org/2008/conformance}versioningReport")
                            if i == 1:
                                reportElement.set("readMeFirst","true")
                            reportElement.text = "report/" + reportName
                        variationSeq += 1
            except Exception as err:
                modelTestcases.error("exception",
                    _("Exception: %(error)s, Excel row: %(excelRow)s"),
                    error=err,
                    excelRow=iRow, 
                    exc_info=True)
        
        # add tests-error-code index files to consumption
        for testcaseFile in self.testcaseFiles(testGenDir + os.sep + "tests-error-code"):
            etree.SubElement(testcasesElements[1], "testcase", 
                             attrib={"uri": 
                             testcaseFile[len(testGenDir)+1:].replace("\\","/")} )

        with open(logMessagesFile, "w") as fh:
            fh.writelines(self.logMessages)

        if priorTestcasesDir:
            for i,testcaseFile in enumerate(testcaseFiles):
                with open(testcaseFile, "w", encoding="utf-8") as fh:
                    XmlUtil.writexml(fh, testcaseDocs[i], encoding="utf-8")
        for i,indexFile in enumerate(indexFiles):
            with open(indexFile, "w", encoding="utf-8") as fh:
                XmlUtil.writexml(fh, indexDocs[i], encoding="utf-8")
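
Both the index and testcase documents start life as formatted string templates; io.StringIO turns each template into a file-like object that etree.parse can consume, yielding a mutable tree. A minimal sketch, assuming a made-up skeleton and testcase URI:

import io
import xml.etree.ElementTree as ET

file = io.StringIO(
    '<testcases name="XBRL Versioning 1.0 {0} Tests"></testcases>'.format("Creation"))
doc = ET.parse(file)
file.close()
ET.SubElement(doc.getroot(), "testcase",
              attrib={"uri": "0001-example/testcase.xml"})  # hypothetical path
print(ET.tostring(doc.getroot(), encoding="unicode"))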

Example 142

Project: Flexget Source File: cookies.py
    def sqlite2cookie(self, filename):
        from io import StringIO
        try:
            from pysqlite2 import dbapi2 as sqlite
        except ImportError:
            try:
                from sqlite3 import dbapi2 as sqlite  # try the 2.5+ stdlib
            except ImportError:
                raise plugin.PluginWarning('Unable to use sqlite3 or pysqlite2', log)

        log.debug('connecting: %s' % filename)
        try:
            con = sqlite.connect(filename)
        except:
            raise plugin.PluginError('Unable to open cookies sqlite database')

        cur = con.cursor()
        try:
            cur.execute('select host, path, isSecure, expiry, name, value from moz_cookies')
        except sqlite.Error:
            raise plugin.PluginError('%s does not appear to be a valid Firefox 3 cookies file' % filename, log)

        ftstr = ['FALSE', 'TRUE']

        s = StringIO()
        s.write("""\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file!  Do not edit.
""")
        count = 0
        failed = 0

        log.debug('fetching all cookies')

        def notabs(val):
            if isinstance(val, str):  # basestring in the original is a Python 2 remnant
                return val.replace('\t', '')
            return val

        while True:
            try:
                item = next(cur)
                # remove \t from item (#582)
                item = [notabs(field) for field in item]
                try:
                    s.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (item[0], ftstr[item[0].startswith('.')], item[1],
                                                              ftstr[item[2]], item[3], item[4], item[5]))

                    log.trace('Adding cookie for %s. key: %s value: %s' % (item[0], item[4], item[5]))
                    count += 1
                except IOError:
                    to_hex = lambda x: ''.join([hex(ord(c))[2:].zfill(2) for c in x])
                    i = 0
                    for val in item:
                        if isinstance(val, str):
                            log.debug('item[%s]: %s' % (i, to_hex(val)))
                        else:
                            log.debug('item[%s]: %s' % (i, val))
                        i += 1
                    failed += 1

            except UnicodeDecodeError:
                # for some god awful reason the sqlite module can throw UnicodeDecodeError ...
                log.debug('got UnicodeDecodeError from sqlite, ignored')
                failed += 1
            except StopIteration:
                break

        log.debug('Added %s cookies to jar. %s failed (non-ascii)' % (count, failed))

        s.seek(0)
        con.close()

        cookie_jar = http.cookiejar.MozillaCookieJar()
        cookie_jar._really_load(s, '', True, True)
        return cookie_jar
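
The conversion hinges on StringIO being both writable and readable: the Netscape-format lines are written, the buffer is rewound with seek(0), and the cookie jar then reads it like a real file. A minimal sketch with a fabricated cookie row:

import io

s = io.StringIO()
s.write("# Netscape HTTP Cookie File\n")
s.write(".example.com\tTRUE\t/\tFALSE\t2145916800\tsession\tabc123\n")
s.seek(0)  # rewind so the next reader starts from the top
for line in s:
    if not line.startswith("#"):
        host, flag, path, secure, expiry, name, value = line.rstrip("\n").split("\t")
        print(host, name, value)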

Example 143

Project: python3-openid Source File: discover.py
def whereIsYadis(resp):
    """Given a HTTPResponse, return the location of the Yadis docuement.

    May be the URL just retrieved, another URL, or None if no suitable URL can
    be found.

    [non-blocking]

    @returns: str or None
    """
    # Attempt to find out where to go to discover the document
    # or if we already have it
    content_type = resp.headers.get('content-type')

    # According to the spec, the content-type header must be an exact
    # match, or else we have to look for an indirection.
    if (content_type and
        content_type.split(';', 1)[0].lower() == YADIS_CONTENT_TYPE):
        return resp.final_url
    else:
        # Try the header
        yadis_loc = resp.headers.get(YADIS_HEADER_NAME.lower())

        if not yadis_loc:
            # Parse as HTML if the header is missing.
            #
            # XXX: do we want to do something with content-type, like
            # have a whitelist or a blacklist (for detecting that it's
            # HTML)?

            # Decode body by encoding of file
            content_type = content_type or ''
            encoding = content_type.rsplit(';', 1)
            if (len(encoding) == 2 and
                    encoding[1].strip().startswith('charset=')):
                encoding = encoding[1].split('=', 1)[1].strip()
            else:
                encoding = 'utf-8'

            if isinstance(resp.body, bytes):
                try:
                    content = resp.body.decode(encoding)
                except UnicodeError:
                    # All right, the detected encoding has failed. Try with
                    # UTF-8 (even if there was no detected encoding and we've
                    # defaulted to UTF-8, it's not that expensive an operation)
                    try:
                        content = resp.body.decode('utf-8')
                    except UnicodeError:
                        # At this point the content cannot be decoded to a str
                        # using the detected encoding or falling back to utf-8,
                        # so we have to resort to replacing undecodable chars.
                        # This *will* result in broken content but there isn't
                        # anything else that can be done.
                        content = resp.body.decode(encoding, 'replace')
            else:
                content = resp.body

            try:
                yadis_loc = findHTMLMeta(StringIO(content))
            except (MetaNotFound, UnicodeError):
                # UnicodeError: the response body could not be decoded and the
                # XRDS location could not be found before the error occurred.
                pass

        return yadis_loc
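
A minimal sketch of the final step above: feeding decoded text through a file-like io.StringIO into an HTML meta scanner. MetaFinder is a hypothetical stand-in for the library's findHTMLMeta, and the chunked read mirrors how a parser would consume a real response body.

import io
from html.parser import HTMLParser

class MetaFinder(HTMLParser):
    # Hypothetical stand-in for findHTMLMeta: records the content of a
    # <meta http-equiv="X-XRDS-Location"> tag, if one is present.
    def __init__(self):
        super().__init__()
        self.location = None

    def handle_starttag(self, tag, attrs):
        if tag == 'meta':
            d = dict(attrs)
            if d.get('http-equiv', '').lower() == 'x-xrds-location':
                self.location = d.get('content')

content = ('<html><head><meta http-equiv="X-XRDS-Location" '
           'content="https://example.com/xrds"></head></html>')

# Read the decoded text through a file-like object, as the call above does.
f = io.StringIO(content)
parser = MetaFinder()
while True:
    chunk = f.read(1024)
    if not chunk:
        break
    parser.feed(chunk)
print(parser.location)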

Example 144

Project: deeppy Source File: gen_rst.py
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.endswith('.py'):
        # generate the plot as a png image if the file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(stdout_path) or \
           os.stat(stdout_path).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr

                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`,
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all, as some contained whitespace due to their default
        # gallery thumbnail size. Below, for a few cases, separate thumbnails are created
        # (the originals can't just be overwritten with the carousel dimensions, as that
        # messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from
        # the auto examples gallery to the _build folder. This works fine as is, but it would
        # be cleaner to have it happen with the rest. Ideally these should be written to
        # `thumb_file` as well, and then copied to the _images folder during the
        # `Copying Downloadable Files` step like the rest.
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            first_img = image_fname % 1
            make_thumbnail(first_image_file, thumb_file, 400, 280)

#    if not os.path.exists(thumb_file):
#        # create something to replace the thumbnail
#        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
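
The core StringIO move above is the Tee: stdout keeps printing to the console while a StringIO records everything for later. A minimal sketch of that pattern, with a made-up script body (note that execfile in the example is Python 2 only; exec(open(path).read()) is the Python 3 spelling):

import io
import sys

class Tee:
    # Minimal stand-in for the Tee used above: every write goes to all of
    # the wrapped streams, so output is shown live and captured at once.
    def __init__(self, *streams):
        self.streams = streams

    def write(self, data):
        for s in self.streams:
            s.write(data)

    def flush(self):
        for s in self.streams:
            s.flush()

my_buffer = io.StringIO()
orig_stdout = sys.stdout
sys.stdout = Tee(orig_stdout, my_buffer)
try:
    exec("print('hello from the example script')", {})
finally:
    sys.stdout = orig_stdout   # always restore stdout, even on error

print('captured: %r' % my_buffer.getvalue())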

Example 145

Project: gprMax Source File: input_cmds_file.py
def process_python_include_code(inputfile, usernamespace):
    """Looks for and processes any Python code found in the input file. It will ignore any lines that are comments, i.e. begin with a double hash (##), and any blank lines. It will also ignore any lines that do not begin with a hash (#) after it has processed Python commands. It will also process any include commands and insert the contents of the included file at that location.

    Args:
        inputfile (str): Name of the input file to open.
        usernamespace (dict): Namespace that can be accessed by user in any Python code blocks in input file.

    Returns:
        processedlines (list): Input commands after Python processing.
    """

    with open(inputfile, 'r') as f:
        # Strip out any newline characters and comments that must begin with double hashes
        inputlines = [line.rstrip() for line in f if(not line.startswith('##') and line.rstrip('\n'))]

    # List to hold final processed commands
    processedlines = []

    x = 0
    while(x < len(inputlines)):

        # Process any Python code
        if(inputlines[x].startswith('#python:')):

            # String to hold Python code to be executed
            pythoncode = ''
            x += 1
            while not inputlines[x].startswith('#end_python:'):
                # Add all code in current code block to string
                pythoncode += inputlines[x] + '\n'
                x += 1
                if x == len(inputlines):
                    raise CmdInputError('Cannot find the end of the Python code block, i.e. missing #end_python: command.')
            # Compile code for faster execution
            pythoncompiledcode = compile(pythoncode, '<string>', 'exec')
            # Redirect stdout to a text stream
            sys.stdout = result = StringIO()
            # Execute the code block, making only usernamespace available to it
            exec(pythoncompiledcode, usernamespace)
            # Split the captured output of the executed code into lines
            codeout = result.getvalue().split('\n')
            result.close()

            # Restore stdout
            sys.stdout = sys.__stdout__

            # Separate commands from any other generated output
            hashcmds = []
            pythonstdout = []
            for line in codeout:
                if line.startswith('#'):
                    hashcmds.append(line + '\n')
                elif line:
                    pythonstdout.append(line)

            # Add commands to a list
            processedlines.extend(hashcmds)

            # Print any generated output that is not commands
            if pythonstdout:
                print('Python messages (from stdout): {}\n'.format(pythonstdout))

        # Process any include commands
        elif(inputlines[x].startswith('#include_file:')):
            includefile = inputlines[x].split()

            if len(includefile) != 2:
                raise CmdInputError('#include_file requires exactly one parameter')

            includefile = includefile[1]

            # See if file exists at specified path and if not try input file directory
            if not os.path.isfile(includefile):
                includefile = os.path.join(usernamespace['inputdirectory'], includefile)

            with open(includefile, 'r') as f:
                # Strip out any newline characters and comments that must begin with double hashes
                includelines = [includeline.rstrip() + '\n' for includeline in f if(not includeline.startswith('##') and includeline.rstrip('\n'))]

            # Add lines from include file to list
            processedlines.extend(includelines)

        # Add any other commands to list
        elif(inputlines[x].startswith('#')):
            # Add gprMax command to list
            inputlines[x] += ('\n')
            processedlines.append(inputlines[x])

        x += 1

    return processedlines
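
The capture step above can also be written with contextlib.redirect_stdout, which restores sys.stdout automatically. A sketch under that assumption; the '#domain:' line is a made-up stand-in for a gprMax-style hash command:

import io
from contextlib import redirect_stdout

pythoncode = "print('#domain: 0.1 0.1 0.1')\nprint('progress message')"
compiledcode = compile(pythoncode, '<string>', 'exec')

result = io.StringIO()
with redirect_stdout(result):   # tidier than reassigning sys.stdout by hand
    exec(compiledcode, {})

# Separate hash commands from any other generated output, as above
for line in result.getvalue().split('\n'):
    if line.startswith('#'):
        print('command:', line)
    elif line:
        print('stdout :', line)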

Example 146

Project: flask-admin Source File: test_fileadmin.py
def test_file_admin():
    app, admin, view = create_view()

    client = app.test_client()

    # index
    rv = client.get('/admin/myfileadmin/')
    eq_(rv.status_code, 200)
    ok_('path=dummy.txt' in rv.data.decode('utf-8'))

    # edit
    rv = client.get('/admin/myfileadmin/edit/?path=dummy.txt')
    eq_(rv.status_code, 200)
    ok_('dummy.txt' in rv.data.decode('utf-8'))

    rv = client.post('/admin/myfileadmin/edit/?path=dummy.txt', data=dict(
        content='new_string'
    ))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/myfileadmin/edit/?path=dummy.txt')
    eq_(rv.status_code, 200)
    ok_('dummy.txt' in rv.data.decode('utf-8'))
    ok_('new_string' in rv.data.decode('utf-8'))

    # rename
    rv = client.get('/admin/myfileadmin/rename/?path=dummy.txt')
    eq_(rv.status_code, 200)
    ok_('dummy.txt' in rv.data.decode('utf-8'))

    rv = client.post('/admin/myfileadmin/rename/?path=dummy.txt', data=dict(
        name='dummy_renamed.txt',
        path='dummy.txt'
    ))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/myfileadmin/')
    eq_(rv.status_code, 200)
    ok_('path=dummy_renamed.txt' in rv.data.decode('utf-8'))
    ok_('path=dummy.txt' not in rv.data.decode('utf-8'))

    # upload
    rv = client.get('/admin/myfileadmin/upload/')
    eq_(rv.status_code, 200)

    rv = client.post('/admin/myfileadmin/upload/', data=dict(
        upload=(StringIO(""), 'dummy.txt'),
    ))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/myfileadmin/')
    eq_(rv.status_code, 200)
    ok_('path=dummy.txt' in rv.data.decode('utf-8'))
    ok_('path=dummy_renamed.txt' in rv.data.decode('utf-8'))

    # delete
    rv = client.post('/admin/myfileadmin/delete/', data=dict(
        path='dummy_renamed.txt'
    ))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/myfileadmin/')
    eq_(rv.status_code, 200)
    ok_('path=dummy_renamed.txt' not in rv.data.decode('utf-8'))
    ok_('path=dummy.txt' in rv.data.decode('utf-8'))

    # mkdir
    rv = client.get('/admin/myfileadmin/mkdir/')
    eq_(rv.status_code, 200)

    rv = client.post('/admin/myfileadmin/mkdir/', data=dict(
        name='dummy_dir'
    ))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/myfileadmin/')
    eq_(rv.status_code, 200)
    ok_('path=dummy.txt' in rv.data.decode('utf-8'))
    ok_('path=dummy_dir' in rv.data.decode('utf-8'))

    # rename - directory
    rv = client.get('/admin/myfileadmin/rename/?path=dummy_dir')
    eq_(rv.status_code, 200)
    ok_('dummy_dir' in rv.data.decode('utf-8'))

    rv = client.post('/admin/myfileadmin/rename/?path=dummy_dir', data=dict(
        name='dummy_renamed_dir',
        path='dummy_dir'
    ))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/myfileadmin/')
    eq_(rv.status_code, 200)
    ok_('path=dummy_renamed_dir' in rv.data.decode('utf-8'))
    ok_('path=dummy_dir' not in rv.data.decode('utf-8'))

    # delete - directory
    rv = client.post('/admin/myfileadmin/delete/', data=dict(
        path='dummy_renamed_dir'
    ))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/myfileadmin/')
    eq_(rv.status_code, 200)
    ok_('path=dummy_renamed_dir' not in rv.data.decode('utf-8'))
    ok_('path=dummy.txt' in rv.data.decode('utf-8'))
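
One caveat about the upload step above: under Python 3, Werkzeug's test client expects a bytes stream for file uploads, so io.BytesIO takes the place of the StringIO("") seen in this Python 2 era test. A small sketch; upload_payload is a hypothetical helper, not part of flask-admin:

import io

def upload_payload(text, filename):
    # Werkzeug file uploads want (stream, filename); the stream must be bytes.
    return dict(upload=(io.BytesIO(text.encode('utf-8')), filename))

data = upload_payload('new_string', 'dummy.txt')
stream, name = data['upload']
print(name, stream.read())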

Example 147

Project: Pcode Source File: pyclbr.py
Function: read_module
def _readmodule(source):
    outlineDict = {}

    f = io.StringIO(source)

    stack = []  # stack of (class, indent) pairs

    g = tokenize.generate_tokens(f.readline)
    try:
        for tokentype, token, start, _end, _line in g:
            if tokentype == DEDENT:
                lineno, thisindent = start
                # close nested classes and defs
                while stack and stack[-1][1] >= thisindent:
                    del stack[-1]
            elif token == 'def':
                lineno, thisindent = start
                # close previous nested classes and defs
                while stack and stack[-1][1] >= thisindent:
                    del stack[-1]
                tokentype, meth_name, start = next(g)[0:3]
                if tokentype != NAME:
                    continue  # Syntax error
                if stack:
                    cur_class = stack[-1][0]
                    if isinstance(cur_class, Class):
                        # it's a method
                        cur_class._addmethod(meth_name, lineno)
                    # else it's a nested def
                else:
                    # it's a function
                    outlineDict[meth_name] = Function(meth_name, lineno)
                stack.append((None, thisindent))  # Marker for nested fns
            elif token == 'class':
                lineno, thisindent = start
                # close previous nested classes and defs
                while stack and stack[-1][1] >= thisindent:
                    del stack[-1]
                tokentype, class_name, start = next(g)[0:3]
                if tokentype != NAME:
                    continue  # Syntax error
                # parse what follows the class name
                tokentype, token, start = next(g)[0:3]
                inherit = None
                if token == '(':
                    names = []  # List of superclasses
                    # there's a list of superclasses
                    level = 1
                    super = []  # Tokens making up current superclass
                    while True:
                        tokentype, token, start = next(g)[0:3]
                        if token in (')', ',') and level == 1:
                            n = "".join(super)
                            if n in outlineDict:
                                # we know this super class
                                n = outlineDict[n]
                            else:
                                c = n.split('.')
                                if len(c) > 1:
                                    # super class is of the form
                                    # module.class: look in module for
                                    # class
                                    m = c[-2]
                                    c = c[-1]
                                    if m in _modules:
                                        d = _modules[m]
                                        if c in d:
                                            n = d[c]
                            names.append(n)
                            super = []
                        if token == '(':
                            level += 1
                        elif token == ')':
                            level -= 1
                            if level == 0:
                                break
                        elif token == ',' and level == 1:
                            pass
                        # only use NAME and OP (== dot) tokens for type name
                        elif tokentype in (NAME, OP) and level == 1:
                            super.append(token)
                        # expressions in the base list are not supported
                    inherit = names
                cur_class = Class(class_name, inherit,
                                  lineno)
                if not stack:
                    outlineDict[class_name] = cur_class
                stack.append((cur_class, thisindent))
            elif token == '=':
                # get global variables
                spaces, firstword = getSpacesFirstWord(_line)
                if (len(spaces) == 0) and (_line.lstrip(spaces + firstword).lstrip().startswith('=')):
                    # it's a global variable
                    lineno, thisindent = start
                    outlineDict[firstword] = GlobalVariable(firstword, lineno)
    except:
        pass

    f.close()
    return outlineDict
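
The entry point here is worth isolating: tokenize only needs a readline callable, and io.StringIO supplies one for in-memory source text. A minimal sketch with a made-up source string:

import io
import tokenize

source = "class Greeter:\n    def hello(self):\n        return 'hi'\n"

# StringIO(...).readline feeds the tokenizer one line at a time.
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.NAME and tok.string in ('class', 'def'):
        print(tok.string, 'at line', tok.start[0])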

Example 148

Project: portage Source File: main.py
def repoman_main(argv):
	config_root = os.environ.get("PORTAGE_CONFIGROOT")
	repoman_settings = portage.config(config_root=config_root, local_config=False)

	if repoman_settings.get("NOCOLOR", "").lower() in ("yes", "true") or \
		repoman_settings.get('TERM') == 'dumb' or \
		not sys.stdout.isatty():
		nocolor()

	options, arguments = parse_args(
		sys.argv, qahelp, repoman_settings.get("REPOMAN_DEFAULT_OPTS", ""))

	if options.version:
		print("Repoman", portage.VERSION)
		sys.exit(0)

	logger = logging.getLogger()

	if options.verbosity > 0:
		logger.setLevel(LOGLEVEL - 10 * options.verbosity)
	else:
		logger.setLevel(LOGLEVEL)

	if options.experimental_inherit == 'y':
		# This is experimental, so it's non-fatal.
		qawarnings.add("inherit.missing")

	# Set this to False when an extraordinary issue (generally
	# something other than a QA issue) makes it impossible to
	# commit (like if Manifest generation fails).
	can_force = ExtendedFuture(True)

	portdir, portdir_overlay, mydir = utilities.FindPortdir(repoman_settings)
	if portdir is None:
		sys.exit(1)

	myreporoot = os.path.basename(portdir_overlay)
	myreporoot += mydir[len(portdir_overlay):]

	# avoid a circular parameter repo_settings
	vcs_settings = VCSSettings(options, repoman_settings)

	repo_settings = RepoSettings(
		config_root, portdir, portdir_overlay,
		repoman_settings, vcs_settings, options, qawarnings)
	repoman_settings = repo_settings.repoman_settings

	# Now set repo_settings
	vcs_settings.repo_settings = repo_settings

	if 'digest' in repoman_settings.features and options.digest != 'n':
		options.digest = 'y'

	logging.debug("vcs: %s" % (vcs_settings.vcs,))
	logging.debug("repo config: %s" % (repo_settings.repo_config,))
	logging.debug("options: %s" % (options,))

	# It's confusing if these warnings are displayed without the user
	# being told which profile they come from, so disable them.
	env = os.environ.copy()
	env['FEATURES'] = env.get('FEATURES', '') + ' -unknown-features-warn'

	# Perform the main checks
	scanner = Scanner(repo_settings, myreporoot, config_root, options,
					vcs_settings, mydir, env)
	scanner.scan_pkgs(can_force)

	if options.if_modified == "y" and len(scanner.effective_scanlist) < 1:
		logging.warning("--if-modified is enabled, but no modified packages were found!")

	result = {
		# fail will be true if we have failed in at least one non-warning category
		'fail': 0,
		# warn will be true if we tripped any warnings
		'warn': 0,
		# full will be true if we should print a "repoman full" informational message
		'full': options.mode != 'full',
	}

	# early out for manifest generation
	if options.mode == "manifest":
		sys.exit(result['fail'])

	for x in qacats:
		if x not in vcs_settings.qatracker.fails:
			continue
		result['warn'] = 1
		if x not in qawarnings:
			result['fail'] = 1

	if result['fail'] or \
		(result['warn'] and not (options.quiet or options.mode == "scan")):
		result['full'] = 0

	commitmessage = None
	if options.commitmsg:
		commitmessage = options.commitmsg
	elif options.commitmsgfile:
		# we don't need the actual text of the commit message here
		# the filename will do for the next code block
		commitmessage = options.commitmsgfile

	# Save QA output so that it can be conveniently displayed
	# in $EDITOR while the user creates a commit message.
	# Otherwise, the user would not be able to see this output
	# once the editor has taken over the screen.
	qa_output = io.StringIO()
	style_file = ConsoleStyleFile(sys.stdout)
	if options.mode == 'commit' and \
		(not commitmessage or not commitmessage.strip()):
		style_file.write_listener = qa_output
	console_writer = StyleWriter(file=style_file, maxcol=9999)
	console_writer.style_listener = style_file.new_styles

	f = formatter.AbstractFormatter(console_writer)

	format_outputs = {
		'column': format_qa_output_column,
		'default': format_qa_output
	}

	format_output = format_outputs.get(
		options.output_style, format_outputs['default'])
	format_output(f, vcs_settings.qatracker.fails, result['full'],
		result['fail'], options, qawarnings)

	style_file.flush()
	del console_writer, f, style_file
	qa_output = qa_output.getvalue()
	qa_output = qa_output.splitlines(True)

	# output the results
	actions = Actions(repo_settings, options, scanner, vcs_settings)
	if actions.inform(can_force.get(), result):
		# perform any other actions
		actions.perform(qa_output)

	sys.exit(0)
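
The write_listener trick above, stripped to its essentials: a file wrapper that forwards every write to the real stream and, while a listener is attached, mirrors it into a StringIO so the text can be replayed later (here, into $EDITOR). ListenerFile is a hypothetical stand-in for portage's ConsoleStyleFile:

import io
import sys

class ListenerFile:
    # Writes go to the wrapped stream and, while a listener is set,
    # to the listener as well.
    def __init__(self, stream):
        self.stream = stream
        self.write_listener = None

    def write(self, data):
        self.stream.write(data)
        if self.write_listener is not None:
            self.write_listener.write(data)

    def flush(self):
        self.stream.flush()

qa_output = io.StringIO()
out = ListenerFile(sys.stdout)
out.write_listener = qa_output
out.write('dev-lang/example: QA warning\n')
print('saved for $EDITOR: %r' % qa_output.getvalue())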

Example 149

Project: docvert-python3 Source File: docvert.py
Function: process_conversion
def process_conversion(files=None, urls=None, pipeline_id=None, pipeline_type="pipelines", auto_pipeline_id=None, storage_type_name=docvert_storage.storage_type.memory_based, converter=converter_type.python_streaming_to_libreoffice, suppress_errors=False):
    if files is None and urls is None:
        raise docvert_exception.needs_files_or_urls()
    if pipeline_id is None:
        raise docvert_exception.unrecognised_pipeline("Unknown pipeline '%s'" % pipeline_id)
    storage = docvert_storage.get_storage(storage_type_name)

    def _title(name, files, data):
        filename = os.path.basename(name).replace('\\','-').replace('/','-').replace(':','-')
        if len(filename) == 0:
            filename = "docuement.odt"
        if filename in files:
            if data and hasattr(files[filename], 'read') and files[filename].getvalue() == data:
                return filename
            unique = 1
            potential_filename = filename
            while potential_filename in files:
                unique += 1
                if filename.count("."):
                    potential_filename = filename.replace(".", "%i." % unique, 1)
                else:
                    potential_filename = filename + str(unique)
            filename = potential_filename
        return filename

    for filename, data in files.items():
        storage.set_friendly_name(filename, filename)

    for url in urls:
        try:
            data = urllib.request.urlopen(url, None, http_timeout).read()
            doc_type = document_type.detect_document_type(data)
            if doc_type == document_type.types.html:
                data = html_to_opendocument(data, url)
            filename = _title(url, files, data)
            storage.set_friendly_name(filename, "%s (%s)" % (filename, url))
            files[filename] = io.StringIO(data)
        except IOError as e:
            filename = _title(url, files, None)
            storage.set_friendly_name(filename, "%s (%s)" % (filename, url))
            files[filename] = Exception("Download error from %s: %s" % (url, e))

    for filename, data in files.items():
        if storage.default_document is None:
            storage.default_document = filename
        doc_type = document_type.detect_document_type(data)
        if doc_type == document_type.types.exception:
            storage.add("%s/index.txt" % filename, str(data))
        elif doc_type != document_type.types.oasis_open_document:
            try:
                data = generate_open_document(data, converter)
                doc_type = document_type.types.oasis_open_document
            except Exception as e:
                if not suppress_errors:
                    raise e
                storage.add("%s/index.txt" % filename, str(e))
        if doc_type == document_type.types.oasis_open_document:
            if pipeline_id == "open document":  # reserved term, for when people want the Open Document file back directly. Don't bother loading pipeline.
                storage.add("%s/index.odt" % filename, data)
                thumbnail = opendocument.extract_thumbnail(data)
                if thumbnail:
                    storage.add("%s/thumbnail.png" % filename, thumbnail)
            else:
                document_xml = opendocument.extract_useful_open_document_files(data, storage, filename)
                storage.add("%s/opendocument.xml" % filename, document_xml)
                process_pipeline(document_xml, pipeline_id, pipeline_type, auto_pipeline_id, storage, filename)
                storage.remove("%s/opendocument.xml" % filename)
    return storage
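
Note a type pitfall in the download loop above: urllib.request.urlopen(...).read() returns bytes, and under Python 3 io.StringIO raises TypeError when given bytes. A short sketch of the two correct spellings:

import io

data = b'<html>downloaded payload</html>'   # what urlopen(...).read() returns

# io.StringIO only accepts str under Python 3, so raw downloads need either
# io.BytesIO ...
print(io.BytesIO(data).read())

# ... or an explicit decode before wrapping the text in StringIO.
print(io.StringIO(data.decode('utf-8')).read())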

Example 150

Project: codetransformer Source File: test_pretty.py
Function: test_a
def test_a(capsys):
    text = dedent(
        """
        def inc(a):
            b = a + 1
            return b
        """
    )
    expected = dedent(
        """\
        Module(
          body=[
            FunctionDef(
              name='inc',
              args=arguments(
                args=[
                  arg(
                    arg='a',
                    annotation=None,
                  ),
                ],
                vararg=None,
                kwonlyargs=[],
                kw_defaults=[],
                kwarg=None,
                defaults=[],
              ),
              body=[
                Assign(
                  targets=[
                    Name(id='b', ctx=Store()),
                  ],
                  value=BinOp(
                    left=Name(id='a', ctx=Load()),
                    op=Add(),
                    right=Num(1),
                  ),
                ),
                Return(
                  value=Name(id='b', ctx=Load()),
                ),
              ],
              decorator_list=[],
              returns=None,
            ),
          ],
        )
        """
    )

    a(text)
    stdout, stderr = capsys.readouterr()
    assert stdout == expected
    assert stderr == ''

    file_ = StringIO()
    a(text, file=file_)
    assert capsys.readouterr() == ('', '')

    result = file_.getvalue()
    assert result == expected
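
The same capture idea works with anything that accepts a file= argument, print included, so output can be collected without touching sys.stdout or pytest's capsys. A small sketch with made-up lines:

import io

out = io.StringIO()
# Point print at the StringIO instead of the console.
print('Module(', file=out)
print('  body=[...],', file=out)
print(')', file=out)

assert out.getvalue() == 'Module(\n  body=[...],\n)\n'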