io.StringIO

Here are examples of the Python API io.StringIO, taken from open source projects.

200 Examples
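
Before the project examples, a minimal sketch of the API itself: io.StringIO is an in-memory text stream that accepts and returns str (its sibling io.BytesIO does the same for bytes). Writes accumulate in an internal buffer that can be dumped wholesale with getvalue() or read back incrementally after rewinding with seek(0).

import io

buf = io.StringIO()
buf.write('first line\n')
buf.write('second line\n')

print(buf.getvalue())    # whole buffer, regardless of the current position

buf.seek(0)              # rewind before reading incrementally
print(buf.readline())    # 'first line\n'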

Example 1

Project: s3ql
Source File: t3_verify.py
def test_retrieve(backend, db):
    plain_backend = backend
    backend = ComprencBackend(b'schnorz', ('zlib', 6), plain_backend)

    # Create a few objects in db
    obj_ids = (22, 25, 30, 31)
    for id_ in obj_ids:
        db.execute('INSERT INTO objects (id, refcount, size) VALUES(?, ?, ?)',
                   (id_, 1, 27 * id_))

    # Object one will be missing in backend

    # Object two will have a checksum error in the data
    key = 's3ql_data_%d' % obj_ids[1]
    backend[key] = b'some data that will be broken on a data check'
    (raw, meta) = plain_backend.fetch(key)
    raw = bytearray(raw)
    assert len(raw) > 20
    raw[-10:-6] = b'forg'
    plain_backend.store(key, raw, meta)

    # Object three will have a checksum error in the metadata
    key = 's3ql_data_%d' % obj_ids[2]
    backend.store(key, b'some data that will be broken on a metadata check',
                  { 'meta-key1': 'some textual data that just increases',
                    'meta-key2': 'the metadata size so that we can tamper with it' })
    meta = plain_backend.lookup(key)
    raw = bytearray(meta['data'])
    assert len(raw) > 20
    raw[-10:-6] = b'forg'
    meta['data'] = raw
    plain_backend.update_meta(key, meta)

    # Object four will be ok
    backend['s3ql_data_%d' % obj_ids[3]] = b'some data that is well'

    # When using a single thread, we can fake the backend factory
    def backend_factory():
        return backend

    missing_fh = io.StringIO()
    corrupted_fh = io.StringIO()

    with assert_logs('^Backend seems to have lost', count=1, level=logging.WARNING), \
         assert_logs('^Object %d is corrupted', count=1, level=logging.WARNING):
        verify.retrieve_objects(db, backend_factory, corrupted_fh, missing_fh,
                                thread_count=1, full=False)
    assert missing_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[0]
    assert corrupted_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[2]

    missing_fh = io.StringIO()
    corrupted_fh = io.StringIO()
    with assert_logs('^Backend seems to have lost', count=1, level=logging.WARNING), \
         assert_logs('^Object %d is corrupted', count=2, level=logging.WARNING):
        verify.retrieve_objects(db, backend_factory, corrupted_fh, missing_fh,
                                thread_count=1, full=True)
    assert missing_fh.getvalue() == 's3ql_data_%d\n' % obj_ids[0]
    assert corrupted_fh.getvalue() == ('s3ql_data_%d\n'*2) % obj_ids[1:3]
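
This test works because verify.retrieve_objects() only needs file-like objects to write its reports to, so plain io.StringIO instances can stand in for real files and be inspected with getvalue() afterwards. A minimal sketch of that pattern, with report() as a hypothetical stand-in for the function under test:

import io

def report(fh):    # hypothetical stand-in for verify.retrieve_objects
    fh.write('s3ql_data_22\n')

fh = io.StringIO()
report(fh)
assert fh.getvalue() == 's3ql_data_22\n'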

Example 2

Project: nodewatcher
Source File: builder.py
def build_image(result, profile):
    """
    Spawns the builder process for the specified firmware image.

    :param result: Destination build result
    :param profile: Device OpenWRT profile
    :return: A list of output firmware files
    """

    cfg = result.config

    with result.builder.connect() as builder:
        temp_path = builder.create_tempdir()

        # Prepare configuration files.
        cfg_path = os.path.join(temp_path, 'etc', 'config')
        for fname, content in cfg.items():
            if fname.startswith('_'):
                continue
            builder.write_file(os.path.join(cfg_path, fname), content)

        # Prepare user account files.
        from . import crypt
        passwd = io.StringIO()
        for account in cfg['_accounts'].get('users', {}).values():
            if account['password'] != '*':
                account['password'] = crypt.md5crypt(
                    account['password'],
                    base64.b64encode(os.urandom(6)).decode('ascii')
                )

            passwd.write('%(username)s:%(password)s:%(uid)d:%(gid)d:%(username)s:%(home)s:%(shell)s\n' % account)
        builder.write_file(os.path.join(temp_path, 'etc', 'passwd'), passwd.getvalue().encode('ascii'))

        # Prepare the banner file if configured.
        if cfg.get('_banner', None):
            banner = io.StringIO()
            banner.write(cfg['_banner'])
            builder.write_file(os.path.join(temp_path, 'etc', 'banner'), banner.getvalue().encode('ascii'))

        # Prepare the sysctl configuration.
        if cfg.get('_sysctl', None):
            sysctl = io.StringIO()
            for key, value in cfg['_sysctl'].items():
                sysctl.write('%s=%s\n' % (key, value))
            builder.write_file(os.path.join(temp_path, 'etc', 'sysctl.conf'), sysctl.getvalue().encode('ascii'))

        # Prepare the routing table mappings.
        tables = io.StringIO()
        for identifier, name in cfg['_routing_tables'].items():
            tables.write('%s\t%s\n' % (identifier, name))
        builder.write_file(os.path.join(temp_path, 'etc', 'iproute2', 'rt_tables'), tables.getvalue().encode('ascii'))

        # Prepare the crypto objects.
        ssh_authorized_keys = io.StringIO()
        for crypto_object in cfg['_crypto']:
            # Populate SSH authorized keys.
            if crypto_object['type'] == cgm_base.PlatformCryptoManager.SSH_AUTHORIZED_KEY:
                ssh_authorized_keys.write('%s\n' % crypto_object['content'])

            if not crypto_object['path']:
                continue

            content = crypto_object['content']
            if crypto_object['decoder'] == cgm_base.PlatformCryptoManager.BASE64:
                content = base64.b64decode(content)
            else:
                content = content.encode('ascii')

            builder.write_file(os.path.join(temp_path, crypto_object['path'][1:]), content)

        builder.write_file(
            os.path.join(temp_path, 'etc', 'dropbear', 'authorized_keys'),
            ssh_authorized_keys.getvalue().encode('ascii'),
            mode=0o600,
        )

        builder.chmod(os.path.join(temp_path, 'etc', 'dropbear'), 0o755)

        # Prepare any custom files.
        for path, custom_file in cfg['_files'].items():
            if path[0] == '/':
                path = path[1:]

            builder.write_file(
                os.path.join(temp_path, path),
                custom_file['content'].encode('utf8'),
                mode=custom_file['mode'],
            )

        # Clean the build first to prevent accidentally taking build results from a previous build.
        builder.call('make', 'clean')
        # Ensure build dir paths are cleaned. This is required because for some architectures, there
        # are leftovers even after a 'make clean'.
        builder.call('rm', '-rf', 'build_dir/target-*/linux-*/{tmp,root.squashfs}', quote=False)
        # Ensure the prerequisite check is skipped.
        builder.call('touch', 'staging_dir/host/.prereq-build')

        # Run the build system and wait for its completion.
        result.build_log = builder.call(
            'make', 'image',
            'PROFILE=%s' % profile["name"],
            'FILES=%s' % temp_path,
            'PACKAGES=%s' % " ".join(cfg['_packages']),
            'FORCE=1'
        )

        # Determine the location of output files.
        output_locations = builder.list_dir('bin')

        # Collect the output files and return them.
        fw_files = []
        for fw_file in profile['files']:
            matched = False
            for output_location in output_locations:
                for output_filename in builder.list_dir(os.path.join('bin', output_location)):
                    if fnmatch.fnmatch(output_filename, fw_file):
                        try:
                            fw_files.append((
                                output_filename,
                                builder.read_result_file(os.path.join('bin', output_location, output_filename))
                            ))
                            matched = True
                        except IOError:
                            continue

            if not matched:
                raise cgm_exceptions.BuildError('Output file \'%s\' not found!' % fw_file)

        return fw_files
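
Note the build-then-encode idiom used for every configuration file above: the content is assembled as text in a StringIO and only converted to bytes at the write boundary via getvalue().encode('ascii'). A condensed sketch of the same idiom, using only the standard library:

import io

sysctl = io.StringIO()
for key, value in {'net.ipv4.ip_forward': 1}.items():
    sysctl.write('%s=%s\n' % (key, value))

payload = sysctl.getvalue().encode('ascii')    # bytes, ready to write out
assert payload == b'net.ipv4.ip_forward=1\n'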

Example 3

Project: scikit-bio
Source File: test_driver.py
    def test_invalid_input_phylogenetic(self):
        # otu_ids not provided
        self.assertRaises(ValueError, beta_diversity, 'weighted_unifrac',
                          self.table1, list('ABC'), tree=self.tree1)
        self.assertRaises(ValueError, beta_diversity, 'unweighted_unifrac',
                          self.table1, list('ABC'), tree=self.tree1)
        # tree not provided
        self.assertRaises(ValueError, beta_diversity, 'weighted_unifrac',
                          self.table1, list('ABC'), otu_ids=self.oids1)
        self.assertRaises(ValueError, beta_diversity, 'unweighted_unifrac',
                          self.table1, list('ABC'), otu_ids=self.oids1)

        # tree has duplicated tip ids
        t = TreeNode.read(
            io.StringIO(
                '(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                '0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(DuplicateNodeError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(DuplicateNodeError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)

        # unrooted tree as input
        t = TreeNode.read(io.StringIO('((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
                                      'OTU4:0.7);'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)

        # otu_ids has duplicated ids
        t = TreeNode.read(
            io.StringIO(
                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                '0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU2']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)

        # count and OTU vectors are not equal length
        t = TreeNode.read(
            io.StringIO(
                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                '0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        t = TreeNode.read(
            io.StringIO(
                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                '0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)

        # tree with no branch lengths
        t = TreeNode.read(
            io.StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)

        # tree missing some branch lengths
        t = TreeNode.read(
            io.StringIO('(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                        '0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)

        # some otu_ids not present in tree
        t = TreeNode.read(
            io.StringIO(
                '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                '0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU42']
        self.assertRaises(MissingNodeError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(MissingNodeError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
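
Here io.StringIO plays the opposite role: it wraps a newick string so that TreeNode.read(), which expects a file-like object, can parse it without a temporary file on disk. The same trick works with any reader in the standard library, for example the csv module:

import csv
import io

rows = list(csv.reader(io.StringIO('a,b\n1,2\n')))
assert rows == [['a', 'b'], ['1', '2']]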

Example 4

Project: cgstudiomap
Source File: test_copy.py
    @skip_if_no_iobase
    def test_copy_expert_textiobase(self):
        self.conn.set_client_encoding('latin1')
        self._create_temp_table()  # the above call closed the xn

        if sys.version_info[0] < 3:
            abin = ''.join(map(chr, range(32, 127) + range(160, 256)))
            abin = abin.decode('latin1')
            about = abin.replace('\\', '\\\\')

        else:
            abin = bytes(list(range(32, 127)) + list(range(160, 256))).decode('latin1')
            about = abin.replace('\\', '\\\\')

        import io
        f = io.StringIO()
        f.write(about)
        f.seek(0)

        curs = self.conn.cursor()
        psycopg2.extensions.register_type(
            psycopg2.extensions.UNICODE, curs)

        curs.copy_expert('COPY tcopy (data) FROM STDIN', f)
        curs.execute("select data from tcopy;")
        self.assertEqual(curs.fetchone()[0], abin)

        f = io.StringIO()
        curs.copy_expert('COPY tcopy (data) TO STDOUT', f)
        f.seek(0)
        self.assertEqual(f.readline().rstrip(), about)

        # same tests with setting size
        f = io.StringIO()
        f.write(about)
        f.seek(0)
        exp_size = 123
        # hack here to leave file as is, only check size when reading
        real_read = f.read
        def read(_size, f=f, exp_size=exp_size):
            self.assertEqual(_size, exp_size)
            return real_read(_size)
        f.read = read
        curs.copy_expert('COPY tcopy (data) FROM STDIN', f, size=exp_size)
        curs.execute("select data from tcopy;")
        self.assertEqual(curs.fetchone()[0], abin)
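
Two details above are easy to miss: after writing, the stream position sits at the end, so f.seek(0) is required before copy_expert() can read anything, and f.read is monkeypatched to assert on the size hint. The rewind behavior in isolation:

import io

f = io.StringIO()
f.write('payload')
assert f.read() == ''          # position is at the end after writing
f.seek(0)
assert f.read() == 'payload'   # rewinding makes the data readable again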

Example 5

Project: oq-engine
Source File: hazard_writers_test.py
    @classmethod
    def setUpClass(cls):
        cls.expected_xml = io.StringIO(u"""\
<?xml version='1.0' encoding='UTF-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml" xmlns="http://openquake.org/xmlns/nrml/0.4">
  <uniformHazardSpectra sourceModelTreePath="foo" gsimTreePath="bar" investigationTime="50.0" poE="0.1">
    <periods>0.0 0.025 0.1 0.2</periods>
    <uhs>
      <gml:Point>
        <gml:pos>0.0 0.0</gml:pos>
      </gml:Point>
      <IMLs>0.3 0.5 0.2 0.1</IMLs>
    </uhs>
    <uhs>
      <gml:Point>
        <gml:pos>1.0 1.0</gml:pos>
      </gml:Point>
      <IMLs>0.4 0.6 0.3 0.05</IMLs>
    </uhs>
  </uniformHazardSpectra>
</nrml>
""")
        cls.expected_mean_xml = io.StringIO(u"""\
<?xml version='1.0' encoding='UTF-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml" xmlns="http://openquake.org/xmlns/nrml/0.4">
  <uniformHazardSpectra statistics="mean" investigationTime="50.0" poE="0.1">
    <periods>0.0 0.025 0.1 0.2</periods>
    <uhs>
      <gml:Point>
        <gml:pos>0.0 0.0</gml:pos>
      </gml:Point>
      <IMLs>0.3 0.5 0.2 0.1</IMLs>
    </uhs>
    <uhs>
      <gml:Point>
        <gml:pos>1.0 1.0</gml:pos>
      </gml:Point>
      <IMLs>0.4 0.6 0.3 0.05</IMLs>
    </uhs>
  </uniformHazardSpectra>
</nrml>
""")
        cls.expected_quantile_xml = io.StringIO(u"""\
<?xml version='1.0' encoding='UTF-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml" xmlns="http://openquake.org/xmlns/nrml/0.4">
  <uniformHazardSpectra statistics="quantile" quantileValue="0.95" investigationTime="50.0" poE="0.1">
    <periods>0.0 0.025 0.1 0.2</periods>
    <uhs>
      <gml:Point>
        <gml:pos>0.0 0.0</gml:pos>
      </gml:Point>
      <IMLs>0.3 0.5 0.2 0.1</IMLs>
    </uhs>
    <uhs>
      <gml:Point>
        <gml:pos>1.0 1.0</gml:pos>
      </gml:Point>
      <IMLs>0.4 0.6 0.3 0.05</IMLs>
    </uhs>
  </uniformHazardSpectra>
</nrml>
""")
        cls.data = [
            UHSData(Location(0.0, 0.0), [0.3, 0.5, 0.2, 0.1]),
            UHSData(Location(1.0, 1.0), [0.4, 0.6, 0.3, 0.05]),
        ]

Example 6

Project: palladium
Source File: test_server.py
    def test_listen(self, stream):
        io_in = io.StringIO()
        io_out = io.StringIO()
        io_err = io.StringIO()
        lines = [
            '[{"id": 1, "color": "blue", "length": 1.0}]\n',
            '[{"id": 1, "color": "{\\"a\\": 1, \\"b\\": 2}", "length": 1.0}]\n',
            '[{"id": 1, "color": "blue", "length": 1.0}, {"id": 2, "color": "{\\"a\\": 1, \\"b\\": 2}", "length": 1.0}]\n',
        ]
        for line in lines:
            io_in.write(line)
 
        io_in.write('EXIT\n')
        io_in.seek(0)
        predict = stream.predict_service.predict
        predict.side_effect = (
            lambda model, samples, **params:
            np.array([{'result': 1}] * len(samples))
            )
        stream_thread = Thread(
            target=stream.listen, args=(io_in, io_out, io_err))
        stream_thread.start()
        stream_thread.join()
        io_out.seek(0)
        io_err.seek(0)
        assert len(io_err.read()) == 0
        assert io_out.read() == (
            ('[{"result":1}]\n' * 2) + ('[{"result":1},{"result":1}]\n'))
        assert predict.call_count == 3
        # check if the correct arguments are passed to predict call
        assert predict.call_args_list[0][0][1] == np.array([
            {'id': 1, 'color': 'blue', 'length': 1.0}])
        assert predict.call_args_list[1][0][1] == np.array([
            {'id': 1, 'color': '{"a": 1, "b": 2}', 'length': 1.0}])
        assert (predict.call_args_list[2][0][1] == np.array([
            {'id': 1, 'color': 'blue', 'length': 1.0},
            {'id': 2, 'color': '{"a": 1, "b": 2}', 'length': 1.0},
            ])).all()

        # check if string representation of attribute can be converted to json
        assert ujson.loads(predict.call_args_list[1][0][1][0]['color']) == {
            "a": 1, "b": 2}

Example 7

Project: bumpversion
Source File: __init__.py
def main(original_args=None):

    positionals, args = split_args_in_optional_and_positional(
      sys.argv[1:] if original_args is None else original_args
    )

    if len(positionals[1:]) > 2:
        warnings.warn("Giving multiple files on the command line will be deprecated, please use [bumpversion:file:...] in a config file.", PendingDeprecationWarning)

    parser1 = argparse.ArgumentParser(add_help=False)

    parser1.add_argument(
        '--config-file', metavar='FILE',
        default=argparse.SUPPRESS, required=False,
        help='Config file to read most of the variables from (default: .bumpversion.cfg)')

    parser1.add_argument(
        '--verbose', action='count', default=0,
        help='Print verbose logging to stderr', required=False)

    parser1.add_argument(
        '--list', action='store_true', default=False,
        help='List machine readable information', required=False)

    parser1.add_argument(
        '--allow-dirty', action='store_true', default=False,
        help="Don't abort if working directory is dirty", required=False)

    known_args, remaining_argv = parser1.parse_known_args(args)

    logformatter = logging.Formatter('%(message)s')

    if len(logger.handlers) == 0:
        ch = logging.StreamHandler(sys.stderr)
        ch.setFormatter(logformatter)
        logger.addHandler(ch)

    if len(logger_list.handlers) == 0:
        ch2 = logging.StreamHandler(sys.stdout)
        ch2.setFormatter(logformatter)
        logger_list.addHandler(ch2)

    if known_args.list:
        logger_list.setLevel(1)

    log_level = {
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.DEBUG,
    }.get(known_args.verbose, logging.DEBUG)

    logger.setLevel(log_level)

    logger.debug("Starting {}".format(DESCRIPTION))

    defaults = {}
    vcs_info = {}

    for vcs in VCS:
        if vcs.is_usable():
            vcs_info.update(vcs.latest_tag_info())

    if 'current_version' in vcs_info:
        defaults['current_version'] = vcs_info['current_version']

    config = RawConfigParser('')

    # don't transform keys to lowercase (which would be the default)
    config.optionxform = lambda option: option

    config.add_section('bumpversion')

    explicit_config = hasattr(known_args, 'config_file')

    if explicit_config:
        config_file = known_args.config_file
    elif not os.path.exists('.bumpversion.cfg') and \
            os.path.exists('setup.cfg'):
        config_file = 'setup.cfg'
    else:
        config_file = '.bumpversion.cfg'

    config_file_exists = os.path.exists(config_file)

    part_configs = {}

    files = []

    if config_file_exists:

        logger.info("Reading config file {}:".format(config_file))
        logger.info(io.open(config_file, 'rt', encoding='utf-8').read())

        config.readfp(io.open(config_file, 'rt', encoding='utf-8'))

        log_config = StringIO()
        config.write(log_config)

        if 'files' in dict(config.items("bumpversion")):
            warnings.warn(
                "'files =' configuration is will be deprecated, please use [bumpversion:file:...]",
                PendingDeprecationWarning
            )

        defaults.update(dict(config.items("bumpversion")))

        for listvaluename in ("serialize",):
            try:
                value = config.get("bumpversion", listvaluename)
                defaults[listvaluename] = list(filter(None, (x.strip() for x in value.splitlines())))
            except NoOptionError:
                pass  # no default value then ;)

        for boolvaluename in ("commit", "tag", "dry_run"):
            try:
                defaults[boolvaluename] = config.getboolean(
                    "bumpversion", boolvaluename)
            except NoOptionError:
                pass  # no default value then ;)

        for section_name in config.sections():

            section_name_match = re.compile("^bumpversion:(file|part):(.+)").match(section_name)

            if not section_name_match:
                continue

            section_prefix, section_value = section_name_match.groups()

            section_config = dict(config.items(section_name))

            if section_prefix == "part":

                ThisVersionPartConfiguration = NumericVersionPartConfiguration

                if 'values' in section_config:
                    section_config['values'] = list(filter(None, (x.strip() for x in section_config['values'].splitlines())))
                    ThisVersionPartConfiguration = ConfiguredVersionPartConfiguration

                part_configs[section_value] = ThisVersionPartConfiguration(**section_config)

            elif section_prefix == "file":

                filename = section_value

                if 'serialize' in section_config:
                    section_config['serialize'] = list(filter(None, (x.strip() for x in section_config['serialize'].splitlines())))

                section_config['part_configs'] = part_configs

                if not 'parse' in section_config:
                    section_config['parse'] = defaults.get("parse", r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')

                if not 'serialize' in section_config:
                    section_config['serialize'] = defaults.get('serialize', [str('{major}.{minor}.{patch}')])

                if not 'search' in section_config:
                    section_config['search'] = defaults.get("search", '{current_version}')

                if not 'replace' in section_config:
                    section_config['replace'] = defaults.get("replace", '{new_version}')

                files.append(ConfiguredFile(filename, VersionConfig(**section_config)))

    else:
        message = "Could not read config file at {}".format(config_file)
        if explicit_config:
            raise argparse.ArgumentTypeError(message)
        else:
            logger.info(message)

    parser2 = argparse.ArgumentParser(prog='bumpversion', add_help=False, parents=[parser1])
    parser2.set_defaults(**defaults)

    parser2.add_argument('--current-version', metavar='VERSION',
                         help='Version that needs to be updated', required=False)
    parser2.add_argument('--parse', metavar='REGEX',
                         help='Regex parsing the version string',
                         default=defaults.get("parse", '(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'))
    parser2.add_argument('--serialize', metavar='FORMAT',
                         action=DiscardDefaultIfSpecifiedAppendAction,
                         help='How to format what is parsed back to a version',
                         default=defaults.get("serialize", [str('{major}.{minor}.{patch}')]))
    parser2.add_argument('--search', metavar='SEARCH',
                         help='Template for complete string to search',
                         default=defaults.get("search", '{current_version}'))
    parser2.add_argument('--replace', metavar='REPLACE',
                         help='Template for complete string to replace',
                         default=defaults.get("replace", '{new_version}'))

    known_args, remaining_argv = parser2.parse_known_args(args)

    defaults.update(vars(known_args))

    assert type(known_args.serialize) == list

    context = dict(list(time_context.items()) + list(prefixed_environ().items()) + list(vcs_info.items()))

    try:
        vc = VersionConfig(
            parse=known_args.parse,
            serialize=known_args.serialize,
            search=known_args.search,
            replace=known_args.replace,
            part_configs=part_configs,
        )
    except sre_constants.error as e:
        sys.exit(1)

    current_version = vc.parse(known_args.current_version) if known_args.current_version else None

    new_version = None

    if not 'new_version' in defaults and known_args.current_version:
        try:
            if current_version and len(positionals) > 0:
                logger.info("Attempting to increment part '{}'".format(positionals[0]))
                new_version = current_version.bump(positionals[0], vc.order())
                logger.info("Values are now: " + keyvaluestring(new_version._values))
                defaults['new_version'] = vc.serialize(new_version, context)
        except MissingValueForSerializationException as e:
            logger.info("Opportunistic finding of new_version failed: " + e.message)
        except IncompleteVersionRepresenationException as e:
            logger.info("Opportunistic finding of new_version failed: " + e.message)
        except KeyError as e:
            logger.info("Opportunistic finding of new_version failed")

    parser3 = argparse.ArgumentParser(
        prog='bumpversion',
        description=DESCRIPTION,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve',
        parents=[parser2],
    )

    parser3.set_defaults(**defaults)

    parser3.add_argument('--current-version', metavar='VERSION',
                         help='Version that needs to be updated',
                         required=not 'current_version' in defaults)
    parser3.add_argument('--dry-run', '-n', action='store_true',
                         default=False, help="Don't write any files, just pretend.")
    parser3.add_argument('--new-version', metavar='VERSION',
                         help='New version that should be in the files',
                         required=not 'new_version' in defaults)

    commitgroup = parser3.add_mutually_exclusive_group()

    commitgroup.add_argument('--commit', action='store_true', dest="commit",
                             help='Commit to version control', default=defaults.get("commit", False))
    commitgroup.add_argument('--no-commit', action='store_false', dest="commit",
                             help='Do not commit to version control', default=argparse.SUPPRESS)

    taggroup = parser3.add_mutually_exclusive_group()

    taggroup.add_argument('--tag', action='store_true', dest="tag", default=defaults.get("tag", False),
                          help='Create a tag in version control')
    taggroup.add_argument('--no-tag', action='store_false', dest="tag",
                          help='Do not create a tag in version control', default=argparse.SUPPRESS)

    parser3.add_argument('--tag-name', metavar='TAG_NAME',
                         help='Tag name (only works with --tag)',
                         default=defaults.get('tag_name', 'v{new_version}'))

    parser3.add_argument('--message', '-m', metavar='COMMIT_MSG',
                         help='Commit message',
                         default=defaults.get('message', 'Bump version: {current_version} → {new_version}'))


    file_names = []
    if 'files' in defaults:
        assert defaults['files'] is not None
        file_names = defaults['files'].split(' ')

    parser3.add_argument('part',
                         help='Part of the version to be bumped.')
    parser3.add_argument('files', metavar='file',
                         nargs='*',
                         help='Files to change', default=file_names)

    args = parser3.parse_args(remaining_argv + positionals)

    if args.dry_run:
        logger.info("Dry run active, won't touch any files.")
    
    if args.new_version:
        new_version = vc.parse(args.new_version)

    logger.info("New version will be '{}'".format(args.new_version))

    file_names = file_names or positionals[1:]

    for file_name in file_names:
        files.append(ConfiguredFile(file_name, vc))

    for vcs in VCS:
        if vcs.is_usable():
            try:
                vcs.assert_nondirty()
            except WorkingDirectoryIsDirtyException as e:
                if not defaults['allow_dirty']:
                    logger.warn(
                        "{}\n\nUse --allow-dirty to override this if you know what you're doing.".format(e.message))
                    raise
            break
        else:
            vcs = None

    # make sure files exist and contain version string

    logger.info("Asserting files {} contain the version string:".format(", ".join([str(f) for f in files])))

    for f in files:
        f.should_contain_version(current_version, context)

    # change version string in files
    for f in files:
        f.replace(current_version, new_version, context, args.dry_run)

    commit_files = [f.path for f in files]

    config.set('bumpversion', 'new_version', args.new_version)

    for key, value in config.items('bumpversion'):
        logger_list.info("{}={}".format(key, value))

    config.remove_option('bumpversion', 'new_version')

    config.set('bumpversion', 'current_version', args.new_version)

    new_config = StringIO()

    try:
        write_to_config_file = (not args.dry_run) and config_file_exists

        logger.info("{} to config file {}:".format(
            "Would write" if not write_to_config_file else "Writing",
            config_file,
        ))

        config.write(new_config)
        logger.info(new_config.getvalue())

        if write_to_config_file:
            with io.open(config_file, 'wb') as f:
                f.write(new_config.getvalue().encode('utf-8'))

    except UnicodeEncodeError:
        warnings.warn(
            "Unable to write UTF-8 to config file, because of an old configparser version. "
            "Update with `pip install --upgrade configparser`."
        )

    if config_file_exists:
        commit_files.append(config_file)

    if not vcs:
        return

    assert vcs.is_usable(), "Found '{}' unusable, unable to commit.".format(vcs.__name__)

    do_commit = (not args.dry_run) and args.commit
    do_tag = (not args.dry_run) and args.tag

    logger.info("{} {} commit".format(
        "Would prepare" if not do_commit else "Preparing",
        vcs.__name__,
    ))

    for path in commit_files:
        logger.info("{} changes in file '{}' to {}".format(
            "Would add" if not do_commit else "Adding",
            path,
            vcs.__name__,
        ))

        if do_commit:
            vcs.add_path(path)

    vcs_context = {
        "current_version": args.current_version,
        "new_version": args.new_version,
    }
    vcs_context.update(time_context)
    vcs_context.update(prefixed_environ())

    commit_message = args.message.format(**vcs_context)

    logger.info("{} to {} with message '{}'".format(
        "Would commit" if not do_commit else "Committing",
        vcs.__name__,
        commit_message,
    ))

    if do_commit:
        vcs.commit(message=commit_message)

    tag_name = args.tag_name.format(**vcs_context)
    logger.info("{} '{}' in {}".format(
        "Would tag" if not do_tag else "Tagging",
        tag_name,
        vcs.__name__
    ))

    if do_tag:
        vcs.tag(tag_name)

Example 8

Project: paramnormal
Source File: plot_directive.py
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.
    """

    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    if six.PY2:
        pwd = os.getcwdu()
    else:
        pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in '
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]

    # Redirect stdout
    stdout = sys.stdout
    if six.PY3:
        sys.stdout = io.StringIO()
    else:
        sys.stdout = cStringIO.StringIO()

    # Assign a do-nothing print function to the namespace.  There
    # doesn't seem to be any other way to provide a way to (not) print
    # that works correctly across Python 2 and 3.
    def _dummy_print(*arg, **kwarg):
        pass

    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                if setup.config.plot_pre_code is None:
                    six.exec_(six.text_type("import numpy as np\n" +
                    "from matplotlib import pyplot as plt\n"), ns)
                else:
                    six.exec_(six.text_type(setup.config.plot_pre_code), ns)
            ns['print'] = _dummy_print
            if "__main__" in code:
                six.exec_("__name__ = '__main__'", ns)
            code = remove_coding(code)
            six.exec_(code, ns)
            if function_name is not None:
                six.exec_(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            raise PlotError(traceback.format_exc())
    finally:
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
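
Rebinding sys.stdout to an io.StringIO, as this function does, is the classic way to capture or silence print output, with the finally block restoring the original stream. On Python 3.4+ the same effect is available more concisely through contextlib.redirect_stdout:

import contextlib
import io

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    print('captured')    # written into buf instead of the terminal
assert buf.getvalue() == 'captured\n'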

Example 9

Project: diazo
Source File: test_diazo.py
    def testAll(self):
        self.errors = BytesIO()
        config = configparser.ConfigParser()
        config.read([defaultsfn, os.path.join(self.testdir, "options.cfg")])

        themefn = None
        if config.get('diazotest', 'theme'):
            themefn = os.path.join(self.testdir, config.get('diazotest',
                                                            'theme'))
        contentfn = os.path.join(self.testdir, "content.html")
        rulesfn = os.path.join(self.testdir, "rules.xml")
        xpathsfn = os.path.join(self.testdir, "xpaths.txt")
        xslfn = os.path.join(self.testdir, "compiled.xsl")
        outputfn = os.path.join(self.testdir, "output.html")

        xsl_params = {}
        extra_params = config.get('diazotest', 'extra-params')
        if extra_params:
            for token in extra_params.split(' '):
                token_split = token.split(':')
                xsl_params[token_split[0]] = len(token_split) > 1 and \
                    token_split[1] or None

        if not os.path.exists(rulesfn):
            return

        contentdoc = etree.parse(source=contentfn, base_url=contentfn,
                                 parser=etree.HTMLParser())

        # Make a compiled version
        theme_parser = etree.HTMLParser()
        ct = diazo.compiler.compile_theme(
            rules=rulesfn,
            theme=themefn,
            parser=theme_parser,
            absolute_prefix=config.get('diazotest', 'absolute-prefix'),
            indent=config.getboolean('diazotest', 'pretty-print'),
            xsl_params=xsl_params,
        )

        # Serialize / parse the theme - this can catch problems with escaping.
        cts = etree.tostring(ct, encoding='unicode')
        parser = etree.XMLParser()
        etree.fromstring(cts, parser=parser)

        # Compare to previous version
        if os.path.exists(xslfn):
            with open(xslfn) as f:
                old = f.read()
            new = cts
            if old != new:
                if self.writefiles:
                    with open(xslfn + '.old', 'w') as f:
                        f.write(old)
                if self.warnings:
                    print("WARNING:", "compiled.xsl has CHANGED")
                    for line in difflib.unified_diff(old.split(u'\n'),
                                                     new.split(u'\n'),
                                                     xslfn, 'now'):
                        print(line)

        # Write the compiled xsl out to catch unexpected changes
        if self.writefiles:
            with open(xslfn, 'w') as f:
                f.write(cts)

        # Apply the compiled version, then test against desired output
        theme_parser.resolvers.add(diazo.run.RunResolver(self.testdir))
        processor = etree.XSLT(ct)
        params = {}
        params['path'] = "'%s'" % config.get('diazotest', 'path')

        for key in xsl_params:
            try:
                params[key] = quote_param(config.get('diazotest', key))
            except configparser.NoOptionError:
                pass

        result = processor(contentdoc, **params)

        # Read the whole thing to strip off xhtml namespace.
        # If we had xslt 2.0 then we could use xpath-default-namespace.
        self.themed_string = str(result)
        self.themed_content = etree.ElementTree(
            file=StringIO(self.themed_string), parser=etree.HTMLParser())

        # remove the extra meta content type

        metas = self.themed_content.xpath(
            "/html/head/meta[@http-equiv='Content-Type']")
        if metas:
            meta = metas[0]
            meta.getparent().remove(meta)

        if os.path.exists(xpathsfn):
            with open(xpathsfn) as f:
                for xpath in f.readlines():
                    # Read the XPaths from the file, skipping blank lines and
                    # comments
                    this_xpath = xpath.strip()
                    if not this_xpath or this_xpath[0] == '#':
                        continue
                    assert self.themed_content.xpath(this_xpath), "%s: %s" % (
                        xpathsfn, this_xpath)

        # Compare to previous version
        if os.path.exists(outputfn):
            with open(outputfn) as f:
                old = f.read()
            new = self.themed_string
            if not xml_compare(
                    etree.fromstring(old.strip()),
                    etree.fromstring(new.strip())):
                # if self.writefiles:
                #    open(outputfn + '.old', 'w').write(old)
                for line in difflib.unified_diff(old.split(u'\n'),
                                                 new.split(u'\n'),
                                                 outputfn, 'now'):
                    print(line)
                assert old == new, "output.html has CHANGED"

        # Write out the result to catch unexpected changes
        if self.writefiles:
            with open(outputfn, 'w') as f:
                f.write(self.themed_string)

Example 10

Project: pretix
Source File: orderlist.py
    def render(self, form_data: dict):
        output = io.StringIO()
        tz = pytz.timezone(self.event.settings.timezone)
        writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=",")

        qs = self.event.orders.all().select_related('invoice_address')
        if form_data['paid_only']:
            qs = qs.filter(status=Order.STATUS_PAID)
        tax_rates = self._get_all_tax_rates(qs)

        headers = [
            _('Order code'), _('Order total'), _('Status'), _('Email'), _('Order date'),
            _('Company'), _('Name'), _('Address'), _('ZIP code'), _('City'), _('Country'), _('VAT ID'),
            _('Payment date'), _('Payment type'), _('Payment method fee')
        ]

        for tr in tax_rates:
            headers += [
                _('Gross at {rate} % tax').format(rate=tr),
                _('Net at {rate} % tax').format(rate=tr),
                _('Tax value at {rate} % tax').format(rate=tr),
            ]

        writer.writerow(headers)

        provider_names = {}
        responses = register_payment_providers.send(self.event)
        for rec, response in responses:
            provider = response(self.event)
            provider_names[provider.identifier] = provider.verbose_name

        sum_cache = {
            (o['order__id'], o['tax_rate']): o for o in
            OrderPosition.objects.values('tax_rate', 'order__id').order_by().annotate(
                taxsum=Sum('tax_value'), grosssum=Sum('price')
            )
        }

        for order in qs.order_by('datetime'):
            row = [
                order.code,
                str(order.total),
                order.get_status_display(),
                order.email,
                order.datetime.astimezone(tz).strftime('%Y-%m-%d'),
            ]
            try:
                row += [
                    order.invoice_address.company,
                    order.invoice_address.name,
                    order.invoice_address.street,
                    order.invoice_address.zipcode,
                    order.invoice_address.city,
                    order.invoice_address.country,
                    order.invoice_address.vat_id,
                ]
            except InvoiceAddress.DoesNotExist:
                row += ['', '', '', '', '', '', '']

            row += [
                order.payment_date.astimezone(tz).strftime('%Y-%m-%d') if order.payment_date else '',
                provider_names.get(order.payment_provider, order.payment_provider),
                str(order.payment_fee)
            ]

            for tr in tax_rates:
                taxrate_values = sum_cache.get((order.id, tr), {'grosssum': Decimal('0.00'), 'taxsum': Decimal('0.00')})
                if tr == order.payment_fee_tax_rate and order.payment_fee_tax_value:
                    taxrate_values['grosssum'] += order.payment_fee
                    taxrate_values['taxsum'] += order.payment_fee_tax_value

                row += [
                    str(taxrate_values['grosssum']),
                    str(taxrate_values['grosssum'] - taxrate_values['taxsum']),
                    str(taxrate_values['taxsum']),
                ]

            writer.writerow(row)

        return 'orders.csv', 'text/csv', output.getvalue().encode("utf-8")
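
csv.writer requires a text-mode stream, so writing into an io.StringIO and encoding the finished result, as render() does on its last line, is the standard way to build CSV bytes entirely in memory. A condensed sketch:

import csv
import io

output = io.StringIO()
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=',')
writer.writerow(['Order code', 'Order total'])
writer.writerow(['ABC123', '19.00'])

data = output.getvalue().encode('utf-8')    # e.g. the body of an HTTP response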

Example 11

Project: sequencer
Source File: model.py
    def _get_deps(self, component, rule):
        """
        Find dependencies of a given component. This implies calling
        the rule.depsfinder script. Substitution of variables is done.
        Returns None if the given rule has already been applied on
        the given component.
        """
        result = dict()
        depsfinder = rule.depsfinder
        if rule.dependson is None or len(rule.dependson) == 0 or \
                depsfinder is None or len(depsfinder) == 0:
            _LOGGER.debug("No 'DepsFinder' or 'DependsOn' specified" + \
                              " in rule %s for component %s. Skipping.",
                          rule, component)
            return result
        var_map = _get_var_map(component.id,
                               component.name,
                               component.type,
                               component.category,
                               self.ruleset.name,
                               rule.name,
                               rule.help)
        cmd = substitute(var_map, depsfinder)
        _LOGGER.debug("Calling depsfinder for component %s: %s", component, cmd)
        popen_args = shlex.split(to_str_from_unicode(cmd, should_be_uni=True))
        try:
            popen = subprocess.Popen(popen_args,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     bufsize=-1) # Use system default
        except OSError as ose:
            _LOGGER.error("Can't call depsfinder '%s': %s", cmd, ose)
            return result

        (msg_std, msg_err) = popen.communicate()
        msg_std = msg_std.strip()
        msg_err = msg_err.strip()
        if len(msg_err) != 0:
            _LOGGER.warning("Depsfinder error when " + \
                                "applying rule %s to component %s: %s",
                            rule, component, msg_err)
        deps = set()
        with StringIO(to_unicode(msg_std)) as reader:
            for dep in reader:
                dep_id = dep.strip()
                if len(dep_id) == 0:
                    continue
                dependency = self.components_map.get(dep_id)
                if dependency is None:
                    _LOGGER.debug("Creating dep for component %s with id: %r",
                                  component, dep_id)
                    dependency = Component(dep_id)
                    self.components_map[dep_id] = dependency

                deps.add(dependency)
                _update_graph_with_node(self.dag, dep_id)

        if _LOGGER.isEnabledFor(INFO):
            _LOGGER.info("%s.depsfinder(%s): %s",
                         rule.name, component.id,
                         NodeSet.fromlist([str(x.id) for x in deps]))
        # Find match only on rule.dependson
        return _find_match([self.ruleset.rules_for[x] for x in rule.dependson],
                           deps)
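
Two lesser-known StringIO behaviors appear in this example: it supports the with statement (closing releases the buffer), and iterating over it yields lines exactly like a real file, which is what the `for dep in reader` loop relies on:

import io

with io.StringIO('node1\n\nnode2\n') as reader:
    deps = [line.strip() for line in reader if line.strip()]
assert deps == ['node1', 'node2']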

Example 12

Project: pvlib-python
Source File: tmy.py
def readtmy3(filename=None, coerce_year=None, recolumn=True):
    '''
    Read a TMY3 file into a pandas dataframe.

    Note that values contained in the metadata dictionary are unchanged
    from the TMY3 file (i.e. units are retained). In the case of any
    discrepencies between this documentation and the TMY3 User's Manual
    [1], the TMY3 User's Manual takes precedence.

    The TMY3 files were updated in Jan. 2015. This function requires the
    use of the updated files.

    Parameters
    ----------
    filename : None or string
        If None, attempts to use a Tkinter file browser. A string can be
        a relative file path, absolute file path, or url.

    coerce_year : None or int
        If supplied, the year of the data will be set to this value.

    recolumn : bool
        If True, apply standard names to TMY3 columns. Typically this
        results in stripping the units from the column name.

    Returns
    -------
    Tuple of the form (data, metadata).

    data : DataFrame
        A pandas dataframe with the columns described in the table
        below. For more detailed descriptions of each component, please
        consult the TMY3 User's Manual ([1]), especially tables 1-1
        through 1-6.

    metadata : dict
        The site metadata available in the file.

    Notes
    -----

    The returned structures have the following fields.

    ===============   ======  ===================
    key               format  description
    ===============   ======  ===================
    altitude          Float   site elevation
    latitude          Float   site latitude
    longitude         Float   site longitude
    Name              String  site name
    State             String  state
    TZ                Float   UTC offset
    USAF              Int     USAF identifier
    ===============   ======  ===================

    =============================       ======================================================================================================================================================
    TMYData field                       description
    =============================       ======================================================================================================================================================
    TMYData.Index                       A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
    TMYData.ETR                         Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
    TMYData.ETRN                        Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
    TMYData.GHI                         Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
    TMYData.GHISource                   See [1], Table 1-4
    TMYData.GHIUncertainty              Uncertainty based on random and bias error estimates                        see [2]
    TMYData.DNI                         Amount of direct normal radiation (modeled) recv'd during 60 minutes prior to timestamp, Wh/m^2
    TMYData.DNISource                   See [1], Table 1-4
    TMYData.DNIUncertainty              Uncertainty based on random and bias error estimates                        see [2]
    TMYData.DHI                         Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
    TMYData.DHISource                   See [1], Table 1-4
    TMYData.DHIUncertainty              Uncertainty based on random and bias error estimates                        see [2]
    TMYData.GHillum                     Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
    TMYData.GHillumSource               See [1], Table 1-4
    TMYData.GHillumUncertainty          Uncertainty based on random and bias error estimates                        see [2]
    TMYData.DNillum                     Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
    TMYData.DNillumSource               See [1], Table 1-4
    TMYData.DNillumUncertainty          Uncertainty based on random and bias error estimates                        see [2]
    TMYData.DHillum                     Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
    TMYData.DHillumSource               See [1], Table 1-4
    TMYData.DHillumUncertainty          Uncertainty based on random and bias error estimates                        see [2]
    TMYData.Zenithlum                   Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
    TMYData.ZenithlumSource             See [1], Table 1-4
    TMYData.ZenithlumUncertainty        Uncertainty based on random and bias error estimates                        see [1] section 2.10
    TMYData.TotCld                      Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky
    TMYData.TotCldSource                See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.TotCldUncertainty           See [1], Table 1-6
    TMYData.OpqCld                      Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky
    TMYData.OpqCldSource                See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.OpqCldUncertainty           See [1], Table 1-6
    TMYData.DryBulb                     Dry bulb temperature at the time indicated, deg C
    TMYData.DryBulbSource               See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.DryBulbUncertainty          See [1], Table 1-6
    TMYData.DewPoint                    Dew-point temperature at the time indicated, deg C
    TMYData.DewPointSource              See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.DewPointUncertainty         See [1], Table 1-6
    TMYData.RHum                        Relative humidity at the time indicated, percent
    TMYData.RHumSource                  See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.RHumUncertainty             See [1], Table 1-6
    TMYData.Pressure                    Station pressure at the time indicated, 1 mbar
    TMYData.PressureSource              See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.PressureUncertainty         See [1], Table 1-6
    TMYData.Wdir                        Wind direction at time indicated, degrees from north (360 = north; 0 = undefined, calm)
    TMYData.WdirSource                  See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.WdirUncertainty             See [1], Table 1-6
    TMYData.Wspd                        Wind speed at the time indicated, meter/second
    TMYData.WspdSource                  See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.WspdUncertainty             See [1], Table 1-6
    TMYData.Hvis                        Distance to discernible remote objects at time indicated (7777=unlimited), meter
    TMYData.HvisSource                  See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.HvisUncertainty             See [1], Table 1-6
    TMYData.CeilHgt                     Height of cloud base above local terrain (7777=unlimited), meter
    TMYData.CeilHgtSource               See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.CeilHgtUncertainty          See [1], Table 1-6
    TMYData.Pwat                        Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
    TMYData.PwatSource                  See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.PwatUncertainty             See [1], Table 1-6
    TMYData.AOD                         The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
    TMYData.AODSource                   See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.AODUncertainty              See [1], Table 1-6
    TMYData.Alb                         The ratio of reflected solar irradiance to global horizontal irradiance, unitless
    TMYData.AlbSource                   See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.AlbUncertainty              See [1], Table 1-6
    TMYData.Lprecipdepth                The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
    TMYData.Lprecipquantity             The period of accumulation for the liquid precipitation depth field, hour
    TMYData.LprecipSource               See [1], Table 1-5, 8760x1 cell array of strings
    TMYData.LprecipUncertainty          See [1], Table 1-6
    TMYData.PresWth                     Present weather code, see [2].
    TMYData.PresWthSource               Present weather code source, see [2].
    TMYData.PresWthUncertainty          Present weather code uncertainty, see [2].
    =============================       ======================================================================================================================================================

    References
    ----------

    [1] Wilcox, S. and Marion, W. "Users Manual for TMY3 Data Sets".
    NREL/TP-581-43156, Revised May 2008.

    [2] Wilcox, S. (2007). National Solar Radiation Database 1991-2005
    Update: User's Manual. 472 pp.; NREL Report No. TP-581-41364.
    '''

    if filename is None:
        try:
            filename = _interactive_load()
        except Exception:
            raise Exception('Interactive load failed. Tkinter not supported ' +
                            'on this system. Try installing X-Quartz and ' +
                            'reloading')

    head = ['USAF', 'Name', 'State', 'TZ', 'latitude', 'longitude', 'altitude']

    try:
        csvdata = open(filename, 'r')
    except IOError:
        response = urlopen(filename)
        csvdata = io.StringIO(response.read().decode(errors='ignore'))

    # read in file metadata
    meta = dict(zip(head, csvdata.readline().rstrip('\n').split(",")))

    # convert metadata strings to numeric types
    meta['altitude'] = float(meta['altitude'])
    meta['latitude'] = float(meta['latitude'])
    meta['longitude'] = float(meta['longitude'])
    meta['TZ'] = float(meta['TZ'])
    meta['USAF'] = int(meta['USAF'])

    data = pd.read_csv(
        filename, header=1,
        parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']},
        date_parser=lambda *x: _parsedate(*x, year=coerce_year),
        index_col='datetime')

    if recolumn:
        _recolumn(data)  # rename to standard column names

    data = data.tz_localize(int(meta['TZ']*3600))

    return data, meta
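
A minimal sketch of the io.StringIO pattern used in the example above: wrapping the decoded bytes of an HTTP response in io.StringIO yields a text-mode, file-like object, so the same readline/pandas parsing code works for local files and URLs alike. The URL below is hypothetical.

import io
from urllib.request import urlopen

response = urlopen('http://example.com/tmy3.csv')  # hypothetical URL
csvdata = io.StringIO(response.read().decode(errors='ignore'))
# Behaves like an open text file from here on:
first_line = csvdata.readline().rstrip('\n').split(',')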

Example 14

Project: doit
Source File: action.py
View license
    def execute(self, out=None, err=None):
        """
        Execute command action

        both stdout and stderr from the command are captured and saved
        on self.out/err. Real time output is controlled by parameters
        @param out: None - no real time output
                    a file like object (has write method)
        @param err: idem
        @return failure:
            - None: if successful
            - TaskError: If subprocess return code is greater than 125
            - TaskFailed: If subprocess return code isn't zero (and
        not greater than 125)
        """
        try:
            action = self.expand_action()
        except Exception as exc:
            return TaskError(
                "CmdAction Error creating command string", exc)

        # set environ to change output buffering
        env = None
        if self.buffering:
            env = os.environ.copy()
            env['PYTHONUNBUFFERED'] = '1'

        # spawn task process
        process = subprocess.Popen(
            action,
            shell=self.shell,
            #bufsize=2, # ??? no effect use PYTHONUNBUFFERED instead
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            env=env,
            **self.pkwargs)

        output = StringIO()
        errput = StringIO()
        t_out = Thread(target=self._print_process_output,
                       args=(process, process.stdout, output, out))
        t_err = Thread(target=self._print_process_output,
                       args=(process, process.stderr, errput, err))
        t_out.start()
        t_err.start()
        t_out.join()
        t_err.join()

        self.out = output.getvalue()
        self.err = errput.getvalue()
        self.result = self.out + self.err

        # make sure process really terminated
        process.wait()

        # task error - based on:
        # http://www.gnu.org/software/bash/manual/bashref.html#Exit-Status
        # it doesn't make much difference whether we return Error or Failed anyway
        if process.returncode > 125:
            return TaskError("Command error: '%s' returned %s" %
                             (action, process.returncode))

        # task failure
        if process.returncode != 0:
            return TaskFailed("Command failed: '%s' returned %s" %
                              (action, process.returncode))

        # save stdout in values
        if self.save_out:
            self.values[self.save_out] = self.out
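
A minimal sketch of the capture pattern above, assuming a POSIX 'echo' command: a reader thread drains the subprocess pipe into an io.StringIO so the main thread can collect the full output afterwards with getvalue().

import io
import subprocess
from threading import Thread

def drain(pipe, sink):
    for line in pipe:      # read until the pipe is closed
        sink.write(line)

proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE, text=True)
captured = io.StringIO()
t = Thread(target=drain, args=(proc.stdout, captured))
t.start()
t.join()
proc.wait()
assert captured.getvalue() == 'hello\n'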

Example 15

Project: pysb
Source File: mathematica.py
View license
    def export(self):
        """Generate the corresponding Mathematica ODEs for the PySB model
        associated with the exporter.

        Returns
        -------
        string
            String containing the Mathematica code for the model's ODEs.
        """

        output = StringIO()
        pysb.bng.generate_equations(self.model)

        # Add docstring if there is one
        if self.docstring:
            output.write('(*\n' + self.docstring + '\n')
        else:
            output.write("(*\n")

        # Header comment
        output.write("Mathematica model definition file for ")
        output.write("model " + self.model.name + ".\n")
        output.write("Generated by " \
                     "pysb.export.mathematica.MathematicaExporter.\n")
        output.write("\n")
        output.write("Run with (for example):\n")
        output.write("tmax = 10\n")
        output.write("soln = NDSolve[Join[odes, initconds], slist, " \
                     "{t, 0, tmax}]\n")
        output.write("Plot[s0[t] /. soln, {t, 0, tmax}, PlotRange -> All]\n")
        output.write("*)\n\n")

        # PARAMETERS
        # Note that in Mathematica, underscores are not allowed in variable
        # names, so we simply strip them out here
        params_str = ''
        for i, p in enumerate(self.model.parameters):
            # Remove underscores
            pname = p.name.replace('_', '')

            # Convert parameter values to scientific notation
            # If the parameter is 0, don't take the log!
            if p.value == 0:
                params_str += '%s = %g;\n' %  (pname, p.value)
            # Otherwise, take the log (base 10) and format accordingly
            else:
                val_str = '%.17g' % p.value
                if 'e' in val_str:
                    (mantissa, exponent) = val_str.split('e')
                    params_str += '%s = %s * 10^%s;\n' % \
                            (pname, mantissa, exponent)
                else:
                    params_str += '%s = %s;\n' %  (pname, val_str)

        ## ODEs ###
        odes_str = 'odes = {\n'
        # Concatenate the equations
        odes_str += ',\n'.join(['s%d == %s' %
                                (i, sympy.ccode(self.model.odes[i]))
                                for i in range(len(self.model.odes))])
        # Replace, e.g., s0 with s[0]
        odes_str = re.sub(r's(\d+)', lambda m: 's%s[t]' % (int(m.group(1))),
                          odes_str)
        # Add the derivative symbol ' to the left hand sides
        odes_str = re.sub(r's(\d+)\[t\] ==', r"s\1'[t] ==", odes_str)
        # Correct the exponentiation syntax
        odes_str = re.sub(r'pow\(([^,]+), ([^)]+)\)', r'\1^\2', odes_str)
        odes_str += '\n}'
        #c_code = odes_str
        # Eliminate underscores from parameter names in equations
        for i, p in enumerate(self.model.parameters):
            odes_str = re.sub(r'\b(%s)\b' % p.name, p.name.replace('_', ''),
                              odes_str)

        ## INITIAL CONDITIONS
        ic_values = ['0'] * len(self.model.odes)
        for i, ic in enumerate(self.model.initial_conditions):
            ic_values[self.model.get_species_index(ic[0])] = \
                                        ic[1].name.replace('_', '')

        init_conds_str = 'initconds = {\n'
        init_conds_str += ',\n'.join(['s%s[0] == %s' % (i, val)
                                     for i, val in enumerate(ic_values)])
        init_conds_str += '\n}'

        ## SOLVE LIST
        solvelist_str = 'solvelist = {\n'
        solvelist_str += ',\n'.join(['s%s[t]' % (i)
                                    for i in range(0, len(self.model.odes))])
        solvelist_str += '\n}'

        ## OBSERVABLES
        observables_str = ''
        for obs in self.model.observables:
            # Remove underscores
            observables_str += obs.name.replace('_', '') + ' = '
            #groups = self.model.observable_groups[obs_name]
            observables_str += ' + '.join(['(s%s[t] * %d)' % (s, c)
                               for s, c in zip(obs.species, obs.coefficients)])
            observables_str += ' /. soln\n' 

        # Add comments identifying the species
        species_str = '\n'.join(['(* s%d[t] = %s *)' % (i, s) for i, s in
                            enumerate(self.model.species)])

        output.write('(* Parameters *)\n')
        output.write(params_str + "\n")
        output.write('(* List of Species *)\n')
        output.write(species_str + "\n\n")
        output.write('(* ODEs *)\n')
        output.write(odes_str + "\n\n")
        output.write('(* Initial Conditions *)\n')
        output.write(init_conds_str + "\n\n")
        output.write('(* List of Variables (e.g., as an argument to NDSolve) ' \
                     '*)\n')
        output.write(solvelist_str + '\n\n')
        output.write('(* Run the simulation -- example *)\n')
        output.write('tmax = 100\n')
        output.write('soln = NDSolve[Join[odes, initconds], ' \
                     'solvelist, {t, 0, tmax}]\n\n')
        output.write('(* Observables *)\n')
        output.write(observables_str + '\n')

        return output.getvalue()
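
A minimal sketch of the exporter pattern above: accumulate many small pieces with write() and materialize the result once with getvalue(), instead of repeatedly concatenating strings. The parameter names and values are illustrative.

import io

buf = io.StringIO()
buf.write('(* Parameters *)\n')
for name, value in [('k1', 0.5), ('k2', 1e-3)]:  # illustrative values
    buf.write('%s = %g;\n' % (name, value))
text = buf.getvalue()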

Example 16

Project: pysb
Source File: matlab.py
View license
    def export(self):
        """Generate a MATLAB class definition containing the ODEs for the PySB
        model associated with the exporter.

        Returns
        -------
        string
            String containing the MATLAB code for an implementation of the
            model's ODEs.
        """
        output = StringIO()
        pysb.bng.generate_equations(self.model)

        docstring = ''
        if self.docstring:
            docstring += self.docstring.replace('\n', '\n    % ')

        # Substitute underscores for any dots in the model name
        model_name = self.model.name.replace('.', '_')

        # -- Parameters and Initial conditions -------
        # Declare the list of parameters as a struct
        params_str = 'self.parameters = struct( ...\n'+' '*16
        params_str_list = []
        for i, p in enumerate(self.model.parameters):
            # Add parameter to struct along with nominal value
            cur_p_str = "'%s', %.17g" % (_fix_underscores(p.name), p.value)
            # Decide whether to continue or terminate the struct declaration:
            if i == len(self.model.parameters) - 1:
                cur_p_str += ');'    # terminate
            else:
                cur_p_str += ', ...' # continue

            params_str_list.append(cur_p_str)

        # Format and indent the params struct declaration
        params_str += ('\n'+' '*16).join(params_str_list)

        # Fill in an array of the initial conditions based on the named
        # parameter values
        initial_values_str = ('initial_values = zeros(1,%d);\n'+' '*12) % \
                             len(self.model.species)
        initial_values_str += ('\n'+' '*12).join(
                ['initial_values(%d) = self.parameters.%s; %% %s' %
                 (i+1, _fix_underscores(ic[1].name), ic[0])
                 for i, ic in enumerate(self.model.initial_conditions)])

        # -- Build observables declaration --
        observables_str = 'self.observables = struct( ...\n'+' '*16
        observables_str_list = []
        for i, obs in enumerate(self.model.observables):
            # Associate species and coefficient lists with observable names,
            # changing from zero- to one-based indexing
            cur_obs_str = "'%s', [%s; %s]" % \
                          (_fix_underscores(obs.name),
                           ' '.join([str(sp+1) for sp in obs.species]),
                           ' '.join([str(c) for c in obs.coefficients]))
            # Decide whether to continue or terminate the struct declaration:
            if i == len(self.model.observables) - 1:
                cur_obs_str += ');'    # terminate
            else:
                cur_obs_str += ', ...' # continue

            observables_str_list.append(cur_obs_str)
        # Format and indent the observables struct declaration
        observables_str += ('\n'+' '*16).join(observables_str_list)

        # -- Build ODEs -------
        # Build a stringified list of species
        species_list = ['%% %s;' % s for s in self.model.species]
        # Build the ODEs as strings from the model.odes array
        odes_list = ['y(%d,1) = %s;' % (i+1, sympy.ccode(self.model.odes[i])) 
                     for i in range(len(self.model.odes))] 
        # Zip the ODEs and species string lists and then flatten them
        # (results in the interleaving of the two lists)
        odes_species_list = [item for sublist in zip(species_list, odes_list)
                                  for item in sublist]
        # Flatten to a string and add correct indentation
        odes_str = ('\n'+' '*12).join(odes_species_list)

        # Change species names from, e.g., '__s(0)' to 'y0(1)' (note change
        # from zero-based indexing to 1-based indexing)
        odes_str = re.sub(r'__s(\d+)', \
                          lambda m: 'y0(%s)' % (int(m.group(1))+1), odes_str)
        # Change C code 'pow' function to MATLAB 'power' function
        odes_str = re.sub(r'pow\(', 'power(', odes_str)
        # Prepend 'p.' to named parameters and fix any underscores
        for i, p in enumerate(self.model.parameters):
            odes_str = re.sub(r'\b(%s)\b' % p.name,
                              'p.%s' % _fix_underscores(p.name), odes_str)

        # -- Build final output --
        output.write(pad(r"""
            classdef %(model_name)s
                %% %(docstring)s
                %% A class implementing the ordinary differential equations
                %% for the %(model_name)s model.
                %%
                %% Save as %(model_name)s.m.
                %%
                %% Generated by pysb.export.matlab.MatlabExporter.
                %%
                %% Properties
                %% ----------
                %% observables : struct
                %%     A struct containing the names of the observables from the
                %%     PySB model as field names. Each field in the struct
                %%     maps the observable name to a matrix with two rows:
                %%     the first row specifies the indices of the species
                %%     associated with the observable, and the second row
                %%     specifies the coefficients associated with the species.
                %%     For any given timecourse of model species resulting from
                %%     integration, the timecourse for an observable can be
                %%     retrieved using the get_observable method, described
                %%     below.
                %%
                %% parameters : struct
                %%     A struct containing the names of the parameters from the
                %%     PySB model as field names. The nominal values are set by
                %%     the constructor and their values can be overriden
                %%     explicitly once an instance has been created.
                %%
                %% Methods
                %% -------
                %% %(model_name)s.odes(tspan, y0)
                %%     The right-hand side function for the ODEs of the model,
                %%     for use with MATLAB ODE solvers (see Examples).
                %%
                %% %(model_name)s.get_initial_values()
                %%     Returns a vector of initial values for all species,
                %%     specified in the order that they occur in the original
                %%     PySB model (i.e., in the order found in model.species).
                %%     Non-zero initial conditions are specified using the
                %%     named parameters included as properties of the instance.
                %%     Hence initial conditions other than the defaults can be
                %%     used by assigning a value to the named parameter and then
                %%     calling this method. The vector returned by the method
                %%     is used for integration by passing it to the MATLAB
                %%     solver as the y0 argument.
                %%
                %% %(model_name)s.get_observables(y)
                %%     Given a matrix of timecourses for all model species
                %%     (i.e., resulting from an integration of the model),
                %%     get the trajectories corresponding to the observables.
                %%     Timecourses are returned as a struct which can be
                %%     indexed by observable name.
                %%
                %% Examples
                %% --------
                %% Example integration using default initial and parameter
                %% values:
                %%
                %% >> m = %(model_name)s();
                %% >> tspan = [0 100];
                %% >> [t y] = ode15s(@m.odes, tspan, m.get_initial_values());
                %%
                %% Retrieving the observables:
                %%
                %% >> y_obs = m.get_observables(y)
                %%
                properties
                    observables
                    parameters
                end

                methods
                    function self = %(model_name)s()
                        %% Assign default parameter values
                        %(params_str)s

                        %% Define species indices (first row) and coefficients
                        %% (second row) of named observables
                        %(observables_str)s
                    end

                    function initial_values = get_initial_values(self)
                        %% Return the vector of initial conditions for all
                        %% species based on the values of the parameters
                        %% as currently defined in the instance.

                        %(initial_values_str)s
                    end

                    function y = odes(self, tspan, y0)
                        %% Right hand side function for the ODEs

                        %% Shorthand for the struct of model parameters
                        p = self.parameters;

                        %(odes_str)s
                    end

                    function y_obs = get_observables(self, y)
                        %% Retrieve the trajectories for the model observables
                        %% from a matrix of the trajectories of all model
                        %% species.

                        %% Initialize the struct of observable timecourses
                        %% that we will return
                        y_obs = struct();

                        %% Iterate over the observables;
                        observable_names = fieldnames(self.observables);
                        for i = 1:numel(observable_names)
                            obs_matrix = self.observables.(observable_names{i});
                            species = obs_matrix(1, :);
                            coefficients = obs_matrix(2, :);
                            y_obs.(observable_names{i}) = ...
                                            y(:, species) * coefficients';
                        end
                    end
                end
            end
            """, 0) %
            {'docstring': docstring,
             'model_name': model_name,
             'params_str': params_str,
             'initial_values_str': initial_values_str,
             'observables_str': observables_str,
             'odes_str': odes_str})

        return output.getvalue()

Example 17

Project: pysb
Source File: python.py
View license
    def export(self):
        """Export Python code for simulation of a model without PySB.

        Returns
        -------
        string
            String containing the standalone Python code.
        """

        output = StringIO()
        pysb.bng.generate_equations(self.model)

        # Note: This has a lot of duplication from pysb.integrate.
        # Can that be helped?

        code_eqs = '\n'.join(['ydot[%d] = %s;' %
                                 (i, sympy.ccode(self.model.odes[i]))
                              for i in range(len(self.model.odes))])
        code_eqs = re.sub(r's(\d+)',
                          lambda m: 'y[%s]' % (int(m.group(1))), code_eqs)
        for i, p in enumerate(self.model.parameters):
            code_eqs = re.sub(r'\b(%s)\b' % p.name, 'p[%d]' % i, code_eqs)

        if self.docstring:
            output.write('"""')
            output.write(self.docstring)
            output.write('"""\n\n')
        output.write("# exported from PySB model '%s'\n" % self.model.name)
        output.write(pad(r"""
            import numpy
            import scipy.weave, scipy.integrate
            import collections
            import itertools
            import distutils.errors
            """))
        output.write(pad(r"""
            _use_inline = False
            # try to inline a C statement to see if inline is functional
            try:
                scipy.weave.inline('int i;', force=1)
                _use_inline = True
            except distutils.errors.CompileError:
                pass

            Parameter = collections.namedtuple('Parameter', 'name value')
            Observable = collections.namedtuple('Observable', 'name species coefficients')
            Initial = collections.namedtuple('Initial', 'param_index species_index')
            """))
        output.write("\n")

        output.write("class Model(object):\n")
        init_data = {
            'num_species': len(self.model.species),
            'num_params': len(self.model.parameters),
            'num_observables': len(self.model.observables),
            'num_ics': len(self.model.initial_conditions),
            }
        output.write(pad(r"""
            def __init__(self):
                self.y = None
                self.yobs = None
                self.integrator = scipy.integrate.ode(self.ode_rhs)
                self.integrator.set_integrator('vode', method='bdf',
                                               with_jacobian=True)
                self.y0 = numpy.empty(%(num_species)d)
                self.ydot = numpy.empty(%(num_species)d)
                self.sim_param_values = numpy.empty(%(num_params)d)
                self.parameters = [None] * %(num_params)d
                self.observables = [None] * %(num_observables)d
                self.initial_conditions = [None] * %(num_ics)d
            """, 4) % init_data)
        for i, p in enumerate(self.model.parameters):
            p_data = (i, repr(p.name), p.value)
            output.write(" " * 8)
            output.write("self.parameters[%d] = Parameter(%s, %.17g)\n" % p_data)
        output.write("\n")
        for i, obs in enumerate(self.model.observables):
            obs_data = (i, repr(obs.name), repr(obs.species),
                        repr(obs.coefficients))
            output.write(" " * 8)
            output.write("self.observables[%d] = Observable(%s, %s, %s)\n" %
                         obs_data)
        output.write("\n")
        for i, (cp, param) in enumerate(self.model.initial_conditions):
            ic_data = (i, self.model.parameters.index(param),
                       self.model.get_species_index(cp))
            output.write(" " * 8)
            output.write("self.initial_conditions[%d] = Initial(%d, %d)\n" %
                         ic_data)
        output.write("\n")

        output.write("    if _use_inline:\n")
        output.write(pad(r"""
            def ode_rhs(self, t, y, p):
                ydot = self.ydot
                scipy.weave.inline(r'''%s''', ['ydot', 't', 'y', 'p'])
                return ydot
            """, 8) % (pad('\n' + code_eqs, 16) + ' ' * 16))
        output.write("    else:\n")
        output.write(pad(r"""
            def ode_rhs(self, t, y, p):
                ydot = self.ydot
                %s
                return ydot
            """, 8) % pad('\n' + code_eqs, 12).replace(';','').strip())

        # note the simulate method is fixed, i.e. it doesn't require any templating
        output.write(pad(r"""
            def simulate(self, tspan, param_values=None, view=False):
                if param_values is not None:
                    # accept vector of parameter values as an argument
                    if len(param_values) != len(self.parameters):
                        raise Exception("param_values must have length %d" %
                                        len(self.parameters))
                    self.sim_param_values[:] = param_values
                else:
                    # create parameter vector from the values in the model
                    self.sim_param_values[:] = [p.value for p in self.parameters]
                self.y0.fill(0)
                for ic in self.initial_conditions:
                    self.y0[ic.species_index] = self.sim_param_values[ic.param_index]
                if self.y is None or len(tspan) != len(self.y):
                    self.y = numpy.empty((len(tspan), len(self.y0)))
                    if len(self.observables):
                        self.yobs = numpy.ndarray(len(tspan),
                                        zip((obs.name for obs in self.observables),
                                            itertools.repeat(float)))
                    else:
                        self.yobs = numpy.ndarray((len(tspan), 0))
                    self.yobs_view = self.yobs.view(float).reshape(len(self.yobs),
                                                                   -1)
                # perform the actual integration
                self.integrator.set_initial_value(self.y0, tspan[0])
                self.integrator.set_f_params(self.sim_param_values)
                self.y[0] = self.y0
                t = 1
                while self.integrator.successful() and self.integrator.t < tspan[-1]:
                    self.y[t] = self.integrator.integrate(tspan[t])
                    t += 1
                for i, obs in enumerate(self.observables):
                    self.yobs_view[:, i] = \
                        (self.y[:, obs.species] * obs.coefficients).sum(1)
                if view:
                    y_out = self.y.view()
                    yobs_out = self.yobs.view()
                    for a in y_out, yobs_out:
                        a.flags.writeable = False
                else:
                    y_out = self.y.copy()
                    yobs_out = self.yobs.copy()
                return (y_out, yobs_out)
            """, 4))

        return output.getvalue()

Example 18

Project: python-acoustics
Source File: aio.py
View license
def read_csv_cirrus(filename):
    """Read a Cirrus CSV file. Currently exists support for some types of
    CSV files extracted with NoiseTools. There is no support for CSVs related
    with occupational noise.

    If there are NC and NR values in the csv file, they will be stored in the
    returned object with attributes ``nc`` and ``nr``. If the CSV file contains
    time history, you can access to date and time with the ``time`` attribute.
    Also, it is possible to know the integration time with the
    ``integration_time`` attribute.

    :param filename: CSV file name.
    :returns: Pandas dataframe with all data extracted from the CSV file.
    :rtype: Pandas dataframe.

    """
    with open(filename, "r") as csvfile:
        csvreader = csvfile.read()
        csvreader = re.sub(r" dB", "", csvreader)  # Clean " dB" from data
        dialect = csv.Sniffer().sniff(csvreader, delimiters=",;")
        separator = dialect.delimiter
        # Guess decimal separator
        decimal_sep = re.search(r"\"\d{2,3}"
                                r"(\.|,)"  # Decimal separator
                                r"\d{1,2}\"",
                                csvreader).group(1)
    n_cols = re.search("(.+)\n", csvreader).group(1).count(separator) + 1
    if n_cols < 5:
        unsorted_data = []
        pdindex = ["Z"]
        for i, c in enumerate(csvreader.splitlines()):
            if c[:4] == '"NR"':
                nr = int(re.search(r"\d{2}", c).group(0))
                continue
            elif c[:4] == '"NC"':
                nc = int(re.search(r"\d{2}", c).group(0))
                continue
            if i != 0:
                unsorted_data.append(c.split(separator))
            else:
                if n_cols == 3:
                    pdindex.append(c[-2:-1])
                elif n_cols == 4:
                    pdindex.append("A")
                    pdindex.append("C")

        # Create a sorted temporary csv-like file
        csv_data = list(zip(*unsorted_data))
        temp_csv = ""
        for row in csv_data:
            temp_csv += separator.join(row) + "\n"
        # Then, read it with pandas
        data = pd.read_csv(io.StringIO(temp_csv), sep=separator,
                           decimal=decimal_sep)

        # Assign NC and NR data if they are present
        try:
            data.nc = nc
            data.nr = nr
        except:
            pass

        # If the csv file contains global data from the "Details" tab in
        # NoiseTools, skip row names
        if n_cols != 2:
            data.index = pdindex

    else:
        data = pd.read_csv(filename, parse_dates=[[0, 1]], sep=separator,
                           decimal=decimal_sep)

        # Fix time name column
        en_columns = data.columns.values
        en_columns[0] = "time"
        data.columns = en_columns

        # Guess integration time with statistical mode because the csv could
        # have been cleaned from unwanted noise
        data["time"] = pd.to_datetime(data.time)
        delta = data.time.diff().fillna(0)
        # Mode and change from ns to s
        int_time = float(delta.mode().astype(int) * 1e-9)
        if round(int_time, 2) == 0.06:  # Fix for 1/16 s
            int_time = 0.0625
        data.integration_time = int_time

    return data
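
A minimal sketch of the pattern above: clean or reshape CSV text in memory, then hand it to pandas through io.StringIO as if it were a file. The sample data is illustrative.

import io
import pandas as pd

raw = '"a";"b"\n"1,5";"2,5"\n'       # illustrative European-style CSV
cleaned = raw.replace(' dB', '')     # e.g., strip unit suffixes first
df = pd.read_csv(io.StringIO(cleaned), sep=';', decimal=',')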

Example 19

Project: glymur
Source File: test_printing.py
View license
    @unittest.skipIf(os.name == "nt", "Temporary file issue on Windows.")
    def test_less_common_boxes(self):
        """verify uinf, ulst, url, res, resd, resc box printing"""
        with tempfile.NamedTemporaryFile(suffix='.jp2') as tfile:
            with open(self.jp2file, 'rb') as ifile:
                # Everything up until the jp2c box.
                wbuffer = ifile.read(77)
                tfile.write(wbuffer)

                # Write the UINF superbox
                # Length = 50, id is uinf.
                wbuffer = struct.pack('>I4s', int(50), b'uinf')
                tfile.write(wbuffer)

                # Write the ULST box.
                # Length is 26, 1 UUID, hard code that UUID as zeros.
                wbuffer = struct.pack('>I4sHIIII', int(26), b'ulst', int(1),
                                      int(0), int(0), int(0), int(0))
                tfile.write(wbuffer)

                # Write the URL box.
                # Length is 16, version is one byte, flag is 3 bytes, url
                # is the rest.
                wbuffer = struct.pack('>I4sBBBB',
                                      int(16), b'url ',
                                      int(0), int(0), int(0), int(0))
                tfile.write(wbuffer)

                wbuffer = struct.pack('>ssss', b'a', b'b', b'c', b'd')
                tfile.write(wbuffer)

                # Start the resolution superbox.
                wbuffer = struct.pack('>I4s', int(44), b'res ')
                tfile.write(wbuffer)

                # Write the capture resolution box.
                wbuffer = struct.pack('>I4sHHHHBB',
                                      int(18), b'resc',
                                      int(1), int(1), int(1), int(1),
                                      int(0), int(1))
                tfile.write(wbuffer)

                # Write the display resolution box.
                wbuffer = struct.pack('>I4sHHHHBB',
                                      int(18), b'resd',
                                      int(1), int(1), int(1), int(1),
                                      int(1), int(0))
                tfile.write(wbuffer)

                # Get the rest of the input file.
                wbuffer = ifile.read()
                tfile.write(wbuffer)
                tfile.flush()

            jp2k = glymur.Jp2k(tfile.name)
            with patch('sys.stdout', new=StringIO()) as fake_out:
                print(jp2k.box[3])
                print(jp2k.box[4])
                actual = fake_out.getvalue().strip()
            exp = ('UUIDInfo Box (uinf) @ (77, 50)\n'
                   '    UUID List Box (ulst) @ (85, 26)\n'
                   '        UUID[0]:  00000000-0000-0000-0000-000000000000\n'
                   '    Data Entry URL Box (url ) @ (111, 16)\n'
                   '        Version:  0\n'
                   '        Flag:  0 0 0\n'
                   '        URL:  "abcd"\n'
                   'Resolution Box (res ) @ (127, 44)\n'
                   '    Capture Resolution Box (resc) @ (135, 18)\n'
                   '        VCR:  1.0\n'
                   '        HCR:  10.0\n'
                   '    Display Resolution Box (resd) @ (153, 18)\n'
                   '        VDR:  10.0\n'
                   '        HDR:  1.0')

            self.assertEqual(actual, exp)
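
A minimal sketch of the test pattern above: temporarily replace sys.stdout with an io.StringIO so output produced by print() can be asserted on.

import io
from unittest.mock import patch

with patch('sys.stdout', new=io.StringIO()) as fake_out:
    print('boxed output')
assert fake_out.getvalue().strip() == 'boxed output'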

Example 20

Project: PyCap
Source File: project.py
View license
    def export_records(self, records=None, fields=None, forms=None,
                       events=None, raw_or_label='raw', event_name='label',
                       format='json', export_survey_fields=False,
                       export_data_access_groups=False, df_kwargs=None,
                       export_checkbox_labels=False):
        """
        Export data from the REDCap project.

        Parameters
        ----------
        records : list
            array of record names specifying specific records to export.
            by default, all records are exported
        fields : list
            array of field names specifying specific fields to pull
            by default, all fields are exported
        forms : list
            array of form names to export. If in the web UI, the form
            name has a space in it, replace the space with an underscore
            by default, all forms are exported
        events : list
            an array of unique event names from which to export records

            :note: this only applies to longitudinal projects
        raw_or_label : (``'raw'``), ``'label'``, ``'both'``
            export the raw coded values or labels for the options of
            multiple choice fields, or both
        event_name : (``'label'``), ``'unique'``
             export the unique event name or the event label
        format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
            Format of returned data. ``'json'`` returns json-decoded
            objects while ``'csv'`` and ``'xml'`` return other formats.
            ``'df'`` will attempt to return a ``pandas.DataFrame``.
        export_survey_fields : (``False``), True
            specifies whether or not to export the survey identifier
            field (e.g., "redcap_survey_identifier") or survey timestamp
            fields (e.g., form_name+"_timestamp") when surveys are
            utilized in the project.
        export_data_access_groups : (``False``), ``True``
            specifies whether or not to export the
            ``"redcap_data_access_group"`` field when data access groups
            are utilized in the project.

            :note: This flag is only viable if the user whose token is
                being used to make the API request is *not* in a data
                access group. If the user is in a group, then this flag
                will revert to its default value.
        df_kwargs : dict
            Passed to ``pandas.read_csv`` to control construction of
            returned DataFrame.
            by default, ``{'index_col': self.def_field}``
        export_checkbox_labels : (``False``), ``True``
            specify whether to export checkbox values as their label on
            export.

        Returns
        -------
        data : list, str, ``pandas.DataFrame``
            exported data
        """
        ret_format = format
        if format == 'df':
            from pandas import read_csv
            ret_format = 'csv'
        pl = self.__basepl('record', format=ret_format)
        fields = self.backfill_fields(fields, forms)
        keys_to_add = (records, fields, forms, events,
                       raw_or_label, event_name, export_survey_fields,
                       export_data_access_groups, export_checkbox_labels)
        str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
                    'eventName', 'exportSurveyFields', 'exportDataAccessGroups',
                    'exportCheckboxLabel')
        for key, data in zip(str_keys, keys_to_add):
            if data:
                #  Make a url-ok string
                if key in ('fields', 'records', 'forms', 'events'):
                    pl[key] = ','.join(data)
                else:
                    pl[key] = data
        response, _ = self._call_api(pl, 'exp_record')
        if format in ('json', 'csv', 'xml'):
            return response
        elif format == 'df':
            if not df_kwargs:
                if self.is_longitudinal():
                    df_kwargs = {'index_col': [self.def_field,
                                               'redcap_event_name']}
                else:
                    df_kwargs = {'index_col': self.def_field}
            buf = StringIO(response)
            df = read_csv(buf, **df_kwargs)
            buf.close()
            return df

Example 21

Project: meza
Source File: convert.py
View license
def records2geojson(records, **kwargs):
    """Converts records into a GeoJSON file like object.

     Args:
        records (Iter[dict]): Rows of data whose keys are the field names.
            E.g., output from any `meza.io.read_geojson`.

        kwargs (dict): Keyword arguments.

    Kwargs:
        key (str): GeoJSON Feature ID (default: 'id').
        lon (int): longitude field name (default: 'lon').
        lat (int): latitude field name (default: 'lat').
        crs (str): coordinate reference system field name (default:
            'urn:ogc:def:crs:OGC:1.3:CRS84').
        indent (int): Number of spaces to indent (default: 2).
        sort_keys (bool): Sort rows by keys (default: True).
        ensure_ascii (bool): Sort response dict by keys (default: False).

    See also:
        `meza.convert.records2json`
        `meza.io.read_geojson`

    Returns:
        obj: io.StringIO instance

    Examples:
        >>> from json import loads

        >>> record = {
        ...     'id': 'gid', 'p1': 'prop', 'type': 'Point',
        ...     'lon': Decimal('12.2'), 'lat': Decimal('22.0')}
        ...
        >>> result = loads(next(records2geojson([record])))
        >>> result['type'] == 'FeatureCollection'
        True
        >>> result['bbox']
        [12.2, 22.0, 12.2, 22.0]
        >>> crs = 'urn:ogc:def:crs:OGC:1.3:CRS84'
        >>> result['crs'] == {'type': 'name', 'properties': {'name': crs}}
        True
        >>> features = result['features']
        >>> sorted(features[0].keys()) == [
        ...     'geometry', 'id', 'properties', 'type']
        True
        >>> features[0]['geometry'] == {
        ...     'type': 'Point', 'coordinates': [12.2, 22.0]}
        True
    """
    defaults = {
        'key': 'id', 'lon': 'lon', 'lat': 'lat', 'indent': 2, 'sort_keys': True,
        'crs': 'urn:ogc:def:crs:OGC:1.3:CRS84'}

    kw = ft.Objectify(kwargs, **defaults)
    crs = {'type': 'name', 'properties': {'name': kw.crs}}

    subresults = gen_subresults(records, kw)
    features = list(gen_features(subresults, kw))
    coords = [f['geometry']['coordinates'] for f in features]
    get_lon = lambda x: map(itemgetter(0), x)
    get_lat = lambda x: map(itemgetter(1), x)

    try:
        chained = (it.chain.from_iterable(map(get_lon, c)) for c in coords)
        lons = set(it.chain.from_iterable(chained))
    except TypeError:
        try:
            lons = set(it.chain.from_iterable(map(get_lon, coords)))
        except TypeError:
            # it's a point
            lons = set(get_lon(coords))
            lats = set(get_lat(coords))
        else:
            # it's a line
            lats = set(it.chain.from_iterable(map(get_lat, coords)))
    else:
        # it's a polygon
        chained = (it.chain.from_iterable(map(get_lat, c)) for c in coords)
        lats = set(it.chain.from_iterable(chained))

    if kw.sort_keys:
        crs = order_dict(crs, ['type', 'properties'])

    output = {
        'type': 'FeatureCollection',
        'bbox': [min(lons), min(lats), max(lons), max(lats)],
        'features': features,
        'crs': crs}

    if kw.sort_keys:
        output_order = ['type', 'bbox', 'features', 'crs']
        output = order_dict(output, output_order)

    dkwargs = ft.dfilter(kwargs, ['indent', 'sort_keys'], True)
    json = dumps(output, cls=ft.CustomEncoder, **dkwargs)
    return StringIO(str(json))
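
A minimal sketch of why returning io.StringIO is convenient here: the result behaves like an open text file, so callers can iterate it line by line or call next() on it, exactly as the doctest above does.

import io

stream = io.StringIO('{"type": "FeatureCollection"}\nsecond line\n')
first = next(stream)     # '{"type": "FeatureCollection"}\n'
rest = stream.read()     # 'second line\n'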

Example 22

Project: scons
Source File: TaskmasterTests.py
View license
    def test_trace(self):
        """Test Taskmaster tracing
        """
        import io

        trace = io.StringIO()
        n1 = Node("n1")
        n2 = Node("n2")
        n3 = Node("n3", [n1, n2])
        tm = SCons.Taskmaster.Taskmaster([n1, n1, n3], trace=trace)
        t = tm.next_task()
        t.prepare()
        t.execute()
        t.postprocess()
        n1.set_state(SCons.Node.executed)
        t = tm.next_task()
        t.prepare()
        t.execute()
        t.postprocess()
        n2.set_state(SCons.Node.executed)
        t = tm.next_task()
        t.prepare()
        t.execute()
        t.postprocess()
        t = tm.next_task()
        assert t is None

        value = trace.getvalue()
        expect = """\

Taskmaster: Looking for a node to evaluate
Taskmaster:     Considering node <no_state   0   'n1'> and its children:
Taskmaster: Evaluating <pending    0   'n1'>

Task.make_ready_current(): node <pending    0   'n1'>
Task.prepare():      node <executing  0   'n1'>
Task.execute():      node <executing  0   'n1'>
Task.postprocess():  node <executing  0   'n1'>

Taskmaster: Looking for a node to evaluate
Taskmaster:     Considering node <executed   0   'n1'> and its children:
Taskmaster:        already handled (executed)
Taskmaster:     Considering node <no_state   0   'n3'> and its children:
Taskmaster:        <executed   0   'n1'>
Taskmaster:        <no_state   0   'n2'>
Taskmaster:      adjusted ref count: <pending    1   'n3'>, child 'n2'
Taskmaster:     Considering node <no_state   0   'n2'> and its children:
Taskmaster: Evaluating <pending    0   'n2'>

Task.make_ready_current(): node <pending    0   'n2'>
Task.prepare():      node <executing  0   'n2'>
Task.execute():      node <executing  0   'n2'>
Task.postprocess():  node <executing  0   'n2'>
Task.postprocess():  removing <executing  0   'n2'>
Task.postprocess():  adjusted parent ref count <pending    0   'n3'>

Taskmaster: Looking for a node to evaluate
Taskmaster:     Considering node <pending    0   'n3'> and its children:
Taskmaster:        <executed   0   'n1'>
Taskmaster:        <executed   0   'n2'>
Taskmaster: Evaluating <pending    0   'n3'>

Task.make_ready_current(): node <pending    0   'n3'>
Task.prepare():      node <executing  0   'n3'>
Task.execute():      node <executing  0   'n3'>
Task.postprocess():  node <executing  0   'n3'>

Taskmaster: Looking for a node to evaluate
Taskmaster: No candidate anymore.

"""
        assert value == expect, value

Example 23

Project: line_profiler
Source File: line_profiler.py
View license
    @line_magic
    def lprun(self, parameter_s=''):
        """ Execute a statement under the line-by-line profiler from the
        line_profiler module.

        Usage:
          %lprun -f func1 -f func2 <statement>

        The given statement (which doesn't require quote marks) is run via the
        LineProfiler. Profiling is enabled for the functions specified by the -f
        options. The statistics will be shown side-by-side with the code through the
        pager once the statement has completed.

        Options:

        -f <function>: LineProfiler only profiles functions and methods it is told
        to profile.  This option tells the profiler about these functions. Multiple
        -f options may be used. The argument may be any expression that gives
        a Python function or method object. However, one must be careful to avoid
        spaces that may confuse the option parser. Additionally, functions defined
        in the interpreter at the In[] prompt or via %run currently cannot be
        displayed.  Write these functions out to a separate file and import them.

        -m <module>: Get all the functions/methods in a module

        One or more -f or -m options are required to get any useful results.

        -D <filename>: dump the raw statistics out to a pickle file on disk. The
        usual extension for this is ".lprof". These statistics may be viewed later
        by running line_profiler.py as a script.

        -T <filename>: dump the text-formatted statistics with the code side-by-side
        out to a text file.

        -r: return the LineProfiler object after it has completed profiling.

        -s: strip out all entries from the print-out that have zeros.
        """

        # Escape quote markers.
        opts_def = Struct(D=[''], T=[''], f=[], m=[])
        parameter_s = parameter_s.replace('"', r'\"').replace("'", r"\'")
        opts, arg_str = self.parse_options(parameter_s, 'rsf:m:D:T:', list_all=True)
        opts.merge(opts_def)

        global_ns = self.shell.user_global_ns
        local_ns = self.shell.user_ns

        # Get the requested functions.
        funcs = []
        for name in opts.f:
            try:
                funcs.append(eval(name, global_ns, local_ns))
            except Exception as e:
                raise UsageError('Could not find function %r.\n%s: %s' % (name,
                    e.__class__.__name__, e))

        profile = LineProfiler(*funcs)

        # Get the modules, too
        for modname in opts.m:
            try:
                mod = __import__(modname, fromlist=[''])
                profile.add_module(mod)
            except Exception as e:
                raise UsageError('Could not find module %r.\n%s: %s' % (modname,
                    e.__class__.__name__, e))

        # Add the profiler to the builtins for @profile.
        if PY3:
            import builtins
        else:
            import __builtin__ as builtins

        if 'profile' in builtins.__dict__:
            had_profile = True
            old_profile = builtins.__dict__['profile']
        else:
            had_profile = False
            old_profile = None
        builtins.__dict__['profile'] = profile

        try:
            try:
                profile.runctx(arg_str, global_ns, local_ns)
                message = ''
            except SystemExit:
                message = """*** SystemExit exception caught in code being profiled."""
            except KeyboardInterrupt:
                message = ("*** KeyboardInterrupt exception caught in code being "
                    "profiled.")
        finally:
            if had_profile:
                builtins.__dict__['profile'] = old_profile

        # Trap text output.
        stdout_trap = StringIO()
        profile.print_stats(stdout_trap, stripzeros='s' in opts)
        output = stdout_trap.getvalue()
        output = output.rstrip()

        page(output)
        print(message, end="")

        dump_file = opts.D[0]
        if dump_file:
            profile.dump_stats(dump_file)
            print('\n*** Profile stats pickled to file %r. %s' % (
                dump_file, message))

        text_file = opts.T[0]
        if text_file:
            pfile = open(text_file, 'w')
            pfile.write(output)
            pfile.close()
            print('\n*** Profile printout saved to text file %r. %s' % (
                text_file, message))

        return_value = None
        if 'r' in opts:
            return_value = profile

        return return_value
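
A minimal sketch of the output-trap pattern above: any API that writes to a file-like object (here csv.writer, as a stand-in for profile.print_stats) can be pointed at an io.StringIO, and the text is recovered with getvalue().

import csv
import io

trap = io.StringIO()
writer = csv.writer(trap)
writer.writerow(['name', 'hits'])
writer.writerow(['lprun', 3])
report = trap.getvalue()   # 'name,hits\r\nlprun,3\r\n'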

Example 24

View license
def main():
    initialized = False
    log('wfastcgi.py %s started' % __version__)
    log('Python version: %s' % sys.version)

    try:
        fcgi_stream = sys.stdin.detach() if sys.version_info[0] >= 3 else sys.stdin
        try:
            import msvcrt
            msvcrt.setmode(fcgi_stream.fileno(), os.O_BINARY)
        except ImportError:
            pass

        while True:
            record = read_fastcgi_record(fcgi_stream)
            if not record:
                continue

            errors = sys.stderr = sys.__stderr__ = record.params['wsgi.errors'] = StringIO()
            output = sys.stdout = sys.__stdout__ = StringIO()

            with handle_response(fcgi_stream, record, output.getvalue, errors.getvalue) as response:
                if not initialized:
                    log('wfastcgi.py %s initializing' % __version__)

                    os.chdir(response.physical_path)
                    sys.path[0] = '.'

                    # Initialization errors should be treated as fatal.
                    response.fatal_errors = True
                    response.error_message = 'Error occurred while reading WSGI handler'
                    env, handler = read_wsgi_handler(response.physical_path)

                    response.error_message = 'Error occurred starting file watcher'
                    start_file_watcher(response.physical_path, env.get('WSGI_RESTART_FILE_REGEX'))

                    response.error_message = ''
                    response.fatal_errors = False

                    log('wfastcgi.py %s initialized' % __version__)
                    initialized = True

                os.environ.update(env)

                # SCRIPT_NAME + PATH_INFO is supposed to be the full path
                # (http://www.python.org/dev/peps/pep-0333/) but by default
                # (http://msdn.microsoft.com/en-us/library/ms525840(v=vs.90).aspx)
                # IIS is sending us the full URL in PATH_INFO, so we need to
                # clear the script name here
                if 'AllowPathInfoForScriptMappings' not in os.environ:
                    record.params['SCRIPT_NAME'] = ''
                    record.params['wsgi.script_name'] = wsgi_encode('')

                # Send each part of the response to FCGI_STDOUT.
                # Exceptions raised in the handler will be logged by the context
                # manager and we will then wait for the next record.

                result = handler(record.params, response.start)
                try:
                    for part in result:
                        if part:
                            response.send(FCGI_STDOUT, part)
                finally:
                    if hasattr(result, 'close'):
                        result.close()
    except _ExitException:
        pass
    except Exception:
        maybe_log('Unhandled exception in wfastcgi.py: ' + traceback.format_exc())
    except BaseException:
        maybe_log('Unhandled exception in wfastcgi.py: ' + traceback.format_exc())
        raise
    finally:
        run_exit_tasks()
        maybe_log('wfastcgi.py %s closed' % __version__)
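
A minimal sketch related to the redirection above: instead of assigning to sys.stdout and sys.stderr by hand, contextlib.redirect_stdout swaps in an io.StringIO and restores the real stream on exit.

import io
from contextlib import redirect_stdout

buf = io.StringIO()
with redirect_stdout(buf):
    print('captured')
assert buf.getvalue() == 'captured\n'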

Example 26

View license
    def test_loadworkflowsampledata(self):
        """ Ensure that workflow sample data loads correctly. """

        # Load the test workflow
        load_workflow(TEST_WORKFLOW, VERSION_1)
        assert_test_dir_workflow_loaded(self)
        assert_test_dir_v1_loaded(self)

        # Calling the command with a malformed workflow/version argument should fail.
        invalid_args = (
            VERSION_1,  # Workflow slug omitted
            TEST_WORKFLOW,  # Version slug omitted
            '{}/{}/'.format(TEST_WORKFLOW, VERSION_1),  # Too many slashes
            '{}.{}'.format(TEST_WORKFLOW, VERSION_1),  # Wrong separator
        )
        for invalid_arg in invalid_args:
            invalid_stderr = StringIO()
            invalid_message = 'Please specify workflow versions in the format'
            call_command('loadworkflowsampledata', invalid_arg,
                         stderr=invalid_stderr)
            self.assertIn(invalid_message, invalid_stderr.getvalue())

        # Loading valid sample data should succeed without errors
        v1_str = '{}/{}'.format(TEST_WORKFLOW, VERSION_1)
        stdout = StringIO()
        call_command('loadworkflowsampledata', v1_str, stdout=stdout)
        output = stdout.getvalue()
        success_message = 'Successfully loaded sample data'
        self.assertIn(success_message, output)

        # clean up for subsequent commands
        del sys.modules['orchestra.tests.workflows.test_dir.load_sample_data']

        # Loading sample data for a nonexistent workflow should fail
        stderr1 = StringIO()
        call_command(
            'loadworkflowsampledata',
            '{}/{}'.format(NONEXISTENT_WORKFLOW_SLUG, VERSION_1),
            stderr=stderr1)
        output1 = stderr1.getvalue()
        no_workflow_error = ('Workflow {} has not been loaded into the '
                             'database'.format(NONEXISTENT_WORKFLOW_SLUG))
        self.assertIn(no_workflow_error, output1)

        # Loading sample data for a nonexistent version should fail
        stderr2 = StringIO()
        call_command(
            'loadworkflowsampledata',
            '{}/{}'.format(TEST_WORKFLOW, NONEXISTENT_VERSION),
            stderr=stderr2)
        output2 = stderr2.getvalue()
        no_version_error = ('Version {} does not exist'
                            .format(NONEXISTENT_VERSION))
        self.assertIn(no_version_error, output2)

        # Loading a workflow with a broken loading script should fail.
        # Simulate this by pointing at a nonexistent load function.
        workflow = Workflow.objects.get(slug=TEST_WORKFLOW)
        workflow.sample_data_load_function = 'invalid_load_function'
        workflow.save()
        stderr3 = StringIO()
        call_command('loadworkflowsampledata', v1_str, stderr=stderr3)
        output3 = stderr3.getvalue()
        no_module_error = 'An error occurred while loading sample data'
        self.assertIn(no_module_error, output3)

        # Loading sample data for a workflow with no load function in its JSON
        # manifest should fail.
        workflow = Workflow.objects.get(slug=TEST_WORKFLOW)
        workflow.sample_data_load_function = None
        workflow.save()
        stderr4 = StringIO()
        call_command('loadworkflowsampledata', v1_str, stderr=stderr4)
        output4 = stderr4.getvalue()
        no_load_function_error = ('Workflow {} does not provide sample data'
                                  .format(TEST_WORKFLOW))
        self.assertIn(no_load_function_error, output4)
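
The test above leans on a standard Django testing idiom: call_command accepts any file-like object for stdout and stderr, so io.StringIO lets assertions run against command output without touching the real streams. A minimal sketch of that idiom (the command arguments are placeholders):

from io import StringIO
from django.core.management import call_command

def run_command_captured(*args, **kwargs):
    # Collect a management command's output streams for assertions.
    out, err = StringIO(), StringIO()
    call_command(*args, stdout=out, stderr=err, **kwargs)
    return out.getvalue(), err.getvalue()

# out, err = run_command_captured('loadworkflowsampledata', 'some_workflow/v1')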

Example 27

Project: python3-trepan
Source File: deparse.py
View license
    def run(self, args):
        co = self.proc.curframe.f_code
        name = co.co_name

        try:
            opts, args = getopt(args[1:], "hpAPo:",
                                ["help", "parent", "pretty", "tree", "AST",
                                 "offset"])
        except GetoptError as err:
            # print help information and exit:
            print(str(err))  # will print something like "option -a not recognized"
            return

        pretty = False
        show_parent = False
        show_ast = False
        offset = None
        for o, a in opts:
            if o in ("-h", "--help"):
                self.proc.commands['help'].run(['help', 'deparse'])
                return
            elif o in ("-p", "--parent"):
                show_parent = True
            elif o in ("-P", "--pretty"):
                pretty = True
            elif o in ("-A", "--tree", '--AST'):
                show_ast = True
            elif o in ("-o", '--offset'):
                offset = a
            else:
                self.errmsg("unhandled option %s" % o)
            pass
        pass

        sys_version = version_info.major + (version_info.minor / 10.0)
        if len(args) >= 1 and args[0] == '.':
            try:
                if pretty:
                    out = StringIO()
                    deparsed = deparse_code_pretty(sys_version, co, out)
                    text = out.getvalue()
                else:
                    deparsed = deparse_code(sys_version, co)
                    text = deparsed.text
                    pass
            except:
                self.errmsg("error in deparsing code")
                return
            self.print_text(text)
            return

        elif offset:
            mess = ("The 'deparse' command when given an argument requires an"
                    " instruction offset. Got: %s" % offset)
            last_i = self.proc.get_an_int(offset, mess)
            if last_i is None:
                return
        else:
            last_i = self.proc.curframe.f_lasti
            if last_i == -1: last_i = 0

        try:
            deparsed = deparse_code(sys_version, co)
        except:
            self.errmsg("error in deparsing code at %d" % last_i)
            return
        if (name, last_i) in deparsed.offsets.keys():
            nodeInfo =  deparsed.offsets[name, last_i]
            extractInfo = deparsed.extract_node_info(nodeInfo)
            parentInfo = None
            # print extractInfo
            if show_ast:
                p = deparsed.ast
                if show_parent:
                    parentInfo, p = deparsed.extract_parent_info(nodeInfo.node)
                self.msg(p)
            if extractInfo:
                self.msg("opcode: %s" % nodeInfo.node.type)
                self.print_text(extractInfo.selectedLine)
                self.msg(extractInfo.markerLine)
                if show_parent:
                    if not parentInfo:
                        parentInfo, p = deparsed.extract_parent_info(nodeInfo.node)
                    if parentInfo:
                        self.msg("Contained in...")
                        self.print_text(parentInfo.selectedLine)
                        self.msg(parentInfo.markerLine)
                        self.msg("parsed type: %s" % p.type)
                    pass
                pass
            pass
        elif last_i == -1:
            if name:
                self.msg("At beginning of %s " % name)
            elif self.core.filename(None):
                self.msg("At beginning of program %s" % self.core.filename(None))
            else:
                self.msg("At beginning")
        else:
            self.errmsg("haven't recorded info for offset %d. Offsets I know are:"
                        % last_i)
            offsets = sorted([(str(x[0]), str(x[1])) for x in tuple(deparsed.offsets)])
            m = self.columnize_commands(offsets)
            self.msg_nocr(m)
        return
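
The StringIO use worth noting above: deparse_code_pretty takes a file-like out argument, so the command renders into memory and reads the result back with getvalue(). The same idiom works for any API that writes to a stream; a small self-contained sketch:

import io

def render_to_string(write_report):
    # write_report is any callable that writes to a file-like object.
    buf = io.StringIO()
    write_report(buf)
    return buf.getvalue()

print(render_to_string(lambda f: f.write("hello\n")))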

Example 28

View license
def reduce_operators(source):
    """
    Remove spaces between operators in *source* and return the result.
    Example::

        def foo(foo, bar, blah):
            test = "This is a %s" % foo

    Will become::

        def foo(foo,bar,blah):
            test="This is a %s"%foo

    ..  note::

        Also removes trailing commas and joins disjointed strings like
        ``("foo" "bar")``.
    """
    io_obj = io.StringIO(source)
    prev_tok = None
    out_tokens = []
    out = ""
    last_lineno = -1
    last_col = 0
    nl_types = (tokenize.NL, tokenize.NEWLINE)
    joining_strings = False
    new_string = ""
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        token_string = tok[1]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        if start_line > last_lineno:
            last_col = 0
        if token_type != tokenize.OP:
            if start_col > last_col and token_type not in nl_types:
                if prev_tok[0] != tokenize.OP:
                    out += (" " * (start_col - last_col))
            if token_type == tokenize.STRING:
                if prev_tok[0] == tokenize.STRING:
                    # Join the strings into one
                    string_type = token_string[0]  # '' or ""
                    prev_string_type = prev_tok[1][0]
                    out = out.rstrip(" ")  # Remove any spaces we inserted prev
                    if not joining_strings:
                        # Remove prev token and start the new combined string
                        out = out[:(len(out) - len(prev_tok[1]))]
                        prev_string = prev_tok[1].strip(prev_string_type)
                        new_string = (
                            prev_string + token_string.strip(string_type))
                        joining_strings = True
                    else:
                        new_string += token_string.strip(string_type)
        else:
            if token_string in ('}', ')', ']'):
                if prev_tok[1] == ',':
                    out = out.rstrip(',')
            if joining_strings:
                # NOTE: Using triple quotes so that this logic works with
                # mixed strings using both single quotes and double quotes.
                out += "'''" + new_string + "'''"
                joining_strings = False
            if token_string == '@':  # Decorators need special handling
                if prev_tok[0] == tokenize.NEWLINE:
                    # Ensure it gets indented properly
                    out += (" " * (start_col - last_col))
        if not joining_strings:
            out += token_string
        last_col = end_col
        last_lineno = end_line
        prev_tok = tok
    return out

Example 29

Project: shellsploit-library
Source File: minification.py
View license
def reduce_operators(source):
    """
    Remove spaces between operators in *source* and return the result.
    Example::

        def foo(foo, bar, blah):
            test = "This is a %s" % foo

    Will become::

        def foo(foo,bar,blah):
            test="This is a %s"%foo

    ..  note::

        Also removes trailing commas and joins disjointed strings like
        ``("foo" "bar")``.
    """
    io_obj = io.StringIO(source)
    prev_tok = None
    out_tokens = []
    out = ""
    last_lineno = -1
    last_col = 0
    nl_types = (tokenize.NL, tokenize.NEWLINE)
    joining_strings = False
    new_string = ""
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        token_string = tok[1]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        if start_line > last_lineno:
            last_col = 0
        if token_type != tokenize.OP:
            if start_col > last_col and token_type not in nl_types:
                if prev_tok[0] != tokenize.OP:
                    out += (" " * (start_col - last_col))
            if token_type == tokenize.STRING:
                if prev_tok[0] == tokenize.STRING:
                    # Join the strings into one
                    string_type = token_string[0]  # '' or ""
                    prev_string_type = prev_tok[1][0]
                    out = out.rstrip(" ")  # Remove any spaces we inserted prev
                    if not joining_strings:
                        # Remove prev token and start the new combined string
                        out = out[:(len(out) - len(prev_tok[1]))]
                        prev_string = prev_tok[1].strip(prev_string_type)
                        new_string = (
                            prev_string + token_string.strip(string_type))
                        joining_strings = True
                    else:
                        new_string += token_string.strip(string_type)
        else:
            if token_string in ('}', ')', ']'):
                if prev_tok[1] == ',':
                    out = out.rstrip(',')
            if joining_strings:
                # NOTE: Using triple quotes so that this logic works with
                # mixed strings using both single quotes and double quotes.
                out += "'''" + new_string + "'''"
                joining_strings = False
            if token_string == '@':  # Decorators need special handling
                if prev_tok[0] == tokenize.NEWLINE:
                    # Ensure it gets indented properly
                    out += (" " * (start_col - last_col))
        if not joining_strings:
            out += token_string
        last_col = end_col
        last_lineno = end_line
        prev_tok = tok
    return out
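
Both copies of reduce_operators feed the source through tokenize.generate_tokens, which expects a readline callable; io.StringIO(source).readline provides one without touching the filesystem. A minimal sketch of that setup:

import io
import tokenize

source = "x = 1 + 2\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))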

Example 30

Project: rst2pdf
Source File: pdfbuilder.py
View license
    def translate(self):
        visitor = PDFTranslator(self.document, self.builder)
        self.document.walkabout(visitor)
        lang = self.config.language or 'en'
        langmod = get_language_available(lang)[2]
        self.docutils_languages = {lang: langmod}

        # Generate Contents topic manually
        if self.use_toc:
            contents=nodes.topic(classes=['contents'])
            contents+=nodes.title('')
            contents[0]+=nodes.Text(langmod.labels['contents'])
            contents['ids']=['Contents']
            pending=nodes.topic()
            contents.append(pending)
            pending.details={}
            self.document.insert(0,nodes.raw(text='SetPageCounter 1 arabic', format='pdf'))
            self.document.insert(0,nodes.raw(text='OddPageBreak %s'%self.page_template, format='pdf'))
            self.document.insert(0,contents)
            self.document.insert(0,nodes.raw(text='SetPageCounter 1 lowerroman', format='pdf'))
            contTrans=PDFContents(self.document)
            contTrans.toc_depth = self.toc_depth
            contTrans.startnode=pending
            contTrans.apply()

        if self.use_coverpage:
            # Generate cover page

            # FIXME: duplicate from createpdf, refactor!

            # Find cover template, save it in cover_file
            def find_cover(name):
                cover_path=[self.srcdir, os.path.expanduser('~/.rst2pdf'),
                                            os.path.join(self.PATH,'templates')]

                # Add the Sphinx template paths
                def add_template_path(path):
                    return os.path.join(self.srcdir, path)

                cover_path.extend(list(map(add_template_path, self.config.templates_path)))

                cover_file=None
                for d in cover_path:
                    if os.path.exists(os.path.join(d,name)):
                        cover_file=os.path.join(d,name)
                        break
                return cover_file

            cover_file=find_cover(self.config.pdf_cover_template)
            if cover_file is None:
                log.error("Can't find cover template %s, using default"%self.custom_cover)
                cover_file=find_cover('sphinxcover.tmpl')

            # This is what's used in the python docs because
            # Latex does a manual linebreak. This sucks.
            authors=self.document.settings.author.split('\\')

            # Feed data to the template, get restructured text.
            cover_text = createpdf.renderTemplate(tname=cover_file,
                                title=self.document.settings.title or visitor.elements['title'],
                                subtitle='%s %s'%(_('version'),self.config.version),
                                authors=authors,
                                date=ustrftime(self.config.today_fmt or _('%B %d, %Y'))
                                )

            cover_tree = docutils.core.publish_doctree(cover_text)
            self.document.insert(0, cover_tree)

        sio=StringIO()

        if self.invariant:
            createpdf.patch_PDFDate()
            createpdf.patch_digester()

        createpdf.RstToPdf(sphinx=True,
                 stylesheets=self.stylesheets,
                 language=self.__language,
                 breaklevel=self.breaklevel,
                 breakside=self.breakside,
                 fit_mode=self.fitmode,
                 font_path=self.fontpath,
                 inline_footnotes=self.inline_footnotes,
                 highlightlang=self.highlightlang,
                 splittables=self.splittables,
                 style_path=self.style_path,
                 basedir=self.srcdir,
                 def_dpi=self.default_dpi,
                 real_footnotes=self.real_footnotes,
                 numbered_links=self.use_numbered_links,
                 background_fit_mode=self.fit_background_mode,
                 baseurl=self.baseurl,
                 section_header_depth=self.section_header_depth
                ).createPdf(doctree=self.document,
                    output=sio,
                    compressed=self.compressed)
        self.output=sio.getvalue()
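
One caveat about the sio buffer above: it receives a rendered PDF, which is binary data. In code written for Python 2, StringIO could hold byte strings; under Python 3 a binary payload calls for io.BytesIO instead, since io.StringIO only accepts str. A sketch of the distinction:

import io

text_buf = io.StringIO()
text_buf.write("textual output is fine here\n")

pdf_buf = io.BytesIO()
pdf_buf.write(b"%PDF-1.4 ...")   # binary payloads need BytesIO
data = pdf_buf.getvalue()        # bytes, ready to write to disk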

Example 31

Project: polylearn
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # Generate the plot as a png image if the file name starts with
        # 'plot' and the source file is more recent than any existing
        # image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First cd into the original example dir, so that any files
                # created by the example are created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr

                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
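
The Tee object above is assumed to be defined elsewhere in gen_rst.py; conceptually it just fans every write out to both the real stdout and a StringIO buffer, so the example's output is shown live and recorded at the same time. A minimal sketch of such a helper, under that assumption:

import io
import sys

class Tee(object):
    # Write-through proxy: duplicate writes to several streams.
    def __init__(self, *streams):
        self.streams = streams

    def write(self, data):
        for stream in self.streams:
            stream.write(data)

    def flush(self):
        for stream in self.streams:
            stream.flush()

buf = io.StringIO()
sys.stdout = Tee(sys.__stdout__, buf)   # echo and record simultaneously
print("hello")
sys.stdout = sys.__stdout__             # restore the real stream
assert buf.getvalue() == "hello\n"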

Example 32

Project: scikit-video
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # Generate the plot as a png image if the file name starts with
        # 'plot' and the source file is more recent than any existing
        # image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First cd into the original example dir, so that any files
                # created by the example are created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr

                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs

Example 33

Project: scipy
Source File: refguide_check.py
View license
def validate_rst_syntax(text, name, dots=True):
    if text is None:
        if dots:
            output_dot('E')
        return False, "ERROR: %s: no documentation" % (name,)

    ok_unknown_items = set([
        'mod', 'currentmodule', 'autosummary', 'data',
        'obj', 'versionadded', 'versionchanged', 'module', 'class',
        'ref', 'func', 'toctree', 'moduleauthor',
        'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
    ])

    # Run through docutils
    error_stream = io.StringIO()

    def resolve(name, is_label=False):
        return ("http://foo", name)

    token = '<RST-VALIDATE-SYNTAX-CHECK>'

    docutils.core.publish_doctree(
        text, token,
        settings_overrides = dict(halt_level=5,
                                  traceback=True,
                                  default_reference_context='title-reference',
                                  default_role='emphasis',
                                  link_base='',
                                  resolve_name=resolve,
                                  stylesheet_path='',
                                  raw_enabled=0,
                                  file_insertion_enabled=0,
                                  warning_stream=error_stream))

    # Print errors, disregarding unimportant ones
    error_msg = error_stream.getvalue()
    errors = error_msg.split(token)
    success = True
    output = ""

    for error in errors:
        lines = error.splitlines()
        if not lines:
            continue

        m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
        if m:
            if m.group(1) in ok_unknown_items:
                continue

        m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
        if m:
            continue

        output += name + lines[0] + "::\n    " + "\n    ".join(lines[1:]).rstrip() + "\n"
        success = False

    if not success:
        output += "    " + "-"*72 + "\n"
        for lineno, line in enumerate(text.splitlines()):
            output += "    %-4d    %s\n" % (lineno+1, line)
        output += "    " + "-"*72 + "\n\n"

    if dots:
        output_dot('.' if success else 'F')
    return success, output
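
The io.StringIO role here is warning_stream: docutils accepts any file-like object for its diagnostics, so the checker can parse reST entirely in memory and inspect the collected messages afterwards. A stripped-down sketch of the same setup (assuming docutils is installed):

import io
import docutils.core

error_stream = io.StringIO()
docutils.core.publish_doctree(
    "Some *reST* text with an :unknown:`role`.",
    settings_overrides={'report_level': 1,
                        'warning_stream': error_stream})
print(error_stream.getvalue())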

Example 34

Project: cartopy
Source File: plot_directive.py
View license
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.
    """

    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    if six.PY2:
        pwd = os.getcwdu()
    else:
        pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in '
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]

    # Redirect stdout
    stdout = sys.stdout
    if six.PY3:
        sys.stdout = io.StringIO()
    else:
        sys.stdout = cStringIO.StringIO()

    # Assign a do-nothing print function to the namespace.  There
    # doesn't seem to be any other way to provide a way to (not) print
    # that works correctly across Python 2 and 3.
    def _dummy_print(*arg, **kwarg):
        pass

    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                if setup.config.plot_pre_code is None:
                    six.exec_(six.text_type("import numpy as np\n" +
                    "from matplotlib import pyplot as plt\n"), ns)
                else:
                    six.exec_(six.text_type(setup.config.plot_pre_code), ns)
            ns['print'] = _dummy_print
            if "__main__" in code:
                six.exec_("__name__ = '__main__'", ns)
            code = remove_coding(code)
            six.exec_(code, ns)
            if function_name is not None:
                six.exec_(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            raise PlotError(traceback.format_exc())
    finally:
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns

Example 35

Project: sfs-python
Source File: plot_directive.py
View license
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.
    """

    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    if six.PY2:
        pwd = os.getcwdu()
    else:
        pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in '
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]

    # Redirect stdout
    stdout = sys.stdout
    if six.PY3:
        sys.stdout = io.StringIO()
    else:
        sys.stdout = cStringIO.StringIO()

    # Assign a do-nothing print function to the namespace.  There
    # doesn't seem to be any other way to provide a way to (not) print
    # that works correctly across Python 2 and 3.
    def _dummy_print(*arg, **kwarg):
        pass

    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                if setup.config.plot_pre_code is None:
                    six.exec_(six.text_type("import numpy as np\n" +
                    "from matplotlib import pyplot as plt\n"), ns)
                else:
                    six.exec_(six.text_type(setup.config.plot_pre_code), ns)
            ns['print'] = _dummy_print
            if "__main__" in code:
                six.exec_("__name__ = '__main__'", ns)
            code = remove_coding(code)
            six.exec_(code, ns)
            if function_name is not None:
                six.exec_(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            raise PlotError(traceback.format_exc())
    finally:
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
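
Both copies of run_code pick io.StringIO or cStringIO.StringIO by Python version and restore sys.stdout by hand in the finally block. When only Python 3 matters, contextlib.redirect_stdout captures into a StringIO and guarantees restoration even on exceptions; a sketch of that simpler form:

import contextlib
import io

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    exec("print('captured')", {})
print('got: %r' % buf.getvalue())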

Example 36

Project: sklearn-theano
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    time_m = 0
    time_s = 0
    if plot_gallery and fname.startswith('plot'):
        # Generate the plot as a png image if the file name starts with
        # 'plot' and the source file is more recent than any existing
        # image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First cd into the original example dir, so that any files
                # created by the example are created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_mngr.num)
                    plt.savefig(image_path % fig_mngr.num)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/dev/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/dev/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    example_code_obj = identify_names(open(example_file).read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs

Example 37

Project: iktomi
Source File: sqla.py
View license
    def test_schema_several_meta(self):
        Base1 = declarative_base()

        class A1(Base1):
            __tablename__ = 'A'
            id = Column(Integer, primary_key=True)

        class B1(Base1):
            __tablename__ = 'B'
            id = Column(Integer, primary_key=True)

        Base2 = declarative_base()

        class A2(Base2):
            __tablename__ = 'A'
            id = Column(Integer, primary_key=True)

        engine1 = create_engine('sqlite://')
        engine2 = create_engine('sqlite://')
        binds = {
            A1.__table__: engine1,
            B1.__table__: engine1,
            A2.__table__: engine2,
        }
        meta = {
            'm1': Base1.metadata,
            'm2': Base2.metadata,
            'm3': MetaData(),
        }
        cli = Sqla(orm.sessionmaker(binds=binds), metadata=meta)

        output = StringIO()
        with mock.patch.object(sys, 'stdout', output):
            cli.command_schema()
        created = self._created_tables(output.getvalue())
        self.assertEqual(len(created), 3)
        self.assertEqual(created.count('A'), 2)
        self.assertEqual(created.count('B'), 1)

        output = StringIO()
        with mock.patch.object(sys, 'stdout', output):
            cli.command_schema('m1')
        created = self._created_tables(output.getvalue())
        self.assertEqual(len(created), 2)
        self.assertEqual(created.count('A'), 1)
        self.assertEqual(created.count('B'), 1)

        output = StringIO()
        with mock.patch.object(sys, 'stdout', output):
            cli.command_schema('m1.B')
        created = self._created_tables(output.getvalue())
        self.assertEqual(created, ['B'])

        output = StringIO()
        with mock.patch.object(sys, 'stdout', output):
            try:
                cli.command_schema('m2.B')
            except SystemExit:
                pass
        created = self._created_tables(output.getvalue())
        self.assertEqual(created, [])

        output = StringIO()
        with mock.patch.object(sys, 'stdout', output):
            try:
                cli.command_schema('m3.A')
            except SystemExit:
                pass
        created = self._created_tables(output.getvalue())
        self.assertEqual(created, [])
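
The tests above capture CLI output by patching sys.stdout with a StringIO through mock.patch.object; because print looks up sys.stdout at call time, everything the command prints lands in the buffer. The same idea in miniature (using unittest.mock from the Python 3 standard library):

import io
import sys
from unittest import mock

def cli():
    print("CREATE TABLE A")

output = io.StringIO()
with mock.patch.object(sys, 'stdout', output):
    cli()
assert "CREATE TABLE A" in output.getvalue()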

Example 38

Project: qgisSpaceSyntaxToolkit
Source File: gml.py
View license
def literal_stringizer(value):
    """Convert a value to a Python literal in GML representation.

    Parameters
    ----------
    value : object
        The value to be converted to GML representation.

    Returns
    -------
    rep : string
        A double-quoted Python literal representing value. Unprintable
        characters are replaced by XML character references.

    Raises
    ------
    ValueError
        If ``value`` cannot be converted to GML.

    Notes
    -----
    ``literal_stringizer`` is largely the same as ``repr`` in terms of
    functionality but attempts to prefix ``unicode`` and ``bytes`` literals with
    ``u`` and ``b`` to provide better interoperability of data generated by
    Python 2 and Python 3.

    The original value can be recovered using the
    ``networkx.readwrite.gml.literal_destringizer`` function.
    """
    def stringize(value):
        if isinstance(value, (int, long, bool)) or value is None:
            buf.write(str(value))
        elif isinstance(value, unicode):
            text = repr(value)
            if text[0] != 'u':
                try:
                    value.encode('latin1')
                except UnicodeEncodeError:
                    text = 'u' + text
            buf.write(text)
        elif isinstance(value, (float, complex, str, bytes)):
            buf.write(repr(value))
        elif isinstance(value, list):
            buf.write('[')
            first = True
            for item in value:
                if not first:
                    buf.write(',')
                else:
                    first = False
                stringize(item)
            buf.write(']')
        elif isinstance(value, tuple):
            if len(value) > 1:
                buf.write('(')
                first = True
                for item in value:
                    if not first:
                        buf.write(',')
                    else:
                        first = False
                    stringize(item)
                buf.write(')')
            elif value:
                buf.write('(')
                stringize(value[0])
                buf.write(',)')
            else:
                buf.write('()')
        elif isinstance(value, dict):
            buf.write('{')
            first = True
            for key, value in value.items():
                if not first:
                    buf.write(',')
                else:
                    first = False
                stringize(key)
                buf.write(':')
                stringize(value)
            buf.write('}')
        elif isinstance(value, set):
            buf.write('{')
            first = True
            for item in value:
                if not first:
                    buf.write(',')
                else:
                    first = False
                stringize(item)
            buf.write('}')
        else:
            raise ValueError(
                '%r cannot be converted into a Python literal' % (value,))

    buf = StringIO()
    stringize(value)
    return buf.getvalue()
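
Note that long and unicode in the excerpt above are Python 2 names (or come from a compatibility shim in the surrounding module); the buffering idea itself is version-independent. A small sketch of that core pattern, accumulating many small writes in a StringIO instead of concatenating strings:

import io

def join_literals(items):
    # Build the result with many small writes; StringIO avoids the
    # cost of repeated string concatenation.
    buf = io.StringIO()
    buf.write('[')
    for i, item in enumerate(items):
        if i:
            buf.write(',')
        buf.write(repr(item))
    buf.write(']')
    return buf.getvalue()

assert join_literals([1, 'a']) == "[1,'a']"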

Example 39

Project: asv
Source File: test_console.py
View license
def test_write_with_fallback(tmpdir, capfd):
    tmpdir = six.text_type(tmpdir)

    def check_write(value, expected, stream_encoding, preferred_encoding):
        old_getpreferredencoding = locale.getpreferredencoding
        try:
            locale.getpreferredencoding = lambda: preferred_encoding

            # Check writing to io.StringIO
            stream = io.StringIO()
            _write_with_fallback(value, stream.write, stream)
            assert stream.getvalue() == value

            # Check writing to a text stream
            buf = io.BytesIO()
            stream = io.TextIOWrapper(buf, encoding=stream_encoding)
            _write_with_fallback(value, stream.write, stream)
            stream.flush()
            got = buf.getvalue()
            assert got == expected

            # Check writing to a byte stream (no stream encoding, so
            # it should write in locale encoding)
            if stream_encoding == preferred_encoding:
                buf = io.BytesIO()
                _write_with_fallback(value, buf.write, buf)
                got = buf.getvalue()
                assert got == expected

            # Check writing to a file
            fn = os.path.join(tmpdir, 'tmp.txt')
            with io.open(fn, 'w', encoding=stream_encoding) as stream:
                _write_with_fallback(value, stream.write, stream)
            with open(fn, 'rb') as stream:
                got = stream.read()
                assert got == expected

            # Check writing to Py2 files
            if not six.PY3:
                if stream_encoding == preferred_encoding:
                    # No stream encoding: write in locale encoding
                    for mode in ['w', 'wb']:
                        with open(fn, mode) as stream:
                            _write_with_fallback(value, stream.write, stream)
                        with open(fn, 'rb') as stream:
                            got = stream.read()
                            assert got == expected
        finally:
            locale.getpreferredencoding = old_getpreferredencoding

    # What is printed should follow the following rules:
    #
    # - Try printing in stream encoding.
    # - Try printing in locale preferred encoding.
    # - Otherwise, map characters produced by asv to ascii equivalents, and
    #   - Try to print in latin1
    #   - Try to print in ascii, replacing all non-ascii characters
    encodings = ['utf-8', 'latin1', 'ascii', 'euc-jp']
    strings = ["helloμ", "hello·", "hello難", "helloä"]
    repmap = {"helloμ": "hellou", "hello·": "hello-"}

    for pref_enc, stream_enc, s in itertools.product(encodings, encodings, strings):
        expected = None
        for enc in [stream_enc, pref_enc]:
            try:
                expected = s.encode(enc)
                break
            except UnicodeError:
                pass
        else:
            s2 = repmap.get(s, s)
            try:
                expected = s2.encode('latin1')
            except UnicodeError:
                expected = s2.encode('ascii', 'replace')

        check_write(s, expected, stream_enc, pref_enc)

    # Should not bail out on bytes input
    _write_with_fallback("a".encode('ascii'), sys.stdout.write, sys.stdout)
    out, err = capfd.readouterr()
    assert out == "a"
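
_write_with_fallback is asv's own helper; a rough sketch of the fallback idea the test exercises (try the stream first, then degrade to ASCII with replacement characters) could look like this. write_text is an illustrative name, not asv's API:

import io

def write_text(value, stream):
    # Try the stream's own encoding first; if it rejects the text,
    # fall back to ASCII with replacement characters.
    try:
        stream.write(value)
        return
    except UnicodeError:
        pass
    stream.write(value.encode('ascii', 'replace').decode('ascii'))

buf = io.BytesIO()
stream = io.TextIOWrapper(buf, encoding='ascii')
write_text("helloμ", stream)
stream.flush()
assert buf.getvalue() == b'hello?'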

Example 40

Project: sqlobject
Source File: csvexport.py
View license
def export_csv(soClass, select=None, writer=None, connection=None,
               orderBy=None):
    """
    Export the SQLObject class ``soClass`` to a CSV file.

    ``soClass`` can also be a SelectResult object, as returned by
    ``.select()``.  If it is a class, all objects will be retrieved,
    ordered by ``orderBy`` if given, or the ``.csvOrderBy`` attribute
    if present (but csvOrderBy will only be applied when no select
    result is given).

    You can also pass in select results (or simply a list of
    instances) in ``select`` -- if you have a list of objects (not a
    SelectResult instance, as produced by ``.select()``) then you must
    pass it in with ``select`` and pass the class in as the first
    argument.

    ``writer`` is a ``csv.writer()`` object, or a file-like object.  If
    not given, the CSV content will be returned as a string.

    Uses ``connection`` as the data source, if given, otherwise the
    default connection.

    Columns can be annotated with ``.csvTitle`` attributes, which will
    form the titles of the columns; failing that, a ``title`` attribute
    is used, and finally the column attribute name itself.

    If a column has a ``.noCSV`` attribute which is true, then the
    column will be suppressed.

    Additionally a class can have an ``.extraCSVColumns`` attribute,
    which should be a list of strings/tuples.  If a tuple, it should
    be like ``(attribute, title)``, otherwise it is the attribute,
    which will also be the title.  These will be appended to the end
    of the CSV file; the attribute will be retrieved from instances.

    Also a ``.csvColumnOrder`` attribute can be on the class, which is
    the string names of attributes in the order they should be
    presented.
    """

    return_fileobj = None
    if not writer:
        return_fileobj = StringIO()
        writer = csv.writer(return_fileobj)
    elif not hasattr(writer, 'writerow'):
        writer = csv.writer(writer)

    if isinstance(soClass, sqlobject.SQLObject.SelectResultsClass):
        assert select is None, (
            "You cannot pass in a select argument (%r) "
            "and a SelectResult argument (%r) for soClass" % (select, soClass))
        select = soClass
        soClass = select.sourceClass
    elif select is None:
        select = soClass.select()
        if getattr(soClass, 'csvOrderBy', None):
            select = select.orderBy(soClass.csvOrderBy)

    if orderBy:
        select = select.orderBy(orderBy)
    if connection:
        select = select.connection(connection)

    _actually_export_csv(soClass, select, writer)

    if return_fileobj:
        # They didn't pass any writer or file object in, so we return
        # the string result:
        return return_fileobj.getvalue()
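
Stripped of the SQLObject machinery, the core trick is that csv.writer accepts any object with a write() method, so a StringIO turns the csv module into a string builder:

import csv
import io

fileobj = io.StringIO()
writer = csv.writer(fileobj)
writer.writerow(['id', 'name'])
writer.writerow([1, 'Alice'])

# csv uses \r\n line endings by default
assert fileobj.getvalue() == 'id,name\r\n1,Alice\r\n'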

Example 41

Project: permute
Source File: plot_directive.py
View license
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.
    """

    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    if six.PY2:
        pwd = os.getcwdu()
    else:
        pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in '
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]

    # Redirect stdout
    stdout = sys.stdout
    if six.PY3:
        sys.stdout = io.StringIO()
    else:
        sys.stdout = cStringIO.StringIO()

    # Assign a do-nothing print function to the namespace.  There
    # doesn't seem to be any other way to suppress printing that
    # works correctly across Python 2 and 3.
    def _dummy_print(*arg, **kwarg):
        pass

    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                if setup.config.plot_pre_code is None:
                    six.exec_(six.text_type("import numpy as np\n" +
                    "from matplotlib import pyplot as plt\n"), ns)
                else:
                    six.exec_(six.text_type(setup.config.plot_pre_code), ns)
            ns['print'] = _dummy_print
            if "__main__" in code:
                six.exec_("__name__ = '__main__'", ns)
            code = remove_coding(code)
            six.exec_(code, ns)
            if function_name is not None:
                six.exec_(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            raise PlotError(traceback.format_exc())
    finally:
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
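
On Python 3 alone, the save/replace/restore dance around sys.stdout can be written more safely with contextlib.redirect_stdout, which restores the stream even if the code raises. A sketch of the same capture:

import contextlib
import io

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    exec("print('captured')", {})

assert buf.getvalue() == 'captured\n'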

Example 42

Project: django-forms-builder
Source File: admin.py
View license
    def entries_view(self, request, form_id, show=False, export=False,
                     export_xls=False):
        """
        Displays the form entries in an HTML table with the option to
        export as a CSV file.
        """
        if request.POST.get("back"):
            bits = (self.model._meta.app_label, self.model.__name__.lower())
            change_url = reverse("admin:%s_%s_change" % bits, args=(form_id,))
            return HttpResponseRedirect(change_url)
        form = get_object_or_404(self.model, id=form_id)
        post = request.POST or None
        args = form, request, self.formentry_model, self.fieldentry_model, post
        entries_form = EntriesForm(*args)
        delete = "%s.delete_formentry" % self.formentry_model._meta.app_label
        can_delete_entries = request.user.has_perm(delete)
        submitted = entries_form.is_valid() or show or export or export_xls
        export = export or request.POST.get("export")
        export_xls = export_xls or request.POST.get("export_xls")
        if submitted:
            if export:
                response = HttpResponse(content_type="text/csv")
                fname = "%s-%s.csv" % (form.slug, slugify(now().ctime()))
                attachment = "attachment; filename=%s" % fname
                response["Content-Disposition"] = attachment
                queue = StringIO()
                try:
                    csv = writer(queue, delimiter=CSV_DELIMITER)
                    writerow = csv.writerow
                except TypeError:
                    queue = BytesIO()
                    delimiter = bytes(CSV_DELIMITER, encoding="utf-8")
                    csv = writer(queue, delimiter=delimiter)
                    writerow = lambda row: csv.writerow([c.encode("utf-8")
                        if hasattr(c, "encode") else c for c in row])
                writerow(entries_form.columns())
                for row in entries_form.rows(csv=True):
                    writerow(row)
                data = queue.getvalue()
                response.write(data)
                return response
            elif XLWT_INSTALLED and export_xls:
                response = HttpResponse(content_type="application/vnd.ms-excel")
                fname = "%s-%s.xls" % (form.slug, slugify(now().ctime()))
                attachment = "attachment; filename=%s" % fname
                response["Content-Disposition"] = attachment
                queue = BytesIO()
                workbook = xlwt.Workbook(encoding='utf8')
                sheet = workbook.add_sheet(form.title[:31])
                for c, col in enumerate(entries_form.columns()):
                    sheet.write(0, c, col)
                for r, row in enumerate(entries_form.rows(csv=True)):
                    for c, item in enumerate(row):
                        if isinstance(item, datetime):
                            item = item.replace(tzinfo=None)
                            sheet.write(r + 2, c, item, XLWT_DATETIME_STYLE)
                        else:
                            sheet.write(r + 2, c, item)
                workbook.save(queue)
                data = queue.getvalue()
                response.write(data)
                return response
            elif request.POST.get("delete") and can_delete_entries:
                selected = request.POST.getlist("selected")
                if selected:
                    try:
                        from django.contrib.messages import info
                    except ImportError:
                        def info(request, message, fail_silently=True):
                            request.user.message_set.create(message=message)
                    entries = self.formentry_model.objects.filter(id__in=selected)
                    count = entries.count()
                    if count > 0:
                        entries.delete()
                        message = ungettext("1 entry deleted",
                                            "%(count)s entries deleted", count)
                        info(request, message % {"count": count})
        template = "admin/forms/entries.html"
        context = {"title": _("View Entries"), "entries_form": entries_form,
                   "opts": self.model._meta, "original": form,
                   "can_delete_entries": can_delete_entries,
                   "submitted": submitted,
                   "xlwt_installed": XLWT_INSTALLED}
        return render(request, template, context)
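
The try/except around writer(queue, ...) above is a Python 2/3 switch: Python 3's csv module wants a text stream, while Python 2's wants bytes and raises TypeError when handed a unicode delimiter. On Python 3 only, the CSV half of this view reduces to roughly the following (the helper name is illustrative):

import csv
import io

def rows_to_csv_bytes(columns, rows, delimiter=','):
    queue = io.StringIO()
    w = csv.writer(queue, delimiter=delimiter)
    w.writerow(columns)
    for row in rows:
        w.writerow(row)
    # Encode once at the end, e.g. before writing to an HTTP response.
    return queue.getvalue().encode('utf-8')

assert rows_to_csv_bytes(['name'], [['Alice']]) == b'name\r\nAlice\r\n'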

Example 43

Project: sysql
Source File: ps.py
View license
def run(args, db):
 cmd = 'ps'
 table = 'ps'

 out_structure = \
 """pid  tname    time      etime        %cpu      sgi_p     uname     comm    args
    10   10       20        20           10        10        100       20      100500
    pid  tty_name cpu_time  elapsed_time cpu_ratio processor user_name command command_line
    int  text     cpu_time  elapsed_time float     text      text      text    text          """.split('\n')

 ps_out        = out_structure[0].split()
 ps_sizes      = out_structure[1].split()
 ps_names      = out_structure[2].split()
 ps_types      = out_structure[3].split()

 class Column:
  def __init__(s, **kwargs):
   for k, v in kwargs.items():
    setattr(s, k, v)

 columns = [Column(out=out, size=int(size), name=name, type=type) for out, size, name, type in zip(ps_out, ps_sizes, ps_names, ps_types)]

 class elapsed_time:
  # [DD-]hh:mm:ss
  def __init__(s, text):
   s.text = text

  @staticmethod
  def adapter(s):
   digits = list(map(int, re.findall(r'\d+', s.text)))
   ss = digits.pop()
   mm = digits.pop()
   hh = digits.pop() if digits else 0
   dd = digits.pop() if digits else 0
   t = datetime.timedelta(days=dd, hours=hh, minutes=mm, seconds=ss)
   return int(t.total_seconds())

  @staticmethod
  def converter(value):
   return datetime.timedelta(seconds=int(value))

 class cpu_time(elapsed_time):
  pass

 type_map = dict(int=int, float=float, text=str, bytes=bytes)
 for c in [elapsed_time, cpu_time]:
  type_map[c.__name__] = c
  sqlite3.register_adapter(c, c.adapter)
  sqlite3.register_converter(c.__name__, c.converter)

 args.extend(['-o', ','.join(['{}:{}'.format(col.out, col.size) for col in columns])])
 bout = subprocess.check_output([cmd] + args)
 out = io.StringIO(bout.decode(errors='surrogateescape'))
 out.readline()

 sql = 'CREATE TABLE {table} ({columns})'.format(table=table, columns=','.join(['{} {}'.format(col.name, col.type) for col in columns]))
 db.execute(sql)

 left = 0
 header = []
 for size in ps_sizes:
  right = left + int(size) + 1
  header.append([left, right])
  left = right

 for line in out:
  vals = [line[start:end].strip() for start, end in header]
  vals = [type_map[t](v) for t, v in zip(ps_types, vals)]
  db.execute('INSERT INTO {table} VALUES ({q})'.format(table=table,q=','.join('?'*len(ps_names))), tuple(vals))
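
Wrapping the decoded subprocess output in io.StringIO gives a file-like object, so the header row can be dropped with readline() and the rest consumed by plain line iteration. A cut-down sketch of the slicing loop; the column widths here are illustrative:

import io

raw = "PID TTY\n 12 pts\n"            # pretend this came from check_output()
out = io.StringIO(raw)
out.readline()                        # skip the header row
widths = [(0, 3), (4, 7)]             # illustrative fixed-width columns
for line in out:
    fields = [line[a:b].strip() for a, b in widths]
    print(fields)                     # ['12', 'pts']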

Example 44

Project: Live-Blog
Source File: application.py
View license
    def __init__(self, srcdir, confdir, outdir, doctreedir, buildername,
                 confoverrides=None, status=sys.stdout, warning=sys.stderr,
                 freshenv=False, warningiserror=False, tags=None):
        self.next_listener_id = 0
        self._extensions = {}
        self._listeners = {}
        self.domains = BUILTIN_DOMAINS.copy()
        self.builderclasses = BUILTIN_BUILDERS.copy()
        self.builder = None
        self.env = None

        self.srcdir = srcdir
        self.confdir = confdir
        self.outdir = outdir
        self.doctreedir = doctreedir

        if status is None:
            self._status = StringIO()
            self.quiet = True
        else:
            self._status = status
            self.quiet = False

        if warning is None:
            self._warning = StringIO()
        else:
            self._warning = warning
        self._warncount = 0
        self.warningiserror = warningiserror

        self._events = events.copy()

        # say hello to the world
        self.info(bold('Running Sphinx v%s' % sphinx.__version__))

        # status code for command-line application
        self.statuscode = 0

        # read config
        self.tags = Tags(tags)
        self.config = Config(confdir, CONFIG_FILENAME,
                             confoverrides or {}, self.tags)
        self.config.check_unicode(self.warn)

        # set confdir to srcdir if -C given (!= no confdir); a few pieces
        # of code expect a confdir to be set
        if self.confdir is None:
            self.confdir = self.srcdir

        # backwards compatibility: activate old C markup
        self.setup_extension('sphinx.ext.oldcmarkup')
        # load all user-given extension modules
        for extension in self.config.extensions:
            self.setup_extension(extension)
        # the config file itself can be an extension
        if self.config.setup:
            self.config.setup(self)

        # now that we know all config values, collect them from conf.py
        self.config.init_values()

        # check the Sphinx version if requested
        if self.config.needs_sphinx and \
           self.config.needs_sphinx > sphinx.__version__[:3]:
            raise VersionRequirementError(
                'This project needs at least Sphinx v%s and therefore cannot '
                'be built with this version.' % self.config.needs_sphinx)

        # set up translation infrastructure
        self._init_i18n()
        # set up the build environment
        self._init_env(freshenv)
        # set up the builder
        self._init_builder(buildername)
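
Passing status=None (or warning=None) swaps in a StringIO as a throwaway sink, so the rest of the code can write status messages unconditionally instead of checking a quiet flag at every call site. The idea in isolation, as a small assumed helper:

import io
import sys

def make_status_stream(status=sys.stdout):
    # None means "be quiet": writes still succeed, they just go nowhere.
    return io.StringIO() if status is None else status

stream = make_status_stream(None)
stream.write('Running Sphinx...\n')   # buffered silently, never printed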

Example 45

Project: taiga-back
Source File: services.py
View license
def userstories_to_csv(project, queryset):
    csv_data = io.StringIO()
    fieldnames = ["ref", "subject", "description", "sprint", "sprint_estimated_start",
                  "sprint_estimated_finish", "owner", "owner_full_name", "assigned_to",
                  "assigned_to_full_name", "status", "is_closed"]

    roles = project.roles.filter(computable=True).order_by('slug')
    for role in roles:
        fieldnames.append("{}-points".format(role.slug))

    fieldnames.append("total-points")

    fieldnames += ["backlog_order", "sprint_order", "kanban_order",
                   "created_date", "modified_date", "finish_date",
                   "client_requirement", "team_requirement", "attachments",
                   "generated_from_issue", "external_reference", "tasks",
                   "tags", "watchers", "voters"]

    custom_attrs = project.userstorycustomattributes.all()
    for custom_attr in custom_attrs:
        fieldnames.append(custom_attr.name)

    queryset = queryset.prefetch_related("role_points",
                                         "role_points__points",
                                         "role_points__role",
                                         "tasks",
                                         "attachments",
                                         "custom_attributes_values")
    queryset = queryset.select_related("milestone",
                                       "project",
                                       "status",
                                       "owner",
                                       "assigned_to",
                                       "generated_from_issue")

    queryset = attach_total_voters_to_queryset(queryset)
    queryset = attach_watchers_to_queryset(queryset)

    writer = csv.DictWriter(csv_data, fieldnames=fieldnames)
    writer.writeheader()
    for us in queryset:
        row = {
            "ref": us.ref,
            "subject": us.subject,
            "description": us.description,
            "sprint": us.milestone.name if us.milestone else None,
            "sprint_estimated_start": us.milestone.estimated_start if us.milestone else None,
            "sprint_estimated_finish": us.milestone.estimated_finish if us.milestone else None,
            "owner": us.owner.username if us.owner else None,
            "owner_full_name": us.owner.get_full_name() if us.owner else None,
            "assigned_to": us.assigned_to.username if us.assigned_to else None,
            "assigned_to_full_name": us.assigned_to.get_full_name() if us.assigned_to else None,
            "status": us.status.name if us.status else None,
            "is_closed": us.is_closed,
            "backlog_order": us.backlog_order,
            "sprint_order": us.sprint_order,
            "kanban_order": us.kanban_order,
            "created_date": us.created_date,
            "modified_date": us.modified_date,
            "finish_date": us.finish_date,
            "client_requirement": us.client_requirement,
            "team_requirement": us.team_requirement,
            "attachments": us.attachments.count(),
            "generated_from_issue": us.generated_from_issue.ref if us.generated_from_issue else None,
            "external_reference": us.external_reference,
            "tasks": ",".join([str(task.ref) for task in us.tasks.all()]),
            "tags": ",".join(us.tags or []),
            "watchers": us.watchers,
            "voters": us.total_voters
        }

        us_role_points_by_role_id = {us_rp.role.id: us_rp.points.value for us_rp in us.role_points.all()}
        for role in roles:
            row["{}-points".format(role.slug)] = us_role_points_by_role_id.get(role.id, 0)

        row['total-points'] = us.get_total_points()

        for custom_attr in custom_attrs:
            value = us.custom_attributes_values.attributes_values.get(str(custom_attr.id), None)
            row[custom_attr.name] = value

        writer.writerow(row)

    return csv_data
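
The same in-memory pattern works with csv.DictWriter, which maps dict keys onto a fixed fieldnames order. A minimal sketch outside Django:

import csv
import io

csv_data = io.StringIO()
writer = csv.DictWriter(csv_data, fieldnames=['ref', 'subject'])
writer.writeheader()
writer.writerow({'ref': 1, 'subject': 'Fix login'})

assert csv_data.getvalue() == 'ref,subject\r\n1,Fix login\r\n'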

Example 46

Project: flit
Source File: inifile.py
View license
def _validate_config(cp, path):
    """
    Validate a config and return a dict containing ``module``, ``metadata``,
    ``scripts`` and ``entry_points_file`` keys.
    """
    unknown_sections = set(cp.sections()) - {'metadata', 'scripts'}
    unknown_sections = [s for s in unknown_sections if not s.lower().startswith('x-')]
    if unknown_sections:
        raise ConfigError('Unknown sections: ' + ', '.join(unknown_sections))

    if not cp.has_section('metadata'):
        raise ConfigError('[metadata] section is required')

    md_sect = cp['metadata']
    if not set(md_sect).issuperset(metadata_required_fields):
        missing = metadata_required_fields - set(md_sect)
        raise ConfigError("Required fields missing: " + '\n'.join(missing))

    module = md_sect.get('module')
    if not module.isidentifier():
        raise ConfigError("Module name %r is not a valid identifier" % module)

    md_dict = {}

    if 'description-file' in md_sect:
        description_file = path.parent / md_sect.get('description-file')
        with description_file.open() as f:
            raw_desc = f.read()
        if description_file.suffix == '.md':
            try:
                import pypandoc
                log.debug('will convert %s to rst', description_file)
                raw_desc = pypandoc.convert(raw_desc, 'rst', format='markdown')
            except Exception:
                log.warn('Unable to convert markdown to rst. Please install `pypandoc` and `pandoc` to use markdown long description.')
        stream = io.StringIO()
        _, ok = render(raw_desc, stream)
        if not ok:
            log.warn("The file description seems not to be valid rst for PyPI;"
                    " it will be interpreted as plain text")
            log.warn(stream.getvalue())
        md_dict['description'] = raw_desc

    if 'entry-points-file' in md_sect:
        entry_points_file = path.parent / md_sect.get('entry-points-file')
        if not entry_points_file.is_file():
            raise FileNotFoundError(entry_points_file)
    else:
        entry_points_file = path.parent / 'entry_points.txt'
        if not entry_points_file.is_file():
            entry_points_file = None

    for key, value in md_sect.items():
        if key in {'description-file', 'module', 'entry-points-file'}:
            continue
        if key not in metadata_allowed_fields:
            closest = difflib.get_close_matches(key, metadata_allowed_fields,
                                                n=1, cutoff=0.7)
            msg = "Unrecognised metadata key: {}".format(key)
            if closest:
                msg += " (did you mean {!r}?)".format(closest[0])
            raise ConfigError(msg)

        k2 = key.replace('-', '_')
        if key in metadata_list_fields:
            md_dict[k2] = value.splitlines()
        else:
            md_dict[k2] = value

    # What we call requires in the ini file is technically requires_dist in
    # the metadata.
    if 'requires' in md_dict:
        md_dict['requires_dist'] = md_dict.pop('requires')

    # And what we call dist-name is name in the metadata
    if 'dist_name' in md_dict:
        md_dict['name'] = md_dict.pop('dist_name')

    if 'classifiers' in md_dict:
        verify_classifiers(md_dict['classifiers'])

    # Scripts ---------------
    if cp.has_section('scripts'):
        scripts_dict = {k: common.parse_entry_point(v) for k, v in cp['scripts'].items()}
    else:
        scripts_dict = {}

    return {
        'module': module,
        'metadata': md_dict,
        'scripts': scripts_dict,
        'entry_points_file': entry_points_file,
        'raw_config': cp,
    }
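
render above (flit's rst checker) reports problems into the StringIO it is handed rather than printing them, which lets the caller decide afterwards whether to surface the warnings. The general shape, with a stand-in renderer rather than flit's real one:

import io

def render(text, stream):
    # Stand-in for flit's rst renderer: write problems to `stream`
    # and return (output, ok).
    ok = not text.startswith('.. broken')
    if not ok:
        stream.write('warning: could not parse description\n')
    return text, ok

stream = io.StringIO()
_, ok = render('.. broken:: directive', stream)
if not ok:
    print(stream.getvalue().strip())  # surface the captured warnings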

Example 47

Project: tensorlib
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    time_m = 0
    time_s = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it.
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_mngr.num)
                    plt.savefig(image_path % fig_mngr.num)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/dev/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to
        # _build/html/dev/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    example_code_obj = identify_names(open(example_file).read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
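
The Tee object used above is not shown in this excerpt; a plausible minimal version (an assumption, not the project's actual class) simply fans write() calls out to several streams, so output is displayed and captured at the same time:

import io
import sys

class Tee:
    """Write-through to several streams at once (illustrative sketch)."""
    def __init__(self, *streams):
        self.streams = streams
    def write(self, data):
        for s in self.streams:
            s.write(data)
    def flush(self):
        for s in self.streams:
            s.flush()

buffer = io.StringIO()
sys.stdout = Tee(sys.__stdout__, buffer)
print('visible AND captured')
sys.stdout = sys.__stdout__
assert buffer.getvalue() == 'visible AND captured\n'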

Example 48

Project: qutebrowser
Source File: asciidoc2html.py
View license
    def _build_website_file(self, root, filename):
        """Build a single website file."""
        # pylint: disable=too-many-locals,too-many-statements
        src = os.path.join(root, filename)
        src_basename = os.path.basename(src)
        parts = [self._args.website[0]]
        dirname = os.path.dirname(src)
        if dirname:
            parts.append(os.path.relpath(os.path.dirname(src)))
        parts.append(
            os.extsep.join((os.path.splitext(src_basename)[0],
                            'html')))
        dst = os.path.join(*parts)
        os.makedirs(os.path.dirname(dst), exist_ok=True)

        modified_src = os.path.join(self._tempdir, src_basename)
        shutil.copy('www/header.asciidoc', modified_src)

        outfp = io.StringIO()

        with open(modified_src, 'r', encoding='utf-8') as header_file:
            header = header_file.read()
            header += "\n\n"

        with open(src, 'r', encoding='utf-8') as infp:
            outfp.write("\n\n")
            hidden = False
            found_title = False
            title = ""
            last_line = ""

            for line in infp:
                if line.strip() == '// QUTE_WEB_HIDE':
                    assert not hidden
                    hidden = True
                elif line.strip() == '// QUTE_WEB_HIDE_END':
                    assert hidden
                    hidden = False
                elif line == "The Compiler <[email protected]>\n":
                    continue
                elif re.match(r'^:\w+:.*', line):
                    # asciidoc field
                    continue

                if not found_title:
                    if re.match(r'^=+$', line):
                        line = line.replace('=', '-')
                        found_title = True
                        title = last_line.rstrip('\n') + " | qutebrowser\n"
                        title += "=" * (len(title) - 1)
                    elif re.match(r'^= .+', line):
                        line = '==' + line[1:]
                        found_title = True
                        title = last_line.rstrip('\n') + " | qutebrowser\n"
                        title += "=" * (len(title) - 1)

                if not hidden:
                    outfp.write(line.replace(".asciidoc[", ".html["))
                    last_line = line

        current_lines = outfp.getvalue()
        outfp.close()

        with open(modified_src, 'w+', encoding='utf-8') as final_version:
            final_version.write(title + "\n\n" + header + current_lines)

        self.call(modified_src, dst, '--theme=qute')
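
The StringIO here acts as a staging buffer: the transformed document is assembled in memory, read back once with getvalue(), and only then combined with the title and header and written to disk. The core of that rewrite step in isolation:

import io

def transform(lines):
    # Build the rewritten document in memory, then emit it once.
    out = io.StringIO()
    for line in lines:
        out.write(line.replace('.asciidoc[', '.html['))
    return out.getvalue()

assert transform(['See link:foo.asciidoc[docs]\n']) == 'See link:foo.html[docs]\n'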

Example 49

Project: prov
Source File: test_xml.py
View license
    def test_deserialization_example_04_and_05(self):
        """
        Examples 4 and 5 have a different type specification. They use an
        xsi:type as an attribute on an entity. This can be read but if
        written again it will become an XML child element. This is
        semantically identical but cannot be tested with a round trip.
        """
        # Example 4.
        xml_string = """
        <prov:document
            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
            xmlns:xsd="http://www.w3.org/2001/XMLSchema"
            xmlns:prov="http://www.w3.org/ns/prov#"
            xmlns:ex="http://example.com/ns/ex#"
            xmlns:tr="http://example.com/ns/tr#">

          <prov:entity prov:id="tr:WD-prov-dm-20111215" xsi:type="prov:Plan">
            <prov:type xsi:type="xsd:QName">ex:Workflow</prov:type>
          </prov:entity>

        </prov:document>
        """
        with io.StringIO() as xml:
            xml.write(xml_string)
            xml.seek(0, 0)
            actual_document = prov.ProvDocument.deserialize(source=xml,
                                                            format="xml")

        expected_document = prov.ProvDocument()
        ex_ns = Namespace(*EX_NS)
        expected_document.add_namespace(ex_ns)
        expected_document.add_namespace(*EX_TR)

        # The xsi:type attribute is mapped to a proper PROV attribute.
        expected_document.entity("tr:WD-prov-dm-20111215", (
            (prov.PROV_TYPE, QualifiedName(ex_ns, "Workflow")),
            (prov.PROV_TYPE, PROV["Plan"])))

        self.assertEqual(actual_document, expected_document, "example_04")

        # Example 5.
        xml_string = """
        <prov:document
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xmlns:xsd="http://www.w3.org/2001/XMLSchema"
          xmlns:prov="http://www.w3.org/ns/prov#"
          xmlns:ex="http://example.com/ns/ex#"
          xmlns:tr="http://example.com/ns/tr#">

        <prov:entity prov:id="tr:WD-prov-dm-20111215" xsi:type="prov:Plan">
          <prov:type xsi:type="xsd:QName">ex:Workflow</prov:type>
          <prov:type xsi:type="xsd:QName">prov:Plan</prov:type> <!-- inferred -->
          <prov:type xsi:type="xsd:QName">prov:Entity</prov:type> <!-- inferred -->
        </prov:entity>

        </prov:document>
        """
        with io.StringIO() as xml:
            xml.write(xml_string)
            xml.seek(0, 0)
            actual_document = prov.ProvDocument.deserialize(source=xml,
                                                            format="xml")

        expected_document = prov.ProvDocument()
        expected_document.add_namespace(*EX_NS)
        expected_document.add_namespace(*EX_TR)

        # The xsi:type attribute is mapped to a proper PROV attribute.
        expected_document.entity("tr:WD-prov-dm-20111215", (
            (prov.PROV_TYPE, QualifiedName(ex_ns, "Workflow")),
            (prov.PROV_TYPE, PROV["Entity"]),
            (prov.PROV_TYPE, PROV["Plan"])
        ))

        self.assertEqual(actual_document, expected_document, "example_05")
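
Writing into a StringIO and then calling seek(0, 0) rewinds it so a parser can read it like a freshly opened file. (Passing the string straight to the StringIO constructor leaves the position at 0, so the explicit seek is only needed after writing.) A sketch with a stand-in reader:

import io

with io.StringIO() as fileobj:
    fileobj.write('<root>payload</root>')
    fileobj.seek(0, 0)               # rewind before handing off to a reader
    first_line = fileobj.readline()  # stand-in for a real deserializer

assert first_line == '<root>payload</root>'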

Example 50

Project: TrustRouter
Source File: config.py
View license
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.

    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().
    """
    if not thread:
        raise NotImplementedError("listen() needs threading to work")

    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.

        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.

            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            import tempfile
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    chunk = chunk.decode("utf-8")
                    try:
                        import json
                        d = json.loads(chunk)
                        assert isinstance(d, dict)
                        dictConfig(d)
                    except:
                        # Apply new configuration.

                        file = io.StringIO(chunk)
                        try:
                            fileConfig(file)
                        except (KeyboardInterrupt, SystemExit):
                            raise
                        except:
                            traceback.print_exc()
                    if self.server.ready:
                        self.server.ready.set()
            except socket.error as e:
                if not isinstance(e.args, tuple):
                    raise
                else:
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise

    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """

        allow_reuse_address = 1

        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None, ready=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1
            self.ready = ready

        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()
            self.socket.close()

    class Server(threading.Thread):

        def __init__(self, rcvr, hdlr, port):
            super(Server, self).__init__()
            self.rcvr = rcvr
            self.hdlr = hdlr
            self.port = port
            self.ready = threading.Event()

        def run(self):
            server = self.rcvr(port=self.port, handler=self.hdlr,
                               ready=self.ready)
            if self.port == 0:
                self.port = server.server_address[1]
            self.ready.set()
            global _listener
            logging._acquireLock()
            _listener = server
            logging._releaseLock()
            server.serve_until_stopped()

    return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
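
The fallback branch works because fileConfig() accepts any file-like object, so a payload that arrived over the socket as a string can be parsed without touching disk. The same trick works with configparser directly; a sketch:

import configparser
import io

chunk = "[loggers]\nkeys=root\n"     # pretend this arrived over a socket
cp = configparser.ConfigParser()
cp.read_file(io.StringIO(chunk))     # parse the in-memory "file"

assert cp.get('loggers', 'keys') == 'root'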