sys.stdout

Here are examples of the Python API sys.stdout, taken from open source projects.

200 Examples
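
Most of the examples below use sys.stdout in one of three ways: writing to it directly, passing it as the stream for the logging module, or saving, replacing, and restoring it to capture or silence output. A minimal sketch of all three patterns (Python 3 standard library only; several projects below are Python 2, but the idioms are the same):

import io
import logging
import sys

# 1. sys.stdout is a writable text stream.
sys.stdout.write("hello\n")
sys.stdout.flush()

# 2. It can be handed to logging as the output stream.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.info("this record goes to stdout")

# 3. It can be swapped out and restored to capture print output.
saved = sys.stdout
sys.stdout = io.StringIO()
try:
    print("captured, not displayed")
    captured = sys.stdout.getvalue()
finally:
    sys.stdout = saved  # always restore the real stream
print("captured text was: %r" % captured)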

Example 1

Project: arkc-client
Source File: main.py
def main():
    global Mode
    parser = argparse.ArgumentParser(description=None)
    try:
        # Load arguments
        parser.add_argument(
            "-v", dest="v", action="store_true", help="show detailed logs")
        parser.add_argument(
            "-vv", dest="vv", action="store_true", help="show debug logs")
        # TODO: use native function
        parser.add_argument(
            "--version", dest="version", action="store_true", help="show version number")
        parser.add_argument('-kg', '--keygen', dest="kg", action='store_true',
                            help="Generate a key string and quit, overriding other options.")
        parser.add_argument('--kg-path', '--keygen-path', dest="kg_save_path",
                            help="Where to store a key string, if not set, use default.")
        parser.add_argument('-reg', '--keygen-email-register', dest="email_dest",
                            help="Email destination to register the key.")
        parser.add_argument('--get-meek', dest="dlmeek", action="store_true",
                            help="Download meek to home directory, overriding normal options")
        parser.add_argument('-c', '--config', dest="config", default=None,
                            help="Specify a configuration files, REQUIRED for ArkC Client to start")
        parser.add_argument('-g', '--gae', dest="gae", action='store_true',
                            help="Use GAE mode")
        parser.add_argument('-fs', '--frequent-swap', dest="fs", action="store_true",
                            help="Use frequent connection swapping")
        parser.add_argument('-pn', '--public-addr', dest="pn", action="store_true",
                            help="Disable UPnP when you have public network IP address (or NAT has been manually configured)")

        parser.add_argument("-v6", dest="ipv6", default="",
                            help="Enable this option to use IPv6 address (only use it if you have one)")
        print("""ArkC Client V""" + VERSION + """,  by ArkC Technology.
The programs is distributed under GNU General Public License Version 2.
""")

        options = parser.parse_args()

        if options.vv:
            logging.basicConfig(
                stream=sys.stdout, level=logging.DEBUG, format="%(levelname)s: %(asctime)s; %(message)s")
        elif options.v:
            logging.basicConfig(
                stream=sys.stdout, level=logging.INFO, format="%(levelname)s: %(asctime)s; %(message)s")
        else:
            logging.basicConfig(
                stream=sys.stdout, level=logging.WARNING, format="%(levelname)s: %(asctime)s; %(message)s")

        if options.gae:
            Mode = "GAE"
            logging.info("Using GAE mode.")
        else:
            Mode = "VPS"
            logging.info("Using VPS mode.")

        if options.version:
            print("ArkC Client Version " + VERSION)
            sys.exit()
        elif options.kg:
            genkey(options)
        elif options.dlmeek:
            dlmeek()
        elif options.config is None:
            logging.fatal("Config file (-c or --config) must be specified.\n")
            parser.print_help()
            sys.exit()

        data = {}

        # Load json configuration file
        try:
            data_file = open(options.config)
            data = json.load(data_file)
            data_file.close()
        except Exception as err:
            logging.fatal(
                "Fatal error while loading configuration file.\n" + err)
            sys.exit()

        if "control_domain" not in data:
            logging.fatal("missing control domain")
            sys.exit()

        # Apply default values
        if "local_host" not in data:
            data["local_host"] = DEFAULT_LOCAL_HOST

        if "local_port" not in data:
            data["local_port"] = DEFAULT_LOCAL_PORT

        if "remote_host" not in data:
            data["remote_host"] = DEFAULT_REMOTE_HOST

        if "remote_port" not in data:
            data["remote_port"] = random.randint(20000, 60000)
            logging.info(
                "Using random port " + str(data["remote_port"]) + " as remote listening port")

        if "number" not in data:
            data["number"] = DEFAULT_REQUIRED
        elif data["number"] > 20:
            logging.warning(
                "Requesting " + str(data["number"]) + " connections. Note: most servers impose a limit of 20. You may not receive response at all.")

        if data["number"] > 100:
            data["number"] = 100

        if "dns_servers" not in data:
            if "dns_server" in data:
                data["dns_servers"] = data["dns_server"]
            else:
                data["dns_servers"] = DEFAULT_DNS_SERVERS

        if "pt_exec" not in data:
            data["pt_exec"] = DEFAULT_OBFS4_EXECADDR

        if "debug_ip" not in data:
            data["debug_ip"] = None

        if Mode == "VPS":
            if "obfs_level" not in data:
                data["obfs_level"] = 0
            elif 1 <= int(data["obfs_level"]) <= 2:
                logging.error(
                    "Support for obfs4proxy is experimental with known bugs. Run this mode at your own risk.")
        else:
            data["obfs_level"] = 3

        # Load certificates
        try:
            serverpub_data = open(data["remote_cert"], "r").read()
            serverpub = certloader(serverpub_data).importKey()
        except KeyError as e:
            logging.fatal(
                str(e) + " is not found in the config file. Quitting.")
            sys.exit()
        except Exception as err:
            print ("Fatal error while loading remote host certificate.")
            print (err)
            sys.exit()

        try:
            clientpri_data = open(data["local_cert"], "r").read()
            clientpri_data = clientpri_data.strip(' ').lstrip('\n')
            clientpri = certloader(clientpri_data).importKey()
            clientpri_sha1 = certloader(clientpri_data).getSHA1()
            print("Using private key with SHA1: " + clientpri_sha1 +
                  ". Please make sure it is identical the string in server-side config.")
            if not clientpri.has_private():
                print(
                    "Fatal error, no private key included in local certificate.")
        except KeyError as e:
            logging.fatal(
                str(e) + " is not found in the config file. Quitting.")
            sys.exit()
        except Exception as err:
            print ("Fatal error while loading local certificate.")
            print (err)
            sys.exit()

        try:
            clientpub_data = open(data["local_cert_pub"], "r").read()
            clientpub_data = clientpub_data.strip(' ').lstrip('\n')
            clientpub_sha1 = certloader(clientpub_data).getSHA1()
        except KeyError as e:
            logging.fatal(
                str(e) + " is not found in the config file. Quitting.")
            sys.exit()
        except Exception as err:
            print ("Fatal error while calculating SHA1 digest.")
            print (err)
            sys.exit()

        # TODO: make it more elegant

        if options.fs:
            swapfq = 3
        else:
            swapfq = 8

    except IOError as e:
        print("An error occurred: \n")
        print(e)
        sys.exit()

    # Start the main event loop

    try:
        ctl = Coordinate(
            data["control_domain"],
            clientpri,
            clientpri_sha1,
            serverpub,
            clientpub_sha1,
            data["number"],
            data["remote_host"],
            data["remote_port"],
            data["dns_servers"],
            data["debug_ip"],
            swapfq,
            data["pt_exec"],
            data["obfs_level"],
            options.ipv6,
            options.pn
        )
        sctl = ServerControl(
            data["remote_host"],
            ctl.remote_port,
            ctl,
            pt=bool(data["obfs_level"])
        )
        cctl = ClientControl(
            ctl,
            data["local_host"],
            data["local_port"]
        )
    except KeyError as e:
        print(e)
        logging.fatal("Bad config file. Quitting.")
        sys.exit()

    except Exception as e:
        print("An error occurred: \n")
        print(e)
        sys.exit()

    logging.info("Listening to local services at " +
                 data["local_host"] + ":" + str(data["local_port"]))
    logging.info("Listening to remote server at " +
                 data["remote_host"] + ":" + str(ctl.remote_port))

    try:
        asyncore.loop(use_poll=1)
    except KeyboardInterrupt:
        pass
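
The sys.stdout use in this example is the stream argument to logging.basicConfig: the -v/-vv flags change only the log level, while every record goes to standard output. A reduced sketch of the same pattern (configure_logging is a hypothetical name, not part of arkc-client):

import logging
import sys

def configure_logging(verbosity):
    # 0 -> WARNING, 1 -> INFO, 2 or more -> DEBUG; the stream is always stdout.
    level = {0: logging.WARNING, 1: logging.INFO}.get(verbosity, logging.DEBUG)
    logging.basicConfig(stream=sys.stdout, level=level,
                        format="%(levelname)s: %(asctime)s; %(message)s")

configure_logging(1)
logging.info("visible at INFO and above")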

Example 2

Project: pyspace
Source File: generic_unittest.py
def multiple_node_testing(verbose=False, report=False):
    """
    This function ensures the testing of all available nodes.
    The results of the test are packed into an HTML file which is
    saved in the current working directory.
    """
    # we define a list of nodes that we do not want to test
    skipped_dirs = ['pySPACE.missions.nodes.sink',
                    'pySPACE.missions.nodes.source',
                    'pySPACE.missions.nodes.scikits_nodes',
                    'pySPACE.missions.nodes.splitter',
                    'pySPACE.missions.nodes.meta',
                    'pySPACE.missions.nodes.debug.subflow_timing_node',
                    'pySPACE.missions.nodes.classification.ensemble',
                    'pySPACE.missions.nodes.classification.svm_variants.sparse',
                    'pySPACE.missions.nodes.spatial_filtering.sensor_selection',
                    'pySPACE.missions.nodes.visualization.ensemble_vis',
                    'pySPACE.missions.nodes.classification.svm_variants.RMM'
                    ]

    skipped_nodes = ['FeatureNormalizationNode',
                     # this exemplary call has a hardcoded path in it
                     'ElectrodeCoordinationPlotNode',
                     'AverageFeatureVisNode',
                     'AlamgirMultiTaskClassifierNode',
                     'ICAWrapperNode',
                     # The iteration does not converge on the test data
                     # TODO:Build converging iteration
                     'JunctionNode',  # requires private modules
                     'LaplacianReferenceNode',
                     # node has hardcoded values
                     'PissfNode',  # TODO:needs specialized training data
                     # does not apply to default data
                     'MonoTimeSeries2FeatureNode'
                     ]
    # initialize some counters for the different failure types
    total_tests, docu, exemplary, initialize, execution = 0, 0, 0, 0, 0

    stdout = sys.stdout

    if not verbose:
        # suppress all the different outputs from popping on screen
        sys.stderr = open(os.devnull, 'w')
        sys.stdout = open(os.devnull, 'w')
        pySPACE.configuration.min_log_level = 1000

    # this list will be populated with unit testing suites for all the
    # available nodes
    the_report_suite = []

    count = 0

    for key, item in list_of_nodes.items():
        # we want to skip the nodes defined above
        skiptest = [item.__module__.startswith(x) for x in skipped_dirs]
        count += 1
        stdout.write('\r')

        if True in skiptest or item.__name__ in skipped_nodes:
            continue

        stdout.write('\r')
        # print an OS independent status message
        stdout.write(">>>>>>>>>>>>>>> Nodes tested already: %d. Currently testing %s "
                     "<<<<<<<<<<<<<<<" % (count, item.__name__))
        stdout.flush()

        # update the test count
        total_tests += 4

        if verbose:
            stdout.write('\n' + '*' * 70 + '\n' + str(key) +
                         '\n' + '*' * 70 + '\n')

        suite = unittest.TestSuite()
        suite.addTest(ParametrizedTestCase.parametrize(
            current_testcase=GenericTestCase, node=item))

        if verbose:
            result = unittest.TextTestRunner(stream=stdout, verbosity=2).run(suite)
        else:
            result = unittest.TextTestRunner(stream=open(os.devnull, 'w'), verbosity=2).run(suite)

        if report:
            the_report_suite.append((suite, key))

        # check which tests failed or errored
        for failure in result.failures + result.errors:
            failed_test = str(failure[0])
            if failed_test.startswith('test_has_documentation'):
                docu += 1
            elif failed_test.startswith('test_has_exemplary_call'):
                exemplary += 1
            elif failed_test.startswith('test_initialize'):
                initialize += 1
            elif failed_test.startswith('test_execution'):
                execution += 1

    # either generate an HTML report or generate a matplotlib plot of
    # the results
    if report:
        try:
            import HTMLTestRunner
            import datetime
        except ImportError:
            print "Please download the HTMLTestRunner python script"

        the_html = open("generic_unittests.html", 'w')
        desc = ('This is the result of running the generic unit test on' +
                ' all available nodes as of %s') % datetime.datetime.now()
        runner = HTMLTestRunner.HTMLTestRunner(stream=the_html,
                                               title='Generic unittest',
                                               description=desc)
        runner.run(the_report_suite)
        the_html.close()

        # if a webbrowser is available, open the report
        try:
            import webbrowser
            webbrowser.open("generic_unittests.html")
        except:
            pass
    else:
        # plot the results
        success = total_tests - docu - exemplary - initialize - execution
        import matplotlib.pyplot as plt
        import numpy as np
        plt.clf()
        plt.figure(125, figsize=(15, 15))
        colors = plt.cm.prism(np.linspace(0., 1., 5))

        patches = plt.pie([success, docu, exemplary, initialize, execution],
                          autopct='%2.2f%%', colors=colors,
                          pctdistance=1.1, labeldistance=0.5,
                          explode=[0.03, 0.10, 0.15, 0.03, 0.03])

        plt.legend(patches[0],
                   ["Successful (" + str(success) + ")",
                    "No documentation (" + str(docu) + ")",
                    "No exemplary call (" + str(exemplary) + ")",
                    "Initialization failed (" + str(initialize) + ")",
                    "Execution failed (" + str(execution) + ")"],
                   loc="best")

        plt.title("Total number of tests:" + str(total_tests))
        plt.savefig("generic_unittest_plot.pdf")
        plt.close()

        # some print statements with the results of the tests
        sys.stdout = stdout
        print "\n" + '*' * 70 + '\n' + "Test results" + '\n' + '*' * 70
        print "Successful tests: " + str(success) + "\n" + \
              '-' * 70 + '\n' + \
              "No documentation: " + str(docu) + "\n" + \
              "No exemplary call: " + str(exemplary) + "\n" + \
              "Initialization failed: " + str(initialize) + "\n" + \
              "Execution failed: " + str(execution) + "\n" + \
              '-' * 70 + '\n'
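
The save/replace/restore dance above (stdout = sys.stdout, sys.stdout = open(os.devnull, 'w'), then restoring at the end) has a more concise equivalent in contextlib.redirect_stdout on Python 3.4+. A minimal sketch of that alternative, not part of pyspace itself:

import contextlib
import os

def noisy():
    print("suppressed output")

with open(os.devnull, "w") as devnull:
    with contextlib.redirect_stdout(devnull):
        noisy()  # output is discarded
print("back on the real stdout")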

Example 3

Project: imagrium
Source File: regrtest.py
def main(tests=None, testdir=None, verbose=0, quiet=False,
         exclude=False, single=False, randomize=False, fromfile=None,
         findleaks=False, use_resources=None, trace=False, coverdir='coverage',
         runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
         expected=False, memo=None, junit_xml=None):
    """Execute a test suite.

    This also parses command-line options and modifies its behavior
    accordingly.

    tests -- a list of strings containing test names (optional)
    testdir -- the directory in which to look for tests (optional)

    Users other than the Python test suite will certainly want to
    specify testdir; if it's omitted, the directory containing the
    Python test suite is searched for.

    If the tests argument is omitted, the tests listed on the
    command-line will be used.  If that's empty, too, then all *.py
    files beginning with test_ will be used.

    The other default arguments (verbose, quiet, exclude,
    single, randomize, findleaks, use_resources, trace, coverdir, and
    print_slow) allow programmers calling main() directly to set the
    values that would normally be set by flags on the command line.
    """

    test_support.record_original_stdout(sys.stdout)
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:wM:em:j:',
                                   ['help', 'verbose', 'quiet', 'exclude',
                                    'single', 'slow', 'random', 'fromfile',
                                    'findleaks', 'use=', 'threshold=', 'trace',
                                    'coverdir=', 'nocoverdir', 'runleaks',
                                    'huntrleaks=', 'verbose2', 'memlimit=',
                                    'expected', 'memo'
                                    ])
    except getopt.error, msg:
        usage(2, msg)

    # Defaults
    allran = True
    if use_resources is None:
        use_resources = []
    for o, a in opts:
        if o in ('-h', '--help'):
            usage(0)
        elif o in ('-v', '--verbose'):
            verbose += 1
        elif o in ('-w', '--verbose2'):
            verbose2 = True
        elif o in ('-q', '--quiet'):
            quiet = True
            verbose = 0
        elif o in ('-x', '--exclude'):
            exclude = True
            allran = False
        elif o in ('-e', '--expected'):
            expected = True
            allran = False
        elif o in ('-s', '--single'):
            single = True
        elif o in ('-S', '--slow'):
            print_slow = True
        elif o in ('-r', '--randomize'):
            randomize = True
        elif o in ('-f', '--fromfile'):
            fromfile = a
        elif o in ('-l', '--findleaks'):
            findleaks = True
        elif o in ('-L', '--runleaks'):
            runleaks = True
        elif o in ('-m', '--memo'):
            memo = a
        elif o in ('-j', '--junit-xml'):
            junit_xml = a
        elif o in ('-t', '--threshold'):
            import gc
            gc.set_threshold(int(a))
        elif o in ('-T', '--coverage'):
            trace = True
        elif o in ('-D', '--coverdir'):
            coverdir = os.path.join(os.getcwd(), a)
        elif o in ('-N', '--nocoverdir'):
            coverdir = None
        elif o in ('-R', '--huntrleaks'):
            huntrleaks = a.split(':')
            if len(huntrleaks) != 3:
                print a, huntrleaks
                usage(2, '-R takes three colon-separated arguments')
            if len(huntrleaks[0]) == 0:
                huntrleaks[0] = 5
            else:
                huntrleaks[0] = int(huntrleaks[0])
            if len(huntrleaks[1]) == 0:
                huntrleaks[1] = 4
            else:
                huntrleaks[1] = int(huntrleaks[1])
            if len(huntrleaks[2]) == 0:
                huntrleaks[2] = "reflog.txt"
        elif o in ('-M', '--memlimit'):
            test_support.set_memlimit(a)
        elif o in ('-u', '--use'):
            u = [x.lower() for x in a.split(',')]
            for r in u:
                if r == 'all':
                    use_resources[:] = RESOURCE_NAMES
                    continue
                remove = False
                if r[0] == '-':
                    remove = True
                    r = r[1:]
                if r not in RESOURCE_NAMES:
                    usage(1, 'Invalid -u/--use option: ' + a)
                if remove:
                    if r in use_resources:
                        use_resources.remove(r)
                elif r not in use_resources:
                    use_resources.append(r)
        else:
            print >>sys.stderr, ("No handler for option {0}.  Please "
                "report this as a bug at http://bugs.python.org.").format(o)
            sys.exit(1)
    if single and fromfile:
        usage(2, "-s and -f don't go together!")

    good = []
    bad = []
    skipped = []
    resource_denieds = []

    if findleaks:
        try:
            if test_support.is_jython:
                raise ImportError()
            import gc
        except ImportError:
            print 'No GC available, disabling findleaks.'
            findleaks = False
        else:
            # Uncomment the line below to report garbage that is not
            # freeable by reference counting alone.  By default only
            # garbage that is not collectable by the GC is reported.
            #gc.set_debug(gc.DEBUG_SAVEALL)
            found_garbage = []

    if single:
        from tempfile import gettempdir
        filename = os.path.join(gettempdir(), 'pynexttest')
        try:
            fp = open(filename, 'r')
            next = fp.read().strip()
            tests = [next]
            fp.close()
        except IOError:
            pass

    if fromfile:
        tests = []
        fp = open(fromfile)
        for line in fp:
            guts = line.split() # assuming no test has whitespace in its name
            if guts and not guts[0].startswith('#'):
                tests.extend(guts)
        fp.close()

    # Strip .py extensions.
    if args:
        args = map(removepy, args)
        allran = False
    if tests:
        tests = map(removepy, tests)

    stdtests = STDTESTS[:]
    nottests = NOTTESTS[:]  # list copy; Python 2 lists have no .copy() method
    if exclude:
        for arg in args:
            if arg in stdtests:
                stdtests.remove(arg)
        nottests[:0] = args
        args = []
    tests = tests or args or findtests(testdir, stdtests, nottests)
    if single:
        tests = tests[:1]
    if randomize:
        random.shuffle(tests)
    if trace:
        import trace
        tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
                             trace=False, count=True)
    test_times = []
    test_support.verbose = verbose      # Tell tests to be moderately quiet
    test_support.use_resources = use_resources
    test_support.junit_xml_dir = junit_xml
    save_modules = sys.modules.keys()
    skips = _ExpectedSkips()
    failures = _ExpectedFailures()
    for test in tests:
        if expected and (test in skips or test in failures):
            continue
        if not quiet:
            print test
            sys.stdout.flush()
        if trace:
            # If we're tracing code coverage, then we don't exit with status
            # if on a false return value from main.
            tracer.runctx('runtest(test, verbose, quiet,'
                          '        test_times, testdir)',
                          globals=globals(), locals=vars())
        else:
            try:
                ok = runtest(test, verbose, quiet, test_times,
                             testdir, huntrleaks, junit_xml)
            except KeyboardInterrupt:
                # print a newline separate from the ^C
                print
                break
            except:
                raise
            if ok > 0:
                good.append(test)
            elif ok == 0:
                bad.append(test)
            else:
                skipped.append(test)
                if ok == -2:
                    resource_denieds.append(test)
        if findleaks:
            gc.collect()
            if gc.garbage:
                print "Warning: test created", len(gc.garbage),
                print "uncollectable object(s)."
                # move the uncollectable objects somewhere so we don't see
                # them again
                found_garbage.extend(gc.garbage)
                del gc.garbage[:]
        # Unload the newly imported modules (best effort finalization)
        for module in sys.modules.keys():
            if module not in save_modules and module.startswith("test."):
                test_support.unload(module)
                module = module[5:]
                if hasattr(_test, module):
                    delattr(_test, module)

    if good and not quiet:
        if not bad and not skipped and len(good) > 1:
            print "All",
        print count(len(good), "test"), "OK."
    if print_slow:
        test_times.sort(reverse=True)
        print "10 slowest tests:"
        for time, test in test_times[:10]:
            print "%s: %.1fs" % (test, time)
    surprises = 0
    if skipped and not quiet:
        print count(len(skipped), "test"), "skipped:"
        surprises += countsurprises(skips, skipped, 'skip', 'ran', allran, resource_denieds)
    if bad:
        print count(len(bad), "test"), "failed:"
        surprises += countsurprises(failures, bad, 'fail', 'passed', allran, resource_denieds)

    if verbose2 and bad:
        print "Re-running failed tests in verbose mode"
        for test in bad:
            print "Re-running test %r in verbose mode" % test
            sys.stdout.flush()
            try:
                test_support.verbose = True
                ok = runtest(test, True, quiet, test_times, testdir,
                             huntrleaks)
            except KeyboardInterrupt:
                # print a newline separate from the ^C
                print
                break
            except:
                raise

    if single:
        alltests = findtests(testdir, stdtests, nottests)
        for i in range(len(alltests)):
            if tests[0] == alltests[i]:
                if i == len(alltests) - 1:
                    os.unlink(filename)
                else:
                    fp = open(filename, 'w')
                    fp.write(alltests[i+1] + '\n')
                    fp.close()
                break
        else:
            os.unlink(filename)

    if trace:
        r = tracer.results()
        r.write_results(show_missing=True, summary=True, coverdir=coverdir)

    if runleaks:
        os.system("leaks %d" % os.getpid())

    if memo:
        savememo(memo,good,bad,skipped)

    sys.exit(surprises > 0)

Example 4

Project: rootpy
Source File: gen_rst.py
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
    """ Generate the rst file for a given example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%s.png' % base_image_name
    root_image_fname = 'root_%s_%%s.png' % base_image_name
    root_fig_num = 1

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    root_image_path = os.path.join(image_dir, root_image_fname)

    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                               'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        first_root_image_file = root_image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if (not os.path.exists(first_image_file) or
            not os.path.exists(first_root_image_file) or
                os.stat(first_image_file).st_mtime <=
                                    os.stat(src_file).st_mtime):
            # We need to execute the code
            print 'plotting %s' % fname
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example gets created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()
                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                                            my_globals['__doc__'],
                                            '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                for fig_num in (fig_mngr.num for fig_mngr in
                        matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_num)
                    plt.savefig(image_path % fig_num)
                    figure_list.append(image_fname % fig_num)
                for canvas in ROOT.gROOT.GetListOfCanvases():
                    canvas.SaveAs(root_image_path % root_fig_num)
                    canvas.Close()
                    figure_list.append(root_image_fname % root_fig_num)
                    root_fig_num += 1
            except:
                print 80 * '_'
                print '%s is not compiling:' % fname
                traceback.print_exc()
                print 80 * '_'
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print " - time elapsed : %.2g sec" % time_elapsed
        else:
            figure_list = [f[len(image_dir):]
                            for f in glob.glob(image_path % '[1-9]')]
                            #for f in glob.glob(image_path % '*')]

        # generate thumb file
        this_template = plot_rst_template
        from matplotlib import image
        if os.path.exists(first_image_file):
            image.thumbnail(first_image_file, thumb_file, 0.2)
        elif os.path.exists(first_root_image_file):
            image.thumbnail(first_root_image_file, thumb_file, 0.2)

    if not os.path.exists(thumb_file):
        # create something not to replace the thumbnail
        shutil.copy('images/blank_image.png', thumb_file)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()
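
This example (like the final example below) routes sys.stdout through a Tee helper defined elsewhere in the project: writes are forwarded to the real stream and also collected in a StringIO buffer. A minimal sketch of what such a helper might look like; the project's actual Tee may differ:

import sys
from io import StringIO

class Tee(object):
    """Forward writes to `primary` while also recording them in `capture`."""
    def __init__(self, primary, capture):
        self.primary = primary
        self.capture = capture

    def write(self, text):
        self.primary.write(text)
        self.capture.write(text)

    def flush(self):
        self.primary.flush()

real_stdout = sys.stdout
buffer = StringIO()
sys.stdout = Tee(real_stdout, buffer)
print("shown on screen and captured")
sys.stdout = real_stdout  # always restore before reading the capture
print("captured: %r" % buffer.getvalue())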

Example 5

Project: babble
Source File: regrtest.py
def main(tests=None, testdir=None, verbose=0, quiet=False, generate=False,
         exclude=False, single=False, randomize=False, fromfile=None,
         findleaks=False, use_resources=None, trace=False, coverdir='coverage',
         runleaks=False, huntrleaks=False, verbose2=False, expected=False,
         memo=None, junit_xml=None):
    """Execute a test suite.

    This also parses command-line options and modifies its behavior
    accordingly.

    tests -- a list of strings containing test names (optional)
    testdir -- the directory in which to look for tests (optional)

    Users other than the Python test suite will certainly want to
    specify testdir; if it's omitted, the directory containing the
    Python test suite is searched for.

    If the tests argument is omitted, the tests listed on the
    command-line will be used.  If that's empty, too, then all *.py
    files beginning with test_ will be used.

    The other default arguments (verbose, quiet, generate, exclude, single,
    randomize, findleaks, use_resources, trace and coverdir) allow programmers
    calling main() directly to set the values that would normally be set by
    flags on the command line.
    """

    test_support.record_original_stdout(sys.stdout)
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hvgqxsrf:lu:t:TD:NLR:wM:em:j:',
                                   ['help', 'verbose', 'quiet', 'generate',
                                    'exclude', 'single', 'random', 'fromfile',
                                    'findleaks', 'use=', 'threshold=', 'trace',
                                    'coverdir=', 'nocoverdir', 'runleaks',
                                    'huntrleaks=', 'verbose2', 'memlimit=',
                                    'expected', 'memo'
                                    ])
    except getopt.error, msg:
        usage(2, msg)

    # Defaults
    allran = True
    if use_resources is None:
        use_resources = []
    for o, a in opts:
        if o in ('-h', '--help'):
            usage(0)
        elif o in ('-v', '--verbose'):
            verbose += 1
        elif o in ('-w', '--verbose2'):
            verbose2 = True
        elif o in ('-q', '--quiet'):
            quiet = True
            verbose = 0
        elif o in ('-g', '--generate'):
            generate = True
        elif o in ('-x', '--exclude'):
            exclude = True
            allran = False
        elif o in ('-e', '--expected'):
            expected = True
            allran = False
        elif o in ('-s', '--single'):
            single = True
        elif o in ('-r', '--randomize'):
            randomize = True
        elif o in ('-f', '--fromfile'):
            fromfile = a
        elif o in ('-l', '--findleaks'):
            findleaks = True
        elif o in ('-L', '--runleaks'):
            runleaks = True
        elif o in ('-m', '--memo'):
            memo = a
        elif o in ('-j', '--junit-xml'):
            junit_xml = a
        elif o in ('-t', '--threshold'):
            import gc
            gc.set_threshold(int(a))
        elif o in ('-T', '--coverage'):
            trace = True
        elif o in ('-D', '--coverdir'):
            coverdir = os.path.join(os.getcwd(), a)
        elif o in ('-N', '--nocoverdir'):
            coverdir = None
        elif o in ('-R', '--huntrleaks'):
            huntrleaks = a.split(':')
            if len(huntrleaks) != 3:
                print a, huntrleaks
                usage(2, '-R takes three colon-separated arguments')
            if len(huntrleaks[0]) == 0:
                huntrleaks[0] = 5
            else:
                huntrleaks[0] = int(huntrleaks[0])
            if len(huntrleaks[1]) == 0:
                huntrleaks[1] = 4
            else:
                huntrleaks[1] = int(huntrleaks[1])
            if len(huntrleaks[2]) == 0:
                huntrleaks[2] = "reflog.txt"
        elif o in ('-M', '--memlimit'):
            test_support.set_memlimit(a)
        elif o in ('-u', '--use'):
            u = [x.lower() for x in a.split(',')]
            for r in u:
                if r == 'all':
                    use_resources[:] = RESOURCE_NAMES
                    continue
                remove = False
                if r[0] == '-':
                    remove = True
                    r = r[1:]
                if r not in RESOURCE_NAMES:
                    usage(1, 'Invalid -u/--use option: ' + a)
                if remove:
                    if r in use_resources:
                        use_resources.remove(r)
                elif r not in use_resources:
                    use_resources.append(r)
    if generate and verbose:
        usage(2, "-g and -v don't go together!")
    if single and fromfile:
        usage(2, "-s and -f don't go together!")

    good = []
    bad = []
    skipped = []
    resource_denieds = []

    if findleaks:
        try:
            if test_support.is_jython:
                raise ImportError()
            import gc
        except ImportError:
            print 'No GC available, disabling findleaks.'
            findleaks = False
        else:
            # Uncomment the line below to report garbage that is not
            # freeable by reference counting alone.  By default only
            # garbage that is not collectable by the GC is reported.
            #gc.set_debug(gc.DEBUG_SAVEALL)
            found_garbage = []

    if single:
        from tempfile import gettempdir
        filename = os.path.join(gettempdir(), 'pynexttest')
        try:
            fp = open(filename, 'r')
            next = fp.read().strip()
            tests = [next]
            fp.close()
        except IOError:
            pass

    if fromfile:
        tests = []
        fp = open(fromfile)
        for line in fp:
            guts = line.split() # assuming no test has whitespace in its name
            if guts and not guts[0].startswith('#'):
                tests.extend(guts)
        fp.close()

    # Strip .py extensions.
    if args:
        args = map(removepy, args)
        allran = False
    if tests:
        tests = map(removepy, tests)

    stdtests = STDTESTS[:]
    nottests = NOTTESTS[:]
    if exclude:
        for arg in args:
            if arg in stdtests:
                stdtests.remove(arg)
        nottests[:0] = args
        args = []
    tests = tests or args or findtests(testdir, stdtests, nottests)
    if single:
        tests = tests[:1]
    if randomize:
        random.shuffle(tests)
    if trace:
        import trace
        tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
                             trace=False, count=True)
    test_support.verbose = verbose      # Tell tests to be moderately quiet
    test_support.use_resources = use_resources
    test_support.junit_xml_dir = junit_xml
    save_modules = sys.modules.keys()
    skips = _ExpectedSkips()
    failures = _ExpectedFailures()
    for test in tests:
        if expected and (test in skips or test in failures):
            continue
        if not quiet:
            print test
            sys.stdout.flush()
        if trace:
            # If we're tracing code coverage, then we don't exit with status
            # if on a false return value from main.
            tracer.runctx('runtest(test, generate, verbose, quiet, testdir)',
                          globals=globals(), locals=vars())
        else:
            try:
                ok = runtest(test, generate, verbose, quiet, testdir,
                             huntrleaks, junit_xml)
            except KeyboardInterrupt:
                # print a newline separate from the ^C
                print
                break
            except:
                raise
            if ok > 0:
                good.append(test)
            elif ok == 0:
                bad.append(test)
            else:
                skipped.append(test)
                if ok == -2:
                    resource_denieds.append(test)
        if findleaks:
            gc.collect()
            if gc.garbage:
                print "Warning: test created", len(gc.garbage),
                print "uncollectable object(s)."
                # move the uncollectable objects somewhere so we don't see
                # them again
                found_garbage.extend(gc.garbage)
                del gc.garbage[:]
        # Unload the newly imported modules (best effort finalization)
        for module in sys.modules.keys():
            if module not in save_modules and module.startswith("test."):
                test_support.unload(module)

    # The lists won't be sorted if running with -r
    good.sort()
    bad.sort()
    skipped.sort()

    if good and not quiet:
        if not bad and not skipped and len(good) > 1:
            print "All",
        print count(len(good), "test"), "OK."
        if verbose:
            print "CAUTION:  stdout isn't compared in verbose mode:"
            print "a test that passes in verbose mode may fail without it."
    surprises = 0
    if skipped and not quiet:
        print count(len(skipped), "test"), "skipped:"
        surprises += countsurprises(skips, skipped, 'skip', 'ran', allran, resource_denieds)
    if bad:
        print count(len(bad), "test"), "failed:"
        surprises += countsurprises(failures, bad, 'fail', 'passed', allran, resource_denieds)

    if verbose2 and bad:
        print "Re-running failed tests in verbose mode"
        for test in bad:
            print "Re-running test %r in verbose mode" % test
            sys.stdout.flush()
            try:
                test_support.verbose = 1
                ok = runtest(test, generate, 1, quiet, testdir,
                             huntrleaks)
            except KeyboardInterrupt:
                # print a newline separate from the ^C
                print
                break
            except:
                raise

    if single:
        alltests = findtests(testdir, stdtests, nottests)
        for i in range(len(alltests)):
            if tests[0] == alltests[i]:
                if i == len(alltests) - 1:
                    os.unlink(filename)
                else:
                    fp = open(filename, 'w')
                    fp.write(alltests[i+1] + '\n')
                    fp.close()
                break
        else:
            os.unlink(filename)

    if trace:
        r = tracer.results()
        r.write_results(show_missing=True, summary=True, coverdir=coverdir)

    if runleaks:
        os.system("leaks %d" % os.getpid())

    if memo:
        savememo(memo,good,bad,skipped)

    sys.exit(surprises > 0)

Example 6

Project: pylon
Source File: main.py
def main():
    """ Parses the command line and call Pylon with the correct data.
    """
    parser = optparse.OptionParser(usage="usage: pylon [options] input_file",
                                   version="%prog 0.4.4")

    parser.add_option("-o", "--output", dest="output", metavar="FILE",
        help="Write the solution report to FILE.")

#    parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
#        default=False, help="Print less information.")

    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
        default=False, help="Print more information.")

#    parser.add_option("-g", "--gui", action="store_true", dest="gui",
#        default=False, help="Use the portable graphical interface to Pylon.")

#    parser.add_option("-n", "--no-report", action="store_true",
#        dest="no_report", default=False, help="Suppress report output.")

    parser.add_option("-d", "--debug", action="store_true", dest="debug",
        default=False, help="Print debug information.")

    parser.add_option("-t", "--input-type", dest="type", metavar="TYPE",
        default="any", help="The argument following the -t is used to "
        "indicate the format type of the input data file. The types which are "
        "currently supported include: matpower, psse [default: %default]"
        " If not specified Pylon will try to determine the type according to "
        "the file name extension and the file header.")

    parser.add_option("-s", "--solver", dest="solver", metavar="SOLVER",
        default="acpf", help="The argument following the -s is used to"
        "indicate the type of routine to use in solving. The types which are "
        "currently supported are: 'dcpf', 'acpf', 'dcopf', 'acopf', 'udopf' "
        "and 'none' [default: %default].")

    parser.add_option("-a", "--algorithm", action="store_true",
        metavar="ALGORITHM", dest="algorithm", default="newton",
        help="Indicates the algorithm type to be used for AC power flow. The "
        "types which are currently supported are: 'newton' and 'fdpf' "
        "[default: %default].")

    parser.add_option("-T", "--output-type", dest="output_type",
        metavar="OUTPUT_TYPE", default="rst", help="Indicates the output "
        "format type.  The type swhich are currently supported include: rst, "
        "matpower, csv, excel and none [default: %default].")

    (options, args) = parser.parse_args()

    if options.verbose:
        logger.setLevel(logging.INFO)
    elif options.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.ERROR)

    # Output.
    outext = {'psse': '.raw', 'matpower': '.m'}
    if options.output:
        if options.output == "-":
            outfile = sys.stdout
            logger.setLevel(logging.CRITICAL) # must stay quiet
#            options.output_type = "none"
        else:
            outfile = open(options.output, "wb")
    elif options.output_type is not None:
        if options.output_type in outext.keys():
            inname, ext = os.path.splitext(args[0])
            outfile = inname + outext[options.output_type]
        else:
            outfile = sys.stdout
    else:
        outfile = sys.stdout
#        if not options.no_report:
#            logger.setLevel(logging.CRITICAL) # must stay quiet

    # Input.
    if len(args) > 1:
        parser.print_help()
        sys.exit(1)
    elif (len(args) == 0) or (args[0] == "-"):
        filename = ""
        if sys.stdin.isatty():
            # True if the file is connected to a tty device, and False
            # otherwise (pipeline or file redirection).
            parser.print_help()
            sys.exit(1)
        else:
            # Handle piped input ($ cat ehv3.raw | pylon | rst2pdf -o ans.pdf).
            infile = sys.stdin
    else:
        filename = args[0]
        infile = open(filename, "rb")

    if options.type == "any":
        type = detect_data_file(infile, filename)
    else:
        type = options.type

    # Get the case from the input file-like object.
    case = read_case(infile, type)

    if case is not None:
        # Routine (and algorithm) selection.
        if options.solver == "dcpf":
            solver = DCPF(case)
        elif options.solver == "acpf":
            if options.algorithm == "newton":
                solver = NewtonPF(case)
            elif options.algorithm == "fdpf":
                solver = FastDecoupledPF(case)
            else:
                logger.critical("Invalid algorithm [%s]." % options.algorithm)
                sys.exit(1)
        elif options.solver == "dcopf":
            solver = OPF(case, True)
        elif options.solver == "acopf":
            solver = OPF(case, False)
        elif options.solver == "udopf":
            solver = UDOPF(case)
        elif options.solver == "none":
            solver = None
        else:
            logger.critical("Invalid solver [%s]." % options.solver)
#            sys.exit(1)
            solver = None

        # Output writer selection.
        if options.output_type == "matpower":
            writer = MATPOWERWriter(case)
        elif options.output_type == "psse":
            writer = PSSEWriter(case)
        elif options.output_type == "rst":
            writer = ReSTWriter(case)
        elif options.output_type == "csv":
            from pylon.io.excel import CSVWriter
            writer = CSVWriter(case)
        elif options.output_type == "excel":
            from pylon.io.excel import ExcelWriter
            writer = ExcelWriter(case)
        elif options.output_type == "pickle":
            writer = PickleWriter(case)
        else:
            logger.critical("Invalid output type [%s]." % options.output_type)
            sys.exit(1)

        if solver is not None:
            solver.solve()
        if options.output_type != "none":
            writer.write(outfile)
            print('Output file {0} written'.format(outfile))
    else:
        logger.critical("Unable to read case data.")

    # Don't close stdin or stdout.
    if len(args) == 1:
        infile.close()
    if options.output and not (options.output == "-"):
        outfile.close()
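
One convention worth noting in this example: an output argument of "-" selects sys.stdout directly, and the code is careful never to close the interpreter's own streams. A minimal, hypothetical sketch of that dispatch:

import sys

def open_output(path):
    # "-" (or no path) means the process's own stdout, which the
    # caller must not close; anything else is a regular file.
    if path is None or path == "-":
        return sys.stdout, False
    return open(path, "w"), True

out, should_close = open_output("-")
out.write("report\n")
if should_close:
    out.close()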

Example 8

Project: polylearn
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as a png image if the file name
        # starts with plot and the source file is more recent than
        # any existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First cd into the original example dir, so that any file
                # created by the example gets created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr

                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally these should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
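
The Tee object used above (defined elsewhere in gen_rst.py) lets the script both echo and record everything an executed example prints: sys.stdout is swapped for the tee, and the original stream is restored in a finally block so an exception cannot leave it hijacked. A minimal sketch of that capture pattern, assuming a Tee shaped roughly like the original helper:

import sys
from io import StringIO

class Tee(object):
    """File-like object that duplicates writes to two underlying streams."""
    def __init__(self, stream1, stream2):
        self.stream1 = stream1
        self.stream2 = stream2

    def write(self, data):
        self.stream1.write(data)
        self.stream2.write(data)

    def flush(self):
        self.stream1.flush()
        self.stream2.flush()

orig_stdout = sys.stdout
capture = StringIO()
try:
    sys.stdout = Tee(orig_stdout, capture)
    print("shown on the console AND recorded")
finally:
    sys.stdout = orig_stdout  # always restore, even if the code above raises

recorded = capture.getvalue()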

Example 9

Project: scikit-video
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as a png image if the file name
        # starts with plot and the source file is more recent than
        # any existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First cd into the original example dir, so that any file
                # created by the example gets created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr

                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally these should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
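
This function is identical to the one in the previous example (several projects vendor the same gen_rst.py), so a detail worth isolating instead is how the captured stdout is turned into a reST literal block before being saved next to the generated images. A tiny sketch of that formatting step:

captured = "accuracy: 0.97\ndone"

# Indent every captured line by two spaces so reST renders it as a
# literal block under the '**Script output**::' marker.
block = '**Script output**::\n\n  %s\n\n' % ('\n  '.join(captured.split('\n')))
print(block)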

Example 10

View license
    def test_a_long_history(self):
        #return
        print datetime.datetime.now()
        print datetime.datetime.today()
        test_host_005 = self.sched.hosts.find_by_name("test_host_005")
        test_host_099 = self.sched.hosts.find_by_name("test_host_099")
        test_ok_00 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_00")
        test_ok_01 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_01")
        test_ok_04 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_04")
        test_ok_16 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_16")
        test_ok_99 = self.sched.services.find_srv_by_name_and_hostname("test_host_099", "test_ok_01")
        days = 4

        etime = time.time()
        print "now it is", time.ctime(etime)
        print "now it is", time.gmtime(etime)
        etime_midnight = (etime - (etime % 86400)) + time.altzone
        print "midnight was", time.ctime(etime_midnight)
        print "midnight was", time.gmtime(etime_midnight)
        query_start = etime_midnight - (days - 1) * 86400
        query_end = etime_midnight
        print "query_start", time.ctime(query_start)
        print "query_end ", time.ctime(query_end)

        # |----------|----------|----------|----------|----------|---x
        #                                                            etime
        #                                                        etime_midnight
        #             ---x------
        #                etime -  4 days
        #                       |---
        #                       query_start
        #
        #                ............................................
        #                events in the log database ranging till now
        #
        #                       |________________________________|
        #                       events which will be read from db
        #
        loops = int(86400 / 192)
        time_hacker.time_warp(-1 * days * 86400)
        print "warp back to", time.ctime(time.time())
        # run silently
        old_stdout = sys.stdout
        sys.stdout = open(os.devnull, "w")
        should_be = 0
        for day in xrange(days):
            sys.stderr.write("day %d now it is %s i run %d loops\n" % (day, time.ctime(time.time()), loops))
            self.scheduler_loop(2, [
                [test_ok_00, 0, "OK"],
                [test_ok_01, 0, "OK"],
                [test_ok_04, 0, "OK"],
                [test_ok_16, 0, "OK"],
                [test_ok_99, 0, "OK"],
            ])
            self.update_broker()
            #for i in xrange(3600 * 24 * 7):
            for i in xrange(loops):
                if i % 10000 == 0:
                    sys.stderr.write(str(i))
                if i % 399 == 0:
                    self.scheduler_loop(3, [
                        [test_ok_00, 1, "WARN"],
                        [test_ok_01, 2, "CRIT"],
                        [test_ok_04, 3, "UNKN"],
                        [test_ok_16, 1, "WARN"],
                        [test_ok_99, 2, "CRIT"],
                    ])
                    if int(time.time()) >= query_start and int(time.time()) <= query_end:
                        should_be += 3
                        sys.stderr.write("now it should be %s\n" % should_be)
                time.sleep(62)
                if i % 399 == 0:
                    self.scheduler_loop(1, [
                        [test_ok_00, 0, "OK"],
                        [test_ok_01, 0, "OK"],
                        [test_ok_04, 0, "OK"],
                        [test_ok_16, 0, "OK"],
                        [test_ok_99, 0, "OK"],
                    ])
                    if int(time.time()) >= query_start and int(time.time()) <= query_end:
                        should_be += 1
                        sys.stderr.write("now it should be %s\n" % should_be)
                time.sleep(2)
                if i % 9 == 0:
                    self.scheduler_loop(3, [
                        [test_ok_00, 1, "WARN"],
                        [test_ok_01, 2, "CRIT"],
                    ])

                time.sleep(62)
                if i % 9 == 0:
                    self.scheduler_loop(1, [
                        [test_ok_00, 0, "OK"],
                        [test_ok_01, 0, "OK"],
                    ])
                time.sleep(2)
                if i % 9 == 0:
                    self.scheduler_loop(3, [
                        [test_host_005, 2, "DOWN"],
                    ])
                if i % 2 == 0:
                    self.scheduler_loop(3, [
                        [test_host_099, 2, "DOWN"],
                    ])
                time.sleep(62)
                if i % 9 == 0:
                    self.scheduler_loop(3, [
                        [test_host_005, 0, "UP"],
                    ])
                if i % 2 == 0:
                    self.scheduler_loop(3, [
                        [test_host_099, 0, "UP"],
                    ])
                time.sleep(2)
                self.update_broker()
                if i % 1000 == 0:
                    self.livestatus_broker.db.commit()
            endtime = time.time()
            self.livestatus_broker.db.commit()
            sys.stderr.write("day %d end it is %s\n" % (day, time.ctime(time.time())))
        sys.stdout.close()
        sys.stdout = old_stdout
        self.livestatus_broker.db.commit_and_rotate_log_db()

        numlogs = self.livestatus_broker.db.execute("SELECT count(*) FROM logs")
        print "numlogs is", numlogs

        # now we have a lot of events
        # find type = HOST ALERT for test_host_005
        request = """GET log
Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 5
OutputFormat: json"""
        # switch back to realtime. we want to know how long it takes
        time_hacker.set_real_time()

        print self.livestatus_broker.db.database_file
        print request
        print "query 1 --------------------------------------------------"
        tic = time.time()
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        tac = time.time()
        elapsed1 = tac - tic
        pyresponse = eval(response)
        print "pyresponse", len(pyresponse)
        print "should be", should_be
        self.assertEqual(should_be, len(pyresponse))
        print "query 2 cache---------------------------------------------"
        tic = time.time()
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        tac = time.time()
        elapsed2 = tac - tic
        pyresponse = eval(response)
        self.assertEqual(should_be, len(pyresponse) )
        print "clear the cache"
        print "use aggressive sql"
        print "query 3 --------------------------------------------------"
        self.livestatus_broker.query_cache.wipeout()
        self.livestatus_broker.db.use_aggressive_sql = True
        tic = time.time()
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        tac = time.time()
        elapsed3 = tac - tic
        pyresponse = eval(response)
        self.assertEqual(should_be, len(pyresponse))
        print "query 4 cache---------------------------------------------"
        tic = time.time()
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        tac = time.time()
        elapsed4 = tac - tic
        pyresponse = eval(response)
        self.assertEqual(should_be, len(pyresponse))
        print "elapsed1", elapsed1
        print "elapsed2", elapsed2
        print "elapsed3", elapsed3
        print "elapsed4", elapsed4
        msg = """~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NB NB NB: This isn't necessarily a failure !!! This check highly depends on the system load there was while the test was running.
Maybe you could relaunch the test and it will succeed.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
        self.assertLess(elapsed2*0.9, elapsed1, msg)
        self.assertLess(elapsed3*0.9, elapsed1, msg)
        self.assertLess(elapsed4*0.9, elapsed3, msg)

        time_hacker.set_my_time()
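
The test above runs its long inner loop silently by pointing sys.stdout at os.devnull and restoring the saved stream when it is done, while progress messages keep flowing to sys.stderr. The same idea wrapped in a context manager, so the restore cannot be skipped (a sketch, not part of the test suite above):

import os
import sys
from contextlib import contextmanager

@contextmanager
def silenced_stdout():
    old_stdout = sys.stdout
    devnull = open(os.devnull, "w")
    try:
        sys.stdout = devnull
        yield
    finally:
        sys.stdout = old_stdout  # restore even if the body raises
        devnull.close()

with silenced_stdout():
    print("this goes nowhere")
print("this is visible again")

On Python 3.4+, contextlib.redirect_stdout(target) provides the same behaviour out of the box.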

Example 11

Project: montepython_public
Source File: mcmc.py
View license
def chain(cosmo, data, command_line):
    """
    Run a Markov chain of fixed length with a Metropolis Hastings algorithm.

    Main function of this module, this is the actual Markov chain procedure.
    After having selected a starting point in parameter space, defining the
    first **last accepted** point, it will, for a given number of steps:

    + randomly choose a new point following the *proposal density*,
    + compute the cosmological *observables* through the cosmological module,
    + compute the value of the *likelihoods* of the desired experiments at this
      point,
    + *accept/reject* this point given its likelihood compared to that of
      the last accepted one.

    Every time the code accepts :code:`data.write_step` points (a quantity
    defined in the input parameter file), it will write the result to disk,
    flushing the buffer by closing and reopening the output file.

    .. note::

        To use the code to set a fiducial file for certain fixed parameters,
        there are two solutions. The first is to set all input 1-sigma
        proposal densities to zero (this method still works, but is no longer
        recommended). The second consists of using the flag "-f 0"
        to force a step of zero amplitude.

    """

    ## Initialisation
    loglike = 0

    # If command_line.silent was requested, outputs should only contain
    # data.out. Otherwise, it will also contain sys.stdout
    outputs = [data.out]
    if not command_line.silent:
        outputs.append(sys.stdout)

    # check for MPI
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        # suppress duplicate output from slaves
        if rank:
            command_line.quiet = True
    except ImportError:
        # set all chains to master if no MPI
        rank = 0

    # Recover the covariance matrix according to the input, if the varying set
    # of parameters is non-zero
    if (data.get_mcmc_parameters(['varying']) != []):
        sigma_eig, U, C = sampler.get_covariance_matrix(cosmo, data, command_line)
        if data.jumping_factor == 0:
            warnings.warn(
                "The jumping factor has been set to 0. The above covariance " +
                "matrix will not be used.")

    # In case of a fiducial run (all parameters fixed), simply run once and
    # print out the likelihood. This should not be used any more (one has to
    # modify the log.param, which is never a good idea). Instead, force the code
    # to use a jumping factor of 0 with the option "-f 0".
    else:
        warnings.warn(
            "You are running with no varying parameters... I will compute " +
            "only one point and exit")
        data.update_cosmo_arguments()  # this fills in the fixed parameters
        loglike = sampler.compute_lkl(cosmo, data)
        io_mp.print_vector(outputs, 1, loglike, data)
        return 1, loglike

    # In the fast-slow method, one needs the Cholesky decomposition of the
    # covariance matrix. Store the Cholesky decomposition as a lower
    # triangular matrix
    Cholesky = None
    Rotation = None
    if command_line.jumping == 'fast':
        Cholesky = la.cholesky(C).T
        Rotation = np.identity(len(sigma_eig))

    # If the update mode was selected, the previous (or original) matrix should be stored
    if command_line.update:
        previous = (sigma_eig, U, C, Cholesky)

    # If restart wanted, pick initial value for arguments
    if command_line.restart is not None:
        sampler.read_args_from_chain(data, command_line.restart)

    # If restart from best fit file, read first point (overwrite settings of
    # read_args_from_chain)
    if command_line.bf is not None:
        sampler.read_args_from_bestfit(data, command_line.bf)

    # Pick a position (from the last accepted point if restarting, otherwise
    # from the mean value), with at most 100 tries.
    for i in range(100):
        if get_new_position(data, sigma_eig, U, i,
                            Cholesky, Rotation) is True:
            break
        if i == 99:
            raise io_mp.ConfigurationError(
                "You should probably check your prior boundaries... because " +
                "no valid starting position was found after 100 tries")

    # Compute the starting Likelihood
    loglike = sampler.compute_lkl(cosmo, data)

    # Choose this step as the last accepted value
    # (accept_step), and update max_loglike accordingly
    sampler.accept_step(data)
    max_loglike = loglike

    # If the jumping factor is 0, the likelihood associated with this point is
    # displayed, and the code exits.
    if data.jumping_factor == 0:
        io_mp.print_vector(outputs, 1, loglike, data)
        return 1, loglike

    acc, rej = 0.0, 0.0  # acceptance and rejection number count
    N = 1   # number of times the system stayed in the current position

    # define path and covmat
    input_covmat = command_line.cov
    base = os.path.basename(command_line.folder)
    # the previous line fails when "folder" is a string ending with a slash; the next lines handle that case:
    if base == '':
        base = os.path.basename(command_line.folder[:-1])
    command_line.cov = os.path.join(
        command_line.folder, base+'.covmat')

    # Print on screen the computed parameters
    if not command_line.silent and not command_line.quiet:
        io_mp.print_parameters(sys.stdout, data)

    # Suppress non-informative output after initializing
    command_line.quiet = True

    k = 1
    # Main loop, which goes on while the maximum number of failures is not
    # reached, and while the expected number of steps (command_line.N) is not taken.
    while k <= command_line.N:

        # If the number of steps reaches the number set in the update method,
        # then the proposal distribution should be adapted.
        if command_line.update:

            # master chain behavior
            if not rank:
                # Add the folder to the list of files to analyze, and switch on the
                # options for computing only the covmat
                from parser_mp import parse
                info_command_line = parse(
                    'info %s --minimal --noplot --keep-fraction 0.5 --keep-non-markovian --want-covmat' % command_line.folder)
                info_command_line.update = command_line.update
                # the +10 below ensures that the first master update takes place before the first slave updates;
                # this is a detail, though: the code is robust against situations where updating is not possible, so the +10 could be omitted
                if not (k+10) % command_line.update and k > 10:
                    # Try to launch an analyze
                    try:
                        from analyze import analyze
                        R_minus_one = analyze(info_command_line)
                    except:
                        if not command_line.silent:
                            print 'Step ',k,' chain ', rank,': Failed to calculate covariant matrix'
                        pass

                if not (k-1) % command_line.update:
                    try:
                        # Read the covmat
                        sigma_eig, U, C = sampler.get_covariance_matrix(
                            cosmo, data, command_line)
                        if command_line.jumping == 'fast':
                            Cholesky = la.cholesky(C).T
                        # Test here whether the covariance matrix has really changed
                        # We should in principle test all terms, but testing the first one should suffice
                        if not C[0,0] == previous[2][0,0]:
                            previous = (sigma_eig, U, C, Cholesky)
                            if k == 1:
                                if not command_line.silent:
                                    if not input_covmat == None:
                                        warnings.warn(
                                            'Appending to an existing folder: using %s instead of %s. '
                                            'If new input covmat is desired, please delete previous covmat.'
                                            % (command_line.cov, input_covmat))
                                    else:
                                        warnings.warn(
                                            'Appending to an existing folder: using %s. '
                                            'If no starting covmat is desired, please delete previous covmat.'
                                            % command_line.cov)
                            else:
                                data.out.write('# After %d accepted steps: update proposal with max(R-1) = %f \n' % (int(acc), max(R_minus_one)))
                                if not command_line.silent:
                                    print 'After %d accepted steps: update proposal with max(R-1) = %f \n' % (int(acc), max(R_minus_one))
                                try:
                                    if stop_after_update:
                                        k = command_line.N
                                        print 'Covariant matrix updated - stopping run'
                                except:
                                    pass

                    except:
                        pass

                    command_line.quiet = True

            # slave chain behavior
            else:
                if not (k-1) % command_line.update:
                    try:
                        sigma_eig, U, C = sampler.get_covariance_matrix(
                            cosmo, data, command_line)
                        if command_line.jumping == 'fast':
                            Cholesky = la.cholesky(C).T
                        # Test here whether the covariance matrix has really changed
                        # We should in principle test all terms, but testing the first one should suffice
                        if not C[0,0] == previous[2][0,0] and not k == 1:
                            data.out.write('# After %d accepted steps: update proposal \n' % int(acc))
                            if not command_line.silent:
                                print 'After %d accepted steps: update proposal \n' % int(acc)
                            try:
                                if stop_after_update:
                                    k = command_line.N
                                    print 'Covariant matrix updated - stopping run'
                            except:
                                pass
                        previous = (sigma_eig, U, C, Cholesky)

                    except IOError:
                        pass

        # Pick a new position ('current' flag in mcmc_parameters), and compute
        # its likelihood. If get_new_position returns True, it means it did not
        # encounter any boundary problem. Otherwise, just increase the
        # multiplicity of the point and start the loop again
        if get_new_position(
                data, sigma_eig, U, k, Cholesky, Rotation) is True:
            newloglike = sampler.compute_lkl(cosmo, data)
        else:  # reject step
            rej += 1
            N += 1
            k += 1
            continue

        # Harmless trick to avoid exponentiating large numbers. This decides
        # whether or not the system should move.
        if (newloglike != data.boundary_loglike):
            if (newloglike >= loglike):
                alpha = 1.
            else:
                alpha = np.exp(newloglike-loglike)
        else:
            alpha = -1

        if ((alpha == 1.) or (rd.uniform(0, 1) < alpha)):  # accept step

            # Print out the last accepted step (WARNING: this is NOT the one we
            # just computed ('current' flag), but really the previous one.)
            # with its proper multiplicity (number of times the system stayed
            # there).
            io_mp.print_vector(outputs, N, loglike, data)

            # Report the 'current' point to the 'last_accepted'
            sampler.accept_step(data)
            loglike = newloglike
            if loglike > max_loglike:
                max_loglike = loglike
            acc += 1.0
            N = 1  # Reset the multiplicity

        else:  # reject step
            rej += 1.0
            N += 1  # Increase multiplicity of last accepted point

        # Regularly (option set in the parameter file), close and reopen the
        # buffer to force a write to disk.
        if acc % data.write_step == 0:
            io_mp.refresh_file(data)
            # Update the outputs list
            outputs[0] = data.out
        k += 1  # One iteration done
    # END OF WHILE LOOP

    # If at this moment the multiplicity is higher than 1, it means the
    # current point is not yet accepted, but it also means that we did not print
    # out the last_accepted one yet. So we do.
    if N > 1:
        io_mp.print_vector(outputs, N-1, loglike, data)

    # Print out some information on the finished chain
    rate = acc / (acc + rej)
    sys.stdout.write('\n#  {0} steps done, acceptance rate: {1}\n'.
                     format(command_line.N, rate))

    # In case the acceptance rate is too low, or too high, print a warning
    if rate < 0.05:
        warnings.warn("The acceptance rate is below 0.05. You might want to "
                      "set the jumping factor to a lower value than the "
                      "default (2.4), with the option `-f 1.5` for instance.")
    elif rate > 0.6:
        warnings.warn("The acceptance rate is above 0.6, which means you might"
                      " have difficulties exploring the entire parameter space"
                      ". Try analysing these chains, and use the output "
                      "covariance matrix to decrease the acceptance rate to a "
                      "value between 0.2 and 0.4 (roughly).")

    # For a restart, erase the starting point to keep only the new, longer
    # chain.
    if command_line.restart is not None:
        os.remove(command_line.restart)
        sys.stdout.write('    deleting starting point of the chain {0}\n'.
                         format(command_line.restart))

    return
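
Note how the chain builds an outputs list holding the chain file and, unless --silent was given, sys.stdout as well, so every accepted step can be written to all destinations with a single call. A stripped-down sketch of that broadcast (this print_vector is a stand-in for io_mp.print_vector, not the real implementation):

import sys

def print_vector(outputs, multiplicity, loglike, params):
    # Write one chain line to every registered output stream.
    line = "%d  %.6g  " % (multiplicity, -loglike)
    line += "  ".join("%.6e" % p for p in params) + "\n"
    for out in outputs:
        out.write(line)

chain_file = open("chain.txt", "w")
outputs = [chain_file, sys.stdout]  # mimic the non-silent case
print_vector(outputs, 3, -12.5, [0.022, 0.12, 67.8])
chain_file.close()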

Example 12

Project: sklearn-theano
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    time_m = 0
    time_s = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as a png image if the file name
        # starts with plot and the source file is more recent than
        # any existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First cd into the original example dir, so that any file
                # created by the example gets created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_mngr.num)
                    plt.savefig(image_path % fig_mngr.num)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/dev/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to
        # _build/html/dev/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally these should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    example_code_obj = identify_names(open(example_file).read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs

Example 13

Project: tensorlib
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    time_m = 0
    time_s = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as a png image if the file name
        # starts with plot and the source file is more recent than
        # any existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First cd into the original example dir, so that any file
                # created by the example gets created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_mngr.num)
                    plt.savefig(image_path % fig_mngr.num)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/dev/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to
        # _build/html/dev/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally these should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    example_code_obj = identify_names(open(example_file).read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
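
The pattern above (and in the similar gen_rst examples below) temporarily rebinds sys.stdout to a Tee object so that the executed script's output is both echoed to the console and captured into a buffer for the generated rst. A minimal, self-contained sketch of that idea, assuming a simplified Tee class (the gen_rst scripts use their own variant):

import sys
from io import StringIO

class Tee(object):
    """Write to several streams at once (e.g. the real stdout and a buffer)."""
    def __init__(self, *streams):
        self.streams = streams

    def write(self, text):
        for stream in self.streams:
            stream.write(text)

    def flush(self):
        for stream in self.streams:
            stream.flush()

orig_stdout = sys.stdout
buffer = StringIO()
try:
    sys.stdout = Tee(orig_stdout, buffer)
    print("echoed to the console and captured")
finally:
    sys.stdout = orig_stdout  # always restore, even if the script raised

captured = buffer.getvalue()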

Example 14

Project: maxfield
Source File: makePlan.py
View license
def main(args):
    start_time = time.time()
    if args.log is not None:
        sys.stdout = open(args.log,'w',0)
    GREEN = '#3BF256' # Actual faction text colors in the app
    BLUE  = '#2ABBFF'
    if args.res:
        color=BLUE
    else:
        color=GREEN
    # Use google?
    useGoogle = args.google
    api_key = args.api_key

    output_directory = args.output_dir
    # add ending separator
    if output_directory[-1] != os.sep:
        output_directory += os.sep
    # create directory if doesn't exist
    if not os.path.isdir(output_directory):
        os.mkdir(output_directory)
    output_file = args.output_file
    if output_file[-4:] != '.pkl':
        output_file += ".pkl"

    nagents = args.num_agents
    if nagents <= 0:
        print "Number of agents should be greater than zero"
        raise ValueError("Number of agents should be greater than zero")

    input_file = args.input_file

    if input_file[-3:] != 'pkl':
        # If the input file is a portal list, let's set things up
        a = nx.DiGraph() # network tool
        locs = [] # portal coordinates
        # each line should be name;intel_link;keys
        portals = pd.read_table(input_file,sep=';',
                                comment='#',index_col=False,
                                names=['name','link','keys','sbla'],
                                dtype=str)
        portals = np.array(portals)
        portals = np.array([portal for portal in portals if (isinstance(portal[0], basestring) and isinstance(portal[1], basestring))])
        print "Found {0} portals in portal list.".format(len(portals))

        intel_url = "https://www.ingress.com/intel?z=17&" # setup url for intel map
        ll_set = False
        pls = []

        if len(portals) < 3:
            print "Error: Must have more than 2 portals!"
            raise ValueError("Error: Must have more than 2 portals!")
        if len(portals) > _MAX_PORTALS_:
            print "Error: Portal limit is {0}".format(_MAX_PORTALS_)
            raise ValueError("Error: Portal limit is {0}".format(_MAX_PORTALS_))
        for num,portal in enumerate(portals):
            if len(portal) < 3:
                print "Error! Portal ",portal[0]," has a formatting problem."
                raise ValueError("Error! Portal ",portal[0]," has a formatting problem.")
            # loop over columns. Four possibilities:
            # 0. First entry is always portal name
            # 1. contains "pll=" it is the Intel URL
            # 2. contains an integer, it is the number of keys
            # 3. contains "sbla", it is an SBLA portal
            loc = None
            keys = 0
            sbla = False
            for pind,pfoobar in enumerate(portal):
                if str(pfoobar) == 'nan':
                    continue
                if pind == 0: # This is the name
                    a.add_node(num)
                    a.node[num]['name'] = pfoobar.strip()
                    continue
                if 'pll=' in pfoobar: # this is the URL
                    if loc is not None:
                        print "Error! Already found URL for this portal: {0}".format(portal[0])
                        raise ValueError("Error! Already found URL for this portal: {0}".format(portal[0]))
                    coords = (pfoobar.strip().split('pll='))
                    if len(coords) < 2:
                        print "Error! Portal ",portal[0]," has a formatting problem."
                        raise ValueError("Error! Portal ",portal[0]," has a formatting problem.")
                    coord_parts = coords[1].split(',')
                    lat = int(float(coord_parts[0]) * 1.e6)
                    lon = int(float(coord_parts[1]) * 1.e6)
                    pls.append(coord_parts[0] + "," + coord_parts[1])
                    loc = np.array([lat,lon],dtype=float)
                    if not ll_set:
                        # use coordinates from first portal to center the map
                        intel_url += "ll=" + coord_parts[0] + "," + coord_parts[1] + "&"
                        ll_set = True
                    continue
                try: # this is the number of keys
                    keys = int(pfoobar.strip())
                    continue
                except ValueError:
                    pass
                try: # this is SBLA
                    sbla = pfoobar.strip()
                    sbla = (sbla.lower() == 'sbla')
                    continue
                except ValueError:
                    pass
                # we should never get here unless there was a bad column
                print "Error: bad data value here:"
                print portal
                print pfoobar
                raise ValueError()
            if loc is None:
                print "Formatting problem: {0}".format(portal[0])
                raise ValueError("Formatting problem: {0}".format(portal[0]))
            locs.append(loc)
            a.node[num]['keys'] = keys
            a.node[num]['sbla'] = sbla
            if sbla:
                print "{0} has SBLA".format(portal[0])

        n = a.order() # number of nodes
        locs = np.array(locs,dtype=float)

        # Convert coords to radians, then to cartesian, then to
        # gnomonic projection
        locs = geometry.LLtoRads(locs)
        xyz  = geometry.radstoxyz(locs)
        xy   = geometry.gnomonicProj(locs,xyz)

        for i in xrange(n):
            a.node[i]['geo'] = locs[i]
            a.node[i]['xyz'] = xyz[i]
            a.node[i]['xy' ] = xy[i]

        # build portal list for intel_url
        intel_url += "pls="
        json_output = []
        for p in xrange(len(pls)):
            if p < len(pls) - 1:
                intel_url += pls[p] + "," + pls[p+1]
                intel_url += "_"
                json_output.append({"type":"polyline", "latLngs":
                    [ { "lat": pls[p].split(',')[0],   "lng": pls[p].split(',')[1] },
                      { "lat": pls[p+1].split(',')[0], "lng": pls[p+1].split(',')[1] }
                    ],
                    "color": "#a24ac3"})
            elif p == len(pls)-1:
                intel_url += pls[p] + "," + pls[0]
                json_output.append({"type":"polyline", "latLngs":
                    [ { "lat": pls[p].split(',')[0], "lng": pls[p].split(',')[1] },
                      { "lat": pls[0].split(',')[0], "lng": pls[0].split(',')[1] }
                    ],
                    "color": "#a24ac3"})
        print intel_url
        print json.dumps(json_output, indent=4)

        # Below is remnants from "random optimization" technique
        """
        # EXTRA_SAMPLES attempts to get graph with few missing keys
        # Try to minimize TK + 2*MK where
        # TK is the total number of missing keys
        # MK is the maximum number of missing keys for any single
        # portal
        bestgraph = None
        bestlack = np.inf
        bestTK = np.inf
        bestMK = np.inf

        allTK = []
        allMK = []
        allWeights = []

        sinceImprove = 0

        while sinceImprove<EXTRA_SAMPLES:
            b = a.copy()

            sinceImprove += 1

            if not maxfield.maxFields(b):
                print 'Randomization failure\nThe program may work if you try again. It is more likely to work if you remove some portals.'
                continue

            TK = 0
            MK = 0
            for j in xrange(n):
                keylack = max(b.in_degree(j)-b.node[j]['keys'],0)
                TK += keylack
                if keylack > MK:
                    MK = keylack
            
            weightedlack = TK+2*MK

            allTK.append(TK)
            allMK.append(MK)
            allWeights.append(weightedlack)

            if weightedlack < bestlack:
                sinceImprove = 0
                print 'IMPROVEMENT:\n\ttotal: %s\n\tmax:   %s\n\tweighted: %s'%\
                       (TK,MK,weightedlack)
                bestgraph = b
                bestlack  = weightedlack
                bestTK  = TK
                bestMK  = MK
            else:
                print 'this time:\n\ttotal: %s\n\tmax:   %s\n\tweighted: %s'%\
                       (TK,MK,weightedlack)

            if weightedlack <= 0:
                print 'KEY PERFECTION'
                bestlack  = weightedlack
                bestTK  = TK
                bestMK  = MK
                break
            # if num agent keys is zero, this code isn't true...
            # if all([ b.node[i]['keys'] <= b.out_degree(i) for i in xrange(n) ]):
            #     print 'All keys used. Improvement impossible'
            #     break

            print '%s tries since improvement'%sinceImprove

        if bestgraph == None:
            print 'EXITING RANDOMIZATION LOOP WITHOUT SOLUTION!'
            print ''
            exit()

        print 'Choosing plan requiring %s additional keys, max of %s from single portal'%(bestTK,bestMK)

        plt.clf()
        plt.scatter(allTK,allMK,c=allWeights,marker='o')
        plt.xlim(min(allTK)-1,max(allTK)+1)
        plt.ylim(min(allMK)-1,max(allMK)+1)
        plt.xlabel('Total keys required')
        plt.ylabel('Max keys required for a single portal')
        cbar = plt.colorbar()
        cbar.set_label('Optimization Weighting (lower=better)')
        plt.savefig(output_directory+'optimization.png')

        a = bestgraph
        """
        with open(output_directory+output_file,'w') as fout:
            pickle.dump(a,fout)
    else:
        with open(input_file,'r') as fin:
            a = pickle.load(fin)

    # Optimize the plan to get shortest walking distance
    best_plan = None
    best_PP = None
    best_time = 1.e9
    for foobar in xrange(args.attempts):
        if not args.quiet:
            tdiff = time.time() - start_time
            hrs = int(tdiff/3600.)
            mins = int((tdiff-3600.*hrs)/60.)
            secs = tdiff-3600.*hrs-60.*mins
            sys.stdout.write("\r[{0:20s}] {1}% ({2}/{3} iterations) : {4:02}h {5:02}m {6:05.2f}s".\
                         format('#'*(20*foobar/args.attempts),
                                100*foobar/args.attempts,
                                foobar,args.attempts,
                                hrs,mins,secs))
        b = copy.deepcopy(a)
        maxfield.maxFields(b,allow_suboptimal=(not args.optimal))
        # Attach to each edge a list of fields that it completes
        # catch no triangulation (bad portal file?)
        try:
            for t in b.triangulation:
                t.markEdgesWithFields()
        except AttributeError:
            print "Error: problem with bestgraph... no triangulation...?"
        agentOrder.improveEdgeOrder(b)
        PP = PlanPrinterMap.PlanPrinter(b,output_directory,nagents,useGoogle=useGoogle,
                                        api_key=api_key,color=color)
        totalTime = b.walktime+b.linktime+b.commtime
        if totalTime < best_time:
            best_plan = b
            best_PP = copy.deepcopy(PP)
            best_time = totalTime


    b = best_plan
    agentOrder.improveEdgeOrderMore(b)

    # Re-run to fix the animations and stars of edges that can be done early
    # (improveEdgeOrderMore may have modified the completion order)
    try:
        first = True
        for t in b.triangulation:
            t.markEdgesWithFields(clean = first)
            first = False
    except AttributeError:
        print "Error: problem with bestgraph... no triangulation...?"


    best_PP = PlanPrinterMap.PlanPrinter(b,output_directory,nagents,useGoogle=useGoogle,
                                    api_key=api_key,color=color)
    best_time = b.walktime+b.linktime+b.commtime


    if not args.quiet:
        tdiff = time.time() - start_time
        hrs = int(tdiff/3600.)
        mins = int((tdiff-3600.*hrs)/60.)
        secs = tdiff-3600.*hrs-60.*mins
        sys.stdout.write("\r[{0:20s}] {1}% ({2}/{3} iterations) : {4:02}h {5:02}m {6:05.2f}s".\
                         format('#'*(20),
                                100,args.attempts,args.attempts,
                                hrs,mins,secs))
        print ""

    # generate plan details and map
    best_PP.keyPrep()
    best_PP.agentKeys()
    best_PP.planMap(useGoogle=useGoogle)
    best_PP.agentLinks()

    # These make step-by-step instructional images
    if not args.skipplot:
        best_PP.animate(useGoogle=useGoogle)
        best_PP.split3instruct(useGoogle=useGoogle)

    print ""
    print ""
    print ""
    print "Found best plan after {0} iterations.".format(args.attempts)
    totalTime = best_plan.walktime+best_plan.linktime+best_plan.commtime
    print "Total time: {0} minutes".format(int(totalTime/60. + 0.5))
    print "Number of portals: {0}".format(best_PP.num_portals)
    print "Number of links: {0}".format(best_PP.num_links)
    print "Number of fields: {0}".format(best_PP.num_fields)
    portal_ap = (125*8 + 500 + 250)*best_PP.num_portals
    link_ap = 313 * best_PP.num_links
    field_ap = 1250 * best_PP.num_fields
    print "AP from portals capture: {0}".format(portal_ap)
    print "AP from link creation: {0}".format(link_ap)
    print "AP from field creation: {0}".format(field_ap)
    print "Total AP: {0}".format(portal_ap+link_ap+field_ap)

    tdiff = time.time() - start_time
    hrs = int(tdiff/3600.)
    mins = int((tdiff-3600.*hrs)/60.)
    secs = tdiff-3600.*hrs-60.*mins
    print "Runtime: {0:02}h {1:02}m {2:05.2f}s".format(hrs,mins,secs)

    plt.close('all')
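
Two sys.stdout techniques carry this example: the optional log redirection (note that open(args.log, 'w', 0) requests an unbuffered text file, which only works on Python 2; on Python 3 the closest equivalent is line buffering with buffering=1) and the carriage-return progress bar, which overwrites the same console line on every update. A minimal sketch of the progress-bar idea, with illustrative iteration counts:

import sys
import time

total = 50
for i in range(total + 1):
    filled = 20 * i // total
    sys.stdout.write("\r[{0:20s}] {1:3d}% ({2}/{3})".format(
        '#' * filled, 100 * i // total, i, total))
    sys.stdout.flush()  # push the partial line out; there is no newline yet
    time.sleep(0.02)    # stand-in for real work
sys.stdout.write("\n")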

Example 15

Project: VisTrails
Source File: shell.py
View license
def get_shell_dialog():
    global _shell_dialog

    if _shell_dialog is not None:
        return _shell_dialog

    try:
        deps = {'pip': 'ipython>=1.0',
                'linux-ubuntu': 'ipython-qtconsole',
                'linux-debian': 'ipython-qtconsole'}

        IPython = py_import('IPython.qt.console.rich_ipython_widget', deps,
                            True)
        RichIPythonWidget = \
                IPython.qt.console.rich_ipython_widget.RichIPythonWidget
        py_import('IPython.qt.inprocess', deps, True)
        QtInProcessKernelManager = \
                IPython.qt.inprocess.QtInProcessKernelManager
    except ImportError:
        return None

    km = QtInProcessKernelManager()
    km.start_kernel()
    kernel = km.kernel
    kernel.gui = 'qt4'

    kernel_client = km.client()
    kernel_client.start_channels()

    class IPythonDialog(RichIPythonWidget, QVistrailsPaletteInterface):
        """This class incorporates an  IPython shell into a dockable widget for use in the
        VisTrails environment"""
        def __init__(self, parent=None):
            RichIPythonWidget.__init__(self, parent)
            self.old_streams = None
            self.running_workflow = False
            self.kernel_manager = km
            self.kernel_client = kernel_client
            self.exit_requested.connect(self.stop)
            self.setWindowTitle("Console")
            self.vistrails_interpreter = get_default_interpreter()

        def visibility_changed(self, visible):
            QVistrailsPaletteInterface.visibility_changed(self, visible)
            if visible:
                self.show()
            else:
                self.hide()
        def stop(self):
            kernel_client.stop_channels()
            km.shutdown_kernel()

        def hide(self):
            """suspend() -> None
            Called when hiding the parent window in order to recover the previous
            state.

            """
            #recovering the state
            if self.old_streams is not None:
                sys.stdout, sys.stderr, sys.stdin = self.old_streams
                self.old_streams = None
            RichIPythonWidget.hide(self)

        def show(self):
            """show() -> None
            Store the previous state and start capturing all interactive input and
            output.

            """
            # capture all interactive input/output
            if self.old_streams is None:
                self.old_streams = sys.stdout, sys.stderr, sys.stdin
                sys.stdout   = self
                sys.stderr   = self
                sys.stdin    = self
            RichIPythonWidget.show(self)

        def showEvent(self, e):
            """showEvent(e) -> None
            Event handler called when the dialog acquires focus

            """
            self.show()

        def flush(self):
            """flush() -> None.
            Simulate stdin, stdout, and stderr.

            """
            pass

        def isatty(self):
            """isatty() -> int
            Simulate stdin, stdout, and stderr.

            """
            return 1

        def readline(self):
            """readline() -> str

            Simulate stdin, stdout, and stderr.

            """
            return ""

        def write(self, text):
            """write(text: str) -> None
            Simulate stdin, stdout, and stderr.

            """
            self.input_buffer = ''
            if not self.running_workflow:
                self.running_workflow = True
                # make text blue
                self._append_plain_text("\n\x1b[34m<STANDARD OUTPUT>\x1b[0m\n", True)
            self._append_plain_text(text, True)
            self._prompt_pos = self._get_end_cursor().position()
            self._control.ensureCursorVisible()
            self._control.moveCursor(QtGui.QTextCursor.End)

        def eventFilter(self, obj, event):
            """ Reimplemented to ensure a console-like behavior in the underlying
                text widgets.
            """
            etype = event.type()
            if etype == QtCore.QEvent.KeyPress:
                self.running_workflow = False
            return RichIPythonWidget.eventFilter(self, obj, event)

    _shell_dialog = IPythonDialog
    return IPythonDialog
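
The redirection above works because sys.stdout only needs to be file-like: the dialog supplies write(), flush(), isatty(), and readline(), and Python routes all print output through it. A minimal sketch of the same idea without Qt, assuming a hypothetical CallbackWriter class:

import sys

class CallbackWriter(object):
    """File-like object that forwards every write to a callback."""
    def __init__(self, callback):
        self.callback = callback

    def write(self, text):
        self.callback(text)

    def flush(self):
        pass

    def isatty(self):
        return False

old_streams = sys.stdout, sys.stderr
try:
    sys.stdout = CallbackWriter(lambda t: old_streams[0].write(t.upper()))
    print("routed through the callback")
finally:
    sys.stdout, sys.stderr = old_streams  # restore, mirroring hide() above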

Example 16

Project: pywikibot-core
Source File: win32_unicode.py
View license
def get_unicode_console():
    """
    Get Unicode console objects.

    @return: stdin, stdout, stderr, argv
    @rtype: tuple
    """
    # Make Unicode console output work independently of the current code page.
    # This also fixes <http://bugs.python.org/issue1602>.
    # Credit to Michael Kaplan <http://blogs.msdn.com/b/michkap/archive/2010/04/07/9989346.aspx>
    # and TZOmegaTZIOY
    # <https://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/1432462#1432462>.

    global stdin, stdout, stderr, argv

    if not OSWIN32:
        return stdin, stdout, stderr, argv

    try:
        # <https://msdn.microsoft.com/en-us/library/ms683231(VS.85).aspx>
        # HANDLE WINAPI GetStdHandle(DWORD nStdHandle);
        # returns INVALID_HANDLE_VALUE, NULL, or a valid handle
        #
        # <https://msdn.microsoft.com/en-us/library/aa364960(VS.85).aspx>
        # DWORD WINAPI GetFileType(DWORD hFile);
        #
        # <https://msdn.microsoft.com/en-us/library/ms683167(VS.85).aspx>
        # BOOL WINAPI GetConsoleMode(HANDLE hConsole, LPDWORD lpMode);

        GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(("GetStdHandle", windll.kernel32))
        STD_INPUT_HANDLE = DWORD(-10)
        STD_OUTPUT_HANDLE = DWORD(-11)
        STD_ERROR_HANDLE = DWORD(-12)
        GetFileType = WINFUNCTYPE(DWORD, DWORD)(("GetFileType", windll.kernel32))
        FILE_TYPE_CHAR = 0x0002
        FILE_TYPE_REMOTE = 0x8000
        GetConsoleMode = (WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))
                          (("GetConsoleMode", windll.kernel32)))
        INVALID_HANDLE_VALUE = DWORD(-1).value

        def not_a_console(handle):
            """Return whether the handle is not to a console."""
            if handle == INVALID_HANDLE_VALUE or handle is None:
                return True
            return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                    GetConsoleMode(handle, byref(DWORD())) == 0)

        old_stdin_fileno = old_fileno('in')
        old_stdout_fileno = old_fileno('out')
        old_stderr_fileno = old_fileno('err')

        STDIN_FILENO = 0
        STDOUT_FILENO = 1
        STDERR_FILENO = 2
        real_stdin = (old_stdin_fileno == STDIN_FILENO)
        real_stdout = (old_stdout_fileno == STDOUT_FILENO)
        real_stderr = (old_stderr_fileno == STDERR_FILENO)

        if real_stdin:
            hStdin = GetStdHandle(STD_INPUT_HANDLE)
            if not_a_console(hStdin):
                real_stdin = False

        if real_stdout:
            hStdout = GetStdHandle(STD_OUTPUT_HANDLE)
            force_truetype_console(hStdout)
            if not_a_console(hStdout):
                real_stdout = False

        if real_stderr:
            hStderr = GetStdHandle(STD_ERROR_HANDLE)
            force_truetype_console(hStderr)
            if not_a_console(hStderr):
                real_stderr = False

        if real_stdout or real_stderr:
            if real_stdin:
                stdin = UnicodeInput(hStdin, name='<Unicode console stdin>')

            if real_stdout:
                stdout = UnicodeOutput(hStdout, sys.stdout, STDOUT_FILENO,
                                       '<Unicode console stdout>')
            else:
                stdout = UnicodeOutput(None, sys.stdout, old_stdout_fileno,
                                       '<Unicode redirected stdout>')

            if real_stderr:
                stderr = UnicodeOutput(hStderr, sys.stderr, STDERR_FILENO,
                                       '<Unicode console stderr>')
            else:
                stderr = UnicodeOutput(None, sys.stderr, old_stderr_fileno,
                                       '<Unicode redirected stderr>')
    except Exception as e:
        _complain("exception %r while fixing up sys.stdout and sys.stderr" % (e,))

    # While we're at it, let's unmangle the command-line arguments:

    # This works around <http://bugs.python.org/issue2128>.
    GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32))
    CommandLineToArgvW = (WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))
                          (("CommandLineToArgvW", windll.shell32)))

    argc = c_int(0)
    argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))

    argv = [argv_unicode[i].encode('utf-8') for i in range(0, argc.value)]

    if not hasattr(sys, 'frozen'):
        # If this is an executable produced by py2exe or bbfreeze, then it will
        # have been invoked directly. Otherwise, unicode_argv[0] is the Python
        # interpreter, so skip that.
        argv = argv[1:]

        # Also skip option arguments to the Python interpreter.
        while len(argv) > 0:
            arg = argv[0]
            if not arg.startswith(b"-") or arg == u"-":
                break
            argv = argv[1:]
            if arg == u'-m':
                # sys.argv[0] should really be the absolute path of the module source,
                # but never mind
                break
            if arg == u'-c':
                argv[0] = u'-c'
                break

    if argv == []:
        argv = [u'']

    return stdin, stdout, stderr, argv
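
Most of the work above is deciding whether each standard stream is attached to a real Windows console before wrapping it. On Python 3 the detect-and-rewrap idea can be sketched portably with io.TextIOWrapper; this is only an approximation, since the real console case still needs the Win32 calls shown above:

import io
import sys

def rewrap_stdout(encoding='utf-8'):
    """Return sys.stdout re-wrapped with an explicit encoding."""
    if sys.stdout.isatty():
        # Attached to a terminal: re-wrap the underlying byte stream.
        return io.TextIOWrapper(sys.stdout.buffer,
                                encoding=encoding,
                                line_buffering=True)
    return sys.stdout  # redirected to a file or pipe: leave it alone

sys.stdout = rewrap_stdout()
print("unicode-safe output")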

Example 17

Project: acoular
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First cd into the original example dir, so that any file
                # created by the example gets created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr

                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally these should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('source/_static/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
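
This example restores sys.stdout in a finally block, just like Example 13. On Python 3, contextlib.redirect_stdout packages the same save/replace/restore dance into a context manager; note that unlike the Tee used here it only captures, without echoing to the console:

import contextlib
import io

buffer = io.StringIO()
with contextlib.redirect_stdout(buffer):
    print("goes into the buffer, not to the console")
print("captured: %r" % buffer.getvalue())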

Example 19

Project: cgat
Source File: Counts.py
View license
    def outputSpikes(self, indices, tracks_map, groups,
                     output_method, spike_type,
                     min_cbin, width_cbin, max_cbin,
                     min_ibin, width_ibin, max_ibin,
                     min_sbin=1, width_sbin=1, max_sbin=1,
                     append=False):
        ''' method to output spike-ins generated by shuffling rows
        (counts.shuffleRows) or clusters of rows (counts.shuffleCluster)

        parameters:
        * indices - indices for spike-ins
        * min_ibin = minimum bin for initial values
        * width_ibin = width of bins for initial values
        * max_ibin = maximum bin for initial values
        * min_cbin = minimum bin for change values
        * width_cbin = width of bins for change values
        * max_cbin = maximum bin for change values
        * min_sbin = minimum bin for size values
        * width_sbin = width of bins for size values
        * max_sbin = maximum bin for size values
        * tracks_map = dictionary mapping groups to tracks
        * spike_type = "row" or "cluster"
        * output_method = "append" or "separate"
        '''

        def makeHeader(tracks_map, groups, keep_columns=None):
            if keep_columns:
                header = keep_columns
            else:
                header = []
            header.extend(tracks_map[groups[0]])
            header.extend(tracks_map[groups[1]])

            return header

        if spike_type == "row":
            index = True
            # keep_columns = ["spike"]
            keep_columns = None

        elif spike_type == "cluster":
            index = False
            keep_columns = ["contig", "position"]

        header = makeHeader(tracks_map, groups, keep_columns=keep_columns)

        if output_method == "append":
            self.table = self.table.ix[:, header]
            self.table.to_csv(sys.stdout, index=index, header=True, sep="\t",
                              dtype={'position': int})
        else:
            sys.stdout.write("%s\t%s\n" % (
                "spike", "\t".join(map(str, header))))

        def getInitialChangeSize(key, width_ibin, min_ibin, width_cbin,
                                 min_cbin, width_sbin, min_sbin):
            initial_bin, change_bin, size_bin = key

            # initial and change values are the center of the bin
            initial = ((initial_bin * width_ibin) +
                       min_ibin - (width_ibin * 0.5))
            change = ((change_bin * width_cbin) +
                      min_cbin - (width_cbin * 0.5))
            size = ((size_bin * width_sbin) + min_sbin - 1)
            return initial, change, size

        def getInitialChange(key, width_ibin, min_ibin, width_cbin, min_cbin):
            initial_bin, change_bin = key

            # initial and change values are the center of the bin
            initial = ((initial_bin * width_ibin) +
                       min_ibin - (width_ibin * 0.5))
            change = ((change_bin * width_cbin) +
                      min_cbin - (width_cbin * 0.5))
            return initial, change

        n = 0

        if spike_type == "row":

            for key in indices:
                for pair in indices[key]:
                    initial, change = getInitialChange(
                        key, width_ibin, min_ibin, width_cbin, min_cbin)
                    row = ["_".join(map(str,
                                        ("spike-in", initial, change, n)))]
                    row.extend(self.table.ix[pair[0], tracks_map[groups[0]]])
                    row.extend(self.table.ix[pair[1], tracks_map[groups[1]]])
                    sys.stdout.write("%s\n" % "\t".join(map(str, row)))
                    n += 1

        elif spike_type == "cluster":
            for key in sorted(indices.keys()):
                initial, change, size = getInitialChangeSize(
                    key, width_ibin, min_ibin, width_cbin,
                    min_cbin, width_sbin, min_sbin)
                for values in sorted(indices[key]):
                    (c1s, c1e, c2s, c2e, c1rs, c1re,
                     c2rs, c2re) = values
                    cluster_id = "_".join(
                        map(str, ("spike-in", initial, change,
                                  size, c1rs - c1s, n)))

                    temp_cluster_df = self.table.ix[c1s:c1e, keep_columns]
                    temp_cluster_df['contig'] = cluster_id
                    temp_cluster_swap = self.table.ix[
                        c2rs:c2re, tracks_map[groups[1]]]
                    temp_cluster_swap.set_index(self.table.ix[c1rs:c1re].index,
                                                drop=True,  inplace=True)
                    temp_cluster_df.ix[c1rs:c1re, tracks_map[
                        groups[1]]] = temp_cluster_swap
                    temp_cluster_df.to_csv(sys.stdout, index=index,
                                           header=False, sep="\t",
                                           dtype={'position': int})
                    n += 1
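
Both branches above stream tabular data to standard output: to_csv accepts any file-like object, so passing sys.stdout writes the table directly to the console instead of a file. A minimal sketch with illustrative data:

import sys
import pandas as pd

df = pd.DataFrame({'contig': ['chr1', 'chr2'],
                   'position': [100, 2500]})
df.to_csv(sys.stdout, sep='\t', index=False, header=True)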

Example 20

Project: EmPyre
Source File: agent.py
View license
def processPacket(taskingID, data):

    try:
        taskingID = int(taskingID)
    except Exception as e:
        return None

    if taskingID == 1:
        # sysinfo request
        # get_sysinfo should be exposed from stager.py
        return encodePacket(1, get_sysinfo())

    elif taskingID == 2:
        # agent exit

        msg = "[!] Agent %s exiting" %(sessionID)
        sendMessage(encodePacket(2, msg))
        agent_exit()

    elif taskingID == 40:
        # run a command
        resultData = str(run_command(data))
        return encodePacket(40, resultData)

    elif taskingID == 41:
        # file download

        filePath = os.path.abspath(data)
        if not os.path.exists(filePath):
            return encodePacket(40, "file does not exist or cannot be accessed")

        offset = 0
        size = os.path.getsize(filePath)
        partIndex = 0

        while True:

            # get 512kb of the given file starting at the specified offset
            encodedPart = get_file_part(filePath, offset=offset, base64=False)
            c = compress()
            start_crc32 = c.crc32_data(encodedPart)
            comp_data = c.comp_data(encodedPart)
            encodedPart = c.build_header(comp_data, start_crc32)
            encodedPart = base64.b64encode(encodedPart)

            partData = "%s|%s|%s" %(partIndex, filePath, encodedPart)
            if not encodedPart or encodedPart == '' or len(encodedPart) == 16:
                break

            sendMessage(encodePacket(41, partData))

            global delay
            global jitter
            if jitter < 0: jitter = -jitter
            if jitter > 1: jitter = 1/jitter

            minSleep = int((1.0-jitter)*delay)
            maxSleep = int((1.0+jitter)*delay)
            sleepTime = random.randint(minSleep, maxSleep)
            time.sleep(sleepTime)
            partIndex += 1
            offset += 5120000

    elif taskingID == 42:
        # file upload
        try:
            parts = data.split("|")
            filePath = parts[0]
            base64part = parts[1]
            raw = base64.b64decode(base64part)
            d = decompress()
            dec_data = d.dec_data(raw, cheader=True)
            if not dec_data['crc32_check']:
                sendMessage(encodePacket(0, "[!] WARNING: File upload failed crc32 check during decompression!"))
                sendMessage(encodePacket(0, "[!] HEADER: Start crc32: %s -- Received crc32: %s -- Crc32 pass: %s!" %(dec_data['header_crc32'],dec_data['dec_crc32'],dec_data['crc32_check'])))
            f = open(filePath, 'ab')
            f.write(dec_data['data'])
            f.close()

            sendMessage(encodePacket(42, "[*] Upload of %s successful" %(filePath) ))
        except Exception as e:
            sendMessage(encodePacket(0, "[!] Error in writing file %s during upload: %s" %(filePath, str(e)) ))

    elif taskingID == 50:
        # return the currently running jobs
        msg = ""
        if len(jobs) == 0:
            msg = "No active jobs"
        else:
            msg = "Active jobs:\n"
            for x in xrange(len(jobs)):
                msg += "\t%s" %(x)
        return encodePacket(50, msg)

    elif taskingID == 51:
        # stop and remove a specified job if it's running
        try:
            # Calling join first seems to hang
            # result = jobs[int(data)].join()
            sendMessage(encodePacket(0, "[*] Attempting to stop job thread"))
            result = jobs[int(data)].kill()
            sendMessage(encodePacket(0, "[*] Job thread stopped!"))
            jobs[int(data)]._Thread__stop()
            jobs.pop(int(data))
            if result and result != "":
                sendMessage(encodePacket(51, result))
        except:
            return encodePacket(0, "error stopping job: %s" %(data))

    elif taskingID == 100:
        # dynamic code execution, wait for output, don't save output
        try:
            buffer = StringIO()
            sys.stdout = buffer
            code_obj = compile(data, '<string>', 'exec')
            exec code_obj in globals()
            sys.stdout = sys.__stdout__
            results = buffer.getvalue()
            return encodePacket(100, str(results))
        except Exception as e:
            errorData = str(buffer.getvalue())
            return encodePacket(0, "error executing specified Python data: %s \nBuffer data recovered:\n%s" %(e, errorData))

    elif taskingID == 101:
        # dynamic code execution, wait for output, save output
        prefix = data[0:15].strip()
        extension = data[15:20].strip()
        data = data[20:]
        try:
            buffer = StringIO()
            sys.stdout = buffer
            code_obj = compile(data, '<string>', 'exec')
            exec code_obj in globals()
            sys.stdout = sys.__stdout__
            c = compress()
            start_crc32 = c.crc32_data(buffer.getvalue())
            comp_data = c.comp_data(buffer.getvalue())
            encodedPart = c.build_header(comp_data, start_crc32)
            encodedPart = base64.b64encode(encodedPart)
            return encodePacket(101, '{0: <15}'.format(prefix) + '{0: <5}'.format(extension) + encodedPart )
        except Exception as e:
            # Also return partial code that has been executed
            errorData = str(buffer.getvalue())
            return encodePacket(0, "error executing specified Python data %s \nBuffer data recovered:\n%s" %(e, errorData))

    elif taskingID == 102:
        # on disk code execution for modules that require multiprocessing not supported by exec
        try:
            implantHome = expanduser("~") + '/.Trash/'
            moduleName = ".mac-debug-data"
            implantPath = implantHome + moduleName
            result = "[*] Module disk path: %s \n" %(implantPath) 
            with open(implantPath, 'w') as f:
                f.write(data)
            result += "[*] Module properly dropped to disk \n"
            pythonCommand = "python %s" %(implantPath)
            process = subprocess.Popen(pythonCommand, stdout=subprocess.PIPE, shell=True)
            data = process.communicate()
            result += data[0].strip()
            try:
                os.remove(implantPath)
                result += "\n[*] Module path was properly removed: %s" %(implantPath) 
            except Exception as e:
                print "error removing module filed: %s" %(e)
            fileCheck = os.path.isfile(implantPath)
            if fileCheck:
                result += "\n\nError removing module file, please verify path: " + str(implantPath)
            return encodePacket(100, str(result))
        except Exception as e:
            fileCheck = os.path.isfile(implantPath)
            if fileCheck:
                return encodePacket(0, "error executing specified Python data: %s \nError removing module file, please verify path: %s" %(e, implantPath))
            return encodePacket(0, "error executing specified Python data: %s" %(e))

    elif taskingID == 110:
        start_job(data)
        return encodePacket(110, "job %s started" %(len(jobs)-1))

    elif taskingID == 111:
        # TASK_CMD_JOB_SAVE
        # TODO: implement job structure
        pass

    else:
        return encodePacket(0, "invalid tasking ID: %s" %(taskingID))
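
Taskings 100 and 101 above capture the output of dynamically executed code by pointing sys.stdout at a StringIO buffer and falling back to sys.__stdout__ afterwards. A minimal Python 3 sketch of the same capture-exec-restore pattern, with an illustrative snippet:

import sys
from io import StringIO

code = "print('hello from exec')"
buffer = StringIO()
orig_stdout = sys.stdout
try:
    sys.stdout = buffer
    exec(compile(code, '<string>', 'exec'), {})
finally:
    sys.stdout = orig_stdout  # sys.__stdout__ also works if never rebound

print("captured: %r" % buffer.getvalue())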

Example 21

Project: clusterlib
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    time_m = 0
    time_s = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First cd into the original example dir, so that any file
                # created by the example gets created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_mngr.num)
                    plt.savefig(image_path % fig_mngr.num)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`,
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all, as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions, as that messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to `thumb_file` as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    example_code_obj = identify_names(open(example_file).read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
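
The sys.stdout pattern worth isolating here is the temporary swap-in of a Tee object, so that script output is echoed to the terminal and captured for the rst file at the same time, with the original stream restored in a finally block. A minimal, self-contained sketch of that pattern (this Tee class is a hypothetical stand-in for the one the gallery code defines elsewhere):

import sys
from io import StringIO

class Tee(object):
    """Write-through proxy: forwards writes to two file-like objects."""
    def __init__(self, first, second):
        self.first = first
        self.second = second

    def write(self, text):
        self.first.write(text)
        self.second.write(text)

    def flush(self):
        self.first.flush()
        self.second.flush()

orig_stdout = sys.stdout
buffer = StringIO()
try:
    sys.stdout = Tee(orig_stdout, buffer)
    print('echoed to the terminal AND captured')
finally:
    sys.stdout = orig_stdout  # always restore, even if the script raises

captured = buffer.getvalue()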

Example 22

Project: mendeley2bibtex
Source File: mendeley2bibtex.py
View license
def convert(db_name, bibtex_file=sys.stdout, quiet=False):
    """Converts Mendely SQlite database to BibTeX file
    @param db_name The Mendeley SQlite file
    @param bibtex_file The BibTeX file to output the bibliography, if not
supplied the output is written to the system standard stdout.
    @param quiet If true do not show warnings and errors
    """
    
    db = sqlite3.connect(db_name)
    c = db.cursor()
    #c.row_factory = sqlite3.Row # CANNOT be used with unicode string formatting
                                 # since it expects str indexes, and we are
                                 # using unicode strings.
    c.row_factory = dict_factory # allows each row (entry) to be used as a dict
                                 # with unicode keys.
                                 
    if sys.stdout != bibtex_file:
        f = open(bibtex_file,'w')
        f.write("""This file was generated automatically by Mendeley To
BibTeX python script.\n\n""")
    else:
        f = bibtex_file

    for entry in c.execute('''
    SELECT
        D.id,
        D.citationKey,
        D.title,
        D.type,
        D.doi,
        D.publisher,
        D.publication,
        D.volume,
        D.issue,
        D.month,
        D.year,
        D.pages,
        F.localUrl
    FROM Documents D
    LEFT JOIN DocumentCanonicalIds DCI
        ON D.id = DCI.documentId
    LEFT JOIN DocumentFiles DF
        ON D.id = DF.documentId
    LEFT JOIN Files F
        ON F.hash = DF.hash
    WHERE D.confirmed = "true"
    GROUP BY D.citationKey
    ORDER BY D.citationKey
    ;'''):

        c2 = db.cursor()
        c2.execute('''
    SELECT lastName, firstNames
    FROM DocumentContributors
    WHERE documentId = ?
    ORDER BY id''', (entry['id'],))
        authors_list = c2.fetchall()
        authors = []
        for author in authors_list:
            authors.append(', '.join(author))
        entry['authors'] = ' and '.join(authors)

        #capitalize_title(entry)
        clean_char(entry)

        # If you need to add more templates:
        #    all types of templates are available at
        #    http://www.cs.vassar.edu/people/priestdo/tips/bibtex
        if "JournalArticle" == entry['type']:
            formatted_entry = u'''
@article{{{entry[citationKey]},
    author    = "{entry[authors]}",
    title     = "{entry[title]}",
    journal   = "{entry[publication]}",
    number    = "{entry[issue]}",
    volume    = "{entry[volume]}",
    pages     = "{entry[pages]}",
    year      = "{entry[year]}",
    doi       = "{entry[doi]}",
    localfile = "{entry[localUrl]}"
}}'''.format(entry=entry)


        elif "ConferenceProceedings" == entry['type']:
            formatted_entry = u'''
@proceedings{{{entry[citationKey]},
    author    = "{entry[authors]}",
    title     = "{entry[title]}",
    publisher = "{entry[publisher]}",
    pages     = "{entry[pages]}",
    year      = "{entry[year]}",
    doi       = "{entry[doi]}",
    localfile = "{entry[localUrl]}"
}}'''.format(entry=entry)


        elif "Book" == entry['type']:
            formatted_entry = u'''
@book{{{entry[citationKey]},
    author    = "{entry[authors]}",
    title     = "{entry[title]}",
    publisher = "{entry[publisher]}",
    year      = "{entry[year]}",
    volume    = "{entry[volume]}",
    doi       = "{entry[doi]}",
    localfile = "{entry[localUrl]}"
}}'''.format(entry=entry)

        else:
            if not quiet:
                print u'''Unhandled entry type {0}, please add your own
template.'''.format(entry['type'])
            continue
        
        f.write(formatted_entry.encode("UTF-8"))

    if sys.stdout != bibtex_file:
        f.close()
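
A detail worth noting in this example: the output argument defaults to sys.stdout, so the same function can print to the terminal or write to a named file. A minimal sketch of that idiom (dump_entries and its arguments are hypothetical):

import sys

def dump_entries(entries, output=sys.stdout):
    """Write each entry to `output`, which is sys.stdout or a file path."""
    must_close = output is not sys.stdout
    f = open(output, 'w') if must_close else output
    try:
        for entry in entries:
            f.write('%s\n' % entry)
    finally:
        if must_close:
            f.close()

dump_entries(['@article{a}', '@book{b}'])   # prints to the terminal
dump_entries(['@article{a}'], 'refs.bib')   # writes to a file

One caveat: a default of sys.stdout is bound once, at definition time; if sys.stdout is later reassigned (as several examples on this page do), the default still points at the original stream. Defaulting to None and resolving sys.stdout inside the function avoids that.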

Example 23

View license
def run_tests(test_labels, verbosity=1, interactive=True,
        extra_tests=[], nodatabase=False, xml_out=False, callgraph=False, html_only=False):
    """
    Test runner which displays a code coverage report at the end of the
    run.
    """
    cov = coverage.coverage()
    cov.erase()
    cov.use_cache(0)

    test_labels = test_labels or getattr(settings, "TEST_APPS", None)
    cover_branch = getattr(settings, "COVERAGE_BRANCH_COVERAGE", False)
    cov = coverage.coverage(branch=cover_branch, cover_pylib=False)
    cov.use_cache(0)
     
    coverage_modules = []
    if test_labels:
        for label in test_labels:
            # Don't report coverage if you're only running a single
            # test case.
            if '.' not in label:
                app = get_app(label)
                coverage_modules.extend(get_all_coverage_modules(app))
    else:
        for app in get_apps():
            coverage_modules.extend(get_all_coverage_modules(app))

    morfs = filter(is_wanted_module, coverage_modules)

    if callgraph:
        try:
            import pycallgraph
            #_include = [i.__name__ for i in coverage_modules]
            _included = getattr(settings, "COVERAGE_INCLUDE_MODULES", [])
            _excluded = getattr(settings, "COVERAGE_EXCLUDE_MODULES", [])

            _included = [i.strip('*')+'*' for i in _included]
            _excluded = [i.strip('*')+'*' for i in _excluded]

            _filter_func = pycallgraph.GlobbingFilter(
                include=_included or ['*'],
                #include=['lotericas.*'],
                #exclude=[],
                #max_depth=options.max_depth,
            )

            pycallgraph_enabled = True
        except ImportError:
            pycallgraph_enabled = False
    else:
        pycallgraph_enabled = False

    cov.start()
    
    if pycallgraph_enabled:
        pycallgraph.start_trace(filter_func=_filter_func)

    if nodatabase:
        results = nodatabase_run_tests(test_labels, verbosity, interactive,
            extra_tests)
    else:
        results = django_test_runner(test_labels, verbosity, interactive,
            extra_tests)
    
    if callgraph and pycallgraph_enabled:
        pycallgraph.stop_trace()

    cov.stop()
    
    if getattr(settings, "COVERAGE_HTML_REPORT", False) or \
            os.environ.get("COVERAGE_HTML_REPORT"):
        output_dir = getattr(settings, "COVERAGE_HTML_DIRECTORY", "covhtml")
        report_method = curry(cov.html_report, directory=output_dir)
        if callgraph and pycallgraph_enabled:
            callgraph_path = output_dir + '/' + 'callgraph.png'
            pycallgraph.make_dot_graph(callgraph_path)

        print >>sys.stdout
        print >>sys.stdout, "Coverage HTML reports were output to '%s'" %output_dir
        if callgraph:
            if pycallgraph_enabled:
                print >>sys.stdout, "Call graph was output to '%s'" %callgraph_path
            else:
                print >>sys.stdout, "Call graph was not generated: Install 'pycallgraph' module to do so"

    else:
        report_method = cov.report

    if coverage_modules:
        if xml_out:
            # using the same output directory as the --xml function uses for testing
            if not os.path.isdir(os.path.join("temp", "xml")):
                os.makedirs(os.path.join("temp", "xml"))
            output_filename = 'temp/xml/coverage_output.xml'
            cov.xml_report(morfs=coverage_modules, outfile=output_filename)

        if not html_only:
            cov.report(coverage_modules, show_missing=1)

    return results
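
The report lines above use the Python 2 chevron syntax `print >>sys.stdout, ...` to direct print output at an explicit stream. A short sketch of the same redirection with the Python 3 print function (the directory value is illustrative):

import sys

# Python 2 form, as used in the example above:
#     print >>sys.stdout, "Coverage HTML reports were output to '%s'" % output_dir

# Python 3 equivalent: print() takes an explicit `file` argument.
output_dir = 'covhtml'
print("Coverage HTML reports were output to '%s'" % output_dir, file=sys.stdout)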

Example 24

Project: girder_worker
Source File: docker_test.py
View license
    @mock.patch('subprocess.check_output')
    @mock.patch('subprocess.Popen')
    def testDockerMode(self, mockPopen, checkOutput):
        mockPopen.return_value = processMock
        checkOutput.return_value = inspectOutput

        task = {
            'mode': 'docker',
            'docker_image': 'test/test:latest',
            'container_args': [
                '-f', '$input{foo}', '--temp-dir=$input{_tempdir}'],
            'pull_image': True,
            'inputs': [{
                'id': 'foo',
                'name': 'A variable',
                'format': 'string',
                'type': 'string',
                'target': 'filepath'
            }],
            'outputs': [{
                'id': '_stderr',
                'format': 'string',
                'type': 'string'
            }]
        }

        inputs = {
            'foo': {
                'mode': 'http',
                'url': 'https://foo.com/file.txt'
            }
        }

        @httmock.all_requests
        def fetchMock(url, request):
            if url.netloc == 'foo.com' and url.scheme == 'https':
                return 'dummy file contents'
            else:
                raise Exception('Unexpected url ' + repr(url))

        with httmock.HTTMock(fetchMock):
            # Use user-specified filename
            _old = sys.stdout
            mockedStdOut = six.StringIO()
            sys.stdout = mockedStdOut
            out = run(
                task, inputs=inputs, cleanup=False, validate=False,
                auto_convert=False)
            sys.stdout = _old

            # We didn't specify _stdout as an output, so it should just get
            # printed to sys.stdout (which we mocked)
            lines = mockedStdOut.getvalue().splitlines()
            self.assertEqual(lines[0],
                             'Pulling Docker image: test/test:latest')
            self.assertEqual(lines[-2], 'output message')
            self.assertEqual(
                lines[-1], 'Garbage collecting old containers and images.')

            # We bound _stderr as a task output, so it should be in the output
            self.assertEqual(out, {
                '_stderr': {
                    'data': 'error message\n',
                    'format': 'string'
                }
            })

            self.assertEqual(mockPopen.call_count, 3)
            cmd1, cmd2, cmd3 = [x[1]['args'] for x in mockPopen.call_args_list]

            self.assertEqual(cmd1, ('docker', 'pull', 'test/test:latest'))
            self.assertEqual(cmd2[:3],
                             ['docker', 'run', '-v'])
            six.assertRegex(self, cmd2[3], _tmp + '/.*:%s' % DATA_VOLUME)
            self.assertEqual(cmd2[4], '-v')
            six.assertRegex(self, cmd2[5],
                            '%s:%s:ro' % (SCRIPTS_DIR, SCRIPTS_VOLUME))
            self.assertEqual(cmd2[6:9], [
                '--entrypoint',
                '%s/entrypoint.sh' % SCRIPTS_VOLUME,
                'test/test:latest'
            ])
            self.assertEqual(cmd2[9:15], [
                str(os.getuid()), str(os.getgid()),
                '/usr/bin/foo', '--flag', '-f', '%s/file.txt' % DATA_VOLUME])
            self.assertEqual(cmd2[-1], '--temp-dir=%s' % DATA_VOLUME)
            self.assertEqual(len(cmd2), 16)

            self.assertEqual(len(cmd3), 1)
            six.assertRegex(self, cmd3[0], 'docker-gc$')

            # Make sure we can specify a custom entrypoint to the container
            mockPopen.reset_mock()
            task['entrypoint'] = '/bin/bash'

            # Make sure additional docker run args work
            task['docker_run_args'] = ['--net', 'none']

            inputs['foo'] = {
                'mode': 'http',
                'url': 'https://foo.com/file.txt'
            }
            out = run(task, inputs=inputs, validate=False,
                      auto_convert=False)
            self.assertEqual(mockPopen.call_count, 3)
            cmd2 = mockPopen.call_args_list[1][1]['args']
            self.assertEqual(cmd2[6:11], [
                '--entrypoint',
                '%s/entrypoint.sh' % SCRIPTS_VOLUME,
                '--net',
                'none',
                'test/test:latest'
            ])
            self.assertEqual(cmd2[11:16], [
                str(os.getuid()), str(os.getgid()),
                '/bin/bash', '-f', '%s/file.txt' % DATA_VOLUME])

            mockPopen.reset_mock()
            # Make sure custom config settings are respected
            girder_worker.config.set('docker', 'cache_timeout', '123456')
            girder_worker.config.set(
                'docker', 'exclude_images', 'test/test:latest')

            # Make sure we can skip pulling the image
            task['pull_image'] = False
            inputs['foo'] = {
                'mode': 'http',
                'url': 'https://foo.com/file.txt'
            }
            out = run(task, inputs=inputs, validate=False,
                      auto_convert=False)
            self.assertEqual(mockPopen.call_count, 2)
            cmd1, cmd2 = [x[1]['args'] for x in mockPopen.call_args_list]
            self.assertEqual(tuple(cmd1[:2]), ('docker', 'run'))
            self.assertEqual(cmd1[8:10], ['--net', 'none'])
            six.assertRegex(self, cmd2[0], 'docker-gc$')
            env = mockPopen.call_args_list[1][1]['env']
            self.assertEqual(env['GRACE_PERIOD_SECONDS'], '123456')
            six.assertRegex(self, env['EXCLUDE_FROM_GC'],
                            'docker_gc_scratch/.docker-gc-exclude$')
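
The test swaps sys.stdout for a six.StringIO so everything the code under test prints can be asserted on, then puts the real stream back. A minimal sketch of that capture-and-assert pattern (plain unittest and io.StringIO, assuming Python 3):

import sys
import unittest
from io import StringIO

class StdoutCaptureTest(unittest.TestCase):
    def test_prints_expected_line(self):
        _old = sys.stdout
        sys.stdout = StringIO()
        try:
            # stand-in for the code under test
            print('Pulling Docker image: test/test:latest')
            captured = sys.stdout.getvalue()
        finally:
            sys.stdout = _old  # restore before asserting
        self.assertEqual(captured.splitlines()[0],
                         'Pulling Docker image: test/test:latest')

if __name__ == '__main__':
    unittest.main()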

Example 25

Project: gkno_launcher
Source File: helpInformation.py
View license
  def pipelineHelp(self, superpipeline, graph, arguments, gknoArguments):

    # Write out general header information.
    print(file = sys.stdout)
    self.writeSimpleLine((len(superpipeline.pipeline) + 28) * '=', isIndent = False, noLeadingTabs = 0)
    self.writeSimpleLine('gkno pipeline usage - ' + superpipeline.pipeline, isIndent = True, noLeadingTabs = 0)
    self.writeSimpleLine((len(superpipeline.pipeline) + 28) * '=', isIndent = False, noLeadingTabs = 0)
    print(file = sys.stdout)
    self.writeSimpleLine('Usage: gkno ' + superpipeline.pipeline + ' [options]', isIndent = False, noLeadingTabs = 0)
    print(file = sys.stdout)

    # Print out the description of the pipeline.
    self.writeSimpleLine('Description:', isIndent = False, noLeadingTabs = 0)
    self.writeSimpleLine(superpipeline.pipelineConfigurationData[superpipeline.pipeline].description, isIndent = False, noLeadingTabs = 1)
    print(file = sys.stdout)

    # Write out the pipeline workflow.
    self.writeSimpleLine('Workflow:', isIndent = False, noLeadingTabs = 0)
    length = len(max(graph.workflow, key = len)) + 3
    for task in graph.workflow:
      tool        = superpipeline.tasks[task]
      description = superpipeline.toolConfigurationData[tool].description
      self.writeComplexLine([task + ':', description + ' [Tool: ' + tool + ']'], [length, 0], noLeadingTabs = 1)
    print(file = sys.stdout)

    # List all the available parameter sets.
    parameterSets = superpipeline.pipelineConfigurationData[superpipeline.pipeline].getParameterSetNames()
    if parameterSets:
      self.writeSimpleLine('Parameter sets:', isIndent = False, noLeadingTabs = 0)

      # Loop over the parameter sets and get their descriptions.
      length = len(max(parameterSets, key = len)) + 3

      # Include the 'none' parameter set.
      description = 'Do not include any parameter set information.'
      self.writeComplexLine(['none:', description], [length, 0], noLeadingTabs = 1)
      for parameterSet in parameterSets:
        description = superpipeline.pipelineConfigurationData[superpipeline.pipeline].getParameterSetDescription(parameterSet)
        self.writeComplexLine([parameterSet + ':', description], [length, 0], noLeadingTabs = 1)
      print(file = sys.stdout)

    # Loop over all of the available arguments.
    argumentHelp    = {}
    argumentLengths = {}
    for argument in arguments:

      # If this argument was imported from a constituent tool, check to see if the tool highlights this
      # argument as hidden from the user.
      hideInHelp = False
      if arguments[argument].isImported:
        task       = arguments[argument].importedFromTask
        tool       = graph.getGraphNodeAttribute(task, 'tool')
        toolData   = superpipeline.getToolData(tool)
        hideInHelp = toolData.getArgumentAttribute(argument, 'hideInHelp')

      # Only process arguments for the top level pipeline (e.g. not for arguments that include
      # the address of the tool/pipeline that they point to). Also, do not include arguments that
      # have been marked in the configuration file as not to be included in the help message.
      if '.' not in argument and not arguments[argument].hideInHelp and not hideInHelp:
        graphNodeIds      = arguments[argument].graphNodeIds
        category          = arguments[argument].category
        dataType          = arguments[argument].dataType
        description       = arguments[argument].description
        shortFormArgument = arguments[argument].shortFormArgument

        # Determine if any of the nodes connected with this argument list the argument as required. If the node
        # already has values, they are coming from a parameter set. If this is required, make it clear that it
        # isn't necessary to define the value.
        isRequired = False
        for graphNodeId in graphNodeIds:
          isRequired = True if (graph.getGraphNodeAttribute(graphNodeId, 'isRequired') or isRequired) else False
          hasValues  = True if graph.getGraphNodeAttribute(graphNodeId, 'values') else False

        # if the argument is required, add [REQUIRED] to the end of the description.
        if isRequired:
          if hasValues: description += ' [REQUIRED AND SET]'
          else: description += ' [REQUIRED]'

        # Build the argument string to write to the screen.
        argumentString = str(argument + ' (' + shortFormArgument + '):')

        # Add the information to a list.
        if category not in argumentHelp: argumentHelp[category] = []
        if category not in argumentLengths: argumentLengths[category] = [0, 0, 0]
        argumentHelp[category].append([argumentString, dataType, description])

        # Update the lengths of the longest argument string and data type.
        if len(argumentString) + 3 > argumentLengths[category][0]: argumentLengths[category][0] = len(argumentString) + 3
        if dataType:
          if len(dataType) + 3 > argumentLengths[category][1]: argumentLengths[category][1] = len(dataType) + 3

    # Print the arguments to screen, starting with the inputs.
    if 'Inputs' in argumentHelp:
      self.writeSimpleLine('Inputs', isIndent = False, noLeadingTabs = 0)
      for argumentInformation in sorted(argumentHelp['Inputs']):
        self.writeComplexLine(argumentInformation, argumentLengths['Inputs'], noLeadingTabs = 1)
      print(file = sys.stdout)

    # Then the outputs.
    if 'Outputs' in argumentHelp:
      self.writeSimpleLine('Outputs', isIndent = False, noLeadingTabs = 0)
      for argumentInformation in sorted(argumentHelp['Outputs']):
        self.writeComplexLine(argumentInformation, argumentLengths['Outputs'], noLeadingTabs = 1)
      print(file = sys.stdout)

    # Then all other categories.
    for category in argumentHelp:
      if category != 'Inputs' and category != 'Outputs':
        self.writeSimpleLine(category, isIndent = False, noLeadingTabs = 0)
        for argumentInformation in sorted(argumentHelp[category]):
          self.writeComplexLine(argumentInformation, argumentLengths[category], noLeadingTabs = 1)
        print(file = sys.stdout)

    # If gkno specific arguments are to be displayed, display them after all of the pipeline arguments.
    if gknoArguments: self.gknoArgumentHelp(gknoArguments)

    # Write out all of the values included in the selected parameter set, if there are any.
    self.parameterSets(graph, superpipeline, arguments)

    # Terminate.
    exit(0)
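
Two sys.stdout habits recur throughout this help printer: bare `print(file = sys.stdout)` calls emit the blank separator lines, and column widths are computed from the longest key before anything is written. A compact sketch of the aligned two-column layout (simple ljust padding stands in for writeComplexLine; the task names are illustrative):

import sys

tasks = {'align': 'Align the reads', 'sort': 'Sort the alignments'}
length = len(max(tasks, key=len)) + 3  # longest task name plus padding

print('Workflow:', file=sys.stdout)
for task, description in tasks.items():
    print('  %s%s' % ((task + ':').ljust(length), description), file=sys.stdout)
print(file=sys.stdout)  # blank separator line, as in the example above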

Example 26

Project: jhbuild
Source File: goalreport.py
View license
    def run(self, config, options, args, help=None):
        if options.output:
            output = StringIO()
            global curses
            if curses and config.progress_bar:
                try:
                    curses.setupterm()
                except:
                    curses = None
        else:
            output = sys.stdout

        if not self.checks:
            self.load_checks_from_options(options.checks)

        self.load_bugs(options.bugfile)
        self.load_false_positives(options.falsepositivesfile)

        config.devhelp_dirname = options.devhelp_dirname
        config.partial_build = False

        module_set = jhbuild.moduleset.load(config)
        if options.list_all_modules:
            self.module_list = module_set.modules.values()
        else:
            self.module_list = module_set.get_module_list(args or config.modules, config.skip)

        results = {}
        try:
            cachedir = os.path.join(os.environ['XDG_CACHE_HOME'], 'jhbuild')
        except KeyError:
            cachedir = os.path.join(os.environ['HOME'], '.cache','jhbuild')
        if options.cache:
            try:
                results = cPickle.load(file(os.path.join(cachedir, options.cache)))
            except:
                pass

        self.repeat_row_header = 0
        if len(self.checks) > 4:
            self.repeat_row_header = 1

        for module_num, mod in enumerate(self.module_list):
            if mod.type in ('meta', 'tarball'):
                continue
            if not mod.branch or not mod.branch.repository.__class__.__name__ in (
                    'SubversionRepository', 'GitRepository'):
                if not mod.moduleset_name.startswith('gnome-external-deps'):
                    continue

            if not os.path.exists(mod.branch.srcdir):
                continue

            tree_id = mod.branch.tree_id()
            valid_cache = (tree_id and results.get(mod.name, {}).get('tree-id') == tree_id)

            if not mod.name in results:
                results[mod.name] = {
                    'results': {}
                }
            results[mod.name]['tree-id'] = tree_id
            r = results[mod.name]['results']
            for check in self.checks:
                if valid_cache and check.__name__ in r:
                    continue
                try:
                    c = check(config, mod)
                except ExcludedModuleException:
                    continue

                if output != sys.stdout and config.progress_bar:
                    progress_percent = 1.0 * (module_num-1) / len(self.module_list)
                    msg = '%s: %s' % (mod.name, check.__name__)
                    self.display_status_line(progress_percent, module_num, msg)

                try:
                    c.run()
                except CouldNotPerformCheckException:
                    continue
                except ExcludedModuleException:
                    continue

                try:
                    c.fix_false_positive(self.false_positives.get((mod.name, check.__name__)))
                except ExcludedModuleException:
                    continue

                r[check.__name__] = [c.status, c.complexity, c.result_comment]

        if not os.path.exists(cachedir):
            os.makedirs(cachedir)
        if options.cache:
            cPickle.dump(results, file(os.path.join(cachedir, options.cache), 'w'))

        print >> output, HTML_AT_TOP % {'title': self.title}
        if self.page_intro:
            print >> output, self.page_intro
        print >> output, '<table>'
        print >> output, '<thead>'
        print >> output, '<tr><td></td>'
        for check in self.checks:
            print >> output, '<th>%s</th>' % check.__name__
        print >> output, '<td></td></tr>'
        if [x for x in self.checks if x.header_note]:
            print >> output, '<tr><td></td>'
            for check in self.checks:
                print >> output, '<td>%s</td>' % (check.header_note or '')
            print >> output, '</tr>'
        print >> output, '</thead>'
        print >> output, '<tbody>'

        suites = []
        processed_modules = {'gnome-common': True}
        for module_key, module in module_set.modules.items():
            if not isinstance(module_set.get_module(module_key), MetaModule):
                continue
            if module_key.endswith('upcoming-deprecations'):
                # mark deprecated modules as processed, so they don't show in "Others"
                try:
                    metamodule = module_set.get_module(module_key)
                except KeyError:
                    continue
                for module_name in metamodule.dependencies:
                    processed_modules[module_name] = True
            else:
                suites.append([module_key, module_key.replace('meta-', '')])

        not_other_module_names = []
        for suite_key, suite_label in suites:
            metamodule = module_set.get_module(suite_key)
            module_names = [x for x in metamodule.dependencies if x in results]
            if not module_names:
                continue
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                    1+len(self.checks)+self.repeat_row_header, suite_label)
            for module_name in module_names:
                if module_name in not_other_module_names:
                    continue
                r = results[module_name].get('results')
                print >> output, self.get_mod_line(module_name, r)
                processed_modules[module_name] = True
            not_other_module_names.extend(module_names)

        external_deps = [x for x in results.keys() if \
                         x in [y.name for y in self.module_list] and \
                         not x in processed_modules and \
                         module_set.get_module(x).moduleset_name.startswith('gnome-external-deps')]
        if external_deps:
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                    1+len(self.checks)+self.repeat_row_header, 'External Dependencies')
            for module_name in sorted(external_deps):
                if not module_name in results:
                    continue
                r = results[module_name].get('results')
                try:
                    version = module_set.get_module(module_name).branch.version
                except:
                    version = None
                print >> output, self.get_mod_line(module_name, r, version_number=version)

        other_module_names = [x for x in results.keys() if \
                              not x in processed_modules and not x in external_deps]
        if other_module_names:
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                    1+len(self.checks)+self.repeat_row_header, 'Others')
            for module_name in sorted(other_module_names):
                if not module_name in results:
                    continue
                r = results[module_name].get('results')
                print >> output, self.get_mod_line(module_name, r)
        print >> output, '</tbody>'
        print >> output, '<tfoot>'

        print >> output, '<tr><td></td>'
        for check in self.checks:
            print >> output, '<th>%s</th>' % check.__name__
        print >> output, '<td></td></tr>'

        print >> output, self.get_stat_line(results, not_other_module_names)
        print >> output, '</tfoot>'
        print >> output, '</table>'

        if (options.bugfile and options.bugfile.startswith('http://')) or \
                (options.falsepositivesfile and options.falsepositivesfile.startswith('http://')):
            print >> output, '<div id="data">'
            print >> output, '<p>The following data sources are used:</p>'
            print >> output, '<ul>'
            if options.bugfile.startswith('http://'):
                print >> output, '  <li><a href="%s">Bugs</a></li>' % options.bugfile
            if options.falsepositivesfile.startswith('http://'):
                print >> output, '  <li><a href="%s">False positives</a></li>' % options.falsepositivesfile
            print >> output, '</ul>'
            print >> output, '</div>'

        print >> output, '<div id="footer">'
        print >> output, 'Generated:', time.strftime('%Y-%m-%d %H:%M:%S %z')
        print >> output, 'on ', socket.getfqdn()
        print >> output, '</div>'

        print >> output, '</body>'
        print >> output, '</html>'

        if output != sys.stdout:
            file(options.output, 'w').write(output.getvalue())

        if output != sys.stdout and config.progress_bar:
            sys.stdout.write('\n')
            sys.stdout.flush()
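
The whole report is written through a single `output` handle that is either a StringIO (when --output was given) or sys.stdout, and only at the end does the code decide whether to dump the buffer to disk. A minimal sketch of that dual-sink pattern, in Python 3 print-function form (the function and file names are illustrative):

import sys
from io import StringIO

def write_report(output_path=None):
    output = StringIO() if output_path else sys.stdout

    print('<html><body>report body</body></html>', file=output)

    # Flush the buffer to disk only if we were not writing to the terminal.
    if output is not sys.stdout:
        with open(output_path, 'w') as f:
            f.write(output.getvalue())

write_report()               # straight to the terminal
write_report('report.html')  # buffered, then written to disk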

Example 27

Project: gprMax
Source File: gprMax.py
View license
def run_model(args, modelrun, numbermodelruns, inputfile, usernamespace):
    """Runs a model - processes the input file; builds the Yee cells; calculates update coefficients; runs main FDTD loop.

    Args:
        args (Namespace): Command line arguments.
        modelrun (int): Current model run number.
        numbermodelruns (int): Total number of model runs.
        inputfile (str): Name of the input file to open.
        usernamespace (dict): Namespace that can be accessed by user in any Python code blocks in input file.

    Returns:
        tsolve (int): Length of time (seconds) of main FDTD calculations
    """

    # Monitor memory usage
    p = psutil.Process()

    # Declare variable to hold FDTDGrid class
    global G

    # Normal model reading/building process; bypassed if geometry information to be reused
    if 'G' not in globals():
        inputfilestr = '\n--- Model {} of {}, input file: {}'.format(modelrun, numbermodelruns, inputfile)
        print(Fore.GREEN + '{} {}\n'.format(inputfilestr, '-' * (get_terminal_width() - 1 - len(inputfilestr))) + Style.RESET_ALL)

        # Add the current model run to namespace that can be accessed by user in any Python code blocks in input file
        usernamespace['current_model_run'] = modelrun

        # Read input file and process any Python or include commands
        processedlines = process_python_include_code(inputfile, usernamespace)

        # Print constants/variables in user-accessible namespace
        uservars = ''
        for key, value in sorted(usernamespace.items()):
            if key != '__builtins__':
                uservars += '{}: {}, '.format(key, value)
        print('Constants/variables used/available for Python scripting: {{{}}}\n'.format(uservars[:-2]))

        # Write a file containing the input commands after Python or include commands have been processed
        if args.write_processed:
            write_processed_file(inputfile, modelrun, numbermodelruns, processedlines)

        # Check validity of command names and that essential commands are present
        singlecmds, multicmds, geometry = check_cmd_names(processedlines)

        # Initialise an instance of the FDTDGrid class
        G = FDTDGrid()
        G.inputfilename = os.path.split(inputfile)[1]
        G.inputdirectory = os.path.dirname(os.path.abspath(inputfile))

        # Create built-in materials
        m = Material(0, 'pec')
        m.se = float('inf')
        m.type = 'builtin'
        m.averagable = False
        G.materials.append(m)
        m = Material(1, 'free_space')
        m.type = 'builtin'
        G.materials.append(m)

        # Process parameters for commands that can only occur once in the model
        process_singlecmds(singlecmds, G)

        # Process parameters for commands that can occur multiple times in the model
        print()
        process_multicmds(multicmds, G)

        # Initialise an array for volumetric material IDs (solid), boolean arrays for specifying materials not to be averaged (rigid),
        # an array for cell edge IDs (ID)
        G.initialise_geometry_arrays()

        # Initialise arrays for the field components
        G.initialise_field_arrays()

        # Process geometry commands in the order they were given
        process_geometrycmds(geometry, G)

        # Build the PMLs and calculate initial coefficients
        print()
        if all(value == 0 for value in G.pmlthickness.values()):
            if G.messages:
                print('PML boundaries: switched off')
            pass # If all the PMLs are switched off there is nothing to build
        else:
            if G.messages:
                if all(value == G.pmlthickness['xminus'] for value in G.pmlthickness.values()):
                    pmlinfo = G.pmlthickness['xminus']
                else:
                    pmlinfo = ''
                    for key, value in G.pmlthickness.items():
                        pmlinfo += '{}: {}, '.format(key, value)
                    pmlinfo = pmlinfo[:-2]
                print('PML boundaries: {} cells'.format(pmlinfo))
            pbar = tqdm(total=sum(1 for value in G.pmlthickness.values() if value > 0), desc='Building PML boundaries', ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable)
            build_pmls(G, pbar)
            pbar.close()

        # Build the model, i.e. set the material properties (ID) for every edge of every Yee cell
        print()
        pbar = tqdm(total=2, desc='Building main grid', ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable)
        build_electric_components(G.solid, G.rigidE, G.ID, G)
        pbar.update()
        build_magnetic_components(G.solid, G.rigidH, G.ID, G)
        pbar.update()
        pbar.close()

        # Process any voltage sources (that have resistance) to create a new material at the source location
        for voltagesource in G.voltagesources:
            voltagesource.create_material(G)

        # Initialise arrays of update coefficients to pass to update functions
        G.initialise_std_update_coeff_arrays()

        # Initialise arrays of update coefficients and temporary values if there are any dispersive materials
        if Material.maxpoles != 0:
            G.initialise_dispersive_arrays()

        # Process complete list of materials - calculate update coefficients, store in arrays, and build text list of materials/properties
        materialsdata = process_materials(G)
        if G.messages:
            materialstable = AsciiTable(materialsdata)
            materialstable.outer_border = False
            materialstable.justify_columns[0] = 'right'
            print(materialstable.table)

        # Check to see if numerical dispersion might be a problem
        results = dispersion_analysis(G)
        if results['deltavp'] and np.abs(results['deltavp']) > G.maxnumericaldisp:
            print(Fore.RED + "\nWARNING: Potentially significant numerical dispersion. Largest physical phase-velocity error is {:.2f}% in material '{}' with wavelength sampled by {} cells (maximum significant frequency {:g}Hz)".format(results['deltavp'], results['material'].ID, round_value(results['N']), results['maxfreq']) + Style.RESET_ALL)
        elif results['deltavp']:
            print("\nNumerical dispersion analysis: largest physical phase-velocity error is {:.2f}% in material '{}' with wavelength sampled by {} cells (maximum significant frequency {:g}Hz)".format(results['deltavp'], results['material'].ID, round_value(results['N']), results['maxfreq']))

    # If geometry information to be reused between model runs
    else:
        inputfilestr = '\n--- Model {} of {}, input file (not re-processed, i.e. geometry fixed): {}'.format(modelrun, numbermodelruns, inputfile)
        print(Fore.GREEN + '{} {}\n'.format(inputfilestr, '-' * (get_terminal_width() - 1 - len(inputfilestr))) + Style.RESET_ALL)

        # Clear arrays for field components
        G.initialise_field_arrays()

        # Clear arrays for fields in PML
        for pml in G.pmls:
            pml.initialise_field_arrays()

    # Adjust position of simple sources and receivers if required
    if G.srcsteps[0] > 0 or G.srcsteps[1] > 0 or G.srcsteps[2] > 0:
        for source in itertools.chain(G.hertziandipoles, G.magneticdipoles):
            if modelrun == 1:
                if source.xcoord + G.srcsteps[0] * (numbermodelruns - 1) > G.nx or source.ycoord + G.srcsteps[1] * (numbermodelruns - 1) > G.ny or source.zcoord + G.srcsteps[2] * (numbermodelruns - 1) > G.nz:
                    raise GeneralError('Source(s) will be stepped to a position outside the domain.')
            source.xcoord = source.xcoordorigin + (modelrun - 1) * G.srcsteps[0]
            source.ycoord = source.ycoordorigin + (modelrun - 1) * G.srcsteps[1]
            source.zcoord = source.zcoordorigin + (modelrun - 1) * G.srcsteps[2]
    if G.rxsteps[0] > 0 or G.rxsteps[1] > 0 or G.rxsteps[2] > 0:
        for receiver in G.rxs:
            if modelrun == 1:
                if receiver.xcoord + G.rxsteps[0] * (numbermodelruns - 1) > G.nx or receiver.ycoord + G.rxsteps[1] * (numbermodelruns - 1) > G.ny or receiver.zcoord + G.rxsteps[2] * (numbermodelruns - 1) > G.nz:
                    raise GeneralError('Receiver(s) will be stepped to a position outside the domain.')
            receiver.xcoord = receiver.xcoordorigin + (modelrun - 1) * G.rxsteps[0]
            receiver.ycoord = receiver.ycoordorigin + (modelrun - 1) * G.rxsteps[1]
            receiver.zcoord = receiver.zcoordorigin + (modelrun - 1) * G.rxsteps[2]

    # Write files for any geometry views and geometry object outputs
    if not (G.geometryviews or G.geometryobjectswrite) and args.geometry_only:
        print(Fore.RED + '\nWARNING: No geometry views or geometry objects to output found.' + Style.RESET_ALL)
    if G.geometryviews:
        print()
        for i, geometryview in enumerate(G.geometryviews):
            geometryview.set_filename(modelrun, numbermodelruns, G)
            pbar = tqdm(total=geometryview.datawritesize, unit='byte', unit_scale=True, desc='Writing geometry view file {} of {}, {}'.format(i + 1, len(G.geometryviews), os.path.split(geometryview.filename)[1]), ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable)
            geometryview.write_vtk(modelrun, numbermodelruns, G, pbar)
            pbar.close()
    if G.geometryobjectswrite:
        
        for i, geometryobject in enumerate(G.geometryobjectswrite):
            pbar = tqdm(total=geometryobject.datawritesize, unit='byte', unit_scale=True, desc='Writing geometry object file {} of {}, {}'.format(i + 1, len(G.geometryobjectswrite), os.path.split(geometryobject.filename)[1]), ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable)
            geometryobject.write_hdf5(G, pbar)
            pbar.close()

    # Run simulation (if not doing geometry only)
    if not args.geometry_only:

        # Prepare any snapshot files
        for snapshot in G.snapshots:
            snapshot.prepare_vtk_imagedata(modelrun, numbermodelruns, G)

        # Output filename
        inputfileparts = os.path.splitext(inputfile)
        if numbermodelruns == 1:
            outputfile = inputfileparts[0] + '.out'
        else:
            outputfile = inputfileparts[0] + str(modelrun) + '.out'
        print('\nOutput file: {}\n'.format(outputfile))

        ####################################
        #  Start - Main FDTD calculations  #
        ####################################
        tsolvestart = perf_counter()

        # Absolute time
        abstime = 0

        for timestep in tqdm(range(G.iterations), desc='Running simulation, model ' + str(modelrun) + ' of ' + str(numbermodelruns), ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable):
            # Store field component values for every receiver and transmission line
            store_outputs(timestep, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz, G)

            # Write any snapshots to file
            for i, snap in enumerate(G.snapshots):
                if snap.time == timestep + 1:
                    snapiters = 36 * (((snap.xf - snap.xs) / snap.dx) * ((snap.yf - snap.ys) / snap.dy) * ((snap.zf - snap.zs) / snap.dz))
                    pbar = tqdm(total=snapiters, leave=False, unit='byte', unit_scale=True, desc='  Writing snapshot file {} of {}, {}'.format(i + 1, len(G.snapshots), os.path.split(snap.filename)[1]), ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable)
                    snap.write_vtk_imagedata(G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz, G, pbar)
                    pbar.close()

            # Update electric field components
            if Material.maxpoles == 0:  # All materials are non-dispersive so do standard update
                update_electric(G.nx, G.ny, G.nz, G.nthreads, G.updatecoeffsE, G.ID, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz)
            elif Material.maxpoles == 1:  # If there are any dispersive materials do 1st part of dispersive update (it is split into two parts as it requires present and updated electric field values).
                update_electric_dispersive_1pole_A(G.nx, G.ny, G.nz, G.nthreads, G.updatecoeffsE, G.updatecoeffsdispersive, G.ID, G.Tx, G.Ty, G.Tz, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz)
            elif Material.maxpoles > 1:
                update_electric_dispersive_multipole_A(G.nx, G.ny, G.nz, G.nthreads, Material.maxpoles, G.updatecoeffsE, G.updatecoeffsdispersive, G.ID, G.Tx, G.Ty, G.Tz, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz)

            # Update electric field components with the PML correction
            for pml in G.pmls:
                pml.update_electric(G)

            # Update electric field components from sources (update any Hertzian dipole sources last)
            for source in G.voltagesources + G.transmissionlines + G.hertziandipoles:
                source.update_electric(abstime, G.updatecoeffsE, G.ID, G.Ex, G.Ey, G.Ez, G)

            # If there are any dispersive materials do 2nd part of dispersive update (it is split into two parts as it requires present and updated electric field values). Therefore it can only be completely updated after the electric field has been updated by the PML and source updates.
            if Material.maxpoles == 1:
                update_electric_dispersive_1pole_B(G.nx, G.ny, G.nz, G.nthreads, G.updatecoeffsdispersive, G.ID, G.Tx, G.Ty, G.Tz, G.Ex, G.Ey, G.Ez)
            elif Material.maxpoles > 1:
                update_electric_dispersive_multipole_B(G.nx, G.ny, G.nz, G.nthreads, Material.maxpoles, G.updatecoeffsdispersive, G.ID, G.Tx, G.Ty, G.Tz, G.Ex, G.Ey, G.Ez)

            # Increment absolute time value
            abstime += 0.5 * G.dt

            # Update magnetic field components
            update_magnetic(G.nx, G.ny, G.nz, G.nthreads, G.updatecoeffsH, G.ID, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz)

            # Update magnetic field components with the PML correction
            for pml in G.pmls:
                pml.update_magnetic(G)

            # Update magnetic field components from sources
            for source in G.transmissionlines + G.magneticdipoles:
                source.update_magnetic(abstime, G.updatecoeffsH, G.ID, G.Hx, G.Hy, G.Hz, G)

            # Increment absolute time value
            abstime += 0.5 * G.dt

        tsolve = int(perf_counter() - tsolvestart)

        # Write an output file in HDF5 format
        write_hdf5_outputfile(outputfile, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz, G)

        ##################################
        #  End - Main FDTD calculations  #
        ##################################

    if G.messages:
        print('Memory (RAM) used: ~{}'.format(human_size(p.memory_info().rss)))

    # If geometry information to be reused between model runs then FDTDGrid class instance must be global so that it persists
    if not args.geometry_fixed:
        del G

    # Return time to complete solving if in benchmarking mode
    if args.benchmark:
        return tsolve
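
Every progress bar in this model runner is pointed at sys.stdout via tqdm's `file` argument, so bar updates and ordinary print() calls share one stream. A minimal sketch, assuming the third-party tqdm package is installed:

import sys
from tqdm import tqdm

# tqdm writes to sys.stderr by default; directing it at sys.stdout
# keeps the bar interleaved cleanly with regular print() output.
for _ in tqdm(range(100), desc='Building main grid', file=sys.stdout):
    pass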

Example 28

Project: pymetawear
Source File: setup.py
View license
def build_solution():
    # Establish source paths.
    basedir = os.path.abspath(os.path.dirname(__file__))
    pkg_dir = os.path.join(basedir, 'pymetawear')
    path_to_metawear_python_wrappers = os.path.join(
        pkg_dir, 'Metawear-CppAPI', 'wrapper', 'python')

    if os.path.exists(os.path.join(basedir, '.git')):
        # The package was cloned from Github and the submodule can
        # therefore be brought in by Git methods.

        # Git submodule init
        p = subprocess.Popen(['git', 'submodule', 'init'],
                             cwd=basedir, stdout=sys.stdout, stderr=sys.stderr)
        p.communicate()

        # Git submodule update
        p = subprocess.Popen(['git', 'submodule', 'update'],
                             cwd=basedir, stdout=sys.stdout, stderr=sys.stderr)
        p.communicate()
    else:
        # The package was downloaded as a zip or tar.gz from PyPI. It should
        # have the MetaWear-CppAPI folder bundled, so building can proceed immediately.
        pass

    if platform.uname()[0] == 'Linux':
        arch = os.uname()[-1]
        if arch in ('x86_64', 'amd64'):
            dist_dir = 'x64'
        elif 'arm' in arch:
            dist_dir = 'arm'
        else:
            dist_dir = 'x86'

        # Run make file for MetaWear-CppAPI
        p = subprocess.Popen(
            ['make', 'clean'],
            cwd=os.path.join(pkg_dir, 'Metawear-CppAPI'),
            stdout=sys.stdout, stderr=sys.stderr)
        p.communicate()
        p = subprocess.Popen(
            ['make', 'build'],
            cwd=os.path.join(pkg_dir, 'Metawear-CppAPI'),
            stdout=sys.stdout, stderr=sys.stderr)
        p.communicate()

        path_to_dist_dir = os.path.join(
            pkg_dir, 'Metawear-CppAPI', 'dist', 'release', 'lib', dist_dir)

        for f in [s for s in os.listdir(pkg_dir) if s.startswith('libmetawear')]:
            os.remove(os.path.join(pkg_dir, f))

        symlinks_to_create = []
        # Copy the built shared library to pymetawear folder.
        for dist_file in glob.glob(path_to_dist_dir + "/libmetawear.*"):
            if os.path.islink(dist_file):
                symlinks_to_create.append(
                    (os.path.basename(os.readlink(dist_file)),
                     os.path.basename(dist_file)))
            else:
                destination_file = os.path.join(
                    pkg_dir, os.path.basename(dist_file))
                shutil.copy(dist_file, destination_file)

        # Create symlinks for the libmetawear shared library.
        for symlink_src, symlink_dest in symlinks_to_create:
            destination_symlink = os.path.join(pkg_dir, symlink_dest)
            os.symlink(symlink_src, destination_symlink)

    elif platform.uname()[0] == 'Windows':
        arch = platform.architecture()[0]
        if arch == '32bit':
            dist_dir = 'Win32'
            msbuild_file = 'MetaWear.Win32.vcxproj'
            build_options = '/p:Configuration=Release;Platform=Win32'
        elif 'arm' in arch:
            dist_dir = 'ARM'
            msbuild_file = 'MetaWear.WinRT.vcxproj'
            build_options = '/p:Configuration=Release;Platform=ARM'
        else:
            dist_dir = 'x64'
            msbuild_file = 'MetaWear.Win32.vcxproj'
            build_options = '/p:Configuration=Release;Platform=x64'

        # Run msbuild file for MetaWear-CppAPI
        vsvars_file = glob.glob('c:\\Progr*/**/**/Too*/vsvars32.bat')[0]
        p = subprocess.Popen('"{0}" & MSBuild.exe {1} {2}'.format(
            vsvars_file, msbuild_file, build_options),
            cwd=os.path.join(pkg_dir, 'Metawear-CppAPI'),
            stdout=sys.stdout, stderr=sys.stderr)
        p.communicate()

        for f in [s for s in os.listdir(pkg_dir) if (s.startswith('MetaWear') and s.endswith('.dll'))]:
            os.remove(os.path.join(pkg_dir, f))

        path_to_dist_dir = os.path.join(
            pkg_dir, 'Metawear-CppAPI', 'dist', 'Release', 'lib', dist_dir)

        # Copy the built shared library to pymetawear folder.
        for dist_file in glob.glob(path_to_dist_dir + "/MetaWear.*.dll"):
            destination_file = os.path.join(
                pkg_dir, os.path.basename(dist_file))
            shutil.copy(dist_file, destination_file)
    else:
        raise NotImplementedError("Building on this platform is not implemented.")

    # Copy the Mbientlab Python wrappers to pymetawear folder.
    # First create folders if needed.
    try:
        os.makedirs(os.path.join(pkg_dir, 'mbientlab', 'metawear'))
    except:
        pass

    init_files_to_create = [
        os.path.join(pkg_dir, 'mbientlab', '__init__.py'),
        os.path.join(pkg_dir, 'mbientlab', 'metawear', '__init__.py')
    ]
    for init_file in init_files_to_create:
        with open(init_file, 'w') as f:
            f.write("#!/usr/bin/env python\n# -*- coding: utf-8 -*-")

    # Copy all Python files from the MetaWear C++ API Python wrapper
    for pth, _, pyfiles in os.walk(
            os.path.join(path_to_metawear_python_wrappers,
                         'mbientlab', 'metawear')):
        for py_file in filter(lambda x: os.path.splitext(x)[1] == '.py', pyfiles):
            try:
                shutil.copy(
                    os.path.join(pth, py_file),
                    os.path.join(pkg_dir, 'mbientlab', 'metawear', py_file))
            except:
                pass
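
The build steps hand sys.stdout and sys.stderr directly to subprocess.Popen, so the child's output streams live into the parent's console. A minimal sketch of that pass-through (the command is illustrative; note it only works while sys.stdout is a real file with a fileno(), not a StringIO substitute):

import subprocess
import sys

# The child process inherits the parent's stdout/stderr file descriptors,
# so its output appears in the same console as the parent's.
p = subprocess.Popen(['echo', 'building...'],
                     stdout=sys.stdout, stderr=sys.stderr)
p.communicate()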

Example 29

Project: hitchtest
Source File: suite.py
View license
    def run(self, quiet=False):
        """Run all tests in the defined suite of modules."""
        tests = self.tests()
        failedfast = False
        result_list = []

        for test in tests:
            if quiet:
                hijacked_stdout = sys.stdout
                hijacked_stderr = sys.stderr
                sys.stdout = open(path.join(self.settings['engine_folder'], ".hitch", "test.out"), "ab", 0)
                sys.stderr = open(path.join(self.settings['engine_folder'], ".hitch", "test.err"), "ab", 0)

            def run_test_in_separate_process(file_descriptor_stdin, result_queue):
                """Change process group, run test and return result via a queue."""
                orig_pgid = os.getpgrp()
                os.setpgrp()
                result_queue.put("pgrp")
                if not quiet:
                    sys.stdin = os.fdopen(file_descriptor_stdin)
                result = test.run()
                result_queue.put(result)
                if not quiet:
                    try:
                        os.tcsetpgrp(file_descriptor_stdin, orig_pgid)
                    except OSError as error:
                        if error.args[0] == 25:
                            pass

            if not quiet:
                try:
                    orig_stdin_termios = termios.tcgetattr(sys.stdin.fileno())
                except termios.error:
                    orig_stdin_termios = None
                orig_stdin_fileno = sys.stdin.fileno()
            orig_pgid = os.getpgrp()

            file_descriptor_stdin = sys.stdin.fileno()
            result_queue = multiprocessing.Queue()


            # Start new process to run test in, to isolate it from future test runs
            test_process = multiprocessing.Process(
                target=run_test_in_separate_process,
                args=(file_descriptor_stdin, result_queue)
            )

            test_timed_out = False

            test_process.start()

            # Ignore all exit signals but pass them on
            signal_pass_on_to_separate_process_group(test_process.pid)

            # Wait until PGRP is changed
            result_queue.get()

            # Make stdin go to the test process so that you can use ipython, etc.
            if not quiet:
                try:
                    os.tcsetpgrp(file_descriptor_stdin, os.getpgid(test_process.pid))
                except OSError as error:
                    if error.args[0] == 25:
                        pass

            # Wait until process has finished
            proc = psutil.Process(test_process.pid)
            test_timeout = self.settings.get("test_timeout", None)
            test_shutdown_timeout = self.settings.get("test_shutdown_timeout", 10)

            try:
                proc.wait(timeout=test_timeout)
            except psutil.TimeoutExpired:
                test_timed_out = True
                proc.send_signal(signal.SIGTERM)

                try:
                    proc.wait(timeout=test_shutdown_timeout)
                except psutil.TimeoutExpired:
                    for child in proc.get_children(recursive=True):
                        child.send_signal(signal.SIGKILL)
                    proc.send_signal(signal.SIGKILL)


            # Take back signal handling from test running code
            signals_trigger_exit()


            try:
                result = result_queue.get_nowait()
            except multiprocessing.queues.Empty:
                result = Result(test, True, 0.0)

            if test_timed_out:
                result.aborted = False
            result_list.append(result)

            if not quiet and orig_stdin_termios is not None:
                try:
                    termios.tcsetattr(orig_stdin_fileno, termios.TCSANOW, orig_stdin_termios)
                except termios.error as err:
                    # I/O error caused by another test stopping this one
                    if err.args[0] == 5:
                        pass

            if quiet:
                sys.stdout = hijacked_stdout
                sys.stderr = hijacked_stderr

            if quiet and result is not None:
                if result.failure:
                    warn("X")
                else:
                    warn(".")

            if result.aborted:
                warn("Aborted\n")
                sys.exit(1)

            if self.settings.get('failfast', False) and result.failure:
                failedfast = True
                break
        return Results(result_list, failedfast, self.settings.get('colorless', False))
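
In quiet mode the runner above "hijacks" sys.stdout and sys.stderr: it saves the live streams, points the names at log files while the test runs, and assigns the originals back afterwards. A minimal sketch of that save/redirect/restore shape as a context manager (hitchtest does this inline; hijack_output and the file names here are illustrative):

import sys
from contextlib import contextmanager

@contextmanager
def hijack_output(out_path, err_path):
    """Temporarily send stdout/stderr to files, restoring them on exit."""
    saved_out, saved_err = sys.stdout, sys.stderr
    with open(out_path, 'a') as out, open(err_path, 'a') as err:
        sys.stdout, sys.stderr = out, err
        try:
            yield
        finally:
            sys.stdout, sys.stderr = saved_out, saved_err

# usage: with hijack_output('test.out', 'test.err'): run_noisy_test()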

Example 30

Project: gpkit
Source File: geometric_program.py
View license
    def solve(self, solver=None, verbosity=1, *args, **kwargs):
        """Solves a GeometricProgram and returns the solution.

        Arguments
        ---------
        solver : str or function (optional)
            By default uses one of the solvers found during installation.
            If set to "mosek", "mosek_cli", or "cvxopt", uses that solver.
            If set to a function, passes that function cs, A, p_idxs, and k.
        verbosity : int (optional)
            If greater than 0, prints solver name and solve time.
        *args, **kwargs :
            Passed to solver constructor and solver function.


        Returns
        -------
        result : dict
            A dictionary containing the translated solver result; keys below.

            cost : float
                The value of the objective at the solution.
            variables : dict
                The value of each variable at the solution.
            sensitivities : dict
                monomials : array of floats
                    Each monomial's dual variable value at the solution.
                posynomials : array of floats
                    Each posynomial's dual variable value at the solution.
        """
        def _get_solver(solver):
            """Get the solverfn and solvername associated with solver"""
            if solver is None:
                from . import settings
                solver = settings.get("default_solver", None)
                if not solver:
                    raise ValueError(
                        "No solver was given; perhaps gpkit was not properly"
                        " installed, or found no solvers during the"
                        " installation process.")

            if solver == "cvxopt":
                from ._cvxopt import cvxoptimize
                solverfn = cvxoptimize
            elif solver == "mosek_cli":
                from ._mosek import cli_expopt
                solverfn = cli_expopt.imize_fn(*args, **kwargs)
            elif solver == "mosek":
                from ._mosek import expopt
                solverfn = expopt.imize
            elif hasattr(solver, "__call__"):
                solverfn = solver
                solver = solver.__name__
            else:
                raise ValueError("Unknown solver '%s'." % solver)
            return solverfn, solver

        solverfn, solvername = _get_solver(solver)

        if verbosity > 0:
            print("Using solver '%s'" % solvername)
            print("Solving for %i variables." % len(self.varlocs))
            tic = time()

        default_kwargs = DEFAULT_SOLVER_KWARGS.get(solvername, {})
        for k in default_kwargs:
            kwargs.setdefault(k, default_kwargs[k])

        # NOTE: SIDE EFFECTS AS WE LOG SOLVER'S STDOUT AND OUTPUT
        original_stdout = sys.stdout
        self.solver_log = SolverLog(verbosity-1, original_stdout)
        try:
            sys.stdout = self.solver_log   # CAPTURED
            solver_out = solverfn(c=self.cs, A=self.A, p_idxs=self.p_idxs,
                                  k=self.k, *args, **kwargs)
            self.solver_out = solver_out
        finally:
            sys.stdout = original_stdout
        # STDOUT HAS BEEN RETURNED. ENDING SIDE EFFECTS.

        if verbosity > 0:
            soltime = time() - tic
            print("Solving took %.3g seconds." % (soltime,))
            tic = time()

        if solver_out.get("status", "").lower() != "optimal":
            raise RuntimeWarning(
                "final status of solver '%s' was '%s', not 'optimal'.\n\n"
                "The solver's result is stored in model.program.solver_out. "
                "A result dict can be generated via "
                "program._compile_result(program.solver_out)." %
                (solvername, solver_out.get("status", None)))

        self._generate_nula(solver_out)
        self.result = self._compile_result(solver_out)  # NOTE: SIDE EFFECTS
        if verbosity > 1:
            print ("result packing took %.2g%% of solve time" %
                   ((time() - tic) / soltime * 100))
            tic = time()

        self.check_solution(self.result["cost"], solver_out['primal'],
                            nu=solver_out["nu"], la=solver_out["la"])
        if verbosity > 1:
            print ("solution checking took %.2g%% of solve time" %
                   ((time() - tic) / soltime * 100))

        ## Let constraints process the results
        if hasattr(self.constraints, "process_result"):
            self.constraints.process_result(self.result)
        else:
            for constraint in self.constraints:
                if hasattr(constraint, "process_result"):
                    constraint.process_result(self.result)

        return self.result
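
The capture above works because sys.stdout can be any object with a write() method: gpkit installs its SolverLog for the duration of the solve and restores the real stream in the finally block, so an exception in the solver cannot leave stdout captured. A minimal sketch of the same idea with a hypothetical logger class:

import sys

class CapturingLog(object):
    """File-like object that records writes and optionally echoes them."""
    def __init__(self, echo_to=None):
        self.lines = []
        self.echo_to = echo_to
    def write(self, text):
        self.lines.append(text)
        if self.echo_to is not None:
            self.echo_to.write(text)
    def flush(self):
        if self.echo_to is not None:
            self.echo_to.flush()

original_stdout = sys.stdout
log = CapturingLog()
try:
    sys.stdout = log
    print("captured")          # lands in log.lines, not on the console
finally:
    sys.stdout = original_stdout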

Example 31

Project: corpkit
Source File: build.py
View license
def parse_corpus(proj_path=False, 
                 corpuspath=False, 
                 filelist=False, 
                 corenlppath=False, 
                 operations=False,
                 root=False, 
                 stdout=False, 
                 memory_mb=2000,
                 copula_head=True,
                 multiprocessing=False,
                 outname=False,
                 coref=True,
                 **kwargs
                ):
    """
    Create a CoreNLP-parsed and/or NLTK tokenised corpus
    """
    import subprocess
    from subprocess import PIPE, STDOUT, Popen
    from corpkit.process import get_corenlp_path
    import os
    import sys
    import re
    import chardet
    from time import localtime, strftime
    import time

    fileparse = kwargs.get('fileparse', False)
    url = 'http://nlp.stanford.edu/software/stanford-corenlp-full-2015-12-09.zip'
    
    if not check_jdk():
        print('Need latest Java.')
        return

    curdir = os.getcwd()
    note = kwargs.get('note', False)

    if proj_path is False:
        proj_path = os.path.dirname(os.path.abspath(corpuspath.rstrip('/')))

    basecp = os.path.basename(corpuspath)

    if fileparse:
        new_corpus_path = os.path.dirname(corpuspath)
    else:
        if outname:
            new_corpus_path = os.path.join(proj_path, 'data', outname)
        else:
            new_corpus_path = os.path.join(proj_path, 'data', '%s-parsed' % basecp)
            new_corpus_path = new_corpus_path.replace('-stripped-', '-')

    # todo:
    # this is not stable
    if os.path.join('data', 'data') in new_corpus_path:
        new_corpus_path = new_corpus_path.replace(os.path.join('data', 'data'), 'data')

    # this caused errors when multiprocessing
    # it used to be isdir, but supposedly there was a file there
    # i don't see how it's possible ...
    # i think it is a 'race condition', so we'll also put a try/except there
    
    if not os.path.exists(new_corpus_path):
        try:
            os.makedirs(new_corpus_path)
        except OSError:
            pass
    else:
        if not os.path.isfile(new_corpus_path):
            fs = get_filepaths(new_corpus_path, ext=False)
            if not multiprocessing:
                if any([f.endswith('.conll') for f in fs]):
                    print('Folder containing .conll files already exists: %s' % new_corpus_path)
                    return False
         
    corenlppath = get_corenlp_path(corenlppath)

    if not corenlppath:
        print("CoreNLP not found. Auto-installing.")
        cnlp_dir = os.path.join(os.path.expanduser("~"), 'corenlp')
        corenlppath, fpath = download_large_file(cnlp_dir, url,
                                                 root=root,
                                                 note=note,
                                                 actually_download=True,
                                                 custom_corenlp_dir=corenlppath)
        # cleanup
        if corenlppath is None and fpath is None:
            import shutil
            shutil.rmtree(new_corpus_path)
            shutil.rmtree(new_corpus_path.replace('-parsed', '-stripped'))
            os.remove(new_corpus_path.replace('-parsed', '-filelist.txt'))
            raise ValueError('CoreNLP needed to parse texts.')
        extract_cnlp(fpath)
        import glob
        globpath = os.path.join(corenlppath, 'stanford-corenlp*')
        corenlppath = [i for i in glob.glob(globpath) if os.path.isdir(i)]
        if corenlppath:
            corenlppath = corenlppath[-1]
        else:
            raise ValueError('CoreNLP installation failed for some reason. Try manual download.')

    # if not gui, don't mess with stdout
    if stdout is False:
        stdout = sys.stdout

    os.chdir(corenlppath)
    if root:
        root.update_idletasks()
        # not sure why reloading sys, but seems needed
        # in order to show files in the gui
        try:
            reload(sys)
        except NameError:
            # Python 3: reload lives in importlib
            import importlib
            importlib.reload(sys)
    if memory_mb is False:
        memory_mb = 2024

    # you can pass in 'coref' as kwarg now
    cof = ',dcoref' if coref else ''
    if operations is False:
        operations = 'tokenize,ssplit,pos,lemma,parse,ner' + cof

    if isinstance(operations, list):
        operations = ','.join([i.lower() for i in operations])

    with open(filelist, 'r') as fo:
        dat = fo.read()
    num_files_to_parse = len([l for l in dat.splitlines() if l])

    # get corenlp version number
    reg = re.compile(r'stanford-corenlp-([0-9]\.[0-9]\.[0-9])-javadoc\.jar')
    fver = next(re.search(reg, s).group(1) for s in os.listdir('.') if re.search(reg, s))
    if fver == '3.6.0':
        extra_jar = 'slf4j-api.jar:slf4j-simple.jar:'
    else:
        extra_jar = ''

    out_form = 'xml' if kwargs.get('output_format') == 'xml' else 'json'
    out_ext = 'xml' if kwargs.get('output_format') == 'xml' else 'conll'

    arglist = ['java', '-cp', 
               'stanford-corenlp-%s.jar:stanford-corenlp-%s-models.jar:xom.jar:joda-time.jar:%sjollyday.jar:ejml-0.23.jar' % (fver, fver, extra_jar), 
               '-Xmx%sm' % str(memory_mb),
               'edu.stanford.nlp.pipeline.StanfordCoreNLP', 
               '-annotators',
               operations, 
               '-filelist', filelist,
               '-noClobber',
               '-outputExtension', '.%s' % out_ext,
               '-outputFormat', out_form,
               '-outputDirectory', new_corpus_path]
    if copula_head:
        arglist.append('--parse.flags')
        arglist.append(' -makeCopulaHead')
    try:
        proc = subprocess.Popen(arglist, stdout=sys.stdout)
    # maybe a problem with stdout. sacrifice it if need be
    except:
        proc = subprocess.Popen(arglist)            
    #p = TextProgressBar(num_files_to_parse)
    while proc.poll() is None:
        sys.stdout = stdout
        thetime = strftime("%H:%M:%S", localtime())
        if not fileparse:
            num_parsed = len([f for f in os.listdir(new_corpus_path) if f.endswith(out_ext)])  
            if num_parsed == 0:
                if root:
                    print('%s: Initialising parser ... ' % (thetime))
            if num_parsed > 0 and (num_parsed + 1) <= num_files_to_parse:
                if root:
                    print('%s: Parsing file %d/%d ... ' % \
                         (thetime, num_parsed + 1, num_files_to_parse))
                if kwargs.get('note'):
                    kwargs['note'].progvar.set((num_parsed) * 100.0 / num_files_to_parse)
                #p.animate(num_parsed - 1, str(num_parsed) + '/' + str(num_files_to_parse))
            time.sleep(1)
            if root:
                root.update()
    
    #p.animate(num_files_to_parse)
    if kwargs.get('note'):
        kwargs['note'].progvar.set(100)
    sys.stdout = stdout
    thetime = strftime("%H:%M:%S", localtime())
    print('%s: Parsing finished. Moving parsed files into place ...' % thetime)
    os.chdir(curdir)
    return new_corpus_path
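
Two stdout tricks appear above: the parser's console output is streamed live by handing sys.stdout to subprocess.Popen, with a fallback for when sys.stdout has been replaced by something without a usable file descriptor (as in the GUI), and the stream passed in as the stdout argument is reassigned before progress messages are printed. A minimal sketch of the first trick, using a placeholder command:

import subprocess
import sys

cmd = ['echo', 'parsing...']          # placeholder command
try:
    # Inherit the current stdout so the child's output appears live.
    proc = subprocess.Popen(cmd, stdout=sys.stdout)
except Exception:
    # sys.stdout may lack a real fileno() (e.g. replaced by a GUI widget).
    proc = subprocess.Popen(cmd)
proc.wait()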

Example 32

Project: word_cloud
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
    """ Generate the rst file for a given example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%s.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                               'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    if plot_gallery:
        # generate the plot as png image if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if (not os.path.exists(first_image_file) or
                os.stat(first_image_file).st_mtime <=
                                    os.stat(src_file).st_mtime):
            # We need to execute the code
            print 'plotting %s' % fname
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt, '__file__': src_file}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                # get variables so we can later add links to the documentation
                example_code_obj = {}
                for var_name, var in my_globals.iteritems():
                    if not hasattr(var, '__module__'):
                        continue
                    if not isinstance(var.__module__, basestring):
                        continue
                    if var.__module__.split('.')[0] not in DOCMODULES:
                        continue

                    # get the type as a string with other things stripped
                    tstr = str(type(var))
                    tstr = (tstr[tstr.find('\'')
                            + 1:tstr.rfind('\'')].split('.')[-1])
                    # get shortened module name
                    module_short = get_short_module_name(var.__module__,
                                                         tstr)
                    cobj = {'name': tstr, 'module': var.__module__,
                            'module_short': module_short,
                            'obj_type': 'object'}
                    example_code_obj[var_name] = cobj

                # find functions so we can later add links to the documentation
                funregex = re.compile('[\w.]+\(')
                with open(src_file, 'rt') as fid:
                    for line in fid.readlines():
                        if line.startswith('#'):
                            continue
                        for match in funregex.findall(line):
                            fun_name = match[:-1]
                            try:
                                exec('this_fun = %s' % fun_name, my_globals)
                            except Exception:
                                #print 'extracting function failed'
                                #print err
                                continue
                            this_fun = my_globals['this_fun']
                            if not callable(this_fun):
                                continue
                            if not hasattr(this_fun, '__module__'):
                                continue
                            if not isinstance(this_fun.__module__, basestring):
                                continue
                            if (this_fun.__module__.split('.')[0]
                                    not in DOCMODULES):
                                continue

                            # get shortened module name
                            fun_name_short = fun_name.split('.')[-1]
                            module_short = get_short_module_name(
                                this_fun.__module__, fun_name_short)
                            cobj = {'name': fun_name_short,
                                    'module': this_fun.__module__,
                                    'module_short': module_short,
                                    'obj_type': 'function'}
                            example_code_obj[fun_name] = cobj

                if len(example_code_obj) > 0:
                    # save the dictionary, so we can later add hyperlinks
                    codeobj_fname = example_file[:-3] + '_codeobj.pickle'
                    with open(codeobj_fname, 'wb') as fid:
                        cPickle.dump(example_code_obj, fid,
                                     cPickle.HIGHEST_PROTOCOL)

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                                            my_globals['__doc__'],
                                            '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                for fig_num in (fig_mngr.num for fig_mngr in
                        matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_num)
                    plt.savefig(image_path % fig_num)
                    figure_list.append(image_fname % fig_num)
            except:
                print 80 * '_'
                print '%s is not compiling:' % fname
                traceback.print_exc()
                print 80 * '_'
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print " - time elapsed : %.2g sec" % time_elapsed
        else:
            figure_list = [f[len(image_dir):]
                            for f in glob.glob(image_path % '[1-9]')]
                            #for f in glob.glob(image_path % '*')]

        # generate thumb file
        this_template = plot_rst_template
        if os.path.exists(first_image_file):
            make_thumbnail(first_image_file, thumb_file, 200, 140)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()
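
The Tee object above duplicates everything written to sys.stdout into a StringIO buffer, so the executed example's output is both shown live and captured for the generated rst file. A minimal Python 3 sketch of such a tee (the gallery script defines its own Tee; this illustrative one assumes a two-stream constructor):

import sys
from io import StringIO

class Tee(object):
    """Write-through proxy that copies everything to a second stream."""
    def __init__(self, stream, buf):
        self.stream = stream
        self.buf = buf
    def write(self, text):
        self.stream.write(text)
        self.buf.write(text)
    def flush(self):
        self.stream.flush()

buf = StringIO()
orig_stdout = sys.stdout
sys.stdout = Tee(orig_stdout, buf)
try:
    print("hello")             # shown live *and* captured
finally:
    sys.stdout = orig_stdout
captured = buf.getvalue()      # "hello\n"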

Example 33

Project: deeppy
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%03d.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.endswith('.py'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if not os.path.exists(stdout_path) or \
           os.stat(stdout_path).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr

                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            first_img = image_fname % 1
            make_thumbnail(first_image_file, thumb_file, 400, 280)

#    if not os.path.exists(thumb_file):
#        # create something to replace the thumbnail
#        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    f.write(this_template % locals())
    f.flush()

    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
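
On Python 3 the manual save/assign/restore dance above can be replaced by contextlib.redirect_stdout, which swaps sys.stdout for the duration of a with block and guarantees restoration even on error:

import contextlib
import io
import sys

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    print("captured, not shown live")   # goes into buf
sys.stdout.write(buf.getvalue())        # replay the captured text afterwards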

Example 35

Project: calibre
Source File: __init__.py
View license
def debug(ioreg_to_tmp=False, buf=None, plugins=None,
        disabled_plugins=None):
    '''
    If plugins is None, then this method calls startup and shutdown on the
    device plugins. So if you are using it in a context where startup could
    already have been called (for example in the main GUI), pass in the list of
    device plugins as the plugins parameter.
    '''
    import textwrap
    from calibre.customize.ui import device_plugins, disabled_device_plugins
    from calibre.debug import print_basic_debug_info
    from calibre.devices.scanner import DeviceScanner
    from calibre.constants import iswindows, isosx
    from calibre import prints
    oldo, olde = sys.stdout, sys.stderr

    if buf is None:
        buf = StringIO()
    sys.stdout = sys.stderr = buf
    out = partial(prints, file=buf)

    devplugins = device_plugins() if plugins is None else plugins
    devplugins = list(sorted(devplugins, cmp=lambda
            x,y:cmp(x.__class__.__name__, y.__class__.__name__)))
    if plugins is None:
        for d in devplugins:
            try:
                d.startup()
            except:
                out('Startup failed for device plugin: %s'%d)

    if disabled_plugins is None:
        disabled_plugins = list(disabled_device_plugins())

    try:
        print_basic_debug_info(out=buf)
        s = DeviceScanner()
        s.scan()
        devices = s.devices
        if not iswindows:
            devices = [list(x) for x in devices]
            for d in devices:
                for i in range(3):
                    d[i] = hex(d[i])
        out('USB devices on system:')
        out(pprint.pformat(devices))

        ioreg = None
        if isosx:
            from calibre.devices.usbms.device import Device
            mount = '\n'.join(repr(x) for x in Device.osx_run_mount().splitlines())
            drives = pprint.pformat(Device.osx_get_usb_drives())
            ioreg = 'Output from mount:\n'+mount+'\n\n'
            ioreg += 'Output from osx_get_usb_drives:\n'+drives+'\n\n'
            ioreg += Device.run_ioreg()
        connected_devices = []
        if disabled_plugins:
            out('\nDisabled plugins:', textwrap.fill(' '.join([x.__class__.__name__ for x in
                disabled_plugins])))
            out(' ')
        else:
            out('\nNo disabled plugins')
        found_dev = False
        for dev in devplugins:
            if not dev.MANAGES_DEVICE_PRESENCE:
                continue
            out('Looking for devices of type:', dev.__class__.__name__)
            if dev.debug_managed_device_detection(s.devices, buf):
                found_dev = True
                break
            out(' ')

        if not found_dev:
            out('Looking for devices...')
            for dev in devplugins:
                if dev.MANAGES_DEVICE_PRESENCE:
                    continue
                connected, det = s.is_device_connected(dev, debug=True)
                if connected:
                    out('\t\tDetected possible device', dev.__class__.__name__)
                    connected_devices.append((dev, det))

            out(' ')
            errors = {}
            success = False
            out('Devices possibly connected:', end=' ')
            for dev, det in connected_devices:
                out(dev.name, end=', ')
            if not connected_devices:
                out('None', end='')
            out(' ')
            for dev, det in connected_devices:
                out('Trying to open', dev.name, '...', end=' ')
                dev.do_device_debug = True
                try:
                    dev.reset(detected_device=det)
                    dev.open(det, None)
                    out('OK')
                except:
                    import traceback
                    errors[dev] = traceback.format_exc()
                    out('failed')
                    continue
                dev.do_device_debug = False
                success = True
                if hasattr(dev, '_main_prefix'):
                    out('Main memory:', repr(dev._main_prefix))
                out('Total space:', dev.total_space())
                break
            if not success and errors:
                out('Opening of the following devices failed')
                for dev,msg in errors.items():
                    out(dev)
                    out(msg)
                    out(' ')

            if ioreg is not None:
                ioreg = 'IOREG Output\n'+ioreg
                out(' ')
                if ioreg_to_tmp:
                    lopen('/tmp/ioreg.txt', 'wb').write(ioreg)
                    out("Don't forget to send the contents of /tmp/ioreg.txt")
                    out('You can open it with the command: open /tmp/ioreg.txt')
                else:
                    out(ioreg)

        if hasattr(buf, 'getvalue'):
            return buf.getvalue().decode('utf-8', 'replace')
    finally:
        sys.stdout = oldo
        sys.stderr = olde
        if plugins is None:
            for d in devplugins:
                try:
                    d.shutdown()
                except:
                    pass
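
debug() above merges stdout and stderr into a single buffer so that everything the device plugins print lands in one report, and the finally block reinstates the real streams even if scanning raises. A minimal sketch of that shape as a reusable helper (collect_debug_output is hypothetical):

import sys
from io import StringIO

def collect_debug_output(work):
    """Run work() with stdout+stderr merged into one buffer; return the text."""
    old_out, old_err = sys.stdout, sys.stderr
    buf = StringIO()
    sys.stdout = sys.stderr = buf
    try:
        work()
    finally:
        sys.stdout, sys.stderr = old_out, old_err
    return buf.getvalue()

# usage: report = collect_debug_output(lambda: print("scanning devices..."))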

Example 38

Project: pyCAF
Source File: analyze_server.py
View license
    def analyze_redhat_server(self):
#        centos_packages = src.analyzer.CentOS.AnalyzeCentosPackages(self.server, self.config, self.lock)
        
#        a_packages = None
#        
#        a_packages = asrh.AnalyzeRedHatPackages(self.server, self.config, self.lock)
#        
#        start_time = time.time()
#        a_packages.start()
#        a_packages.join() 
#        stop_time = time.time()
#        
#        # Print reports if set in the configuration file
#        if self.config.print_reports:
#            a_packages.print_results()
#        
#        # Save results in a file at the logs path if set in the configuration file
#        if self.config.server_results_save_in_file or self.config.save_reports_in_file:
#            file_result_name = self.server.name + "_server_report.txt"
#            file_result = open(self.config.logs_path + file_result_name,"wb")
#            
#            orig_stdout = sys.stdout
#            sys.stdout = file_result
#    
#            print self.server
#            
#            if a_packages is not None:
#                a_packages.print_results(True)
#            
#            sys.stdout = orig_stdout
#            file_result.close()
#        
#        # Merge different logs files in a single file
#        tools.merge_logs(self.config)
#        
#        print "Ellapsed time = " + str(stop_time - start_time)
        
        a_packages = None
        a_process = None
        a_files = None
        a_ssh = None
        a_cron = None     
        
        if "AnalyzeRedHatPackages" in self.config.server_centos_scenarii:
            a_packages = asrh.AnalyzeRedHatPackages(self.server, self.config, self.lock)
        if "AnalyzeProcesses" in self.config.server_centos_scenarii:
            a_process = asl.AnalyzeProcesses(self.server, self.config)
        if "AnalyzeFiles" in self.config.server_centos_scenarii:
            a_files = asl.AnalyzeFiles(self.server, self.config)
        if "AnalyzeSSH" in self.config.server_centos_scenarii:
            a_ssh = asl.AnalyzeSSH(self.server, self.config)
        if "AnalyzeCron" in self.config.server_centos_scenarii:
            a_cron = asl.AnalyzeCron(self.server, self.config)
            
        a_list = [a_packages, a_process, a_files, a_ssh, a_cron]
        
        start_time = time.time()
        for scenario in a_list:
            if scenario is not None:
                scenario.start()
        
        for scenario in a_list:
            if scenario is not None:
                scenario.join()
        
        stop_time = time.time()
        
        # Print reports if set in the configuration file
        if self.config.print_reports:
            for scenario in a_list:
                if scenario is not None:
                    scenario.print_results()
        
        # Save results in a file at the logs path if set in the configuration file
        if self.config.server_results_save_in_file or self.config.save_reports_in_file:
            file_result_name = self.server.name + "_server_report.txt"
            file_result = open(self.config.logs_path + file_result_name,"wb")
            
            orig_stdout = sys.stdout
            sys.stdout = file_result
    
            print self.server
            
            if a_ssh is not None:
                a_ssh.print_results()
            if a_packages is not None:
                a_packages.print_results(True)  
            if a_files is not None:
                a_files.print_results(True)
            if a_process is not None:
                a_process.print_results()
            if a_cron is not None:
                a_cron.print_results()
            
            sys.stdout = orig_stdout
            file_result.close()
        
        # Merge different logs files in a single file
        tools.merge_logs(self.config)
        
        print "Ellapsed time = " + str(stop_time - start_time)

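The report-writing block above saves sys.stdout, points it at an open report file, prints, then restores the stream by hand. On Python 3, contextlib.redirect_stdout performs the save/restore automatically, even if one of the print_results() calls raises. A minimal sketch, reusing the scenario list from the example (the function name write_report is illustrative, not part of pyCAF):

from contextlib import redirect_stdout

def write_report(path, scenarios):
    # Everything printed inside the with-block goes to the report file;
    # sys.stdout is restored on exit, normal or exceptional.
    with open(path, "w") as report, redirect_stdout(report):
        for scenario in scenarios:
            if scenario is not None:
                scenario.print_results()
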
Example 39

Project: multiscanner
Source File: multiscanner.py
View license
def multiscan(Files, recursive=False, configregen=False, configfile=CONFIG, config=None, module_list=None):
    """
    The meat and potatoes. Returns the list of module results

    Files - A list of files and dirs to be scanned
    recursive - If true it will search the dirs in Files recursively
    configregen - If True a new config file will be created overwriting the old
    configfile - What config file to use. Can be None.
    config - A dictionary containing the configuration options to be used.
    """
    # Redirect stdout to stderr
    stdout = sys.stdout
    sys.stdout = sys.stderr
    # TODO: Make sure the cleanup from this works if something breaks

    # Init some vars
    # If recursive is None we don't parse the file list and take it as is.
    if recursive is not None:
        filelist = parseFileList(Files, recursive=recursive)
    else:
        filelist = Files
    # A list of files in the module dir
    if module_list is None:
        module_list = parseDir(MODULEDIR, recursive=True)
    # A dictionary used for the copyfileto parameter
    filedic = {}
    # What will be the config file object
    config_object = None

    # Read in config
    if configfile:
        config_object = configparser.SafeConfigParser()
        config_object.optionxform = str
        # Regen the config if needed or wanted
        if configregen or not os.path.isfile(configfile):
            _rewite_config(module_list, config_object, filepath=configfile)

        config_object.read(configfile)
        main_config = _get_main_config(config_object, filepath=configfile)
        if config:
            file_conf = parse_config(config_object)
            for key in config:
                if key not in file_conf:
                    file_conf[key] = config[key]
                    file_conf[key]['_load_default'] = True
                else:
                    file_conf[key].update(config[key])
            config = file_conf
        else:
            config = parse_config(config_object)
    else:
        if config is None:
            config = {}
        else:
            config['_load_default'] = True
        if 'main' in config:
            main_config = config['main']
        else:
            main_config = DEFAULTCONF

    # If none of the files existed
    if not filelist:
        sys.stdout = stdout
        raise ValueError("No valid files")

    # Copy files to a share if configured
    if "copyfilesto" not in main_config:
        main_config["copyfilesto"] = False
    if main_config["copyfilesto"]:
        if os.path.isdir(main_config["copyfilesto"]):
            filelist = _copy_to_share(filelist, filedic, main_config["copyfilesto"])
        else:
            sys.stdout = stdout
            raise IOError('The copyfilesto dir "' + main_config["copyfilesto"] + '" is not a valid dir')

    # Create the global module interface
    global_module_interface = _GlobalModuleInterface()

    # Start a thread for each module
    thread_list = _start_module_threads(filelist, module_list, config, global_module_interface)

    # Write the default configure settings for missing ones
    if config_object:
        _write_missing_module_configs(module_list, config_object, filepath=configfile)

    # Warn about spaces in file names
    for f in filelist:
        if ' ' in f:
            print('WARNING: You are using file paths with spaces. This may result in modules not reporting correctly.')
            break

    # Wait for all threads to finish
    thread_wait_list = thread_list[:]
    i = 0
    while thread_wait_list:
        i += 1
        for thread in thread_wait_list:
            if not thread.is_alive():
                i = 0
                thread_wait_list.remove(thread)
                if VERBOSE:
                    print(thread.name, "took", thread.endtime-thread.starttime)
        if i == 15:
            i = 0
            if VERBOSE:
                p = 'Waiting on'
                for thread in thread_wait_list:
                    p += ' ' + thread.name
                p += '...'
                print(p)
        time.sleep(1)

    # Delete copied files
    if main_config["copyfilesto"]:
        for item in filelist:
            os.remove(item)

    # Get Result list
    results = []
    for thread in thread_list:
        if thread.ret is not None:
            results.append(thread.ret)
        del thread

    # Translates file names back to the originals
    if filedic:
        # I have no idea if this is the best way to do in-place modifications
        for i in range(0, len(results)):
            (result, metadata) = results[i]
            modded = False
            for j in range(0, len(result)):
                (filename, hit) = result[j]
                base = basename(filename)
                if base in filedic:
                    filename = filedic[base]
                    modded = True
                    result[j] = (filename, hit)
            if modded:
                results[i] = (result, metadata)

    # Scan subfiles if needed
    subscan_list = global_module_interface._get_subscan_list()
    if subscan_list:
        # Translate from_filename back to original if needed
        if filedic:
            for i in range(0, len(subscan_list)):
                file_path, from_filename, module_name = subscan_list[i]
                base = basename(from_filename)
                if base in filedic:
                    from_filename = filedic[base]
                    subscan_list[i] = (file_path, from_filename, module_name)

        results.extend(_subscan(subscan_list, config, main_config, module_list, global_module_interface))

    global_module_interface._cleanup()

    # Return stdout to previous state
    sys.stdout = stdout
    return results

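multiscan() silences itself by pointing sys.stdout at sys.stderr, and then has to remember to restore the stream at every early return and raise; the TODO near the top flags exactly that risk. A try/finally wrapper centralizes the restore into a single place. A minimal sketch (quiet_stdout is a hypothetical helper, not part of multiscanner):

import sys

def quiet_stdout(func, *args, **kwargs):
    # Divert print output to stderr for the duration of the call,
    # restoring the original stream on every exit path.
    saved = sys.stdout
    sys.stdout = sys.stderr
    try:
        return func(*args, **kwargs)
    finally:
        sys.stdout = saved
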
Example 40

View license
def _exec_command( command, use_shell=None, use_tee = None, **env ):
    log.debug('_exec_command(...)')

    if use_shell is None:
        use_shell = os.name=='posix'
    if use_tee is None:
        use_tee = os.name=='posix'
    using_command = 0
    if use_shell:
        # We use shell (unless use_shell==0) so that wildcards can be
        # used.
        sh = os.environ.get('SHELL', '/bin/sh')
        if is_sequence(command):
            argv = [sh, '-c', ' '.join(list(command))]
        else:
            argv = [sh, '-c', command]
    else:
        # On NT, DOS we avoid using command.com as its exit status is
        # not related to the exit status of a command.
        if is_sequence(command):
            argv = command[:]
        else:
            argv = shlex.split(command)

    if hasattr(os, 'spawnvpe'):
        spawn_command = os.spawnvpe
    else:
        spawn_command = os.spawnve
        argv[0] = find_executable(argv[0]) or argv[0]
        if not os.path.isfile(argv[0]):
            log.warn('Executable %s does not exist' % (argv[0]))
            if os.name in ['nt', 'dos']:
                # argv[0] might be internal command
                argv = [os.environ['COMSPEC'], '/C'] + argv
                using_command = 1

    _so_has_fileno = _supports_fileno(sys.stdout)
    _se_has_fileno = _supports_fileno(sys.stderr)
    so_flush = sys.stdout.flush
    se_flush = sys.stderr.flush
    if _so_has_fileno:
        so_fileno = sys.stdout.fileno()
        so_dup = os.dup(so_fileno)
    if _se_has_fileno:
        se_fileno = sys.stderr.fileno()
        se_dup = os.dup(se_fileno)

    outfile = temp_file_name()
    fout = open(outfile, 'w')
    if using_command:
        errfile = temp_file_name()
        ferr = open(errfile, 'w')

    log.debug('Running %s(%s,%r,%r,os.environ)' \
              % (spawn_command.__name__, os.P_WAIT, argv[0], argv))

    if sys.version_info[0] >= 3 and os.name == 'nt':
        # Pre-encode os.environ, discarding un-encodable entries,
        # to avoid it failing during encoding as part of spawn. Failure
        # is possible if the environment contains entries that are not
        # encoded using the system codepage as windows expects.
        #
        # This is not necessary on unix, where os.environ is encoded
        # using the surrogateescape error handler and decoded using
        # it as part of spawn.
        encoded_environ = {}
        for k, v in os.environ.items():
            try:
                encoded_environ[k.encode(sys.getfilesystemencoding())] = v.encode(
                    sys.getfilesystemencoding())
            except UnicodeEncodeError:
                log.debug("ignoring un-encodable env entry %s", k)
    else:
        encoded_environ = os.environ

    argv0 = argv[0]
    if not using_command:
        argv[0] = quote_arg(argv0)

    so_flush()
    se_flush()
    if _so_has_fileno:
        os.dup2(fout.fileno(), so_fileno)

    if _se_has_fileno:
        if using_command:
            #XXX: disabled for now as it does not work from cmd under win32.
            #     Tests fail on msys
            os.dup2(ferr.fileno(), se_fileno)
        else:
            os.dup2(fout.fileno(), se_fileno)
    try:
        status = spawn_command(os.P_WAIT, argv0, argv, encoded_environ)
    except Exception:
        errmess = str(get_exception())
        status = 999
        sys.stderr.write('%s: %s'%(errmess, argv[0]))

    so_flush()
    se_flush()
    if _so_has_fileno:
        os.dup2(so_dup, so_fileno)
        os.close(so_dup)
    if _se_has_fileno:
        os.dup2(se_dup, se_fileno)
        os.close(se_dup)

    fout.close()
    fout = open_latin1(outfile, 'r')
    text = fout.read()
    fout.close()
    os.remove(outfile)

    if using_command:
        ferr.close()
        ferr = open_latin1(errfile, 'r')
        errmess = ferr.read()
        ferr.close()
        os.remove(errfile)
        if errmess and not status:
            # Not sure how to handle the case where errmess
            # contains only warning messages and that should
            # not be treated as errors.
            #status = 998
            if text:
                text = text + '\n'
            #text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess)
            text = text + errmess
            print (errmess)
    if text[-1:]=='\n':
        text = text[:-1]
    if status is None:
        status = 0

    if use_tee:
        print (text)

    return status, text

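Unlike the previous examples, this one redirects at the file-descriptor level: os.dup() saves the real stdout descriptor, os.dup2() points fd 1 (and fd 2) at a temp file, and the spawned command's output is captured even though sys.stdout itself was never reassigned. A condensed sketch of that idiom (capture_fd_stdout is a hypothetical name):

import os
import sys
import tempfile

def capture_fd_stdout(func, *args, **kwargs):
    # Capture output written to the stdout file descriptor itself,
    # including output from child processes, which a plain
    # sys.stdout reassignment would miss.
    sys.stdout.flush()
    saved_fd = os.dup(1)                  # duplicate the real stdout fd
    with tempfile.TemporaryFile(mode="w+") as tmp:
        os.dup2(tmp.fileno(), 1)          # fd 1 now points at the file
        try:
            func(*args, **kwargs)
        finally:
            sys.stdout.flush()
            os.dup2(saved_fd, 1)          # put the real stdout back
            os.close(saved_fd)
        tmp.seek(0)
        return tmp.read()
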
Example 41

Project: socorro
Source File: TestToolsUI.py
View license
    def compare(self, f1, f2):
        """This is essentially cmp.py from the testtools directory."""
        # XXX Stop being lazy and using the remapping cout/cerr cheat
        # XXX at some point.
        cout, cerr = sys.stdout, sys.stderr
        sys.stdout = StringIO.StringIO()
        sys.stderr = StringIO.StringIO()

        def suck(f):
            fns = []
            fps = []
            hamdev = []
            spamdev = []
            hamdevall = spamdevall = (0.0, 0.0)

            get = f.readline
            while 1:
                line = get()
                if line.startswith('-> <stat> tested'):
                    print line,
                if line.find(' items; mean ') != -1:
                    # -> <stat> Ham distribution for this pair: 1000 items; mean 0.05; sample sdev 0.68
                    # and later "sample " went away
                    vals = line.split(';')
                    mean = float(vals[1].split()[-1])
                    sdev = float(vals[2].split()[-1])
                    val = (mean, sdev)
                    typ = vals[0].split()[2]
                    if line.find('for all runs') != -1:
                        if typ == 'Ham':
                            hamdevall = val
                        else:
                            spamdevall = val
                    elif line.find('all in this') != -1:
                        if typ == 'Ham':
                            hamdev.append(val)
                        else:
                            spamdev.append(val)
                    continue
                if line.startswith('-> '):
                    continue
                if line.startswith('total'):
                    break
                if len(line) == 0:
                    continue
                # A line with an f-p rate and an f-n rate.
                p, n = map(float, line.split())
                fps.append(p)
                fns.append(n)

            # "total unique false pos 0"
            # "total unique false neg 0"
            # "average fp % 0.0"
            # "average fn % 0.0"
            fptot = int(line.split()[-1])
            fntot = int(get().split()[-1])
            fpmean = float(get().split()[-1])
            fnmean = float(get().split()[-1])
            return (fps, fns, fptot, fntot, fpmean, fnmean,
                    hamdev, spamdev, hamdevall, spamdevall)

        def tag(p1, p2):
            if p1 == p2:
                t = "tied          "
            else:
                t = p1 < p2 and "lost " or "won  "
                if p1:
                    p = (p2 - p1) * 100.0 / p1
                    t += " %+7.2f%%" % p
                else:
                    t += " +(was 0)"
            return t

        def mtag(m1, m2):
            mean1, dev1 = m1
            mean2, dev2 = m2
            t = "%7.2f %7.2f " % (mean1, mean2)
            if mean1:
                mp = (mean2 - mean1) * 100.0 / mean1
                t += "%+7.2f%%" % mp
            else:
                t += "+(was 0)"
            t += "     %7.2f %7.2f " % (dev1, dev2)
            if dev1:
                dp = (dev2 - dev1) * 100.0 / dev1
                t += "%+7.2f%%" % dp
            else:
                t += "+(was 0)"
            return t

        def dump(p1s, p2s):
            alltags = ""
            for p1, p2 in zip(p1s, p2s):
                t = tag(p1, p2)
                print "    %5.3f  %5.3f  %s" % (p1, p2, t)
                alltags += t + " "
            print
            for t in "won", "tied", "lost":
                print "%-4s %2d times" % (t, alltags.count(t))
            print

        def dumpdev(meandev1, meandev2):
            for m1, m2 in zip(meandev1, meandev2):
                print mtag(m1, m2)

        (fp1, fn1, fptot1, fntot1, fpmean1, fnmean1,
         hamdev1, spamdev1, hamdevall1, spamdevall1) = suck(f1)

        (fp2, fn2, fptot2, fntot2, fpmean2, fnmean2,
         hamdev2, spamdev2, hamdevall2, spamdevall2) = suck(f2)

        print
        print "false positive percentages"
        dump(fp1, fp2)
        print "total unique fp went from", fptot1, "to", fptot2, tag(fptot1, fptot2)
        print "mean fp % went from", fpmean1, "to", fpmean2, tag(fpmean1, fpmean2)

        print
        print "false negative percentages"
        dump(fn1, fn2)
        print "total unique fn went from", fntot1, "to", fntot2, tag(fntot1, fntot2)
        print "mean fn % went from", fnmean1, "to", fnmean2, tag(fnmean1, fnmean2)

        print
        if len(hamdev1) == len(hamdev2) and len(spamdev1) == len(spamdev2):
            print "ham mean                     ham sdev"
            dumpdev(hamdev1, hamdev2)
            print
            print "ham mean and sdev for all runs"
            dumpdev([hamdevall1], [hamdevall2])


            print
            print "spam mean                    spam sdev"
            dumpdev(spamdev1, spamdev2)
            print
            print "spam mean and sdev for all runs"
            dumpdev([spamdevall1], [spamdevall2])

            print
            diff1 = spamdevall1[0] - hamdevall1[0]
            diff2 = spamdevall2[0] - hamdevall2[0]
            print "ham/spam mean difference: %2.2f %2.2f %+2.2f" % (diff1,
                                                                    diff2,
                                                                    diff2 - diff1)
        else:
            print "[info about ham & spam means & sdevs not available in both files]"

        sys.stdout.seek(0)
        sys.stderr.seek(0)
        out, err = sys.stdout, sys.stderr
        sys.stdout = cout
        sys.stderr = cerr
        return out, err

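Here both streams are swapped for StringIO buffers, and the buffers are handed back to the caller after a seek(0). The modern equivalent pairs io.StringIO with contextlib's redirect_stdout/redirect_stderr, which also guarantees the real streams come back if the wrapped code raises. A minimal sketch (it returns strings rather than the buffer objects):

import io
from contextlib import redirect_stdout, redirect_stderr

def capture_output(func, *args, **kwargs):
    # Collect everything func prints to stdout and stderr.
    out, err = io.StringIO(), io.StringIO()
    with redirect_stdout(out), redirect_stderr(err):
        func(*args, **kwargs)
    return out.getvalue(), err.getvalue()
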
Example 42

Project: mpi4py
Source File: run.py
View license
def main():
    """Entry-point for ``python -m mpi4py.run ...``."""
    # pylint: disable=missing-docstring
    import os
    import sys

    def version():
        from . import __version__
        print(__package__, __version__, file=sys.stdout)
        sys.exit(0)

    def usage(errmess=None):
        from textwrap import dedent
        if __name__ == '__main__':
            prog_name = __package__ + '.run'
        else:
            prog_name = __package__
        python_exe = os.path.basename(sys.executable)
        subs = dict(prog=prog_name, python=python_exe)

        cmdline = dedent("""
        usage: {python} -m {prog} [options] <pyfile> [arg] ...
           or: {python} -m {prog} [options] -m <mod> [arg] ...
           or: {python} -m {prog} [options] -c <cmd> [arg] ...
           or: {python} -m {prog} [options] - [arg] ...
        """).strip().format(**subs)

        helptip = dedent("""
        Try `{python} -m {prog} -h` for more information.
        """).strip().format(**subs)

        options = dedent("""
        options:
          --version            show version number and exit
          -h|--help            show this help message and exit
          -rc <key=value,...>  set 'mpi4py.rc.key=value'
          -p|--profile <pmpi>  use <pmpi> for profiling
          --mpe                profile with MPE
          --vt                 profile with VampirTrace
        """).strip()

        if errmess:
            print(errmess, file=sys.stderr)
            print(cmdline, file=sys.stderr)
            print(helptip, file=sys.stderr)
            sys.exit(1)
        else:
            print(cmdline, file=sys.stdout)
            print(options, file=sys.stdout)
            sys.exit(0)

    def parse_command_line(args=None):
        # pylint: disable=too-many-branches

        class Options(object):
            # pylint: disable=too-few-public-methods
            rc_args = {}
            profile = None

        def poparg(args):
            if len(args) < 2 or args[1].startswith('-'):
                usage('Argument expected for option: ' + args[0])
            return args.pop(1)

        options = Options()
        args = sys.argv[1:] if args is None else args[:]
        while args and args[0].startswith('-'):
            if args[0] in ('-m', '-c', '-'):
                break  # Stop processing options
            if args[0] in ('-h', '-help', '--help'):
                usage()  # Print help and exit
            if args[0] in ('-version', '--version'):
                version()  # Print version and exit
            try:
                arg0 = args[0]
                if arg0.startswith('--'):
                    if '=' in arg0:
                        i = arg0.index('=')
                        opt, arg = arg0[1:i], arg0[i+1:]
                        if opt in ('-rc', '-profile'):
                            arg0, args[1:1] = opt, [arg]
                    else:
                        arg0 = arg0[1:]
                if arg0 == '-rc':
                    for entry in poparg(args).split(','):
                        i = entry.index('=')
                        key = entry[:i].strip()
                        val = entry[i+1:].strip()
                        if not key or not val:
                            raise ValueError(entry)
                        try:
                            # pylint: disable=eval-used
                            options.rc_args[key] = eval(val, {})
                        except NameError:
                            options.rc_args[key] = val
                elif arg0 in ('-p', '-profile'):
                    options.profile = poparg(args) or None
                elif arg0 in ('-mpe', '-vt'):
                    options.profile = arg0[1:]
                else:
                    usage('Unknown option: ' + args[0])
                del args[0]
            except Exception:  # pylint: disable=broad-except
                # Bad option, print usage and exit with error
                usage('Cannot parse option: ' + args[0])
        # Check remaining args and return to caller
        if len(args) < 1:
            usage("No path specified for execution")
        elif args[0] in ('-m', '-c') and len(args) < 2:
            usage("Argument expected for option: " + args[0])
        return options, args

    def bootstrap(options):
        if options.rc_args:  # Set mpi4py.rc parameters
            from . import rc
            rc(**options.rc_args)
        if options.profile:  # Load profiling library
            from . import profile
            profile(options.profile)

    # Parse and process command line options
    options, args = parse_command_line()
    bootstrap(options)

    # Run user code. In case of an unhandled exception, abort
    # execution of the MPI program by calling 'MPI_Abort()'.
    try:
        run_command_line(args)
    except SystemExit as exc:
        set_abort_status(exc.code)
        raise
    except:
        set_abort_status(1)
        raise

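The pattern in this example is routing rather than redirection: error paths print to sys.stderr and exit non-zero, while normal output (version, usage) goes to sys.stdout, all via print()'s file argument. A minimal sketch of that convention (report is a hypothetical helper):

import sys

def report(message, error=False):
    # Diagnostics go to stderr, regular output to stdout.
    stream = sys.stderr if error else sys.stdout
    print(message, file=stream)

report("usage: prog [options]")                # -> stdout
report("Unknown option: --bogus", error=True)  # -> stderr
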
Example 43

Project: msmbuilder-legacy
Source File: gen_rst.py
View license
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
    """ Generate the rst file for a given example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%s.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                               'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if (not os.path.exists(first_image_file) or
                os.stat(first_image_file).st_mtime <=
                                    os.stat(src_file).st_mtime):
            # We need to execute the code
            print 'plotting %s' % fname
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                # get variables so we can later add links to the documentation
                example_code_obj = {}
                for var_name, var in my_globals.iteritems():
                    if not hasattr(var, '__module__'):
                        continue
                    if not isinstance(var.__module__, basestring):
                        continue
                    if var.__module__.split('.')[0] not in DOCMODULES:
                        continue

                    # get the type as a string with other things stripped
                    tstr = str(type(var))
                    tstr = (tstr[tstr.find('\'')
                            + 1:tstr.rfind('\'')].split('.')[-1])
                    # get shortened module name
                    module_short = get_short_module_name(var.__module__,
                                                         tstr)
                    cobj = {'name': tstr, 'module': var.__module__,
                            'module_short': module_short,
                            'obj_type': 'object'}
                    example_code_obj[var_name] = cobj

                # find functions so we can later add links to the documentation
                funregex = re.compile(r'[\w.]+\(')
                with open(src_file, 'rt') as fid:
                    for line in fid.readlines():
                        if line.startswith('#'):
                            continue
                        for match in funregex.findall(line):
                            fun_name = match[:-1]
                            try:
                                exec('this_fun = %s' % fun_name, my_globals)
                            except Exception as err:
                                print 'extracting function failed'
                                print err
                                continue
                            this_fun = my_globals['this_fun']
                            if not callable(this_fun):
                                continue
                            if not hasattr(this_fun, '__module__'):
                                continue
                            if not isinstance(this_fun.__module__, basestring):
                                continue
                            if (this_fun.__module__.split('.')[0]
                                    not in DOCMODULES):
                                continue

                            # get shortened module name
                            fun_name_short = fun_name.split('.')[-1]
                            module_short = get_short_module_name(
                                this_fun.__module__, fun_name_short)
                            cobj = {'name': fun_name_short,
                                    'module': this_fun.__module__,
                                    'module_short': module_short,
                                    'obj_type': 'function'}
                            example_code_obj[fun_name] = cobj

                if len(example_code_obj) > 0:
                    # save the dictionary, so we can later add hyperlinks
                    codeobj_fname = example_file[:-3] + '_codeobj.pickle'
                    with open(codeobj_fname, 'wb') as fid:
                        cPickle.dump(example_code_obj, fid,
                                     cPickle.HIGHEST_PROTOCOL)

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                                            my_globals['__doc__'],
                                            '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions:
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                for fig_num in (fig_mngr.num for fig_mngr in
                        matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_num)
                    plt.savefig(image_path % fig_num)
                    figure_list.append(image_fname % fig_num)
            except:
                print 80 * '_'
                print '%s failed to execute:' % fname
                traceback.print_exc()
                print 80 * '_'
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print " - time elapsed : %.2g sec" % time_elapsed
        else:
            figure_list = [f[len(image_dir):]
                            for f in glob.glob(image_path % '[1-9]')]
                            #for f in glob.glob(image_path % '*')]

        # generate thumb file
        this_template = plot_rst_template
        if os.path.exists(first_image_file):
            make_thumbnail(first_image_file, thumb_file, 200, 140)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    with open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w') as f:
        f.write(this_template % locals())

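The core sys.stdout trick in this example is the Tee: stdout is temporarily replaced by an object that writes to both the real stream and a buffer, so the script's output is displayed and captured for the rst file, and the original stream is restored in a finally block even if the script fails. A minimal self-contained sketch of that pattern, written for Python 3 (the example above is Python 2), with an assumed Tee class like the one the project defines:

import sys
from io import StringIO

class Tee(object):
    """Duplicate writes to several streams (assumed stand-in)."""
    def __init__(self, *streams):
        self.streams = streams

    def write(self, data):
        for stream in self.streams:
            stream.write(data)

    def flush(self):
        for stream in self.streams:
            stream.flush()

orig_stdout = sys.stdout
buf = StringIO()
try:
    sys.stdout = Tee(orig_stdout, buf)
    print("hello")            # shown on the console *and* stored in buf
finally:
    sys.stdout = orig_stdout  # always restore, even if the code raises

captured = buf.getvalue()     # -> 'hello\n'
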
Example 50

View license
    def run(self):
        self.res_fname = "result.txt"
        s1_conn_str = self.build_connection_string(self.server1)
        s2_conn_str = self.build_connection_string(self.server2)
        from_conn = "--server={0}".format(s1_conn_str)
        to_conn = "--server={0}".format(s2_conn_str)
        cmp_options = {"no_checksum_table": False,
                       "run_all_tests": True,
                       "quiet": True}
        cmd = ("mysqldbimport.py {0} --import=definitions "
               "{1}").format(to_conn, self.export_import_file)

        case_num = 1
        comment = "Test case {0} - help".format(case_num)
        cmd_opts = " --help"
        cmd_str = "{0} {1}".format(cmd, cmd_opts)
        res = self.run_test_case(0, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Remove version information
        self.remove_result_and_lines_after("MySQL Utilities mysqldbimport.py "
                                           "version", 6)

        # Now test the skips

        # Note: data and blobs must be done separately
        _SKIPS = ("grants", "events", "triggers", "views", "procedures",
                  "functions", "tables", "create_db")
        _FORMATS = ("CSV", "SQL")

        case_num += 1
        for frmt in _FORMATS:
            # Create an import file
            export_cmd = ("mysqldbexport.py {0} util_test --export=BOTH "
                          "--skip-gtid --format={1} --display=BRIEF > "
                          "{2}").format(from_conn, frmt,
                                        self.export_import_file)
            comment = "Generating import file"
            res = self.run_test_case(0, export_cmd, comment)
            if not res:
                raise MUTLibError("{0}: failed".format(comment))

            cmd_opts = "{0} --format={1} --skip=".format(cmd, frmt)
            for skip in _SKIPS:
                if case_num != 2 and case_num != 2 + len(_SKIPS):
                    cmd_opts = "{0},".format(cmd_opts)
                cmd_opts = "{0}{1}".format(cmd_opts, skip)
                comment = "Test case {0} - no {1}".format(case_num, skip)
                self.do_skip_test(cmd_opts, comment)
                case_num += 1

        # Now test --skip=data, --skip-blobs
        # Create an import file with blobs
        try:
            self.server1.exec_query("ALTER TABLE util_test.t3 "
                                    "ADD COLUMN me_blob BLOB")
            self.server1.exec_query("UPDATE util_test.t3 SET "
                                    "me_blob = 'This, is a BLOB!'")
        except UtilDBError as err:
            raise MUTLibError("Failed to add blob column: "
                              "{0}".format(err.errmsg))

        export_cmd = ("mysqldbexport.py {0} util_test --export=BOTH "
                      "--skip-gtid --format={1} --display=BRIEF > "
                      "{2} ").format(from_conn, "CSV", self.export_import_file)
        comment = "Generating import file"
        res = self.run_test_case(0, export_cmd, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # No skips for reference (must skip events for deterministic reasons)
        cmd_str = ("mysqldbimport.py {0} {1} --import=both --dryrun "
                   "--format=CSV --bulk-insert "
                   "--skip=events").format(to_conn, self.export_import_file)
        comment = "Test case {0} - no {1}".format(case_num, "events")
        res = self.run_test_case(0, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        case_num += 1
        cmd_str = ("mysqldbimport.py {0} {1} --import=both --dryrun "
                   "--format=CSV --bulk-insert "
                   "--skip=events,data").format(to_conn,
                                                self.export_import_file)
        comment = "Test case {0} - no {1}".format(case_num, "data")
        res = self.run_test_case(0, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        case_num += 1
        cmd_str = ("mysqldbimport.py {0} {1} --import=both --dryrun "
                   "--format=CSV --skip-blobs --bulk-insert "
                   "--skip=events").format(to_conn, self.export_import_file)
        comment = "Test case {0} - no {1}".format(case_num, "blobs")
        res = self.run_test_case(0, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Do a quiet import
        case_num += 1
        cmd_str = ("mysqldbimport.py {0} {1} --import=both --quiet "
                   "--format=CSV "
                   "--bulk-insert").format(to_conn, self.export_import_file)
        comment = "Test case {0} - no {1}".format(case_num, "messages (quiet)")
        res = self.run_test_case(0, cmd_str, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))

        # Import using multiprocessing.
        case_num += 1
        comment = "Test case {0} - multiprocessing.".format(case_num)
        import_opts = "--multiprocess=2"
        self.drop_db(self.server2, 'util_test')  # drop db before import.
        self.run_import_test(0, from_conn, to_conn, ['util_test'], "SQL",
                             "BOTH", comment, "", import_opts)

        # Import using autocommit.
        case_num += 1
        comment = "Test case {0} - autocommit.".format(case_num)
        import_opts = "--autocommit"
        self.drop_db(self.server2, 'util_test')  # drop db before import.
        self.run_import_test(0, from_conn, to_conn, ['util_test'], "SQL",
                             "BOTH", comment, "", import_opts)

        # Import multiple files at once
        # Test multiple formats and displays
        _FORMATS = ("SQL", "CSV", "TAB", "GRID", "VERTICAL")
        _DISPLAYS = ("BRIEF", "FULL")
        case_num += 1
        res = True
        database_list = ['util_test_fk', 'util_test_fk2', 'util_test_fk3']

        # Drop existing databases on both servers.
        self.server1.disable_foreign_key_checks(True)
        self.server2.disable_foreign_key_checks(True)
        self.drop_all()
        for db in database_list:
            self.drop_db(self.server1, db)
            self.drop_db(self.server2, db)

        # Load databases from fkeys.sql into both server1 and server2, since
        # they do not have unsupported csv features such as auto-increment.
        data_file = os.path.normpath("./std_data/fkeys.sql")
        try:
            self.server1.read_and_exec_SQL(data_file, self.debug)
            self.server2.read_and_exec_SQL(data_file, self.debug)
        except UtilError as err:
            raise MUTLibError("Failed to read commands from file "
                              "{0}: {1}".format(data_file, err.errmsg))
        self.server1.disable_foreign_key_checks(False)
        self.server2.disable_foreign_key_checks(False)
        for frmt in _FORMATS:
            for display in _DISPLAYS:
                comment = ("Test Case {0} : Testing multiple import with {1} "
                           "format and {2} display".format(case_num, frmt,
                                                           display))
                # We test DEFINITIONS and DATA separately in other tests
                self.run_import_test(
                    0, from_conn, to_conn, database_list,
                    frmt, "BOTH", comment, " --display={0}".format(display)
                )
                old_stdout = sys.stdout
                try:
                    # Redirect stdout to prevent database_compare prints
                    # from reaching the MUT output.
                    if not self.verbose:
                        sys.stdout = open(os.devnull, 'w')
                    # Test correctness of data
                    for database in database_list:
                        res = database_compare(s1_conn_str, s2_conn_str,
                                               database, database, cmp_options)
                        if not res:
                            break
                finally:
                    # restore stdout
                    if not self.verbose:
                        sys.stdout.close()
                    sys.stdout = old_stdout

                # Drop dbs from server2 to import them again from the
                # export file
                self.server2.disable_foreign_key_checks(True)
                for db in database_list:
                    self.drop_db(self.server2, db)
                self.server2.disable_foreign_key_checks(False)
                if not res:
                    raise MUTLibError("{0} failed".format(comment))

                case_num += 1

        # Mask multiprocessing warning.
        self.remove_result("# WARNING: Number of processes ")

        # Mask version
        self.replace_result(
            "MySQL Utilities mysqldbimport version",
            "MySQL Utilities mysqldbimport version X.Y.Z\n")

        return res
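
The stdout handling worth noting here: to keep database_compare quiet, the test points sys.stdout at os.devnull, then closes that handle and restores the saved stream in a finally block. A minimal sketch of the same idea, plus the equivalent contextlib.redirect_stdout form available on Python 3.4+; noisy() is a hypothetical chatty helper:

import os
import sys
from contextlib import redirect_stdout

def noisy():
    print("progress spam...")

# Manual form, as in the example: swap, work, always restore.
old_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
try:
    noisy()                   # output is discarded
finally:
    sys.stdout.close()        # close the devnull handle
    sys.stdout = old_stdout   # restore the real stream

# Context-manager form that performs the same restoration automatically.
with open(os.devnull, 'w') as devnull, redirect_stdout(devnull):
    noisy()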