os.path.dirname

Here are examples of the Python API os.path.dirname taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
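
For orientation, a minimal sketch of what os.path.dirname returns (standard-library behavior only, no third-party assumptions):

import os.path

# dirname() returns everything before the last path separator.
print(os.path.dirname('/etc/ssh/sshd_config'))  # '/etc/ssh'
print(os.path.dirname('/etc/ssh/'))             # '/etc/ssh' (last component is empty)
print(os.path.dirname('sshd_config'))           # '' (no separator at all)

# A very common idiom: the directory containing the current module.
here = os.path.dirname(os.path.abspath(__file__))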

200 Examples

Example 1

Project: paramnormal
Source File: plot_directive.py
def run(arguments, content, options, state_machine, state, lineno):
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    keep_context = 'context' in options
    context_opt = None if not keep_context else options['context']

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke on foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # the built-in makedirs is sufficient here

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code,
                                 source_file_name,
                                 build_dir,
                                 output_base,
                                 keep_context,
                                 function_name,
                                 config,
                                 context_reset=context_opt == 'reset',
                                 close_figs=context_opt == 'close-figs')
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and not nofigs,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
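
The dirname calls above follow one pattern: rst_dir = os.path.dirname(rst_file) gives the directory of the current reST document, and os.path.dirname(setup.app.doctreedir) gives the parent of Sphinx's doctree directory, so the build directory can be placed next to it; links are then computed relative to rst_dir. A minimal standalone sketch of that pattern (the paths are hypothetical placeholders):

import os
from os.path import relpath

rst_file = '/docs/source/gallery/intro.rst'   # hypothetical reST document
doctreedir = '/docs/build/doctrees'           # hypothetical Sphinx doctree dir

rst_dir = os.path.dirname(rst_file)           # '/docs/source/gallery'

# Place generated output in a sibling of the doctree directory ...
build_dir = os.path.join(os.path.dirname(doctreedir), 'plot_directive')

# ... and link to it relative to the document, with forward slashes for URLs.
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
print(build_dir_link)                         # '../../build/plot_directive'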

Example 2

Project: tp-libvirt
Source File: virsh_vol_create.py
def run(test, params, env):
    """
    Test virsh vol-create command to cover the following matrix:
    pool_type = [dir, fs, netfs]
    volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed,
                     vmdk, vpc]

    pool_type = [disk]
    volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm,
                     linux-raid, extended]

    pool_type = [logical]
    volume_format = [none]

    pool_type = [iscsi, scsi]
    Not supported with format type

    TODO:
    pool_type = [rbd, glusterfs]

    Reference: http://www.libvirt.org/storage.html
    """

    src_pool_type = params.get("src_pool_type")
    src_pool_target = params.get("src_pool_target")
    src_pool_format = params.get("src_pool_format", "")
    pool_vol_num = int(params.get("src_pool_vol_num", '1'))
    src_emulated_image = params.get("src_emulated_image")
    extra_option = params.get("extra_option", "")
    prefix_vol_name = params.get("vol_name", "vol_create_test")
    vol_format = params.get("vol_format", "raw")
    vol_capacity = params.get("vol_capacity", 1048576)
    vol_allocation = params.get("vol_allocation", 1048576)
    image_size = params.get("emulate_image_size", "1G")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    status_error = "yes" == params.get("status_error", "no")
    by_xml = "yes" == params.get("create_vol_by_xml", "yes")
    incomplete_target = "yes" == params.get("incomplete_target", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if "--prealloc-metadata" in extra_option:
            raise error.TestNAError("metadata preallocation not supported in"
                                    " current libvirt version.")
        if incomplete_target:
            raise error.TestNAError("It does not support generate target "
                                    "path in thi libvirt version.")

    pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi']
    if src_pool_type not in pool_type:
        raise error.TestNAError("pool type %s not in supported type list: %s" %
                                (src_pool_type, pool_type))

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = 'testacl'

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be in use by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Build the volume XML attribute dictionary: extract all params starting
    # with 'vol_', which set the volume XML ('lazy_refcounts' is handled
    # separately below).
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    def post_process_vol(ori_vol_path):
        """
        Create or deactivate a volume without libvirt

        :param ori_vol_path: Full path of an original volume
        :return: Volume name for checking
        """
        process_vol_name = params.get("process_vol_name", "process_vol")
        process_vol_options = params.get("process_vol_options", "")
        process_vol_capacity = params.get("process_vol_capacity", vol_capacity)
        process_vol_cmd = ""
        unsupport_err = ("Unsupported operation '%s %s' in this test"
                         % (process_vol_by, process_vol_type))
        if process_vol_by == "lvcreate":
            process_vol_cmd = "lvcreate -L %s " % process_vol_capacity
            if process_vol_type == "thin":
                if not process_vol_options:
                    process_vol_options = "-T "
                process_vol_cmd += "%s " % process_vol_options
                processthin_pool_name = params.get("processthin_pool_name", "thinpool")
                processthin_vol_name = params.get("processthin_vol_name", "thinvol")
                process_vol_capacity = params.get("process_vol_capacity", "1G")
                process_vol_cmd += "%s/%s " % (os.path.dirname(ori_vol_path),
                                               processthin_pool_name)
                process_vol_cmd += "-V %s " % process_vol_capacity
                process_vol_cmd += "-n %s " % processthin_vol_name
                process_vol_name = processthin_vol_name
            elif process_vol_type == "snapshot":
                if not process_vol_options:
                    process_vol_options = "-s "
                process_vol_cmd += "%s " % process_vol_options
                process_vol_cmd += "-n %s " % process_vol_name
                process_vol_cmd += "%s " % (ori_vol_path)
            else:
                logging.error(unsupport_err)
                return
        elif process_vol_by == "qemu-img" and process_vol_type == "create":
            process_vol_cmd = "qemu-img create "
            process_vol_path = os.path.dirname(ori_vol_path) + "/"
            process_vol_path += process_vol_name
            process_vol_cmd += "%s " % process_vol_options
            process_vol_cmd += "%s " % process_vol_path
            process_vol_cmd += "%s " % process_vol_capacity
        elif process_vol_by == "lvchange" and process_vol_type == "deactivate":
            process_vol_cmd = "lvchange %s " % ori_vol_path
            if not process_vol_options:
                process_vol_options = "-an"
            process_vol_cmd += process_vol_options
        else:
            logging.error(unsupport_err)
            return
        rst = process.run(process_vol_cmd, ignore_status=True, shell=True)
        if rst.exit_status:
            if "Snapshots of snapshots are not supported" in rst.stderr:
                logging.debug("%s is already a snapshot volume", ori_vol_path)
                process_vol_name = os.path.basename(ori_vol_path)
            else:
                logging.error(rst.stderr)
                return
        return process_vol_name

    def check_vol(pool_name, vol_name, expect_exist=True):
        """
        Check volume vol_name in pool pool_name
        """
        src_volumes = src_pv.list_volumes().keys()
        logging.debug("Current volumes in %s: %s", pool_name, src_volumes)
        if expect_exist:
            if vol_name not in src_volumes:
                raise error.TestFail("Can't find volume %s in pool %s"
                                     % (vol_name, pool_name))
            # check format in volume xml
            post_xml = volxml.new_from_vol_dumpxml(vol_name, pool_name)
            logging.debug("Volume %s XML: %s" % (vol_name,
                                                 post_xml.xmltreefile))
            if 'format' in post_xml.keys() and vol_format is not None:
                if post_xml.format != vol_format:
                    raise error.TestFail("Volume format %s is not expected"
                                         % vol_format + " as defined.")
        else:
            if vol_name in src_volumes:
                raise error.TestFail("Find volume %s in pool %s, but expect not"
                                     % (vol_name, pool_name))

    fmt_err0 = "Unknown file format '%s'" % vol_format
    fmt_err1 = "Formatting or formatting option not "
    fmt_err1 += "supported for file format '%s'" % vol_format
    fmt_err2 = "Driver '%s' does not support " % vol_format
    fmt_err2 += "image creation"
    fmt_err_list = [fmt_err0, fmt_err1, fmt_err2]
    skip_msg = "Volume format '%s' is not supported by qemu-img" % vol_format
    vol_path_list = []
    try:
        # Create the src pool
        src_pool_name = "virt-%s-pool" % src_pool_type
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(src_pool_name, src_pool_type, src_pool_target,
                     src_emulated_image, image_size=image_size,
                     source_format=src_pool_format)

        src_pv = libvirt_storage.PoolVolume(src_pool_name)
        # Print current pools for debugging
        logging.debug("Current pools:%s",
                      libvirt_storage.StoragePool().list_pools())

        # Create volumes by virsh in a loop
        while pool_vol_num > 0:
            # Set volume xml file
            vol_name = prefix_vol_name + "_%s" % pool_vol_num
            pool_vol_num -= 1
            if by_xml:
                # According to BZ#1138523, we need to inspect the right name
                # (disk partition) for the new volume
                if src_pool_type == "disk":
                    vol_name = utlv.new_disk_vol_name(src_pool_name)
                    if vol_name is None:
                        raise error.TestError("Fail to generate volume name")
                vol_arg['name'] = vol_name
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']
                if params.get('setup_libvirt_polkit') == 'yes':
                    process.run("chmod 666 %s" % vol_xml, ignore_status=True,
                                shell=True)

                # Run virsh_vol_create to create vol
                logging.debug("Create volume from XML: %s" % newvol.xmltreefile)
                cmd_result = virsh.vol_create(
                    src_pool_name, vol_xml, extra_option,
                    unprivileged_user=unprivileged_user, uri=uri,
                    ignore_status=True, debug=True)
            else:
                # Run virsh_vol_create_as to create_vol
                cmd_result = virsh.vol_create_as(
                    vol_name, src_pool_name, vol_capacity, vol_allocation,
                    vol_format, unprivileged_user=unprivileged_user, uri=uri,
                    ignore_status=True, debug=True)
            # Check result
            try:
                utlv.check_exit_status(cmd_result, status_error)
                check_vol(src_pool_name, vol_name, not status_error)
                if not status_error:
                    vol_path = virsh.vol_path(vol_name,
                                              src_pool_name).stdout.strip()
                    logging.debug("Full path of %s: %s", vol_name, vol_path)
                    vol_path_list.append(vol_path)
            except error.TestFail as e:
                stderr = cmd_result.stderr
                if any(err in stderr for err in fmt_err_list):
                    raise error.TestNAError(skip_msg)
                else:
                    raise e
        # Post process vol by other programs
        process_vol_by = params.get("process_vol_by")
        process_vol_type = params.get("process_vol_type", "")
        expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes")
        if process_vol_by and vol_path_list:
            process_vol = post_process_vol(vol_path_list[0])
            if process_vol is not None:
                try:
                    virsh.pool_refresh(src_pool_name, ignore_status=False)
                    check_vol(src_pool_name, process_vol, expect_vol_exist)
                except (process.CmdError, error.TestFail) as e:
                    if process_vol_type == "thin":
                        logging.error(e)
                        raise error.TestNAError("You may encounter bug BZ#1060287")
                    else:
                        raise e
            else:
                raise error.TestFail("Post process volume failed")
    finally:
        # Cleanup
        # For old lvm2 versions (2.02.106 or earlier), deactivating a volume
        # group (destroying a libvirt logical pool) fails if it has a
        # deactivated lv snapshot, so activate it manually before destroying
        # the pool.
        if src_pool_type == 'logical' and vol_path_list:
            vg_name = vol_path_list[0].split('/')[2]
            process.run("lvchange -ay %s" % vg_name, shell=True)
        try:
            pvt.cleanup_pool(src_pool_name, src_pool_type, src_pool_target,
                             src_emulated_image)
        except error.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
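
Here os.path.dirname() is used inside post_process_vol() to derive sibling paths: the thin pool and the qemu-img target are created in the same directory as the original volume. The idiom in isolation (the volume path is a hypothetical placeholder):

import os.path

ori_vol_path = '/dev/vg_test/vol_create_test_1'  # hypothetical original volume

# Same directory, different final component.
pool_path = os.path.join(os.path.dirname(ori_vol_path), 'thinpool')
print(pool_path)  # '/dev/vg_test/thinpool'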

Example 3

Project: fb2mobi
Source File: config.py
    def _load(self):
        config = etree.parse(self.config_file)
        for e in config.getroot():
            if e.tag == 'debug':
                self.debug = e.text.lower() == 'true'

            elif e.tag == 'logFile':
                if e.text:
                    self.original_log_file = e.text
                    self.log_file = os.path.abspath(os.path.join(
                        os.path.abspath(os.path.dirname(self.config_file)),
                        e.text))

            elif e.tag == 'logLevel':
                self.log_level = e.text

            elif e.tag == 'consoleLevel':
                self.console_level = e.text

            elif e.tag == 'outputFormat':
                self.output_format = e.text

            elif e.tag == 'kindleCompressionLevel':
                self.kindle_compression_level = int(e.text)

            elif e.tag == 'noDropcapsSymbols':
                self.no_dropcaps_symbols = e.text

            elif e.tag == 'transliterate':
                self.transliterate = e.text.lower() == 'true'

            elif e.tag == 'screenWidth':
                self.screen_width = int(e.text)

            elif e.tag == 'screenHeight':
                self.screen_height = int(e.text)

            elif e.tag == 'defaultProfile':
                self.default_profile = e.text

            elif e.tag == 'noMOBIoptimization':
                self.noMOBIoptimization = e.text.lower() == 'true'

            elif e.tag == 'sendToKindle':
                for s in e:
                    if s.tag == 'send':
                        self.send_to_kindle['send'] = s.text.lower() == 'true'

                    elif s.tag == 'deleteSendedBook':
                        self.send_to_kindle['deleteSendedBook'] = s.text.lower() == 'true'

                    elif s.tag == 'smtpServer':
                        self.send_to_kindle['smtpServer'] = s.text

                    elif s.tag == 'smtpPort':
                        self.send_to_kindle['smtpPort'] = int(s.text)

                    elif s.tag == 'smtpLogin':
                        self.send_to_kindle['smtpLogin'] = s.text

                    elif s.tag == 'smtpPassword':
                        self.send_to_kindle['smtpPassword'] = s.text

                    elif s.tag == 'fromUserEmail':
                        self.send_to_kindle['fromUserEmail'] = s.text

                    elif s.tag == 'toKindleEmail':
                        self.send_to_kindle['toKindleEmail'] = s.text

            elif e.tag == 'profiles':
                self.profiles = {}
                for prof in e:
                    prof_name = prof.attrib['name']
                    self.profiles[prof_name] = {}
                    self.profiles[prof_name]['name'] = prof.attrib['name']
                    self.profiles[prof_name]['description'] = prof.attrib['description']
                    self.profiles[prof_name]['vignettes'] = {}

                    self.profiles[prof_name]['generateTOCPage'] = True
                    self.profiles[prof_name]['generateAnnotationPage'] = True
                    self.profiles[prof_name]['generateOPFGuide'] = True
                    self.profiles[prof_name]['flatTOC'] = True
                    self.profiles[prof_name]['kindleRemovePersonalLabel'] = True
                    self.profiles[prof_name]['removePngTransparency'] = False

                    for p in prof:
                        if p.tag == 'hyphens':
                            self.profiles[prof_name]['hyphens'] = p.text.lower() == 'true'

                        elif p.tag == 'hyphensReplaceNBSP':
                            self.profiles[prof_name]['hyphensReplaceNBSP'] = p.text.lower() == 'true'

                        elif p.tag == 'dropcaps':
                            self.profiles[prof_name]['dropcaps'] = p.text

                        elif p.tag == 'outputFormat':
                            self.profiles[prof_name]['outputFormat'] = p.text

                        elif p.tag == 'transliterate':
                            self.profiles[prof_name]['transliterate'] = p.text.lower() == 'true'

                        elif p.tag == 'screenWidth':
                            self.profiles[prof_name]['screenWidth'] = int(p.text)

                        elif p.tag == 'screenHeight':
                            self.profiles[prof_name]['screenHeight'] = int(p.text)

                        elif p.tag == 'transliterateAuthorAndTitle':
                            self.profiles[prof_name]['transliterateAuthorAndTitle'] = p.text.lower() == 'true'

                        elif p.tag == 'tocMaxLevel':
                            self.profiles[prof_name]['tocMaxLevel'] = int(p.text)

                        elif p.tag == 'generateTOCPage':
                            self.profiles[prof_name]['generateTOCPage'] = p.text.lower() == 'true'

                        elif p.tag == 'flatTOC':
                            self.profiles[prof_name]['flatTOC'] = p.text.lower() == 'true'

                        elif p.tag == 'kindleRemovePersonalLabel':
                            self.profiles[prof_name]['kindleRemovePersonalLabel'] = p.text.lower() == 'true'

                        elif p.tag == 'removePngTransparency':
                            self.profiles[prof_name]['removePngTransparency'] = p.text.lower() == 'true'

                        elif p.tag == 'generateAnnotationPage':
                            self.profiles[prof_name]['generateAnnotationPage'] = p.text.lower() == 'true'

                        elif p.tag == 'generateOPFGuide':
                            self.profiles[prof_name]['generateOPFGuide'] = p.text.lower() == 'true'

                        elif p.tag == 'tocBeforeBody':
                            self.profiles[prof_name]['tocBeforeBody'] = p.text.lower() == 'true'

                        elif p.tag == 'css':
                            self.profiles[prof_name]['originalcss'] = p.text
                            self.profiles[prof_name]['css'] = os.path.abspath(os.path.join(
                                os.path.abspath(os.path.dirname(self.config_file)), p.text))
                            if 'parse' in p.attrib:
                                self.profiles[prof_name]['parse_css'] = p.attrib['parse'].lower() == 'true'
                            else:
                                self.profiles[prof_name]['parse_css'] = True

                        elif p.tag == 'xslt':
                            self.profiles[prof_name]['xslt'] = os.path.abspath(os.path.join(
                                os.path.abspath(os.path.dirname(self.config_file)), p.text))

                        elif p.tag == 'chapterOnNewPage':
                            self.profiles[prof_name]['chapterOnNewPage'] = p.text.lower() == 'true'

                        elif p.tag == 'authorFormat':
                            self.profiles[prof_name]['authorFormat'] = p.text

                        elif p.tag == 'bookTitleFormat':
                            self.profiles[prof_name]['bookTitleFormat'] = p.text

                        elif p.tag == 'annotationTitle':
                            self.profiles[prof_name]['annotationTitle'] = p.text

                        elif p.tag == 'tocTitle':
                            self.profiles[prof_name]['tocTitle'] = p.text

                        elif p.tag == 'notesMode':
                            self.profiles[prof_name]['notesMode'] = p.text

                        elif p.tag == 'notesBodies':
                            self.profiles[prof_name]['notesBodies'] = p.text

                        elif p.tag == 'vignettes':
                            self.profiles[prof_name]['vignettes'] = {}
                            self.profiles[prof_name]['vignettes_save'] = {}

                            for vignettes in p:
                                vignettes_level = vignettes.attrib['level']
                                vign_arr = {}
                                vign_arr_save = {}

                                for v in vignettes:
                                    if v.text.lower() == 'none':
                                        vign_arr[v.tag] = None
                                        vign_arr_save[v.tag] = None
                                    else:
                                        vign_arr[v.tag] = os.path.abspath(os.path.join(
                                            os.path.abspath(os.path.dirname(self.config_file)),
                                            v.text))
                                        vign_arr_save[v.tag] = v.text

                                self.profiles[prof_name]['vignettes'][vignettes_level] = vign_arr
                                self.profiles[prof_name]['vignettes_save'][vignettes_level] = vign_arr_save
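
The call repeated throughout this loader resolves every path from the XML config relative to the directory of the config file itself, so relative entries keep working regardless of the process's working directory (the extra inner abspath() is harmless when config_file is already absolute). A standalone sketch of the idiom, with hypothetical paths:

import os.path

config_file = '/home/user/.fb2mobi/fb2mobi.config'  # hypothetical
rel_entry = 'profiles/default.css'                  # as it would appear in the XML

css_path = os.path.abspath(
    os.path.join(os.path.dirname(config_file), rel_entry))
print(css_path)  # '/home/user/.fb2mobi/profiles/default.css'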

Example 4

Project: scipy
Source File: test_solvers.py
def test_solve_continuous_are():
    mat6 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                'data', 'carex_6_data.npz'))
    mat15 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'data', 'carex_15_data.npz'))
    mat18 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'data', 'carex_18_data.npz'))
    mat19 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'data', 'carex_19_data.npz'))
    mat20 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                 'data', 'carex_20_data.npz'))
    cases = [
        # Carex examples taken from (with default parameters):
        # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
        #     Examples for the Numerical Solution of Algebraic Riccati
        #     Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
        #
        # The format of the data is (a, b, q, r, knownfailure), where
        # knownfailure is None if the test passes or a string
        # indicating the reason for failure.
        #
        # Test Case 0: carex #1
        (np.diag([1.], 1),
         np.array([[0], [1]]),
         block_diag(1., 2.),
         1,
         None),
        # Test Case 1: carex #2
        (np.array([[4, 3], [-4.5, -3.5]]),
         np.array([[1], [-1]]),
         np.array([[9, 6], [6, 4.]]),
         1,
         None),
        # Test Case 2: carex #3
        (np.array([[0, 1, 0, 0],
                   [0, -1.89, 0.39, -5.53],
                   [0, -0.034, -2.98, 2.43],
                   [0.034, -0.0011, -0.99, -0.21]]),
         np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
         np.array([[2.313, 2.727, 0.688, 0.023],
                   [2.727, 4.271, 1.148, 0.323],
                   [0.688, 1.148, 0.313, 0.102],
                   [0.023, 0.323, 0.102, 0.083]]),
         np.eye(2),
         None),
        # Test Case 3: carex #4
        (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
                   [0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
                   [0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
                   [0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
                   [0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
                   [0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
                   [0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
                   [0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
         np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
                   [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]
                  ).T * 0.001,
         np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
                   [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                   [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
                   [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
         np.eye(2),
         None),
        # Test Case 4: carex #5
        (np.array(
          [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
           [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
           [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
           [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
           [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
           [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
           [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
           [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
           [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
         np.array([[0.010, -0.011, -0.151],
                   [0.003, -0.021, 0.000],
                   [0.009, -0.059, 0.000],
                   [0.024, -0.162, 0.000],
                   [0.068, -0.445, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000]]),
         np.eye(9),
         np.eye(3),
         None),
        # Test Case 5: carex #6
        (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
        # Test Case 6: carex #7
        (np.array([[1, 0], [0, -2.]]),
         np.array([[1e-6], [0]]),
         np.ones((2, 2)),
         1.,
         'Bad residual accuracy'),
        # Test Case 7: carex #8
        (block_diag(-0.1, -0.02),
         np.array([[0.100, 0.000], [0.001, 0.010]]),
         np.array([[100, 1000], [1000, 10000]]),
         np.ones((2, 2)) + block_diag(1e-6, 0),
         None),
        # Test Case 8: carex #9
        (np.array([[0, 1e6], [0, 0]]),
         np.array([[0], [1.]]),
         np.eye(2),
         1.,
         None),
        # Test Case 9: carex #10
        (np.array([[1.0000001, 1], [1., 1.0000001]]),
         np.eye(2),
         np.eye(2),
         np.eye(2),
         None),
        # Test Case 10: carex #11
        (np.array([[3, 1.], [4, 2]]),
         np.array([[1], [1]]),
         np.array([[-11, -5], [-5, -2.]]),
         1.,
         None),
        # Test Case 11: carex #12
        (np.array([[7000000., 2000000., -0.],
                   [2000000., 6000000., -2000000.],
                   [0., -2000000., 5000000.]]) / 3,
         np.eye(3),
         np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
                np.diag([1e-6, 1, 1e6])).dot(
            np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
         np.eye(3) * 1e6,
         'Bad Residual Accuracy'),
        # Test Case 12: carex #13
        (np.array([[0, 0.4, 0, 0],
                   [0, 0, 0.345, 0],
                   [0, -0.524e6, -0.465e6, 0.262e6],
                   [0, 0, 0, -1e6]]),
         np.array([[0, 0, 0, 1e6]]).T,
         np.diag([1, 0, 1, 0]),
         1.,
         None),
        # Test Case 13: carex #14
        (np.array([[-1e-6, 1, 0, 0],
                   [-1, -1e-6, 0, 0],
                   [0, 0, 1e-6, 1],
                   [0, 0, -1, 1e-6]]),
         np.ones((4, 1)),
         np.ones((4, 4)),
         1.,
         None),
        # Test Case 14: carex #15
        (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
        # Test Case 15: carex #16
        (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90(
                 block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
         np.eye(64),
         np.eye(64),
         np.eye(64),
         None),
        # Test Case 16: carex #17
        (np.diag(np.ones((20, )), 1),
         np.flipud(np.eye(21, 1)),
         np.eye(21, 1) * np.eye(21, 1).T,
         1,
         'Bad Residual Accuracy'),
        # Test Case 17: carex #18
        (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
        # Test Case 18: carex #19
        (mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
         'Bad Residual Accuracy'),
        # Test Case 19: carex #20
        (mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
         'Bad Residual Accuracy')
        ]
    # Makes the minimum precision requirements customized to the test.
    # Here numbers represent the number of decimals that agrees with zero
    # matrix when the solution x is plugged in to the equation.
    #
    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
    #
    # If the test is failing use "None" for that entry.
    #
    min_decimal = (14, 12, 14, 14, 11, 7, None, 5, 7, 14, 14,
                   None, 10, 14, 13, 14, None, 12, None, None)

    def _test_factory(case, dec):
        """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
        a, b, q, r, knownfailure = case
        if knownfailure:
            raise KnownFailureTest(knownfailure)

        x = solve_continuous_are(a, b, q, r)
        res = x.dot(a) + a.conj().T.dot(x) + q
        out_fact = x.dot(b)
        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)

    for ind, case in enumerate(cases):
        yield _test_factory, case, min_decimal[ind]
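
The expression repeated in the np.load() calls locates data files shipped next to the test module, independent of the current working directory. A small helper capturing it (the helper name is ours, not scipy's):

import os.path

def data_path(name):
    # Directory containing this module, then its 'data' subdirectory.
    return os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'data', name)

# e.g. mat6 = np.load(data_path('carex_6_data.npz'))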

Example 5

Project: cartopy
Source File: plot_directive.py
def run(arguments, content, options, state_machine, state, lineno):
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    keep_context = 'context' in options
    context_opt = None if not keep_context else options['context']

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke on foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # the built-in makedirs is sufficient here

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    try:
        build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    except ValueError:
        # on Windows, relpath raises ValueError when path and start are on
        # different mounts/drives
        build_dir_link = build_dir
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code,
                                 source_file_name,
                                 build_dir,
                                 output_base,
                                 keep_context,
                                 function_name,
                                 config,
                                 context_reset=context_opt == 'reset',
                                 close_figs=context_opt == 'close-figs')
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and not nofigs,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
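
This variant differs from Example 1 in one detail: the relpath() call for build_dir_link is guarded, because on Windows os.path.relpath() raises ValueError when the path and the start directory are on different drives, in which case no relative form exists and the absolute path is used instead. A hedged helper capturing that fallback (the function name is ours, not the project's):

import os.path

def safe_relpath(path, start):
    """relpath(), falling back to the absolute path across Windows drives."""
    try:
        return os.path.relpath(path, start)
    except ValueError:
        # Different mounts/drives on Windows: keep the absolute path.
        return path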

Example 6

Project: sfs-python
Source File: plot_directive.py
def run(arguments, content, options, state_machine, state, lineno):
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    keep_context = 'context' in options
    context_opt = None if not keep_context else options['context']

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke on foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # the built-in makedirs is sufficient here

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code,
                                 source_file_name,
                                 build_dir,
                                 output_base,
                                 keep_context,
                                 function_name,
                                 config,
                                 context_reset=context_opt == 'reset',
                                 close_figs=context_opt == 'close-figs')
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and not nofigs,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors

Example 7

Project: trackpy
Source File: plot_directive.py
def run(arguments, content, options, state_machine, state, lineno):
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config

    options.setdefault('include-source', config.plot_include_source)

    # determine input
    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if arguments:
        if not config.plot_basedir:
            source_file_name = os.path.join(rst_dir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))
        with open(source_file_name, 'r') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke on foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = makefig(code, source_file_name, build_dir, output_base,
                          config)
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s: %s" % (output_base, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        opts = [':%s: %s' % (key, val) for key, val in options.items()
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        if sphinx.__version__ >= "0.6":
            only_html = ".. only:: html"
            only_latex = ".. only:: latex"
        else:
            only_html = ".. htmlonly::"
            only_latex = ".. latexonly::"

        if j == 0:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                shutil.copyfile(fn, os.path.join(dest_dir,
                                                 os.path.basename(fn)))

    # copy script (if necessary)
    if source_file_name == rst_file:
        target_name = os.path.join(dest_dir, output_base + source_ext)
        with open(target_name, 'w') as f:
            f.write(unescape_doctest(code))

    return errors
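
The interesting path arithmetic here is how the directive mirrors each RST file's directory fragment under a temporary build tree. A minimal sketch of that computation, using hypothetical stand-ins for setup.confdir, the RST source, and setup.app.doctreedir:

import os
from os.path import relpath

# Hypothetical stand-ins for setup.confdir, the RST file, and
# setup.app.doctreedir used by the directive above.
confdir = '/project/doc'
source_file_name = '/project/doc/guide/intro.rst'
doctreedir = '/project/doc/_build/doctrees'

# directory fragment of the source relative to the config dir ('guide')
source_rel_dir = os.path.dirname(relpath(source_file_name, confdir))

# mirror that fragment under a sibling of the doctree directory
build_dir = os.path.normpath(os.path.join(
    os.path.dirname(doctreedir), 'plot_directive', source_rel_dir))

print(build_dir)  # /project/doc/_build/plot_directive/guide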

Example 8

Project: permute
Source File: plot_directive.py
View license
def run(arguments, content, options, state_machine, state, lineno):
    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    formats = get_plot_formats(config)
    default_fmt = formats[0][0]

    options.setdefault('include-source', config.plot_include_source)
    keep_context = 'context' in options
    context_opt = None if not keep_context else options['context']

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    try:
        build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    except ValueError:
        # on Windows, relpath raises ValueError when path and start are on
        # different mounts/drives
        build_dir_link = build_dir
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code,
                                 source_file_name,
                                 build_dir,
                                 output_base,
                                 keep_context,
                                 function_name,
                                 config,
                                 context_reset=context_opt == 'reset',
                                 close_figs=context_opt == 'close-figs')
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            default_fmt=default_fmt,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and len(images),
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
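
This variant also guards the relpath() call: on Windows, os.path.relpath() raises ValueError when the path and the start directory sit on different drives. The same guard as a standalone sketch (safe_relative_link is a made-up name):

import os

def safe_relative_link(path, start):
    # relpath() cannot cross Windows drive boundaries; fall back to the
    # absolute path when it refuses, as the directive above does.
    try:
        return os.path.relpath(path, start).replace(os.path.sep, '/')
    except ValueError:
        return path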

Example 9

Project: Subterfuge
Source File: views.py
View license
def conf(request, module):
      # Read in subterfuge.conf
   with open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnopqrstuvwxyz") + 'subterfuge.conf', 'r') as file:
      conf = file.readlines()

      # Subterfuge Settings Configuration
      # Edit subterfuge.conf
   if module == "settings":
         #Attack Setup
      try:
         setup.objects.update(iface = request.POST["iface"])
         conf[15] = request.POST["iface"] + "\n"
         print "Using Interface => " + request.POST["iface"]
      except:
         pass
      
      try:
         if request.POST["auto"] == "true":
            setup.objects.update(autoconf = "yes")
            conf[20] = "yes" + "\n"
            print "Auto Configure  => yes"
         else:
            setup.objects.update(autoconf = "no")
            conf[20] = "no" + "\n"
            print "Auto Configure  => no"
      except:
         pass
         
      try:
         setup.objects.update(gateway = request.POST["agw"])
         conf[17] = request.POST["agw"] + "\n"
         print "Using Gateway   => " + request.POST["agw"]
      except:
         pass

      try:
         setup.objects.update(proxymode = request.POST["proxymode"])
         print "Using Gateway   => " + request.POST["proxymode"]
      except:
         pass
         
      try:
         setup.objects.update(gateway = request.POST["mgw"])
         conf[17] = request.POST["mgw"] + "\n"
         print "Using Gateway   => " + request.POST["mgw"]
      except:
         pass

         #Get the Local IP Address
      try:
         f = os.popen("ifconfig " + request.POST["iface"] + " | grep \"inet addr\" | sed -e \'s/.*addr://;s/ .*//\'")
         temp2 = ''
         temp3 = ''
         temp = f.readline().rstrip('\n')
   
         ipaddress = re.findall(r'\d+\.\d+\.\d+\.\d+', temp)[0]
         conf[26] = ipaddress + "\n"
         setup.objects.update(ip = ipaddress)
      except:
         pass
         
         
         #Configuration
      try:
         setup.objects.update(ploadrate = request.POST["ploadrate"])
         setup.objects.update(injectrate = request.POST["injectrate"])
         if request.POST["smartarp"] == "true":
            setup.objects.update(smartarp = "yes")
         elif request.POST["smartarp"] == "false":
            setup.objects.update(smartarp = "no")
         setup.objects.update(arprate = request.POST["arprate"])
      except:
         pass
      
      
         #Vectors
      try:
         if request.POST["active"] == "true":
            vectors.objects.filter(name = request.POST["vector"]).update(active = "yes")
         else:
            vectors.objects.filter(name = request.POST["vector"]).update(active = "no")
            
            #Wireless AP Generator Settings 
         if request.POST["vector"] == "ARP Cache Poisoning":
            arppoison.objects.update(target = request.POST["target"])
            arppoison.objects.update(method = request.POST["arpmethod"])
         
            #Wireless AP Generator Settings
         if request.POST["vector"] == "Wireless AP Generator":
            apgen.objects.update(essid = request.POST["essid"])
            apgen.objects.update(channel = request.POST["channel"])
            apgen.objects.update(atknic = request.POST["atknic"])
            apgen.objects.update(netnic = request.POST["netnic"])
      except:
         pass
         
         
         #Advanced
      try:
         scanip = request.POST["scantargetip"]
         print "Importing Nmap scan for: " + scanip
         
            #Get/Write Files
         if request.FILES['scanresults']:
            scanresults = request.FILES['scanresults']
            dest = open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnopqrstuvwxyz") + 'utilities/scans/' + scanip + '.xml', 'wb+')
            for chunk in scanresults.chunks():
               dest.write(chunk)
            dest.close()
               #Execute Scan
            os.system('python ' + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnopqrstuvwxyz") + 'utilities/scan.py ' + scanip)
            
               #Relay Template Variables
         return render_to_response("settings.ext", {
            "config"    :   config,
            "conf"      :   str(config[20]).rstrip('\n'),
            "iface"	   :   result,
            "gateway"   :   gw,
            "status"    :   status,
            "setup"     :   currentsetup,
         })    
      except:
         pass
         

   if module == "update":
      os.system('apt-get install subterfuge')

   if module == "exportcreds":
      os.system('python ' + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnopqrstuvwxyz") + 'exportcreds.py')

      #################################
      #Subterfuge Module Configurations
      #################################
      
   if module == "httpinjection":   
      httpcodeinjection(request, conf)
      
   elif module == "tunnelblock":   
      tunnelblock()
      
   else:
      for mod in installed.objects.all():
         if module == mod.name:
           os.system('python ' + str(os.path.dirname(__file__)).rstrip("abcdefghijklmnopqrstuvwxyz") + 'modules/' + module + '/' + module + '.py &')
   
              
      #################################
      #  END MODULE CONFIGURATION
      #################################
   
      # Write to subterfuge.conf
   with open(str(os.path.dirname(__file__)).rstrip("abcdefghijklmnopqrstuvwxyz") + 'subterfuge.conf', 'w') as file:
      file.writelines(conf)
      
      
      # Call Index Page
      # Check Arpspoof status
   command = "ps -A 1 | sed -e '/arpmitm/!d;/sed -e/d;s/^ //;s/ pts.*//'"
   a = os.popen(command)
   reply = a.read()
   if(len(reply)>1):
      status = "on"
   else:
      status = "off"
   
   if module == "httpinjection" or module == "tunnelblock":
            #Relay Template Variables
        modules = installed.objects.all()
        return render_to_response("plugins.ext", {
            "modules"   :   modules,
        })
   else:
         #Relay Template Variables
      return render_to_response(request.META['HTTP_REFERER'].split('/')[3] + ".ext", {
         "status"    :   status,
      })
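
The repeated str(os.path.dirname(__file__)).rstrip("abcdefghijklmnopqrstuvwxyz") calls above strip trailing lowercase letters off the module's directory to reach its parent, which only works while the leaf directory name consists purely of lowercase letters. A sketch of the equivalent, more robust spelling, assuming the same layout with subterfuge.conf one level above the module:

import os

# directory containing this module (e.g. .../subterfuge/main)
module_dir = os.path.dirname(os.path.abspath(__file__))

# its parent, computed explicitly instead of stripping trailing letters
package_root = os.path.dirname(module_dir)

conf_path = os.path.join(package_root, 'subterfuge.conf')
with open(conf_path, 'r') as conf_file:
    conf = conf_file.readlines()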

Example 10

Project: icaro
Source File: icaro.py
View license
    def __init__(self, icaro_dir):

        # this is the list the values for the icaro buttons are taken from
        self.icaro_dir = icaro_dir
        arch = open(sys.path[0] + "/version", "r")
        version = arch.readline()

        creditos.Info.version = version
        self.carga_conf_ventana()
        # declare the main window
        # this is the toolbar that holds the buttons for loading data
        # and compiling
        # declare the table that holds the buttons for the block menu
        # box1 is the main container after the window
        self.window1 = gtk.Window()

        titulo = "icaro " + version.strip("\n\r")
        self.window1.set_title(titulo)
        toolbar = gtk.Toolbar()
        self.area = gtk.DrawingArea()
        scrolled_window = gtk.ScrolledWindow()
        scrolled_window2 = gtk.ScrolledWindow()
        scrolled_window3 = gtk.ScrolledWindow()
        table = gtk.VBox(False, len(self.lista))
        notebook = gtk.Notebook()
        self.notebook2 = gtk.Notebook()
        hp = gtk.HPaned()
        box2 = gtk.HBox(False, 3)
        box1 = gtk.VBox(False, 3)
        menu_bar = gtk.MenuBar()

        # pack everything
        # the idea of using an hpaned is to allow shrinking the screen;
        # on netbooks not everything fits
        self.window1.add(box1)
        box1.pack_start(menu_bar, False, True, 1)
        box1.pack_start(box2, True, True, 1)
        scrolled_window.add_with_viewport(self.area)
        scrolled_window3.add_with_viewport(toolbar)
        scrolled_window2.add_with_viewport(notebook)
        self.notebook2.append_page(scrolled_window, gtk.Label("bloques"))
        box2.pack_start(scrolled_window3, False, False, 1)
        box2.pack_start(hp, True, True, 1)
        hp.pack1(self.notebook2, True, True)
        hp.pack2(scrolled_window2, True, True)
        self.ver = visor.visor_codigo(self, self.notebook2)

        hp.set_position(500)
        self.window1.connect('delete-event', gtk.main_quit)
        self.window1.set_icon_from_file(
            sys.path[0] +
            "/imagenes/icaro.png"
        )
        self.area.set_app_paintable(True)
        self.area.set_size_request(800, 800)
        menu1 = [_("File"), _("Edit"), "herramientas"]
        menu_general = [
            (_("New"), _("Open"), _("Save"), _("Save as"),
             _("Save as function"), _("Examples"), _("Exit")),
            (_("Background"), _("Color"), _("About"), _("Config")),
            ("graficador", "clemente bulk", "clemente cdc"
             ,  _("Log"), "firmware",)
        ]
        menu_bar.show()
        # declare the buttons for the 'menu' and 'edicion' menus
        for a in range(len(menu_general)):
            menu = gtk.Menu()
        # buf is where all the menu buttons are loaded
            for i in menu_general[a]:
                menu_items = gtk.MenuItem(i)
                menu.append(menu_items)
                menu_items.connect("activate", self.menuitem_response, i)
                menu_items.show()
            root_menu = gtk.MenuItem(menu1[a])
            root_menu.show()
            root_menu.set_submenu(menu)
            menu_bar.append(root_menu)

        # toolbar.append_item
        toolbar.set_style(gtk.TOOLBAR_BOTH_HORIZ)
        toolbar.set_orientation(gtk.ORIENTATION_VERTICAL)
        toolbar.show()

        # create the toolbar buttons
        botones_toolbar = [
            [1, toolbar, gtk.STOCK_NEW, "New",
             self.tooltip["nuevo"], self.nuevo, None],
            [1, toolbar, gtk.STOCK_OPEN, "Open",
             self.tooltip["abrir"], self.abrir, None],
            [1, toolbar, gtk.STOCK_SAVE, "Save",
             self.tooltip["guardar"], self.guardar, 0],
            [1, toolbar, gtk.STOCK_QUIT, "Quit",
             self.tooltip["salir"], self.salir, None],
            [3],
            [2, toolbar, sys.path[0] + "/imagenes/icaro.png",
             "Compile", self.tooltip["compilar"], self.compilar, None],
            [2, toolbar, sys.path[0] + "/imagenes/compilar.png",
             "Load", self.tooltip["cargar"], self.upload, None],
            [3],
            [2, toolbar, sys.path[0] + "/imagenes/tortucaro.png",
             "Tortucaro", self.tooltip["tortucaro"], self.comp_esp,
             "tortucaro/tortucaro"],

            [2, toolbar, sys.path[0] + "/imagenes/pilas.png",
             "pilas", self.tooltip["pilas"], self.comp_esp,
             "pilas/pilas-engine"],
            [2, toolbar, sys.path[0] + "/imagenes/icaroblue.png",
             "icaroblue", self.tooltip["icaroblue"], self.comp_esp,
             "icaroblue/icaroblue"],
            [3],
            [1, toolbar, gtk.STOCK_HELP, "Help",
             self.tooltip["ayuda"], self.ayuda, None],
            [3],
            [1, toolbar, gtk.STOCK_ADD, "Pen",
             self.tooltip["lapiz"], self.dibujo, 1],
            [1, toolbar, gtk.STOCK_SELECT_COLOR, "Move",
             self.tooltip["mover"], self.dibujo, 2],
            [1, toolbar, gtk.STOCK_DELETE, "Erase",
             self.tooltip["borrar"], self.dibujo, 3],
            [1, toolbar, gtk.STOCK_EDIT,
             "Edit", "", self.dibujo, 4],
            [3],
            [1, toolbar, gtk.STOCK_ZOOM_IN, "agrandar",
             "", self.menuitem_response, "zoomas"],
            [1, toolbar, gtk.STOCK_ZOOM_OUT, "achicar",
             "", self.menuitem_response, "zoomenos"],
            [1, toolbar, gtk.STOCK_ZOOM_100, "zoom 1:1",
             "", self.menuitem_response, "zoomcero"],
        ]

        # create the toolbar buttons based on the botones_toolbar tuple
        for dat in botones_toolbar:
            if dat[0] == 3:
                toolbar.append_space()
            if dat[0] == 1 or dat[0] == 2:
                self.crear_toolbuttons(
                    dat[0], dat[1], dat[2], dat[3], dat[4], dat[5], dat[6])

        scrolled_window.set_size_request(300, 300)
        scrolled_window.set_policy(gtk.POLICY_ALWAYS, gtk.POLICY_ALWAYS)
        scrolled_window.show()
        scrolled_window2.set_border_width(1)
        scrolled_window2.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
        scrolled_window2.show()
        scrolled_window3.set_border_width(1)
        scrolled_window3.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
        scrolled_window3.show()
        notebook.set_tab_pos(gtk.POS_RIGHT)
        label = gtk.Label(self.diccionario[self.lista[0]][1])
        notebook.append_page(table, label)
        button = gtk.RadioButton()
        ## here the data for each block is loaded ##
        for i in range(1, len(self.lista)):
            if self.diccionario[self.lista[i]][0] == "notebook":
                table = gtk.VBox(False, len(self.lista))
                label = gtk.Label(self.diccionario[self.lista[i]][1])
                notebook.append_page(table, label)
            else:
                self.diccionario[self.lista[i]][0]
                caja = self.imagen_boton(
                    self.diccionario[self.lista[i]][0],
                    self.diccionario[self.lista[i]][0]
                )
                button = gtk.RadioButton(button)
                button.set_tooltip_text(self.diccionario[self.lista[i]][6])
                button.add(caja)
                button.connect("clicked", self.botones, self.lista[i])
                table.pack_start(button, False, True, 0)
                button.show()

        # capture the drawing area events,
        # except the keyboard, which is captured from the main window
        self.area.add_events(gtk.gdk.BUTTON_PRESS_MASK)
        self.area.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
        self.area.add_events(gtk.gdk.POINTER_MOTION_MASK)
        self.window1.add_events(gtk.gdk.KEY_PRESS_MASK)
        self.window1.add_events(gtk.gdk.KEY_RELEASE_MASK)
        self.area.connect("button-press-event", self.buttonpress_cb)
        self.area.connect("button-release-event", self.buttonrelease_cb)
        self.area.connect("motion-notify-event", self.move_cb)
        self.area.connect("expose-event", self.expose)
        self.window1.connect("key_press_event", self.keypress_cb)
        self.window1.connect("key_release_event", self.keyrelease_cb)
        self.area.realize()
        display = self.area.window.get_display()
        pixbuf = gtk.gdk.pixbuf_new_from_file(
            os.path.abspath(os.path.dirname(__file__)) + "/imagenes/mouse/lapiz.png")
        lapiz = gtk.gdk.Cursor(display, pixbuf, 6, 18)
        self.cursores.append(lapiz)
        pixbuf = gtk.gdk.pixbuf_new_from_file(
            os.path.abspath(os.path.dirname(__file__)) + "/imagenes/mouse/puntero.png")
        puntero = gtk.gdk.Cursor(display, pixbuf, 6, 18)
        self.cursores.append(puntero)
        pixbuf = gtk.gdk.pixbuf_new_from_file(
            os.path.abspath(os.path.dirname(__file__)) + "/imagenes/mouse/borrar.png")
        borrar = gtk.gdk.Cursor(display, pixbuf, 6, 18)
        self.cursores.append(borrar)
        pixbuf = gtk.gdk.pixbuf_new_from_file(
            os.path.abspath(os.path.dirname(__file__)) + "/imagenes/mouse/edicion.png")
        edicion = gtk.gdk.Cursor(display, pixbuf, 6, 18)
        self.cursores.append(edicion)
        self.definir_cursor(1)
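
This example locates bundled images two ways: sys.path[0] + "/imagenes/..." for the toolbar icons and os.path.abspath(os.path.dirname(__file__)) for the cursors. The latter is generally safer, since sys.path[0] depends on how the interpreter was launched. A small helper that could serve both (resource_path is a made-up name):

import os

# directory of the current module, resolved once at import time
_BASE_DIR = os.path.abspath(os.path.dirname(__file__))

def resource_path(*parts):
    # e.g. resource_path('imagenes', 'mouse', 'lapiz.png')
    return os.path.join(_BASE_DIR, *parts)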

Example 11

Project: fontlab-scripts
Source File: convertToTTF.py
View license
def processFonts(fontsList):
    totalFonts = len(fontsList)

    print "%d fonts found:\n%s\n" % (totalFonts, '\n'.join(fontsList))

    setType1openPrefs()
    setTTgeneratePrefs()
    setTTautohintPrefs()

    fontIndex = 1
    for pfaPath in fontsList:

        # Make temporary encoding file from GOADB file. This step needs to
        # be done per font, because the directory tree selected may contain
        # more than one family, or because the glyph set of a given family
        # may not be the same for both Roman/Upright and Italic/Sloped.
        encPath = None
        goadbPath = None

        # The GOADB can be located in the same folder or up to two
        # levels above in the directory tree
        sameLevel = os.path.join(os.path.dirname(pfaPath), kGOADBfileName)
        oneUp = os.path.join(
            os.path.dirname(os.path.dirname(pfaPath)), kGOADBfileName)
        twoUp = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(pfaPath))), kGOADBfileName)

        if os.path.exists(sameLevel):
            goadbPath = sameLevel
        elif os.path.exists(oneUp):
            goadbPath = oneUp
        elif os.path.exists(twoUp):
            goadbPath = twoUp

        if goadbPath:
            encPath = makeTempEncFileFromGOADB(goadbPath)
        else:
            print "Could not find %s file." % kGOADBfileName
            print "Skipping %s" % pfaPath
            print

        if not encPath:
            continue

        # Checking if a derivedchars file exists.
        # If not, the dvInput step is skipped.
        makeDV = False

        for file in os.listdir(os.path.split(pfaPath)[0]):
            if re.search(r'derivedchars(.+?)?$', file) and dvModuleFound:
                makeDV = True

        fontIsTXT = False
        fontIsUFO = False

        if kFontTXT in pfaPath:
            fontIsTXT = True
            pfaPath = convertTXTfontToPFA(pfaPath)

        elif kFontUFO in pfaPath or (pfaPath[-4:].lower() in [".ufo"]):
            # Support more than just files named "font.ufo"
            fontIsUFO = True
            pfaPath = convertUFOfontToPFA(pfaPath)

        fl.Open(pfaPath)
        print "\nProcessing %s ... (%d/%d)" % (
            fl.font.font_name, fontIndex, totalFonts)
        fontIndex += 1

        fontZonesWereReplaced = replaceFontZonesByFamilyZones()
        baselineZonesWereRemoved = removeBottomZonesAboveBaseline()

        # NOTE: After making changes to the PostScript alignment zones, the TT
        # equivalents have to be updated as well, but I couldn't find a way
        # to do it via scripting (because TTH.top_zones and TTH.bottom_zones
        # are read-only, and despite that functionality being available in
        # the UI, there's no native function to update TT zones from T1 zones).
        # So the solution is to generate a new T1 font and open it back.
        pfaPathTemp = pfaPath.replace('.pfa', '_TEMP_.pfa')
        infPathTemp = pfaPathTemp.replace('.pfa', '.inf')
        if baselineZonesWereRemoved or fontZonesWereReplaced:
            fl.GenerateFont(eval("ftTYPE1ASCII"), pfaPathTemp)
            fl[fl.ifont].modified = 0
            fl.Close(fl.ifont)
            fl.Open(pfaPathTemp)
            if os.path.exists(infPathTemp):
                # Delete the .INF file (bug in FL v5.1.x)
                os.remove(infPathTemp)

        # Load encoding file
        fl.font.encoding.Load(encPath)

        # Make sure the Font window is in 'Names mode'
        fl.CallCommand(fl_cmd.FontModeNames)

        # Sort glyphs by encoding
        fl.CallCommand(fl_cmd.FontSortByCodepage)

        # read derivedchars file, make components
        if makeDV:
            dvInput_module.run(verbose=False)

        convertT1toTT()
        changeTTfontSettings()

        # Switch the Font window to 'Index mode'
        fl.CallCommand(fl_cmd.FontModeIndex)

        # path to the folder containing the font, and the font's file name
        folderPath, fontFileName = os.path.split(pfaPath)
        ppmsFilePath = os.path.join(folderPath, kPPMsFileName)
        if os.path.exists(ppmsFilePath):
            hPPMs, vPPMs = readPPMsFile(ppmsFilePath)
            replaceStemsAndPPMs(hPPMs, vPPMs)

        tthintsFilePath = os.path.join(folderPath, kTTHintsFileName)
        if os.path.exists(tthintsFilePath):
            inputTTHints.run(folderPath)
            # readTTHintsFile(tthintsFilePath)
            # replaceTTHints()

        # FontLab 5.1.5 Mac Build 5714 does NOT respect the unchecked
        # option "Automatically add .null, CR and space characters"
        for gName in ["NULL", "CR"]:
            gIndex = fl.font.FindGlyph(gName)
            if gIndex != -1:
                del fl.font.glyphs[gIndex]

        vfbPath = pfaPath.replace('.pfa', '.vfb')
        fl.Save(vfbPath)

        # The filename of the TT output is hardcoded
        ttfPath = os.path.join(folderPath, kFontTTF)
        fl.GenerateFont(eval("ftTRUETYPE"), ttfPath)

        fl[fl.ifont].modified = 0
        fl.Close(fl.ifont)

        # The TT font generated with FontLab ends up with a few glyph names
        # changed. Fix the glyph names so that makeOTF does not fail.
        postProccessTTF(ttfPath)

        # Delete temporary Encoding file:
        if os.path.exists(encPath):
            os.remove(encPath)

        # Delete temp PFA:
        if os.path.exists(pfaPathTemp):
            os.remove(pfaPathTemp)

        # Cleanup after processing from TXT type1 font or UFO font
        if fontIsTXT or fontIsUFO:
            if os.path.exists(pfaPath):
                os.remove(pfaPath)
            if os.path.exists(ttfPath):
                finalTTFpath = ttfPath.replace('_TEMP_.ttf', '.ttf')
                if finalTTFpath != ttfPath:
                    if PC:
                        os.remove(finalTTFpath)
                    os.rename(ttfPath, finalTTFpath)

            if os.path.exists(vfbPath):
                finalVFBpath = vfbPath.replace('_TEMP_.vfb', '.vfb')
                if finalVFBpath != vfbPath:
                    if PC and os.path.exists(finalVFBpath):
                        os.remove(finalVFBpath)
                    os.rename(vfbPath, finalVFBpath)

            # remove FontLab leftovers
            pfmPath = pfaPathTemp.replace('.pfa', '.pfm')
            afmPath = pfaPathTemp.replace('.pfa', '.afm')
            if os.path.exists(pfmPath):
                os.remove(pfmPath)
            if os.path.exists(afmPath):
                os.remove(afmPath)
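
The sameLevel/oneUp/twoUp chain near the top hardcodes how far the GOADB search may climb by nesting os.path.dirname() calls. The same search as a loop, assuming two parent levels is still the intended limit (find_upwards is a made-up name):

import os

def find_upwards(start_dir, filename, max_levels=2):
    # check start_dir and up to max_levels parent directories
    directory = start_dir
    for _ in range(max_levels + 1):
        candidate = os.path.join(directory, filename)
        if os.path.exists(candidate):
            return candidate
        parent = os.path.dirname(directory)
        if parent == directory:  # reached the filesystem root
            break
        directory = parent
    return None

# goadbPath = find_upwards(os.path.dirname(pfaPath), kGOADBfileName)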

Example 12

Project: ck-env
Source File: customize.py
View license
def setup(i):
    """
    Input:  {
              cfg              - meta of this soft entry
              self_cfg         - meta of module soft
              ck_kernel        - import CK kernel module (to reuse functions)

              host_os_uoa      - host OS UOA
              host_os_uid      - host OS UID
              host_os_dict     - host OS meta

              target_os_uoa    - target OS UOA
              target_os_uid    - target OS UID
              target_os_dict   - target OS meta

              target_device_id - target device ID (if via ADB)

              tags             - list of tags used to search this entry

              env              - updated environment vars from meta
              customize        - updated customize vars from meta

              deps             - resolved dependencies for this soft

              interactive      - if 'yes', can ask questions, otherwise quiet
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0

              bat          - prepared string for bat file
            }

    """

    import os

    # Get variables
    ck=i['ck_kernel']
    s=''

    iv=i.get('interactive','')

    env=i.get('env',{})
    cfg=i.get('cfg',{})
    deps=i.get('deps',{})
    tags=i.get('tags',[])
    cus=i.get('customize',{})

    hosd=i['host_os_dict']
    tosd=i['target_os_dict']

    hplat=hosd.get('ck_name','')
    hbits=hosd.get('bits','')
    tbits=tosd.get('bits','')

    target_d=i.get('target_os_dict',{})
    winh=hosd.get('windows_base','')
    win=target_d.get('windows_base','')
    remote=target_d.get('remote','')
    mingw=target_d.get('mingw','')
    tbits=target_d.get('bits','')

    eqis=hosd.get('env_quotes_if_space','')

    envp=cus.get('env_prefix','')
    pi=cus.get('path_install','')

    fp=cus.get('full_path','')

    ############################################################
    platform=target_d.get('android_ndk_platform','')
    if platform=='':
       return {'return':1, 'error':'platform is not defined in target OS'}

    ############################################################
    arch=target_d.get('android_ndk_arch','')
    if arch=='':
       return {'return':1, 'error':'platform architecture is not defined in target OS'}

    ############################################################
    abi=target_d.get('abi','')
    if abi=='':
       return {'return':1, 'error':'abi is not defined in target OS'}

    ############################################################
    atc=tosd.get('android_toolchain','')
    if atc=='':
       return {'return':1, 'error':'android_toolchain is not specified in target OS meta'}

    acp=tosd.get('android_compiler_prefix','')
    if acp=='':
       return {'return':1, 'error':'android_compiler_prefix is not specified in target OS meta'}

    env['CK_ANDROID_COMPILER_PREFIX']=acp
    env['CK_ANDROID_TOOLCHAIN']=atc
    env['CK_ANDROID_ABI']=abi
    env['CK_ANDROID_NDK_ARCH']=arch
    env['CK_ANDROID_NDK_PLATFORM']=platform

    # Check path
    ep=cus.get('env_prefix','')
    if fp!='':
       p1=os.path.dirname(fp)
       p2=os.path.dirname(p1)
       p3=os.path.dirname(p2)
       p4=os.path.dirname(p3)
       p5=os.path.dirname(p4)
       pi=os.path.dirname(p5)

       if winh=='yes':
          s+='\nset PATH='+pi+';%PATH%\n\n'
       else:
          s+='\nexport PATH='+pi+':$PATH\n\n'

       if ep!='':
          env[ep]=p2
          env[ep+'_BIN']=p1

       prebuilt=''
       if hplat=='win':
          if hbits=='64':
             prebuilt='windows-x86_64'
          else:
             prebuilt='windows-x86'
       else:
          if hbits=='64':
             prebuilt='linux-x86_64'
          else:
             prebuilt='linux-x86'

       cus['tool_prefix_configured']='yes'
       cus['tool_prefix']=acp+'-'
       cus['platform_path_configured']='yes'
       cus['platform_path']=os.path.join(pi,'platforms')
       cus['add_extra_path_configured']='yes'
       cus['add_extra_path']=os.path.join(pi,'prebuilt',prebuilt,'bin')

       cus['ef_configured']='yes'
       x=''
#       if arch=='arm64': 
       x='-fPIE -pie'
       cus['ef']=x

       j=p4.find(atc)
       if j>0:
          ver=p4[j+len(atc)+1:]

          cus['libstdcpppath_include_configured']='yes'
          cus['libstdcpppath_include']=os.path.join(pi,'sources','cxx-stl','gnu-libstdc++',ver,'include')

          cus['libstdcpppath_configured']='yes'
          cus['libstdcpppath']=os.path.join(pi,'sources','cxx-stl','gnu-libstdc++',ver,'libs',abi)

    env.update({
      "CK_AR": "$#tool_prefix#$ar", 
      "CK_ASM_EXT": ".s", 
      "CK_CC": "$#tool_prefix#$gcc", 
      "CK_COMPILER_FLAGS_OBLIGATORY": "", 
      "CK_COMPILER_FLAG_CPP11": "-std=c++11", 
      "CK_COMPILER_FLAG_CPP0X": "-std=c++0x", 
      "CK_COMPILER_FLAG_GPROF": "-pg", 
      "CK_COMPILER_FLAG_OPENMP": "-fopenmp", 
      "CK_COMPILER_FLAG_PLUGIN": "-fplugin=", 
      "CK_COMPILER_FLAG_PTHREAD_LIB": "-lpthread", 
      "CK_CXX": "$#tool_prefix#$g++", 
      "CK_OPT_ALL_WARNINGS": "-Wall", 
      "CK_DLL_EXT": ".so", 
      "CK_EXE_EXT": ".out", 
      "CK_EXTRA_LIB_DL": "-ldl", 
      "CK_EXTRA_LIB_M": "-lm", 
      "CK_FLAGS_CREATE_ASM": "-S", 
      "CK_FLAGS_CREATE_OBJ": "-c", 
      "CK_FLAGS_DLL": "-shared -fPIC", 
      "CK_FLAGS_DLL_EXTRA": "", 
      "CK_FLAGS_OUTPUT": "-o ", 
      "CK_FLAGS_STATIC_BIN": "-static -fPIC", 
      "CK_FLAGS_STATIC_LIB": "-fPIC", 
      "CK_FLAG_PREFIX_INCLUDE": "-I", 
      "CK_FLAG_PREFIX_LIB_DIR": "-L", 
      "CK_FLAG_PREFIX_VAR": "-D", 
      "CK_GPROF_OUT_FILE": "gmon.out", 
      "CK_LB": "$#tool_prefix#$ar rcs", 
      "CK_LB_OUTPUT": "", 
      "CK_LD": "$#tool_prefix#$ld", 
      "CK_LD_FLAGS_EXTRA": "", 
      "CK_LIB_EXT": ".a", 
      "CK_LINKER_FLAG_OPENMP": "-lgomp", 
      "CK_MAKE": "make", 
      "CK_OBJDUMP": "$#tool_prefix#$objdump -d", 
      "CK_OBJ_EXT": ".o", 
      "CK_OPT_SIZE": "-Os", 
      "CK_OPT_SPEED": "-O3", 
      "CK_OPT_SPEED_SAFE": "-O2", 
      "CK_PLUGIN_FLAG": "-fplugin=", 
      "CK_PROFILER": "gprof"
    })

    ############################################################
    # Ask a few more questions

    ############################################################
    prefix_configured=cus.get('tool_prefix_configured','')
    prefix=cus.get('tool_prefix','')
    if prefix_configured!='yes' and iv=='yes':
       if prefix!='':
          ck.out('Current compiler name prefix: '+prefix)
       else:
          ra=ck.inp({'text':'Enter compiler name prefix, if needed (such as aarch64-linux-android-): '})
          prefix=ra['string'].strip()
          cus['tool_prefix_configured']='yes'

    if prefix!='':
       env['CK_COMPILER_PREFIX']=prefix
       cus['tool_prefix']=prefix
       cus['tool_prefix_configured']='yes'

    for k in env:
        v=env[k]
        v=v.replace('$#tool_prefix#$',prefix)
        env[k]=v

    ############################################################
    extra_path_configured=cus.get('add_extra_path_configured','')
    extra_path=cus.get('add_extra_path','')
    if extra_path_configured!='yes' and iv=='yes':
       if extra_path!='':
          ck.out('Full path to pre-built Android tools: '+extra_path)
       else:
          ra=ck.inp({'text':'Enter full path to pre-built Android tools (such as ...prebuilt/linux-x86_64/bin) : '})
          extra_path=ra['string']
          cus['add_extra_path_configured']='yes'

    if extra_path!='':
       cus['add_extra_path']=extra_path
       cus['add_extra_path_configured']='yes'

    if extra_path!='':
       if winh=='yes':
          s+='\nset PATH='+extra_path+';%PATH%\n\n'
       else:
          s+='\nexport PATH='+extra_path+':$PATH\n\n'

    ############################################################
    platform_path_configured=cus.get('platform_path_configured','')
    platform_path=cus.get('platform_path','')
    if platform_path_configured!='yes' and iv=='yes':
       if platform_path!='':
          ck.out('Full path to directory with Android NDK platforms: '+platform_path)
       else:
          ra=ck.inp({'text':'Enter full path to directory with Android NDK platforms : '})
          platform_path=ra['string']
          cus['platform_path_configured']='yes'

    if platform_path=='':
       return {'return':1, 'error':'path to Android platforms is not defined'}

    cus['platform_path']=platform_path
    cus['platform_path_configured']='yes'

    ############################################################
    libstdcpppathi_configured=cus.get('libstdcpppath_include_configured','')
    libstdcpppathi=cus.get('libstdcpppath_include','')
    if libstdcpppathi_configured!='yes' and iv=='yes':
       if libstdcpppathi!='':
          ck.out('Full path to include directory with libstdc++: '+libstdcpppathi)
       else:
          ra=ck.inp({'text':'* If needed, enter full path to include directory with libstdc++ (such as ...sources/cxx-stl/gnu-libstdc++/4.9/include: '})
          libstdcpppathi=ra['string']
          cus['libstdcpppath_include_configured']='yes'

    cus['libstdcpppath_include']=libstdcpppathi
    env['CK_ENV_LIB_STDCPP_INCLUDE']=libstdcpppathi
    cus['libstdcpppath_include_configured']='yes'

    libstdcpppath_configured=cus.get('libstdcpppath_configured','')
    libstdcpppath=cus.get('libstdcpppath','')
    if libstdcpppath_configured!='yes' and iv=='yes':
       if libstdcpppath!='':
          ck.out('Full path to include directory with libstdc++: '+libstdcpppath)
       else:
          ra=ck.inp({'text':'* If needed, enter full path to lib directory with libstdc++ (such as ...sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a: '})
          libstdcpppath=ra['string']
          cus['libstdcpppath_configured']='yes'

    if winh=='yes':
       sep='\\'
    else:
       sep='/'

    cus['libstdcpppath']=libstdcpppath
    if libstdcpppath!='':
       env['CK_ENV_LIB_STDCPP_STATIC']=libstdcpppath+sep+'libgnustl_static.a'
       env['CK_ENV_LIB_STDCPP_DYNAMIC']=libstdcpppath+sep+'libgnustl_shared.so'
       env['CK_ENV_LIB_STDCPP_INCLUDE_EXTRA']=libstdcpppath+sep+'include'
    else:
       env['CK_ENV_LIB_STDCPP_STATIC']=''
       env['CK_ENV_LIB_STDCPP_DYNAMIC']=''
       env['CK_ENV_LIB_STDCPP_INCLUDE_EXTRA']=''
    cus['libstdcpppath_configured']='yes'

    ############################################################
    ef_configured=cus.get('ef_configured','')
    ef=cus.get('ef','')
    if ef_configured!='yes' and iv=='yes':
       ra=ck.inp({'text':'Force extra flags, if needed (such as -fPIE -pie for aarch64): '})
       ef=ra['string']
       cus['ef']=ef
       cus['ef_configured']='yes'

    ##############
    if winh=='yes':
       psysroot=platform_path+'\\'+platform+'\\arch-'+arch
    else:
       psysroot=platform_path+'/'+platform+'/arch-'+arch
    sysroot='--sysroot "'+psysroot+'"'

    env['CK_SYS_ROOT']=psysroot

    x=env.get('CK_COMPILER_FLAGS_OBLIGATORY','')
    if sysroot not in x:
       x=sysroot+' '+x
    env['CK_COMPILER_FLAGS_OBLIGATORY']=x

    if ef!='':
       x=env['CK_CC']
       if x.find(ef)<0:
          x=eqis+x+' '+ef+eqis
       env['CK_CC']=x

       x=env['CK_CXX']
       if x.find(ef)<0:
          x=eqis+x+' '+ef+eqis
       env['CK_CXX']=x

    if pi!='':
       env['CK_ANDROID_NDK_ROOT_DIR']=pi

#    x=env.get('CK_LD_FLAGS_EXTRA','')
#    if sysroot not in x:
#       x=sysroot+' '+x
#    env['CK_LD_FLAGS_EXTRA']=x

    return {'return':0, 'bat':s, 'env':env, 'tags':tags, 'cus':cus}
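
The p1 through p5 chain above climbs six levels from the compiler binary (full_path) to the NDK installation root by applying os.path.dirname() repeatedly. A loop makes the depth explicit; ascend is a made-up helper name:

import os

def ascend(path, levels):
    # apply os.path.dirname() the given number of times
    for _ in range(levels):
        path = os.path.dirname(path)
    return path

# equivalent to the chain above: p2 = ascend(fp, 2), pi = ascend(fp, 6)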

Example 13

Project: python-documentcloud
Source File: test_all.py
View license
    def test_private_actions(self):
        """
        Test all the stuff that requires a login.
        """
        # Get an editable document
        obj_id = self.get_editable_document(self.version)
        obj = self.private_client.documents.get(obj_id)

        # Make sure `data` attribute will only accept a dictionary.
        obj.data = dict(foo='bar')
        self.assertRaises(TypeError, obj.set_data, "string")
        self.assertRaises(TypeError, obj.set_data, 666)
        self.assertRaises(TypeError, obj.set_data, obj)

        # Test whether we can put random noise to all the editable fields.
        title = get_random_string()
        source = get_random_string()
        description = get_random_string()
        data = {get_random_string(): get_random_string()}
        if obj.resources.related_article == 'http://documents.latimes.com':
            related_article = 'http://documentcloud.org'
        else:
            related_article = 'http://documents.latimes.com'
        if obj.resources.published_url == 'http://documents.latimes.com':
            published_url = 'http://documentcloud.org'
        else:
            published_url = 'http://documents.latimes.com'
        obj.title = title
        obj.source = source
        obj.description = description
        obj.data = data
        obj.resources.related_article = related_article
        obj.resources.published_url = published_url

        # Save the changes up to DocumentCloud
        obj.put()

        # Pull the object again and verify the changes stuck
        obj = self.private_client.documents.get(obj_id)
        self.assertEqual(obj.title, title)
        self.assertEqual(obj.source, source)
        self.assertEqual(obj.description, description)
        self.assertEqual(obj.data, data)
        self.assertEqual(obj.resources.related_article, related_article)
        self.assertEqual(obj.resources.published_url, published_url)

        # Test reserved namespaces to make sure they're protected
        black_list = [
            'person', 'organization', 'place', 'term', 'email', 'phone',
            'city', 'state', 'country', 'title', 'description', 'source',
            'account', 'group', 'project', 'projectid', 'document', 'access',
            'filter',
        ]
        for key in black_list:
            self.assertRaises(ValueError, setattr, obj, "data", {key: 'foo'})
        obj.data = dict(boom='bap')

        # Test to make sure non-strings can't get into the data dictionary
        with self.assertRaises(TypeError):
            obj.data = dict(a=1)

        with self.assertRaises(TypeError):
            obj.data = {1: 'a'}

        obj.data = dict(boom='bap')
        with self.assertRaises(TypeError):
            obj.data[1] = 2

        # Resources
        self.assertEqual(obj.published_url, obj.resources.published_url)
        self.assertEqual(obj.related_article, obj.resources.related_article)

        # And their shortcuts
        obj.published_url = 'http://latimes.com'
        obj.related_article = 'http://palewi.re'
        self.assertEqual(obj.published_url, obj.resources.published_url)
        self.assertEqual(obj.related_article, obj.resources.related_article)

        # Test whether the save method properly aliases `put`.
        title = get_random_string()
        obj.title = title
        obj.save()
        obj = self.private_client.documents.get(obj_id)
        self.assertEqual(obj.title, title)

        # Test whether you can save an attribute with some weird encoding
        before_title = copy(obj.title)
        before_description = copy(obj.description)
        obj.title = random.choice(list(PANGRAMS.keys()))
        obj.description = random.choice(list(PANGRAMS.keys()))
        obj.put()
        obj.title = before_title
        obj.description = before_description
        obj.put()

        # Upload
        title = get_random_string()
        obj = self.private_client.documents.upload(
            os.path.join(os.path.dirname(__file__), "test.pdf"),
            title,
            description='Blah blah',
            related_article='http://www.latimes.com',
            data=dict(like_this='like+that', boom='bap'),
        )
        self.assertTrue(isinstance(obj, Document))
        self.assertEqual(obj.title, title)
        self.assertEqual(obj.description, 'Blah blah')
        self.assertEqual(obj.related_article, 'http://www.latimes.com')
        self.assertEqual(
            obj.data,
            {u'like_this': u'like+that', u'boom': u'bap'}
        )

        # Delete
        obj.delete()
        self.assertRaises(
            DoesNotExistError,
            self.private_client.documents.get,
            obj.id
        )

        # Test upload with bad keyword
        title = '001 - Test upload (%s)' % get_random_string()
        self.assertRaises(
            ValueError,
            self.private_client.documents.upload,
            os.path.join(os.path.dirname(__file__), "test.pdf"),
            title,
            description='Blah blah',
            related_article='http://www.latimes.com',
            # Access is a reserved keyword so this should fail
            data=dict(access='this', boom='bap'),
        )

        # Upload with a file object, not a path
        title = get_random_string()
        obj = self.private_client.documents.upload(
            open(os.path.join(os.path.dirname(__file__), "test.pdf"), "rb"),
            title,
        )
        self.assertTrue(isinstance(obj, Document))
        self.assertEqual(obj.title, title)
        obj.delete()

        # Ensure that documents with non-english characters can be uploaded
        pdf = os.path.join(os.path.dirname(__file__), "español.pdf")
        obj = self.private_client.documents.upload(open(pdf, 'rb'))
        self.assertTrue(isinstance(obj, Document))
        obj.delete()

        # Test virtual file upload and delete
        path = os.path.join(os.path.dirname(__file__), "español.pdf")
        real_file = open(path, 'rb')
        if six.PY3:
            virtual_file = io.BytesIO(real_file.read())
        else:
            virtual_file = io.StringIO(real_file.read())
        obj = self.private_client.documents.upload(
            virtual_file,
            title='Espanola!'
        )
        self.assertTrue(isinstance(obj, Document))
        obj.delete()

        # Test secure upload
        title = get_random_string()
        obj = self.private_client.documents.upload(
            os.path.join(os.path.dirname(__file__), "test.pdf"),
            title,
            secure=True,
        )
        self.assertTrue(isinstance(obj, Document))
        obj.delete()

        # Upload everything in this directory.
        obj_list = self.private_client.documents.upload_directory(
            './',
            source='Los Angeles Times',
            published_url='http://www.latimes.com',
        )
        self.assertEqual(len(obj_list), 2)
        self.assertTrue(isinstance(obj_list[0], Document))
        self.assertEqual(obj_list[0].source, 'Los Angeles Times')
        self.assertEqual(obj_list[0].published_url, 'http://www.latimes.com')
        [i.delete() for i in obj_list]

        # Test URL upload
        url = 'http://ord.legistar.com/Chicago/attachments/e3a0cbcb-044d-4ec3-9848-23c5692b1943.pdf'
        obj = self.private_client.documents.upload(url)
        obj.delete()
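
Every fixture in this test is addressed as os.path.join(os.path.dirname(__file__), ...), which keeps the suite independent of the current working directory. The usual consolidation, sketched with a made-up helper name:

import os

# directory containing this test module
TEST_DIR = os.path.dirname(os.path.abspath(__file__))

def fixture(name):
    # e.g. fixture('test.pdf'), fixture('español.pdf')
    return os.path.join(TEST_DIR, name)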

Example 14

Project: eofs
Source File: plot_directive.py
View license
def run(arguments, content, options, state_machine, state, lineno):
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    context = 'context' in options

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with open(source_file_name, 'r') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code, source_file_name, build_dir, output_base,
                                 context, function_name, config)
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in options.items()
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"

        if j == 0:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with open(target_name, 'w') as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
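
The "get rid of .. in paths" comment above refers to os.path.normpath(), which collapses redundant separators and '..' segments so the source and destination paths can be compared literally at the end; as the quoted comment warns, the Python docs note this can change a path's meaning in the presence of symbolic links. A quick illustration:

import os

# two spellings of the same location compare equal after normpath()
p = os.path.join('/docs/_build', 'plot_directive', 'guide', '..', 'guide')
print(os.path.normpath(p))  # /docs/_build/plot_directive/guide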

Example 15

Project: windspharm
Source File: plot_directive.py
View license
def run(arguments, content, options, state_machine, state, lineno):
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    context = 'context' in options

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with open(source_file_name, 'r') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir) # no problem here for me, but just use built-ins

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code, source_file_name, build_dir, output_base,
                                 context, function_name, config)
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in options.items()
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"

        if j == 0:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with open(target_name, 'w') as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
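
The tail of these directives also shows os.path.dirname's counterpart, os.path.basename, used to flatten generated images into the destination directory. A compact sketch of that copy step (the function name is ours, not the project's):

import os
import shutil

def copy_outputs(filenames, dest_dir):
    # Flatten every file into dest_dir, keeping only its base name.
    for fn in filenames:
        dest = os.path.join(dest_dir, os.path.basename(fn))
        if fn != dest:
            shutil.copyfile(fn, dest)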

Example 16

Project: fdroidserver
Source File: init.py
View license
def main():

    global options, config

    # Parse command line...
    parser = ArgumentParser()
    common.setup_global_opts(parser)
    parser.add_argument("-d", "--distinguished-name", default=None,
                        help="X.509 'Distiguished Name' used when generating keys")
    parser.add_argument("--keystore", default=None,
                        help="Path to the keystore for the repo signing key")
    parser.add_argument("--repo-keyalias", default=None,
                        help="Alias of the repo signing key in the keystore")
    parser.add_argument("--android-home", default=None,
                        help="Path to the Android SDK (sometimes set in ANDROID_HOME)")
    parser.add_argument("--no-prompt", action="store_true", default=False,
                        help="Do not prompt for Android SDK path, just fail")
    options = parser.parse_args()

    # find root install prefix
    tmp = os.path.dirname(sys.argv[0])
    examplesdir = None
    if os.path.basename(tmp) == 'bin':
        egg_link = os.path.join(tmp, '..', 'local/lib/python2.7/site-packages/fdroidserver.egg-link')
        if os.path.exists(egg_link):
            # installed from local git repo
            examplesdir = os.path.join(open(egg_link).readline().rstrip(), 'examples')
        else:
            # try .egg layout
            examplesdir = os.path.dirname(os.path.dirname(__file__)) + '/share/doc/fdroidserver/examples'
            if not os.path.exists(examplesdir):  # use UNIX layout
                examplesdir = os.path.dirname(tmp) + '/share/doc/fdroidserver/examples'
    else:
        # we're running straight out of the git repo
        prefix = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
        examplesdir = prefix + '/examples'

    aapt = None
    fdroiddir = os.getcwd()
    test_config = dict()
    common.fill_config_defaults(test_config)

    # track down where the Android SDK is, the default is to use the path set
    # in ANDROID_HOME if that exists, otherwise None
    if options.android_home is not None:
        test_config['sdk_path'] = options.android_home
    elif not common.test_sdk_exists(test_config):
        if os.path.isfile('/usr/bin/aapt'):
            # remove sdk_path and build_tools, they are not required
            test_config.pop('sdk_path', None)
            test_config.pop('build_tools', None)
            # make sure at least aapt is found, since this can't do anything without it
            test_config['aapt'] = common.find_sdk_tools_cmd('aapt')
        else:
            # if neither --android-home nor the default sdk_path exist, prompt the user
            default_sdk_path = '/opt/android-sdk'
            if sys.platform == 'win32' or sys.platform == 'cygwin':
                default_sdk_path = os.path.join(os.getenv('USERPROFILE'),
                                                'AppData', 'Local', 'Android', 'android-sdk')
            while not options.no_prompt:
                try:
                    s = input('Enter the path to the Android SDK ('
                              + default_sdk_path + ') here:\n> ')
                except KeyboardInterrupt:
                    print('')
                    sys.exit(1)
                if re.match(r'^\s*$', s) is not None:
                    test_config['sdk_path'] = default_sdk_path
                else:
                    test_config['sdk_path'] = s
                if common.test_sdk_exists(test_config):
                    break
    if not common.test_sdk_exists(test_config):
        sys.exit(3)

    if not os.path.exists('config.py'):
        # 'metadata' and 'tmp' are created in fdroid
        if not os.path.exists('repo'):
            os.mkdir('repo')
        shutil.copy(os.path.join(examplesdir, 'fdroid-icon.png'), fdroiddir)
        shutil.copyfile(os.path.join(examplesdir, 'config.py'), 'config.py')
        os.chmod('config.py', 0o0600)
        # If android_home is None, test_config['sdk_path'] will be used and
        # "$ANDROID_HOME" may be used if the env var is set up correctly.
        # If android_home is not None, the path given from the command line
        # will be directly written in the config.
        if 'sdk_path' in test_config:
            common.write_to_config(test_config, 'sdk_path', options.android_home)
    else:
        logging.warning('Looks like this is already an F-Droid repo, cowardly refusing to overwrite it...')
        logging.info('Try running `fdroid init` in an empty directory.')
        sys.exit()

    if 'aapt' not in test_config or not os.path.isfile(test_config['aapt']):
        # try to find a working aapt, in all the recent possible paths
        build_tools = os.path.join(test_config['sdk_path'], 'build-tools')
        aaptdirs = []
        aaptdirs.append(os.path.join(build_tools, test_config['build_tools']))
        aaptdirs.append(build_tools)
        for f in os.listdir(build_tools):
            if os.path.isdir(os.path.join(build_tools, f)):
                aaptdirs.append(os.path.join(build_tools, f))
        for d in sorted(aaptdirs, reverse=True):
            if os.path.isfile(os.path.join(d, 'aapt')):
                aapt = os.path.join(d, 'aapt')
                break
        if aapt and os.path.isfile(aapt):
            dirname = os.path.basename(os.path.dirname(aapt))
            if dirname == 'build-tools':
                # this is the old layout, before versioned build-tools
                test_config['build_tools'] = ''
            else:
                test_config['build_tools'] = dirname
            common.write_to_config(test_config, 'build_tools')
        common.ensure_build_tools_exists(test_config)

    # now that we have a local config.py, read configuration...
    config = common.read_config(options)

    # the NDK is optional and there may be multiple versions of it, so it's
    # left for the user to configure

    # find or generate the keystore for the repo signing key. First try the
    # path written in the default config.py.  Then check if the user has
    # specified a path from the command line, which will trump all others.
    # Otherwise, create ~/.local/share/fdroidserver and stick it in there.  If
    # keystore is set to NONE, that means that Java will look for keys in a
    # Hardware Security Module aka Smartcard.
    keystore = config['keystore']
    if options.keystore:
        if options.keystore == 'NONE':
            keystore = options.keystore
        else:
            keystore = os.path.abspath(options.keystore)
            if not os.path.exists(keystore):
                logging.info('"' + keystore
                             + '" does not exist, creating a new keystore there.')
    common.write_to_config(test_config, 'keystore', keystore)
    repo_keyalias = None
    if options.repo_keyalias:
        repo_keyalias = options.repo_keyalias
        common.write_to_config(test_config, 'repo_keyalias', repo_keyalias)
    if options.distinguished_name:
        keydname = options.distinguished_name
        common.write_to_config(test_config, 'keydname', keydname)
    if keystore == 'NONE':  # we're using a smartcard
        common.write_to_config(test_config, 'repo_keyalias', '1')  # seems to be the default
        disable_in_config('keypass', 'never used with smartcard')
        common.write_to_config(test_config, 'smartcardoptions',
                               ('-storetype PKCS11 -providerName SunPKCS11-OpenSC '
                                + '-providerClass sun.security.pkcs11.SunPKCS11 '
                                + '-providerArg opensc-fdroid.cfg'))
        # find opensc-pkcs11.so
        if not os.path.exists('opensc-fdroid.cfg'):
            if os.path.exists('/usr/lib/opensc-pkcs11.so'):
                opensc_so = '/usr/lib/opensc-pkcs11.so'
            elif os.path.exists('/usr/lib64/opensc-pkcs11.so'):
                opensc_so = '/usr/lib64/opensc-pkcs11.so'
            else:
                files = glob.glob('/usr/lib/' + os.uname()[4] + '-*-gnu/opensc-pkcs11.so')
                if len(files) > 0:
                    opensc_so = files[0]
                else:
                    opensc_so = '/usr/lib/opensc-pkcs11.so'
                    logging.warning('No OpenSC PKCS#11 module found, ' +
                                    'install OpenSC then edit "opensc-fdroid.cfg"!')
            with open(os.path.join(examplesdir, 'opensc-fdroid.cfg'), 'r') as f:
                opensc_fdroid = f.read()
            opensc_fdroid = re.sub('^library.*', 'library = ' + opensc_so, opensc_fdroid,
                                   flags=re.MULTILINE)
            with open('opensc-fdroid.cfg', 'w') as f:
                f.write(opensc_fdroid)
    elif not os.path.exists(keystore):
        password = common.genpassword()
        c = dict(test_config)
        c['keystorepass'] = password
        c['keypass'] = password
        c['repo_keyalias'] = socket.getfqdn()
        c['keydname'] = 'CN=' + c['repo_keyalias'] + ', OU=F-Droid'
        common.write_to_config(test_config, 'keystorepass', password)
        common.write_to_config(test_config, 'keypass', password)
        common.write_to_config(test_config, 'repo_keyalias', c['repo_keyalias'])
        common.write_to_config(test_config, 'keydname', c['keydname'])
        common.genkeystore(c)

    logging.info('Built repo based in "' + fdroiddir + '"')
    logging.info('with this config:')
    logging.info('  Android SDK:\t\t\t' + config['sdk_path'])
    if aapt:
        logging.info('  Android SDK Build Tools:\t' + os.path.dirname(aapt))
    logging.info('  Android NDK r12b (optional):\t$ANDROID_NDK')
    logging.info('  Keystore for signing key:\t' + keystore)
    if repo_keyalias is not None:
        logging.info('  Alias for key in store:\t' + repo_keyalias)
    logging.info('\nTo complete the setup, add your APKs to "' +
                 os.path.join(fdroiddir, 'repo') + '"' + '''
then run "fdroid update -c; fdroid update".  You might also want to edit
"config.py" to set the URL, repo name, and more.  You should also set up
a signing key (a temporary one might have been automatically generated).

For more info: https://f-droid.org/manual/fdroid.html#Simple-Binary-Repository
and https://f-droid.org/manual/fdroid.html#Signing
''')
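
This example shows two dirname idioms side by side: os.path.dirname(sys.argv[0]) to locate the install prefix of the running script, and the nested os.path.dirname(os.path.dirname(__file__)) to climb out of a package. A trimmed, hypothetical sketch of the same lookup ('share/examples' is illustrative):

import os
import sys

def guess_examples_dir():
    script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    if os.path.basename(script_dir) == 'bin':
        # installed layout: <prefix>/bin/tool -> <prefix>/share/examples
        return os.path.join(os.path.dirname(script_dir), 'share', 'examples')
    # source checkout: <repo>/pkg/module.py -> <repo>/examples
    repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    return os.path.join(repo_root, 'examples')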

Example 17

Project: ganga
Source File: AthenaLocalRTHandler.py
View license
    def master_prepare( self, app, appconfig ):
        """Prepare the master job"""
        
        job = app._getParent() # Returns job or subjob object

        logger.debug("AthenaLocalRTHandler master_prepare called, %s", job.id)

        if job._getRoot().subjobs:
            jobid = "%d" % (job._getRoot().id)
        else:
            jobid = "%d" % job.id

        # Generate output dataset name
        if job.outputdata:
            if job.outputdata._name=='DQ2OutputDataset':
                dq2_datasetname = job.outputdata.datasetname
                dq2_isGroupDS = job.outputdata.isGroupDS
                dq2_groupname = job.outputdata.groupname
            else:
                dq2_datasetname = ''
                dq2_isGroupDS = False
                dq2_groupname = ''
            self.output_datasetname, self.output_lfn = dq2outputdatasetname(dq2_datasetname, jobid, dq2_isGroupDS, dq2_groupname)

        # Expand Athena jobOptions
        if not app.option_file and not app.command_line:
            raise ConfigError("j.application.option_file='' - No Athena jobOptions files specified.")

        athena_options = ''
        inputbox = [File(os.path.join(os.path.dirname(__file__),'athena-utility.sh'))]
        if app.atlas_exetype in ['PYARA','ARES','ROOT','EXE']:

            for option_file in app.option_file:
                athena_options += ' ' + os.path.basename(option_file.name)
                inputbox += [ File(option_file.name) ]

            athena_options += ' %s ' % app.options

        else:
            for option_file in app.option_file:
                athena_option = os.path.basename(option_file.name)
                athena_options += ' ' + athena_option
                if app.options:
                    athena_options =  app.options + ' ' + athena_options
                inputbox += [ File(option_file.name) ]


            if app.command_line:
                athena_options = app.command_line

        athena_usersetupfile = os.path.basename(app.user_setupfile.name)

#       prepare input sandbox

        if app.user_setupfile.name: inputbox += [ File(app.user_setupfile.name) ]
        #CN: added extra test for TNTJobSplitter
        if job.inputdata and job.inputdata._name in [ 'DQ2Dataset', 'ATLASTier3Dataset'] or (job._getRoot().splitter and job._getRoot().splitter._name == 'TNTJobSplitter'):
            _append_files(inputbox,'ganga-stage-in-out-dq2.py')
            _append_files(inputbox,'dq2_get')
            _append_files(inputbox,'dq2info.tar.gz')
            _append_files(inputbox,'libdcap.so')

        if job.inputdata and job.inputdata._name == 'ATLASDataset':
            if job.inputdata.lfc:
                _append_files(inputbox,'ganga-stagein-lfc.py')
            else:
                _append_files(inputbox,'ganga-stagein.py')

        ## insert more scripts to inputsandbox for FileStager
        if job.inputdata and job.inputdata._name in [ 'DQ2Dataset' ] and job.inputdata.type in ['FILE_STAGER']:
            _append_files(inputbox,'make_filestager_joption.py','dm_util.py','fs-copy.py')

        if not 'getstats.py' in [ os.path.basename(file.name) for file in inputbox ]:
            _append_files(inputbox, 'getstats.py')

        if job.outputdata and job.outputdata._name == 'DQ2OutputDataset':
            if not job.outputdata.location:
                raise ApplicationConfigurationError(None,'j.outputdata.location is empty - Please specify a DQ2 output location - job not submitted !')
            if not File(os.path.join(os.path.dirname(__file__),'ganga-stage-in-out-dq2.py')) in inputbox:
                _append_files(inputbox,'ganga-stage-in-out-dq2.py')
                _append_files(inputbox,'dq2info.tar.gz')
                _append_files(inputbox,'libdcap.so')
            _append_files(inputbox,'ganga-joboption-parse.py')

        if job.inputsandbox:
            for file in job.inputsandbox:
                inputbox += [ file ]
        if app.user_area.name:
            if app.is_prepared is True:
                inputbox += [ File(app.user_area.name) ] 
            else:
                inputbox += [ File(os.path.join(os.path.join(shared_path,app.is_prepared.name),os.path.basename(app.user_area.name))) ]
        if app.group_area.name and string.find(app.group_area.name,"http")<0:
            if app.is_prepared is True:
                inputbox += [ File(app.group_area.name) ] 
            else:
                inputbox += [ File(os.path.join(os.path.join(shared_path,app.is_prepared.name),os.path.basename(app.group_area.name))) ]
   
#       prepare environment

        try:
            atlas_software = config['ATLAS_SOFTWARE']
        except ConfigError:
            raise ConfigError('No default location of ATLAS_SOFTWARE specified in the configuration.')

        if app.atlas_release=='' and app.atlas_project != "AthAnalysisBase":
            raise ApplicationConfigurationError(None,'j.application.atlas_release is empty - No ATLAS release version found. Run prepare() or specify a version explicitly.')
      
        environment={ 
            'ATLAS_RELEASE' : app.atlas_release,
            'ATHENA_OPTIONS' : athena_options,
            'ATLAS_SOFTWARE' : atlas_software,
            'ATHENA_USERSETUPFILE' : athena_usersetupfile,
            'ATLAS_PROJECT' : app.atlas_project,
            'ATLAS_EXETYPE' : app.atlas_exetype,
            'GANGA_VERSION' : configSystem['GANGA_VERSION'],
            'DQ2_SETUP_SCRIPT': configDQ2['setupScript']
        }

        # Set athena architecture: 32 or 64 bit
        environment['ATLAS_ARCH'] = '32'
        cmtconfig = app.atlas_cmtconfig
        if cmtconfig.find('x86_64')>=0:
            environment['ATLAS_ARCH'] = '64'

        environment['ATLAS_CMTCONFIG'] = app.atlas_cmtconfig
        environment['DCACHE_RA_BUFFER'] = str(config['DCACHE_RA_BUFFER'])
        
        if app.atlas_environment:
            for var in app.atlas_environment:
                vars=var.split('=')
                if len(vars)==2:
                    environment[vars[0]]=vars[1]

        if app.atlas_production and (app.atlas_project == 'AtlasPoint1' or app.atlas_release.find('12.')<=0):
            environment['ATLAS_PRODUCTION'] = app.atlas_production 

        if app.user_area.name: 
            environment['USER_AREA'] = os.path.basename(app.user_area.name)
        if app.group_area.name:
            if string.find(app.group_area.name,"http")>=0:
                environment['GROUP_AREA_REMOTE'] = "%s" % (app.group_area.name)
            else:
                environment['GROUP_AREA']=os.path.basename(app.group_area.name)

        if app.max_events:
            if (app.max_events != -999) and (app.max_events > -2):
                environment['ATHENA_MAX_EVENTS'] = str(app.max_events)

        if job.inputdata and job.inputdata._name == 'StagerDataset':

            if job.inputdata.type not in ['LOCAL']:

                try:
                    environment['X509CERTDIR']=os.environ['X509_CERT_DIR']
                except KeyError:
                    environment['X509CERTDIR']=''

                try:
                    proxy = os.environ['X509_USER_PROXY']
                except KeyError:
                    proxy = '/tmp/x509up_u%s' % os.getuid()

                REMOTE_PROXY = '%s:%s' % (socket.getfqdn(),proxy)
                environment['REMOTE_PROXY'] = REMOTE_PROXY

                try:
                    environment['GANGA_GLITE_UI']=configLCG['GLITE_SETUP']
                except:
                    pass

        if job.inputdata and job.inputdata._name == 'DQ2Dataset':
            if job.inputdata.dataset:
                datasetname = job.inputdata.dataset
                environment['DATASETNAME']=':'.join(datasetname)
                environment['DATASETLOCATION'] = ':'.join(job.inputdata.get_locations())
                environment['DQ2_URL_SERVER']=configDQ2['DQ2_URL_SERVER']
                environment['DQ2_URL_SERVER_SSL']=configDQ2['DQ2_URL_SERVER_SSL']
                #environment['DATASETTYPE']=job.inputdata.type
                # At present, DQ2 download is the only thing that works
                environment['DATASETTYPE']="DQ2_DOWNLOAD"
                if job.inputdata.accessprotocol:
                    environment['DQ2_LOCAL_PROTOCOL'] = job.inputdata.accessprotocol                

                try:
                    environment['X509CERTDIR']=os.environ['X509_CERT_DIR']
                except KeyError:
                    environment['X509CERTDIR']=''

                try:
                    proxy = os.environ['X509_USER_PROXY']
                except KeyError:
                    proxy = '/tmp/x509up_u%s' % os.getuid()

                REMOTE_PROXY = '%s:%s' % (socket.getfqdn(),proxy)
                environment['REMOTE_PROXY'] = REMOTE_PROXY
                try:
                    environment['GANGA_GLITE_UI']=configLCG['GLITE_SETUP']
                except:
                    pass

            else:
                raise ConfigError("j.inputdata.dataset='' - DQ2 dataset name needs to be specified.")
            
            if job.inputdata.tagdataset:
                environment['TAGDATASETNAME'] = ':'.join(job.inputdata.tagdataset)
                
        if job.outputdata and job.outputdata._name == 'DQ2OutputDataset':
            environment['DQ2_URL_SERVER']=configDQ2['DQ2_URL_SERVER']
            environment['DQ2_URL_SERVER_SSL']=configDQ2['DQ2_URL_SERVER_SSL']
            try:
                environment['X509CERTDIR']=os.environ['X509_CERT_DIR']
            except KeyError:
                environment['X509CERTDIR']=''
            try:
                proxy = os.environ['X509_USER_PROXY']
            except KeyError:
                proxy = '/tmp/x509up_u%s' % os.getuid()

            REMOTE_PROXY = '%s:%s' % (socket.getfqdn(),proxy)
            environment['REMOTE_PROXY'] = REMOTE_PROXY
            try:
                environment['GANGA_GLITE_UI']=configLCG['GLITE_SETUP']
            except:
                pass

        if hasattr(job.backend, 'extraopts'):
            if job.backend.extraopts.find('site=hh')>0:
                environment['DQ2_LOCAL_SITE_ID'] = 'DESY-HH_SCRATCHDISK'
            elif job.backend.extraopts.find('site=zn')>0:
                environment['DQ2_LOCAL_SITE_ID'] = 'DESY-ZN_SCRATCHDISK'
            else:
                environment['DQ2_LOCAL_SITE_ID'] = configDQ2['DQ2_LOCAL_SITE_ID']
        else:
            environment['DQ2_LOCAL_SITE_ID'] = configDQ2['DQ2_LOCAL_SITE_ID']

        exe = os.path.join(os.path.dirname(__file__), 'run-athena-local.sh')

#       output sandbox
        outputbox = [ ]
        outputGUIDs='output_guids'
        outputLOCATION='output_location'
        outputDATA='output_data'
        outputbox.append( outputGUIDs )
        outputbox.append( outputLOCATION )
        outputbox.append( outputDATA )
        outputbox.append('stats.pickle')
        if (job.outputsandbox):
            for file in job.outputsandbox:
                outputbox += [ file ]

        ## retrieve the FileStager log
        if job.inputdata and job.inputdata._name in [ 'DQ2Dataset'] and job.inputdata.type in ['FILE_STAGER']:
            outputbox += ['FileStager.out', 'FileStager.err']

        # Switch for DEBUG print-out in logfiles
        if app.useNoDebugLogs:
            environment['GANGA_LOG_DEBUG'] = '0'
        else:
            environment['GANGA_LOG_DEBUG'] = '1'

        return StandardJobConfig(File(exe), inputbox, [], outputbox, environment)
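
Nearly every path in this handler is built as os.path.join(os.path.dirname(__file__), name), i.e. a file shipped next to the module itself. The idiom in isolation (the helper name below is only an example):

import os

_HERE = os.path.dirname(os.path.abspath(__file__))

def sibling(name):
    """Absolute path of a file that lives alongside this module."""
    return os.path.join(_HERE, name)

# e.g. sibling('run-athena-local.sh')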

Example 18

Project: robothon
Source File: setup.py
View license
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration,dot_join
    from numpy.distutils.system_info import get_info, default_lib_dirs

    config = Configuration('core',parent_package,top_path)
    local_dir = config.local_path
    codegen_dir = join(local_dir,'code_generators')

    generate_umath_py = join(codegen_dir,'generate_umath.py')
    n = dot_join(config.name,'generate_umath')
    generate_umath = imp.load_module('_'.join(n.split('.')),
                                     open(generate_umath_py,'U'),generate_umath_py,
                                     ('.py','U',1))

    header_dir = 'include/numpy' # this is relative to config.path_in_package

    def generate_config_h(ext, build_dir):
        target = join(build_dir,header_dir,'config.h')
        dir = os.path.dirname(target)
        if not os.path.exists(dir):
            os.makedirs(dir)
        if newer(__file__,target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s',target)
            tc = generate_testcode(target)
            from distutils import sysconfig
            python_include = sysconfig.get_python_inc()
            python_h = join(python_include, 'Python.h')
            if not os.path.isfile(python_h):
                raise SystemError,\
                      "Non-existing %s. Perhaps you need to install"\
                      " python-dev|python-devel." % (python_h)
            result = config_cmd.try_run(tc,include_dirs=[python_include],
                                        library_dirs = default_lib_dirs)
            if not result:
                raise SystemError,"Failed to test configuration. "\
                      "See previous error messages for more information."

            moredefs = []
            #
            mathlibs = []
            tc = testcode_mathlib()
            mathlibs_choices = [[],['m'],['cpml']]
            mathlib = os.environ.get('MATHLIB')
            if mathlib:
                mathlibs_choices.insert(0,mathlib.split(','))
            for libs in mathlibs_choices:
                if config_cmd.try_run(tc,libraries=libs):
                    mathlibs = libs
                    break
            else:
                raise EnvironmentError("math library missing; rerun "
                                       "setup.py after setting the "
                                       "MATHLIB env variable")
            ext.libraries.extend(mathlibs)
            moredefs.append(('MATHLIB',','.join(mathlibs)))

            def check_func(func_name):
                return config_cmd.check_func(func_name,
                                             libraries=mathlibs, decl=False,
                                             headers=['math.h'])

            for func_name, defsymbol in FUNCTIONS_TO_CHECK:
                if check_func(func_name):
                    moredefs.append(defsymbol)

            if is_npy_no_signal():
                moredefs.append('__NPY_PRIVATE_NO_SIGNAL')

            if sys.platform=='win32' or os.name=='nt':
                from numpy.distutils.misc_util import get_build_architecture
                a = get_build_architecture()
                print 'BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % (a, os.name, sys.platform)
                if a == 'AMD64':
                    moredefs.append('DISTUTILS_USE_SDK')

            if sys.version[:3] < '2.4':
                if config_cmd.check_func('strtod', decl=False,
                                         headers=['stdlib.h']):
                    moredefs.append(('PyOS_ascii_strtod', 'strtod'))

            target_f = open(target,'a')
            for d in moredefs:
                if isinstance(d,str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0],d[1]))
            target_f.close()
            cmd_ = 'ed - %s < /SourceCache/python_modules/python_modules-21/numpy/config.h.ed' % target
            print cmd_
            os.system(cmd_)
            print 'File:',target
            target_f = open(target)
            print target_f.read()
            target_f.close()
            print 'EOF'
        else:
            mathlibs = []
            target_f = open(target)
            for line in target_f.readlines():
                s = '#define MATHLIB'
                if line.startswith(s):
                    value = line[len(s):].strip()
                    if value:
                        mathlibs.extend(value.split(','))
            target_f.close()

        ext.libraries.extend(mathlibs)

        incl_dir = os.path.dirname(target)
        if incl_dir not in config.numpy_include_dirs:
            config.numpy_include_dirs.append(incl_dir)

        return target

    def generate_numpyconfig_h(ext, build_dir):
        """Depends on config.h: generate_config_h has to be called before !"""
        target = join(build_dir,header_dir,'numpyconfig.h')
        dir = os.path.dirname(target)
        if not os.path.exists(dir):
            os.makedirs(dir)
        if newer(__file__,target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s',target)
            testcode = generate_numpyconfig_code(target)

            from distutils import sysconfig
            python_include = sysconfig.get_python_inc()
            python_h = join(python_include, 'Python.h')
            if not os.path.isfile(python_h):
                raise SystemError,\
                      "Non-existing %s. Perhaps you need to install"\
                      " python-dev|python-devel." % (python_h)

            config.numpy_include_dirs
            result = config_cmd.try_run(testcode,
                                include_dirs = [python_include] + \
                                                       config.numpy_include_dirs,
                                        library_dirs = default_lib_dirs)

            if not result:
                raise SystemError,"Failed to generate numpy configuration. "\
                      "See previous error messages for more information."

            moredefs = []

            # Check whether we can use inttypes (C99) formats
            if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
                moredefs.append(('NPY_USE_C99_FORMATS', 1))
            else:
                moredefs.append(('NPY_USE_C99_FORMATS', 0))

            # Add moredefs to header
            target_f = open(target,'a')
            for d in moredefs:
                if isinstance(d,str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0],d[1]))

            # Define __STDC_FORMAT_MACROS
            target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
            target_f.close()

            # Dump the numpyconfig.h header to stdout
            cmd_ = 'ed - %s < /SourceCache/python_modules/python_modules-21/numpy/numpyconfig.h.ed' % target
            print cmd_
            os.system(cmd_)
            print 'File: %s' % target
            target_f = open(target)
            print target_f.read()
            target_f.close()
            print 'EOF'
        config.add_data_files((header_dir, target))
        return target

    def generate_api_func(module_name):
        def generate_api(ext, build_dir):
            script = join(codegen_dir, module_name + '.py')
            sys.path.insert(0, codegen_dir)
            try:
                m = __import__(module_name)
                log.info('executing %s', script)
                h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
            finally:
                del sys.path[0]
            config.add_data_files((header_dir, h_file),
                                  (header_dir, doc_file))
            return (h_file,)
        return generate_api

    generate_numpy_api = generate_api_func('generate_numpy_api')
    generate_ufunc_api = generate_api_func('generate_ufunc_api')

    def generate_umath_c(ext,build_dir):
        target = join(build_dir,header_dir,'__umath_generated.c')
        dir = os.path.dirname(target)
        if not os.path.exists(dir):
            os.makedirs(dir)
        script = generate_umath_py
        if newer(script,target):
            f = open(target,'w')
            f.write(generate_umath.make_code(generate_umath.defdict,
                                             generate_umath.__file__))
            f.close()
        return []

    config.add_data_files('include/numpy/*.h')
    config.add_include_dirs('src')

    config.numpy_include_dirs.extend(config.paths('include'))

    deps = [join('src','arrayobject.c'),
            join('src','arraymethods.c'),
            join('src','scalartypes.inc.src'),
            join('src','arraytypes.inc.src'),
            join('src','_signbit.c'),
            join('src','_isnan.c'),
            join('src','ucsnarrow.c'),
            join('include','numpy','*object.h'),
            'include/numpy/fenv/fenv.c',
            'include/numpy/fenv/fenv.h',
            join(codegen_dir,'genapi.py'),
            join(codegen_dir,'*.txt')
            ]

    # Don't install fenv unless we need them.
    if sys.platform == 'cygwin':
        config.add_data_dir('include/numpy/fenv')

    config.add_extension('multiarray',
                         sources = [join('src','multiarraymodule.c'),
                                    generate_config_h,
                                    generate_numpyconfig_h,
                                    generate_numpy_api,
                                    join('src','scalartypes.inc.src'),
                                    join('src','arraytypes.inc.src'),
                                    join(codegen_dir,'generate_numpy_api.py'),
                                    join('*.py')
                                    ],
                         depends = deps,
                         )

    config.add_extension('umath',
                         sources = [generate_config_h,
                                    generate_numpyconfig_h,
                                    join('src','umathmodule.c.src'),
                                    generate_umath_c,
                                    generate_ufunc_api,
                                    join('src','scalartypes.inc.src'),
                                    join('src','arraytypes.inc.src'),
                                    ],
                         depends = [join('src','ufuncobject.c'),
                                    generate_umath_py,
                                    join(codegen_dir,'generate_ufunc_api.py'),
                                    ]+deps,
                         )

    config.add_extension('_sort',
                         sources=[join('src','_sortmodule.c.src'),
                                  generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api,
                                  ],
                         )

    config.add_extension('scalarmath',
                         sources=[join('src','scalarmathmodule.c.src'),
                                  generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api,
                                  generate_ufunc_api],
                         )

    # Configure blasdot
    blas_info = get_info('blas_opt',0)
    #blas_info = {}
    def get_dotblas_sources(ext, build_dir):
        if blas_info:
            if ('NO_ATLAS_INFO',1) in blas_info.get('define_macros',[]):
                return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient.
            return ext.depends[:1]
        return None # no extension module will be built

    config.add_extension('_dotblas',
                         sources = [get_dotblas_sources],
                         depends=[join('blasdot','_dotblas.c'),
                                  join('blasdot','cblas.h'),
                                  ],
                         include_dirs = ['blasdot'],
                         extra_info = blas_info
                         )


    config.add_data_dir('tests')
    config.add_data_dir('tests/data')

    config.make_svn_version_py()

    return config
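
Each generate_* helper above follows the same two-step dance: compute the target path, then create the target's parent via os.path.dirname before writing. A self-contained sketch of that step (the header path is illustrative):

import os

def ensure_parent(target):
    # Create the directory that will hold `target`, if it is missing.
    parent = os.path.dirname(target)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    return target

with open(ensure_parent('build/include/config.h'), 'w') as f:
    f.write('#define EXAMPLE 1\n')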

Example 19

Project: django-ddp
Source File: views.py
View license
    def __init__(self, **kwargs):
        """
        Initialisation for Django DDP server view.

        The following items populate `Meteor.settings` (later items take precedence):
          1. django.conf.settings.METEOR_SETTINGS
          2. os.environ['METEOR_SETTINGS']
          3. MeteorView.meteor_settings (class attribute) or empty dict
          4. MeteorView.as_view(meteor_settings=...)

        Additionally, `Meteor.settings.public` is updated with values from
        environment variables specified by the following sources:
          1. django.conf.settings.METEOR_PUBLIC_ENVS
          2. os.environ['METEOR_PUBLIC_ENVS']
          3. MeteorView.meteor_public_envs (class attribute) or empty dict
          4. MeteorView.as_view(meteor_public_envs=...)
        """
        self.runtime_config = {}
        self.meteor_settings = {}
        for other in [
                getattr(settings, 'METEOR_SETTINGS', {}),
                loads(os.environ.get('METEOR_SETTINGS', '{}')),
                self.meteor_settings or {},
                kwargs.pop('meteor_settings', {}),
        ]:
            self.meteor_settings = dict_merge(self.meteor_settings, other)
        self.meteor_public_envs = set()
        self.meteor_public_envs.update(
            getattr(settings, 'METEOR_PUBLIC_ENVS', []),
            os.environ.get('METEOR_PUBLIC_ENVS', '').replace(',', ' ').split(),
            self.meteor_public_envs or [],
            kwargs.pop('meteor_public_envs', []),
        )
        public = self.meteor_settings.setdefault('public', {})
        for env_name in self.meteor_public_envs:
            try:
                public[env_name] = os.environ[env_name]
            except KeyError:
                pass  # environment variable not set
        # super(...).__init__ assigns kwargs to instance.
        super(MeteorView, self).__init__(**kwargs)

        # read and process /etc/mime.types
        mimetypes.init()

        self.url_map = {}

        # process `star_json`
        self.star_json = read_json(self.json_path)
        star_format = self.star_json['format']
        if star_format != 'site-archive-pre1':
            raise ValueError(
                'Unknown Meteor star format: %r' % star_format,
            )
        programs = {
            program['name']: program
            for program in self.star_json['programs']
        }

        # process `bundle/programs/server/program.json` from build dir
        server_json_path = os.path.join(
            os.path.dirname(self.json_path),
            os.path.dirname(programs['server']['path']),
            'program.json',
        )
        server_json = read_json(server_json_path)
        server_format = server_json['format']
        if server_format != 'javascript-image-pre1':
            raise ValueError(
                'Unknown Meteor server format: %r' % server_format,
            )
        self.server_load_map = {}
        for item in server_json['load']:
            item['path_full'] = os.path.join(
                os.path.dirname(server_json_path),
                item['path'],
            )
            self.server_load_map[item['path']] = item
            self.url_map[item['path']] = (
                item['path_full'], 'text/javascript'
            )
            try:
                item['source_map_full'] = os.path.join(
                    os.path.dirname(server_json_path),
                    item['sourceMap'],
                )
                self.url_map[item['sourceMap']] = (
                    item['source_map_full'], 'text/plain'
                )
            except KeyError:
                pass
        self.template_path = os.path.join(
            os.path.dirname(server_json_path),
            self.server_load_map[
                'packages/boilerplate-generator.js'
            ][
                'assets'
            ][
                'boilerplate_web.browser.html'
            ],
        )

        # process `bundle/programs/web.browser/program.json` from build dir
        web_browser_json_path = os.path.join(
            os.path.dirname(self.json_path),
            programs['web.browser']['path'],
        )
        web_browser_json = read_json(web_browser_json_path)
        web_browser_format = web_browser_json['format']
        if web_browser_format != 'web-program-pre1':
            raise ValueError(
                'Unknown Meteor web.browser format: %r' % (
                    web_browser_format,
                ),
            )
        self.client_map = {}
        self.internal_map = {}
        for item in web_browser_json['manifest']:
            item['path_full'] = os.path.join(
                os.path.dirname(web_browser_json_path),
                item['path'],
            )
            if item['where'] == 'client':
                if '?' in item['url']:
                    item['url'] = item['url'].split('?', 1)[0]
                if item['url'].startswith('/'):
                    item['url'] = item['url'][1:]
                self.client_map[item['url']] = item
                self.url_map[item['url']] = (
                    item['path_full'],
                    mimetypes.guess_type(
                        item['path_full'],
                    )[0] or 'application/octet-stream',
                )
            elif item['where'] == 'internal':
                self.internal_map[item['type']] = item

        config = {
            'css': [
                {'url': item['path']}
                for item in web_browser_json['manifest']
                if item['type'] == 'css' and item['where'] == 'client'
            ],
            'js': [
                {'url': item['path']}
                for item in web_browser_json['manifest']
                if item['type'] == 'js' and item['where'] == 'client'
            ],
            'meteorRuntimeConfig': '"%s"' % (
                dumps(self.runtime_config)
            ),
            'rootUrlPathPrefix': self.root_url_path_prefix,
            'bundledJsCssPrefix': self.bundled_js_css_prefix,
            'inlineScriptsAllowed': False,
            'inline': None,
            'head': read(
                self.internal_map.get('head', {}).get('path_full', None),
                default=u'',
            ),
            'body': read(
                self.internal_map.get('body', {}).get('path_full', None),
                default=u'',
            ),
        }
        tmpl_raw = read(self.template_path, encoding='utf8')
        compiler = pybars.Compiler()
        tmpl = compiler.compile(tmpl_raw)
        self.html = '<!DOCTYPE html>\n%s' % tmpl(config)
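
Here os.path.dirname(self.json_path) anchors everything: each manifest entry stores a path relative to the manifest file, so resolving it is a dirname plus a join. A minimal sketch with hypothetical data shaped like the star manifest above:

import os

def resolve_entries(manifest_path, entries):
    # Each entry's 'path' is relative to the manifest's own directory.
    base = os.path.dirname(manifest_path)
    return {e['path']: os.path.join(base, e['path']) for e in entries}

# resolve_entries('bundle/star.json', [{'path': 'programs/server/boot.js'}])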

Example 20

Project: pywcsgrid2
Source File: plot_directive_v3.py
View license
def run(arguments, content, options, state_machine, state, lineno):
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    context = 'context' in options

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with open(source_file_name, 'r') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir) # no problem here for me, but just use built-ins

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code, source_file_name, build_dir, output_base,
                                 context, function_name, config)
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in list(options.items())
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        if j == 0:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with open(target_name, 'w') as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
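
One last detail shared by all the plot_directive examples: link fragments are derived with relpath/dirname and then forced to forward slashes, because RST and HTML links never use os.path.sep. A short sketch of that normalisation (the function name is ours):

import os

def link_fragment(conf_dir, rst_dir, source_rel_dir):
    # Mirrors dest_dir_link above: a relative path rendered as a URL piece.
    rel = os.path.relpath(conf_dir, rst_dir)
    return os.path.join(rel, source_rel_dir).replace(os.path.sep, '/')

# On Windows this turns 'gallery\\plots' into 'gallery/plots'.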

Example 21

Project: pywcsgrid2
Source File: plot_directive_v3.py
View license
def run(arguments, content, options, state_machine, state, lineno):
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    context = 'context' in options

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with open(source_file_name, 'r') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # ensure the destination directory exists

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code, source_file_name, build_dir, output_base,
                                 context, function_name, config)
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in list(options.items())
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        if j == 0:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with open(target_name, 'w') as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
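
Example 21 derives every output location from os.path.dirname of the reST source file. A minimal sketch of that derivation with hypothetical paths (confdir and rst_file are stand-ins, not values from the project):

import os.path

confdir = '/docs/source'                      # hypothetical Sphinx confdir
rst_file = '/docs/source/users/plotting.rst'  # hypothetical reST source
rst_dir = os.path.dirname(rst_file)           # /docs/source/users
source_rel_dir = os.path.dirname(os.path.relpath(rst_file, confdir))  # users
dest_dir_link = os.path.join(os.path.relpath(confdir, rst_dir),
                             source_rel_dir).replace(os.path.sep, '/')
print(dest_dir_link)                          # ../users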

Example 22

Project: calmap
Source File: plot_directive.py
View license
def run(arguments, content, options, state_machine, state, lineno):
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    keep_context = 'context' in options
    context_opt = None if not keep_context else options['context']

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # ensure the destination directory exists

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    try:
        build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    except ValueError:
        # on Windows, relpath raises ValueError when path and start are on
        # different mounts/drives
        build_dir_link = build_dir
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code,
                                 source_file_name,
                                 build_dir,
                                 output_base,
                                 keep_context,
                                 function_name,
                                 config,
                                 context_reset=context_opt == 'reset',
                                 close_figs=context_opt == 'close-figs')
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and not nofigs,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
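
The try/except around relpath is what distinguishes this calmap variant from Example 21: on Windows, os.path.relpath raises ValueError when the path and the start directory are on different drives. A hedged sketch of the same guard (safe_relative_link is a hypothetical helper name):

import os.path

def safe_relative_link(build_dir, rst_dir):
    # Fall back to the absolute path when no relative path exists,
    # e.g. C:\build vs D:\docs on Windows.
    try:
        return os.path.relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    except ValueError:
        return build_dir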

Example 23

View license
def run(arguments, content, options, state_machine, state, lineno):
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    context = 'context' in options
    context_reset = context and options['context'] == 'reset'

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # ensure the destination directory exists

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code, source_file_name, build_dir, output_base,
                                 context, function_name, config,
                                 context_reset=context_reset)
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and not nofigs,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
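
Like the earlier variants, this one places the temporary output tree as a sibling of Sphinx's doctree directory by applying os.path.dirname once. A minimal sketch with hypothetical paths (doctreedir and source_rel_dir are stand-ins):

import os.path

doctreedir = '/project/_build/doctrees'   # hypothetical Sphinx doctree dir
source_rel_dir = 'users'                  # hypothetical source fragment
build_dir = os.path.normpath(os.path.join(os.path.dirname(doctreedir),
                                          'plot_directive', source_rel_dir))
print(build_dir)                          # /project/_build/plot_directive/users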

Example 24

Project: Arelle
Source File: CntlrGenVersReports.py
View license
    def runFromExcel(self, options):
        #testGenFileName = options.excelfilename
        testGenFileName = r"C:\Users\Herm Fischer\Documents\mvsl\projects\XBRL.org\conformance-versioning\trunk\versioningReport\conf\creation-index.xls"
        testGenDir = os.path.dirname(testGenFileName)
        schemaDir = os.path.dirname(testGenDir) + os.sep + "schema"
        timeNow = XmlUtil.dateunionValue(datetime.datetime.now())
        if options.testfiledate:
            today = options.testfiledate
        else:
            today = XmlUtil.dateunionValue(datetime.date.today())
        startedAt = time.time()
        
        LogHandler(self) # start logger

        self.logMessages = []
        logMessagesFile = testGenDir + os.sep + 'log-generation-messages.txt'

        modelTestcases = ModelXbrl.create(self.modelManager, url=testGenFileName, isEntry=True)
        testcaseIndexBook = xlrd.open_workbook(testGenFileName)
        testcaseIndexSheet = testcaseIndexBook.sheet_by_index(0)
        self.addToLog(_("[info] xls loaded in {0:.2} secs at {1}").format(time.time() - startedAt, timeNow))
        
        # start index file
        indexFiles = [testGenDir + os.sep + 'creation-testcases-index.xml',
                      testGenDir + os.sep + 'consumption-testcases-index.xml']
        indexDocs = []
        testcasesElements = []
        for purpose in ("Creation","Consumption"):
            file = io.StringIO(
                #'<?xml version="1.0" encoding="UTF-8"?>'
                '<!-- XBRL Versioning 1.0 {0} Tests -->'
                '<!-- Copyright 2011 XBRL International.  All Rights Reserved. -->'
                '<?xml-stylesheet type="text/xsl" href="infrastructure/testcases-index.xsl"?>'
                '<testcases name="XBRL Versioning 1.0 {0} Tests" '
                ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
                ' xsi:noNamespaceSchemaLocation="infrastructure/testcases-index.xsd">'
                '</testcases>'.format(purpose, today)
                )
            doc = etree.parse(file)
            file.close()
            indexDocs.append(doc)
            testcasesElements.append(doc.getroot())
        priorTestcasesDir = None
        testcaseFiles = None
        testcaseDocs = None
        for iRow in range(1, testcaseIndexSheet.nrows):
            try:
                row = testcaseIndexSheet.row(iRow)
                if (row[0].ctype == xlrd.XL_CELL_EMPTY or # must have directory
                    row[1].ctype == xlrd.XL_CELL_EMPTY or # from
                    row[2].ctype == xlrd.XL_CELL_EMPTY):  # to
                    continue
                testDir = row[0].value
                uriFrom = row[1].value
                uriTo = row[2].value
                overrideReport = row[3].value
                description = row[4].value
                if description is None or len(description) == 0:
                    continue # test not ready to run
                assignment = row[5].value
                expectedEvents = row[6].value # comma space separated if multiple
                note = row[7].value
                useCase = row[8].value
                base = os.path.join(os.path.dirname(testGenFileName),testDir) + os.sep
                self.addToLog(_("[info] testcase uriFrom {0}").format(uriFrom))
                if uriFrom and uriTo and assignment.lower() not in ("n.a.", "error") and expectedEvents != "N.A.":
                    modelDTSfrom = modelDTSto = None
                    for URIs, msg, isFrom in ((uriFrom, _("loading from DTS"), True), (uriTo, _("loading to DTS"), False)):
                        if ',' not in URIs:
                            modelDTS = ModelXbrl.load(self.modelManager, URIs, msg, base=base)
                        else:
                            modelDTS = ModelXbrl.create(self.modelManager, 
                                         ModelDocument.Type.DTSENTRIES,
                                         self.webCache.normalizeUrl(URIs.replace(", ","_") + ".dts", 
                                                                    base),
                                         isEntry=True)
                            DTSdoc = modelDTS.modelDocument
                            DTSdoc.inDTS = True
                            for uri in URIs.split(','):
                                doc = ModelDocument.load(modelDTS, uri.strip(), base=base)
                                if doc is not None:
                                    DTSdoc.referencesDocument[doc] = "import"  #fake import
                                    doc.inDTS = True
                        if isFrom: modelDTSfrom = modelDTS
                        else: modelDTSto = modelDTS
                    if modelDTSfrom is not None and modelDTSto is not None:
                        # generate differences report
                        reportUri = uriFrom.partition(',')[0]  # first file
                        reportDir = os.path.dirname(reportUri)
                        if reportDir: reportDir += os.sep
                        reportName = os.path.basename(reportUri).replace("from.xsd","report.xml")
                        reportFile = reportDir + "out" + os.sep + reportName
                        #reportFile = reportDir + "report" + os.sep + reportName
                        reportFullPath = self.webCache.normalizeUrl(
                                            reportFile, 
                                            base)
                        testcasesDir = os.path.dirname(os.path.dirname(reportFullPath))
                        if testcasesDir != priorTestcasesDir:
                            # close prior report
                            if priorTestcasesDir:
                                for i,testcaseFile in enumerate(testcaseFiles):
                                    with open(testcaseFile, "w", encoding="utf-8") as fh:
                                        XmlUtil.writexml(fh, testcaseDocs[i], encoding="utf-8")
                            testcaseName = os.path.basename(testcasesDir)
                            testcaseFiles = [testcasesDir + os.sep + testcaseName + "-creation-testcase.xml",
                                             testcasesDir + os.sep + testcaseName + "-consumption-testcase.xml"]
                            for i,testcaseFile in enumerate(testcaseFiles):
                                etree.SubElement(testcasesElements[i], "testcase", 
                                                 attrib={"uri": 
                                                         testcaseFile[len(testGenDir)+1:].replace("\\","/")} )
                            
                            # start testcase file
                            testcaseDocs = []
                            testcaseElements = []
                            testcaseNumber = testcaseName[0:4]
                            if testcaseNumber.isnumeric():
                                testcaseNumberElement = "<number>{0}</number>".format(testcaseNumber)
                                testcaseName = testcaseName[5:]
                            else:
                                testcaseNumberElement = ""
                            testDirSegments = testDir.split('/')
                            if len(testDirSegments) >= 2 and '-' in testDirSegments[1]:
                                testedModule = testDirSegments[1][testDirSegments[1].index('-') + 1:]
                            else:
                                testedModule = ''
                            for purpose in ("Creation","Consumption"):
                                file = io.StringIO(
                                    #'<?xml version="1.0" encoding="UTF-8"?>'
                                    '<!-- Copyright 2011 XBRL International.  All Rights Reserved. -->'
                                    '<?xml-stylesheet type="text/xsl" href="../../../infrastructure/test.xsl"?>'
                                    '<testcase '
                                    ' xmlns="http://xbrl.org/2008/conformance"'
                                    ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
                                    ' xsi:schemaLocation="http://xbrl.org/2008/conformance ../../../infrastructure/test.xsd">'
                                    '<creator>'
                                    '<name>Roland Hommes</name>'
                                    '<email>[email protected]</email>'
                                    '</creator>'
                                    '{0}'
                                    '<name>{1}</name>'
                                    # '<description>{0}</description>'
                                    '<reference>'
                                    '{2}'
                                    '{3}'
                                    '</reference>'
                                    '</testcase>'.format(testcaseNumberElement,
                                                         testcaseName,
                                                         '<name>{0}</name>'.format(testedModule) if testedModule else '',
                                                         '<id>{0}</id>'.format(useCase) if useCase else '')
                                    
                                    )
                                doc = etree.parse(file)
                                file.close()
                                testcaseDocs.append(doc)
                                testcaseElements.append(doc.getroot())
                            priorTestcasesDir = testcasesDir
                            variationSeq = 1
                        try:
                            os.makedirs(os.path.dirname(reportFullPath))
                        except WindowsError:
                            pass # dir already exists
                        modelVersReport = ModelVersReport.ModelVersReport(modelTestcases)
                        modelVersReport.diffDTSes(reportFullPath,modelDTSfrom, modelDTSto, 
                                                  assignment=assignment,
                                                  schemaDir=schemaDir)
                        
                        # check for expected elements
                        if expectedEvents:
                            for expectedEvent in expectedEvents.split(","):
                                if expectedEvent not in ("No change", "N.A."):
                                    prefix, sep, localName = expectedEvent.partition(':')
                                    if sep and len(modelVersReport.xmlDocument.findall(
                                                        '//{{{0}}}{1}'.format(
                                                            XbrlConst.verPrefixNS.get(prefix),
                                                            localName))) == 0:
                                        modelTestcases.warning("warning",
                                            "Generated test case %(reportName)s missing expected event %(event)s",
                                            reportName=reportName, 
                                            event=expectedEvent)
                        
                        modelVersReport.close()
                        uriFromParts = uriFrom.split('_')
                        if len(uriFromParts) >= 2:
                            variationId = uriFromParts[1]
                        else:
                            variationId = "_{0:02n}".format(variationSeq)
                        for i,testcaseElt in enumerate(testcaseElements):
                            variationElement = etree.SubElement(testcaseElt, "{http://xbrl.org/2008/conformance}variation", 
                                                                attrib={"id": variationId})
                            nameElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}description")
                            nameElement.text = description
                            ''' (removed per RH 2011/10/04
                            if note:
                                paramElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}description")
                                paramElement.text = "Note: " + note
                            if useCase:
                                paramElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}reference")
                                paramElement.set("specification", "versioning-requirements")
                                paramElement.set("useCase", useCase)
                            '''
                            dataElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}data")
                            if i == 0:  # result is report
                                if expectedEvents:
                                    paramElement = etree.SubElement(dataElement, "{http://xbrl.org/2008/conformance}parameter",
                                                                    attrib={"name":"expectedEvent",
                                                                            "value":expectedEvents.replace(',',' ')},
                                                                    nsmap={"conf":"http://xbrl.org/2008/conformance",
                                                                           None:""})
                                if assignment:
                                    paramElement = etree.SubElement(dataElement, "{http://xbrl.org/2008/conformance}parameter",
                                                                    attrib={"name":"assignment",
                                                                            "value":assignment},
                                                                    nsmap={"conf":"http://xbrl.org/2008/conformance",
                                                                           None:""})
                            for schemaURIs, dtsAttr in ((uriFrom,"from"), (uriTo,"to")):
                                for schemaURI in schemaURIs.split(","): 
                                    schemaElement = etree.SubElement(dataElement, "{http://xbrl.org/2008/conformance}schema")
                                    schemaElement.set("dts",dtsAttr)
                                    if i == 0:
                                        schemaElement.set("readMeFirst","true")
                                    schemaElement.text=os.path.basename(schemaURI.strip())
                            resultElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}result")
                            reportElement = etree.SubElement(resultElement if i == 0 else dataElement, 
                                             "{http://xbrl.org/2008/conformance}versioningReport")
                            if i == 1:
                                reportElement.set("readMeFirst","true")
                            reportElement.text = "report/" + reportName
                        variationSeq += 1
            except Exception as err:
                modelTestcases.error("exception",
                    _("Exception: %(error)s, Excel row: %(excelRow)s"),
                    error=err,
                    excelRow=iRow, 
                    exc_info=True)
        
        # add tests-error-code index files to consumption
        for testcaseFile in self.testcaseFiles(testGenDir + os.sep + "tests-error-code"):
            etree.SubElement(testcasesElements[1], "testcase", 
                             attrib={"uri": 
                             testcaseFile[len(testGenDir)+1:].replace("\\","/")} )

        with open(logMessagesFile, "w") as fh:
            fh.writelines(self.logMessages)

        if priorTestcasesDir:
            for i,testcaseFile in enumerate(testcaseFiles):
                with open(testcaseFile, "w", encoding="utf-8") as fh:
                    XmlUtil.writexml(fh, testcaseDocs[i], encoding="utf-8")
        for i,indexFile in enumerate(indexFiles):
            with open(indexFile, "w", encoding="utf-8") as fh:
                XmlUtil.writexml(fh, indexDocs[i], encoding="utf-8")
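
The Arelle example above chains os.path.dirname to climb the directory tree: testcasesDir sits two levels above the generated report. A minimal sketch with a hypothetical POSIX-style path (the project itself works with Windows paths):

import os.path

reportFullPath = '/conf/2000-basic/out/report.xml'  # hypothetical report path
out_dir = os.path.dirname(reportFullPath)           # /conf/2000-basic/out
testcasesDir = os.path.dirname(out_dir)             # /conf/2000-basic
testcaseName = os.path.basename(testcasesDir)       # 2000-basic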

Example 27

Project: Arelle
Source File: CntlrGenVersReports.py
View license
    def runFromExcel(self, options):
        #testGenFileName = options.excelfilename
        testGenFileName = r"C:\Users\Herm Fischer\Documents\mvsl\projects\XBRL.org\conformance-versioning\trunk\versioningReport\conf\creation-index.xls"
        testGenDir = os.path.dirname(testGenFileName)
        schemaDir = os.path.dirname(testGenDir) + os.sep + "schema"
        timeNow = XmlUtil.dateunionValue(datetime.datetime.now())
        if options.testfiledate:
            today = options.testfiledate
        else:
            today = XmlUtil.dateunionValue(datetime.date.today())
        startedAt = time.time()
        
        LogHandler(self) # start logger

        self.logMessages = []
        logMessagesFile = testGenDir + os.sep + 'log-generation-messages.txt'

        modelTestcases = ModelXbrl.create(self.modelManager, url=testGenFileName, isEntry=True)
        testcaseIndexBook = xlrd.open_workbook(testGenFileName)
        testcaseIndexSheet = testcaseIndexBook.sheet_by_index(0)
        self.addToLog(_("[info] xls loaded in {0:.2} secs at {1}").format(time.time() - startedAt, timeNow))
        
        # start index file
        indexFiles = [testGenDir + os.sep + 'creation-testcases-index.xml',
                      testGenDir + os.sep + 'consumption-testcases-index.xml']
        indexDocs = []
        testcasesElements = []
        for purpose in ("Creation","Consumption"):
            file = io.StringIO(
                #'<?xml version="1.0" encoding="UTF-8"?>'
                '<!-- XBRL Versioning 1.0 {0} Tests -->'
                '<!-- Copyright 2011 XBRL International.  All Rights Reserved. -->'
                '<?xml-stylesheet type="text/xsl" href="infrastructure/testcases-index.xsl"?>'
                '<testcases name="XBRL Versioning 1.0 {0} Tests" '
                ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
                ' xsi:noNamespaceSchemaLocation="infrastructure/testcases-index.xsd">'
                '</testcases>'.format(purpose, today)
                )
            doc = etree.parse(file)
            file.close()
            indexDocs.append(doc)
            testcasesElements.append(doc.getroot())
        priorTestcasesDir = None
        testcaseFiles = None
        testcaseDocs = None
        for iRow in range(1, testcaseIndexSheet.nrows):
            try:
                row = testcaseIndexSheet.row(iRow)
                if (row[0].ctype == xlrd.XL_CELL_EMPTY or # must have directory
                    row[1].ctype == xlrd.XL_CELL_EMPTY or # from
                    row[2].ctype == xlrd.XL_CELL_EMPTY):  # to
                    continue
                testDir = row[0].value
                uriFrom = row[1].value
                uriTo = row[2].value
                overrideReport = row[3].value
                description = row[4].value
                if description is None or len(description) == 0:
                    continue # test not ready to run
                assignment = row[5].value
                expectedEvents = row[6].value # comma space separated if multiple
                note = row[7].value
                useCase = row[8].value
                base = os.path.join(os.path.dirname(testGenFileName),testDir) + os.sep
                self.addToLog(_("[info] testcase uriFrom {0}").format(uriFrom))
                if uriFrom and uriTo and assignment.lower() not in ("n.a.", "error") and expectedEvents != "N.A.":
                    modelDTSfrom = modelDTSto = None
                    for URIs, msg, isFrom in ((uriFrom, _("loading from DTS"), True), (uriTo, _("loading to DTS"), False)):
                        if ',' not in URIs:
                            modelDTS = ModelXbrl.load(self.modelManager, URIs, msg, base=base)
                        else:
                            modelDTS = ModelXbrl.create(self.modelManager, 
                                         ModelDocument.Type.DTSENTRIES,
                                         self.webCache.normalizeUrl(URIs.replace(", ","_") + ".dts", 
                                                                    base),
                                         isEntry=True)
                            DTSdoc = modelDTS.modelDocument
                            DTSdoc.inDTS = True
                            for uri in URIs.split(','):
                                doc = ModelDocument.load(modelDTS, uri.strip(), base=base)
                                if doc is not None:
                                    DTSdoc.referencesDocument[doc] = "import"  #fake import
                                    doc.inDTS = True
                        if isFrom: modelDTSfrom = modelDTS
                        else: modelDTSto = modelDTS
                    if modelDTSfrom is not None and modelDTSto is not None:
                        # generate differences report
                        reportUri = uriFrom.partition(',')[0]  # first file
                        reportDir = os.path.dirname(reportUri)
                        if reportDir: reportDir += os.sep
                        reportName = os.path.basename(reportUri).replace("from.xsd","report.xml")
                        reportFile = reportDir + "out" + os.sep + reportName
                        #reportFile = reportDir + "report" + os.sep + reportName
                        reportFullPath = self.webCache.normalizeUrl(
                                            reportFile, 
                                            base)
                        testcasesDir = os.path.dirname(os.path.dirname(reportFullPath))
                        if testcasesDir != priorTestcasesDir:
                            # close prior report
                            if priorTestcasesDir:
                                for i,testcaseFile in enumerate(testcaseFiles):
                                    with open(testcaseFile, "w", encoding="utf-8") as fh:
                                        XmlUtil.writexml(fh, testcaseDocs[i], encoding="utf-8")
                            testcaseName = os.path.basename(testcasesDir)
                            testcaseFiles = [testcasesDir + os.sep + testcaseName + "-creation-testcase.xml",
                                             testcasesDir + os.sep + testcaseName + "-consumption-testcase.xml"]
                            for i,testcaseFile in enumerate(testcaseFiles):
                                etree.SubElement(testcasesElements[i], "testcase", 
                                                 attrib={"uri": 
                                                         testcaseFile[len(testGenDir)+1:].replace("\\","/")} )
                            
                            # start testcase file
                            testcaseDocs = []
                            testcaseElements = []
                            testcaseNumber = testcaseName[0:4]
                            if testcaseNumber.isnumeric():
                                testcaseNumberElement = "<number>{0}</number>".format(testcaseNumber)
                                testcaseName = testcaseName[5:]
                            else:
                                testcaseNumberElement = ""
                            testDirSegments = testDir.split('/')
                            if len(testDirSegments) >= 2 and '-' in testDirSegments[1]:
                                testedModule = testDirSegments[1][testDirSegments[1].index('-') + 1:]
                            else:
                                testedModule = ''
                            for purpose in ("Creation","Consumption"):
                                file = io.StringIO(
                                    #'<?xml version="1.0" encoding="UTF-8"?>'
                                    '<!-- Copyright 2011 XBRL International.  All Rights Reserved. -->'
                                    '<?xml-stylesheet type="text/xsl" href="../../../infrastructure/test.xsl"?>'
                                    '<testcase '
                                    ' xmlns="http://xbrl.org/2008/conformance"'
                                    ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
                                    ' xsi:schemaLocation="http://xbrl.org/2008/conformance ../../../infrastructure/test.xsd">'
                                    '<creator>'
                                    '<name>Roland Hommes</name>'
                                    '<email>[email protected]</email>'
                                    '</creator>'
                                    '{0}'
                                    '<name>{1}</name>'
                                    # '<description>{0}</description>'
                                    '<reference>'
                                    '{2}'
                                    '{3}'
                                    '</reference>'
                                    '</testcase>'.format(testcaseNumberElement,
                                                         testcaseName,
                                                         '<name>{0}</name>'.format(testedModule) if testedModule else '',
                                                         '<id>{0}</id>'.format(useCase) if useCase else '')
                                    
                                    )
                                doc = etree.parse(file)
                                file.close()
                                testcaseDocs.append(doc)
                                testcaseElements.append(doc.getroot())
                            priorTestcasesDir = testcasesDir
                            variationSeq = 1
                        try:
                            os.makedirs(os.path.dirname(reportFullPath))
                        except OSError:  # dir may already exist (WindowsError only exists on Windows)
                            pass
                        modelVersReport = ModelVersReport.ModelVersReport(modelTestcases)
                        modelVersReport.diffDTSes(reportFullPath,modelDTSfrom, modelDTSto, 
                                                  assignment=assignment,
                                                  schemaDir=schemaDir)
                        
                        # check for expected elements
                        if expectedEvents:
                            for expectedEvent in expectedEvents.split(","):
                                if expectedEvent not in ("No change", "N.A."):
                                    prefix, sep, localName = expectedEvent.partition(':')
                                    if sep and len(modelVersReport.xmlDocument.findall(
                                                        '//{{{0}}}{1}'.format(
                                                            XbrlConst.verPrefixNS.get(prefix),
                                                            localName))) == 0:
                                        modelTestcases.warning("warning",
                                            "Generated test case %(reportName)s missing expected event %(event)s",
                                            reportName=reportName, 
                                            event=expectedEvent)
                        
                        modelVersReport.close()
                        uriFromParts = uriFrom.split('_')
                        if len(uriFromParts) >= 2:
                            variationId = uriFromParts[1]
                        else:
                            variationId = "_{0:02n}".format(variationSeq)
                        for i,testcaseElt in enumerate(testcaseElements):
                            variationElement = etree.SubElement(testcaseElt, "{http://xbrl.org/2008/conformance}variation", 
                                                                attrib={"id": variationId})
                            nameElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}description")
                            nameElement.text = description
                            ''' (removed per RH 2011/10/04
                            if note:
                                paramElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}description")
                                paramElement.text = "Note: " + note
                            if useCase:
                                paramElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}reference")
                                paramElement.set("specification", "versioning-requirements")
                                paramElement.set("useCase", useCase)
                            '''
                            dataElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}data")
                            if i == 0:  # result is report
                                if expectedEvents:
                                    paramElement = etree.SubElement(dataElement, "{http://xbrl.org/2008/conformance}parameter",
                                                                    attrib={"name":"expectedEvent",
                                                                            "value":expectedEvents.replace(',',' ')},
                                                                    nsmap={"conf":"http://xbrl.org/2008/conformance",
                                                                           None:""})
                                if assignment:
                                    paramElement = etree.SubElement(dataElement, "{http://xbrl.org/2008/conformance}parameter",
                                                                    attrib={"name":"assignment",
                                                                            "value":assignment},
                                                                    nsmap={"conf":"http://xbrl.org/2008/conformance",
                                                                           None:""})
                            for schemaURIs, dtsAttr in ((uriFrom,"from"), (uriTo,"to")):
                                for schemaURI in schemaURIs.split(","): 
                                    schemaElement = etree.SubElement(dataElement, "{http://xbrl.org/2008/conformance}schema")
                                    schemaElement.set("dts",dtsAttr)
                                    if i == 0:
                                        schemaElement.set("readMeFirst","true")
                                    schemaElement.text=os.path.basename(schemaURI.strip())
                            resultElement = etree.SubElement(variationElement, "{http://xbrl.org/2008/conformance}result")
                            reportElement = etree.SubElement(resultElement if i == 0 else dataElement, 
                                             "{http://xbrl.org/2008/conformance}versioningReport")
                            if i == 1:
                                reportElement.set("readMeFirst","true")
                            reportElement.text = "report/" + reportName
                        variationSeq += 1
            except Exception as err:
                modelTestcases.error("exception",
                    _("Exception: %(error)s, Excel row: %(excelRow)s"),
                    error=err,
                    excelRow=iRow, 
                    exc_info=True)
        
        # add tests-error-code index files to consumption
        for testcaseFile in self.testcaseFiles(testGenDir + os.sep + "tests-error-code"):
            etree.SubElement(testcasesElements[1], "testcase", 
                             attrib={"uri": 
                             testcaseFile[len(testGenDir)+1:].replace("\\","/")} )

        with open(logMessagesFile, "w") as fh:
            fh.writelines(self.logMessages)

        if priorTestcasesDir:
            for i,testcaseFile in enumerate(testcaseFiles):
                with open(testcaseFile, "w", encoding="utf-8") as fh:
                    XmlUtil.writexml(fh, testcaseDocs[i], encoding="utf-8")
        for i,indexFile in enumerate(indexFiles):
            with open(indexFile, "w", encoding="utf-8") as fh:
                XmlUtil.writexml(fh, indexDocs[i], encoding="utf-8")
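
The generator above uses os.path.dirname in two ways: once to split the source
URI into a directory for the generated report, and once nested
(os.path.dirname(os.path.dirname(...))) to climb from the report file back up
to the directory that holds the testcase documents. A minimal, self-contained
sketch of the same pattern, with hypothetical paths:

import os

# hypothetical layout: <testcase dir>/from.xsd, reports under <testcase dir>/out/
report_uri = "/data/tests/0005-concept-rename/from.xsd"
report_dir = os.path.dirname(report_uri)        # /data/tests/0005-concept-rename
report_name = os.path.basename(report_uri).replace("from.xsd", "report.xml")
report_path = os.path.join(report_dir, "out", report_name)

# two dirname calls strip the file name and the "out" directory
testcase_dir = os.path.dirname(os.path.dirname(report_path))
print(testcase_dir)  # /data/tests/0005-concept-rename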

Example 28

View license
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration, dot_join
    from numpy.distutils.system_info import get_info

    config = Configuration('core', parent_package, top_path)
    local_dir = config.local_path
    codegen_dir = join(local_dir, 'code_generators')

    if is_released(config):
        warnings.simplefilter('error', MismatchCAPIWarning)

    # Check whether we have a mismatch between the set C API VERSION and the
    # actual C API VERSION
    check_api_version(C_API_VERSION, codegen_dir)

    generate_umath_py = join(codegen_dir, 'generate_umath.py')
    n = dot_join(config.name, 'generate_umath')
    generate_umath = imp.load_module('_'.join(n.split('.')),
                                     open(generate_umath_py, 'U'), generate_umath_py,
                                     ('.py', 'U', 1))

    header_dir = 'include/numpy'  # this is relative to config.path_in_package

    cocache = CallOnceOnly()

    def generate_config_h(ext, build_dir):
        target = join(build_dir, header_dir, 'config.h')
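        # create the parent directory of the generated header if missing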
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)

        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)

            # Check math library and C99 math funcs availability
            mathlibs = check_mathlib(config_cmd)
            moredefs.append(('MATHLIB', ','.join(mathlibs)))

            check_math_capabilities(config_cmd, moredefs, mathlibs)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])

            # Signal check
            if is_npy_no_signal():
                moredefs.append('__NPY_PRIVATE_NO_SIGNAL')

            # Windows checks
            if sys.platform == 'win32' or os.name == 'nt':
                win32_checks(moredefs)

            # C99 restrict keyword
            moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))

            # Inline check
            inline = config_cmd.check_inline()

            # Check whether we need our own wide character support
            if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
                PYTHON_HAS_UNICODE_WIDE = True
            else:
                PYTHON_HAS_UNICODE_WIDE = False

            if ENABLE_SEPARATE_COMPILATION:
                moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Get long double representation
            if sys.platform != 'darwin':
                rep = check_long_double_representation(config_cmd)
                if rep in ['INTEL_EXTENDED_12_BYTES_LE',
                           'INTEL_EXTENDED_16_BYTES_LE',
                           'MOTOROLA_EXTENDED_12_BYTES_BE',
                           'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
                           'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
                           'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
                    moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
                else:
                    raise ValueError("Unrecognized long double format: %s" % rep)

            # Py3K check
            if sys.version_info[0] == 3:
                moredefs.append(('NPY_PY3K', 1))

            # Generate the config.h file from moredefs
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # define inline to our keyword, or nothing
            target_f.write('#ifndef __cplusplus\n')
            if inline == 'inline':
                target_f.write('/* #undef inline */\n')
            else:
                target_f.write('#define inline %s\n' % inline)
            target_f.write('#endif\n')

            # add the guard to make sure config.h is never included directly,
            # but always through npy_config.h
            target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")

            target_f.close()
            print('File:', target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        else:
            mathlibs = []
            target_f = open(target)
            for line in target_f:
                s = '#define MATHLIB'
                if line.startswith(s):
                    value = line[len(s):].strip()
                    if value:
                        mathlibs.extend(value.split(','))
            target_f.close()

        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attribute (and none is
        # needed).
        if hasattr(ext, 'libraries'):
            ext.libraries.extend(mathlibs)

        incl_dir = os.path.dirname(target)
        if incl_dir not in config.numpy_include_dirs:
            config.numpy_include_dirs.append(incl_dir)

        return target

    def generate_numpyconfig_h(ext, build_dir):
        """Depends on config.h: generate_config_h has to be called before !"""
        # put private include directory in build_dir on search path
        # allows using code generation in headers headers
        config.add_include_dirs(join(build_dir, "src", "private"))

        target = join(build_dir, header_dir, '_numpyconfig.h')
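        # as in generate_config_h, create the parent directory before writing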
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)

            if is_npy_no_signal():
                moredefs.append(('NPY_NO_SIGNAL', 1))

            if is_npy_no_smp():
                moredefs.append(('NPY_NO_SMP', 1))
            else:
                moredefs.append(('NPY_NO_SMP', 0))

            mathlibs = check_mathlib(config_cmd)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])

            if ENABLE_SEPARATE_COMPILATION:
                moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Check whether we can use inttypes (C99) formats
            if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
                moredefs.append(('NPY_USE_C99_FORMATS', 1))

            # visibility check
            hidden_visibility = visibility_define(config_cmd)
            moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))

            # Add the C API/ABI versions
            moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
            moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))

            # Add moredefs to header
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # Define __STDC_FORMAT_MACROS
            target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
            target_f.close()

            # Dump the numpyconfig.h header to stdout
            print('File: %s' % target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        config.add_data_files((header_dir, target))
        return target

    def generate_api_func(module_name):
        def generate_api(ext, build_dir):
            script = join(codegen_dir, module_name + '.py')
            sys.path.insert(0, codegen_dir)
            try:
                m = __import__(module_name)
                log.info('executing %s', script)
                h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
            finally:
                del sys.path[0]
            config.add_data_files((header_dir, h_file),
                                  (header_dir, doc_file))
            return (h_file,)
        return generate_api

    generate_numpy_api = generate_api_func('generate_numpy_api')
    generate_ufunc_api = generate_api_func('generate_ufunc_api')

    config.add_include_dirs(join(local_dir, "src", "private"))
    config.add_include_dirs(join(local_dir, "src"))
    config.add_include_dirs(join(local_dir))

    config.add_data_files('include/numpy/*.h')
    config.add_include_dirs(join('src', 'npymath'))
    config.add_include_dirs(join('src', 'multiarray'))
    config.add_include_dirs(join('src', 'umath'))
    config.add_include_dirs(join('src', 'npysort'))

    config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
    config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
    config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
    config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])

    config.numpy_include_dirs.extend(config.paths('include'))

    deps = [join('src', 'npymath', '_signbit.c'),
            join('include', 'numpy', '*object.h'),
            join(codegen_dir, 'genapi.py'),
            ]

    #######################################################################
    #                            dummy module                             #
    #######################################################################

    # npymath needs the config.h and numpyconfig.h files to be generated, but
    # build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because clib are generated before extensions, we have to
    # explicitly add an extension which has generate_config_h and
    # generate_numpyconfig_h as sources *before* adding npymath.

    config.add_extension('_dummy',
                         sources=[join('src', 'dummymodule.c'),
                                  generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api]
                         )

    #######################################################################
    #                          npymath library                            #
    #######################################################################

    subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])

    def get_mathlib_info(*args):
        # Another ugly hack: the mathlib info is known once build_src is run,
        # but we cannot use add_installed_pkg_config here either, so we only
        # update the substitution dictionary during npymath build
        config_cmd = config.get_config_cmd()

        # Check that the toolchain works, to fail early if it doesn't
        # (avoid late errors with MATHLIB which are confusing if the
        # compiler does not work).
        st = config_cmd.try_link('int main(void) { return 0;}')
        if not st:
            raise RuntimeError("Broken toolchain: cannot link a simple C program")
        mlibs = check_mathlib(config_cmd)

        posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
        msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
        subst_dict["posix_mathlib"] = posix_mlib
        subst_dict["msvc_mathlib"] = msvc_mlib

    npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
                       join('src', 'npymath', 'ieee754.c.src'),
                       join('src', 'npymath', 'npy_math_complex.c.src'),
                       join('src', 'npymath', 'halffloat.c')
                       ]
    config.add_installed_library('npymath',
            sources=npymath_sources + [get_mathlib_info],
            install_dir='lib')
    config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
            subst_dict)
    config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
            subst_dict)

    #######################################################################
    #                         npysort library                             #
    #######################################################################

    # This library is created for the build but it is not installed
    npysort_sources = [join('src', 'npysort', 'quicksort.c.src'),
                       join('src', 'npysort', 'mergesort.c.src'),
                       join('src', 'npysort', 'heapsort.c.src'),
                       join('src', 'private', 'npy_partition.h.src'),
                       join('src', 'npysort', 'selection.c.src'),
                       join('src', 'private', 'npy_binsearch.h.src'),
                       join('src', 'npysort', 'binsearch.c.src'),
                       ]
    config.add_library('npysort',
                       sources=npysort_sources,
                       include_dirs=[])

    #######################################################################
    #                        multiarray module                            #
    #######################################################################

    # Multiarray version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and as such not in the src
    # argument of build_ext command
    def generate_multiarray_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'multiarray')
        sources = [join(local_dir, subpath, 'scalartypes.c.src'),
                   join(local_dir, subpath, 'arraytypes.c.src'),
                   join(local_dir, subpath, 'nditer_templ.c.src'),
                   join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
                   join(local_dir, subpath, 'einsum.c.src'),
                   join(local_dir, 'src', 'private', 'templ_common.h.src')
                   ]

        # numpy.distutils generates .c files from .c.src in build-dir-specific
        # directories, so we have to add those directories to the include path
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    multiarray_deps = [
            join('src', 'multiarray', 'arrayobject.h'),
            join('src', 'multiarray', 'arraytypes.h'),
            join('src', 'multiarray', 'array_assign.h'),
            join('src', 'multiarray', 'buffer.h'),
            join('src', 'multiarray', 'calculation.h'),
            join('src', 'multiarray', 'cblasfuncs.h'),
            join('src', 'multiarray', 'common.h'),
            join('src', 'multiarray', 'convert_datatype.h'),
            join('src', 'multiarray', 'convert.h'),
            join('src', 'multiarray', 'conversion_utils.h'),
            join('src', 'multiarray', 'ctors.h'),
            join('src', 'multiarray', 'descriptor.h'),
            join('src', 'multiarray', 'getset.h'),
            join('src', 'multiarray', 'hashdescr.h'),
            join('src', 'multiarray', 'iterators.h'),
            join('src', 'multiarray', 'mapping.h'),
            join('src', 'multiarray', 'methods.h'),
            join('src', 'multiarray', 'multiarraymodule.h'),
            join('src', 'multiarray', 'nditer_impl.h'),
            join('src', 'multiarray', 'numpymemoryview.h'),
            join('src', 'multiarray', 'number.h'),
            join('src', 'multiarray', 'numpyos.h'),
            join('src', 'multiarray', 'refcount.h'),
            join('src', 'multiarray', 'scalartypes.h'),
            join('src', 'multiarray', 'sequence.h'),
            join('src', 'multiarray', 'shape.h'),
            join('src', 'multiarray', 'ucsnarrow.h'),
            join('src', 'multiarray', 'usertypes.h'),
            join('src', 'multiarray', 'vdot.h'),
            join('src', 'private', 'npy_config.h'),
            join('src', 'private', 'templ_common.h.src'),
            join('src', 'private', 'lowlevel_strided_loops.h'),
            join('include', 'numpy', 'arrayobject.h'),
            join('include', 'numpy', '_neighborhood_iterator_imp.h'),
            join('include', 'numpy', 'npy_endian.h'),
            join('include', 'numpy', 'arrayscalars.h'),
            join('include', 'numpy', 'noprefix.h'),
            join('include', 'numpy', 'npy_interrupt.h'),
            join('include', 'numpy', 'npy_3kcompat.h'),
            join('include', 'numpy', 'npy_math.h'),
            join('include', 'numpy', 'halffloat.h'),
            join('include', 'numpy', 'npy_common.h'),
            join('include', 'numpy', 'npy_os.h'),
            join('include', 'numpy', 'utils.h'),
            join('include', 'numpy', 'ndarrayobject.h'),
            join('include', 'numpy', 'npy_cpu.h'),
            join('include', 'numpy', 'numpyconfig.h'),
            join('include', 'numpy', 'ndarraytypes.h'),
            join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
            join('include', 'numpy', '_numpyconfig.h.in'),
            # add library sources as distutils does not consider library
            # dependencies
            ] + npysort_sources + npymath_sources

    multiarray_src = [
            join('src', 'multiarray', 'alloc.c'),
            join('src', 'multiarray', 'arrayobject.c'),
            join('src', 'multiarray', 'arraytypes.c.src'),
            join('src', 'multiarray', 'array_assign.c'),
            join('src', 'multiarray', 'array_assign_scalar.c'),
            join('src', 'multiarray', 'array_assign_array.c'),
            join('src', 'multiarray', 'buffer.c'),
            join('src', 'multiarray', 'calculation.c'),
            join('src', 'multiarray', 'compiled_base.c'),
            join('src', 'multiarray', 'common.c'),
            join('src', 'multiarray', 'convert.c'),
            join('src', 'multiarray', 'convert_datatype.c'),
            join('src', 'multiarray', 'conversion_utils.c'),
            join('src', 'multiarray', 'ctors.c'),
            join('src', 'multiarray', 'datetime.c'),
            join('src', 'multiarray', 'datetime_strings.c'),
            join('src', 'multiarray', 'datetime_busday.c'),
            join('src', 'multiarray', 'datetime_busdaycal.c'),
            join('src', 'multiarray', 'descriptor.c'),
            join('src', 'multiarray', 'dtype_transfer.c'),
            join('src', 'multiarray', 'einsum.c.src'),
            join('src', 'multiarray', 'flagsobject.c'),
            join('src', 'multiarray', 'getset.c'),
            join('src', 'multiarray', 'hashdescr.c'),
            join('src', 'multiarray', 'item_selection.c'),
            join('src', 'multiarray', 'iterators.c'),
            join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
            join('src', 'multiarray', 'mapping.c'),
            join('src', 'multiarray', 'methods.c'),
            join('src', 'multiarray', 'multiarraymodule.c'),
            join('src', 'multiarray', 'nditer_templ.c.src'),
            join('src', 'multiarray', 'nditer_api.c'),
            join('src', 'multiarray', 'nditer_constr.c'),
            join('src', 'multiarray', 'nditer_pywrap.c'),
            join('src', 'multiarray', 'number.c'),
            join('src', 'multiarray', 'numpymemoryview.c'),
            join('src', 'multiarray', 'numpyos.c'),
            join('src', 'multiarray', 'refcount.c'),
            join('src', 'multiarray', 'sequence.c'),
            join('src', 'multiarray', 'shape.c'),
            join('src', 'multiarray', 'scalarapi.c'),
            join('src', 'multiarray', 'scalartypes.c.src'),
            join('src', 'multiarray', 'usertypes.c'),
            join('src', 'multiarray', 'ucsnarrow.c'),
            join('src', 'multiarray', 'vdot.c'),
            join('src', 'private', 'templ_common.h.src'),
            ]

    blas_info = get_info('blas_opt', 0)
    if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
        extra_info = blas_info
        # These files are also in MANIFEST.in so that they are always in
        # the source distribution independently of HAVE_CBLAS.
        multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
                               join('src', 'multiarray', 'python_xerbla.c'),
                               ])
        if uses_accelerate_framework(blas_info):
            multiarray_src.extend(get_sgemv_fix())
    else:
        extra_info = {}

    if not ENABLE_SEPARATE_COMPILATION:
        multiarray_deps.extend(multiarray_src)
        multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
        multiarray_src.append(generate_multiarray_templated_sources)

    config.add_extension('multiarray',
                         sources=multiarray_src +
                                 [generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api,
                                  join(codegen_dir, 'generate_numpy_api.py'),
                                  join('*.py')],
                         depends=deps + multiarray_deps,
                         libraries=['npymath', 'npysort'],
                         extra_info=extra_info)

    #######################################################################
    #                           umath module                              #
    #######################################################################

    # umath version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and as such not in the src
    # argument of build_ext command
    def generate_umath_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'umath')
        sources = [
            join(local_dir, subpath, 'loops.h.src'),
            join(local_dir, subpath, 'loops.c.src'),
            join(local_dir, subpath, 'scalarmath.c.src'),
            join(local_dir, subpath, 'simd.inc.src')]

        # numpy.distutils generates .c files from .c.src in build-dir-specific
        # directories, so we have to add those directories to the include path
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    def generate_umath_c(ext, build_dir):
        target = join(build_dir, header_dir, '__umath_generated.c')
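        # ensure the parent directory for the generated C source exists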
        dir = os.path.dirname(target)
        if not os.path.exists(dir):
            os.makedirs(dir)
        script = generate_umath_py
        if newer(script, target):
            f = open(target, 'w')
            f.write(generate_umath.make_code(generate_umath.defdict,
                                             generate_umath.__file__))
            f.close()
        return []

    umath_src = [
            join('src', 'umath', 'umathmodule.c'),
            join('src', 'umath', 'reduction.c'),
            join('src', 'umath', 'funcs.inc.src'),
            join('src', 'umath', 'simd.inc.src'),
            join('src', 'umath', 'loops.h.src'),
            join('src', 'umath', 'loops.c.src'),
            join('src', 'umath', 'ufunc_object.c'),
            join('src', 'umath', 'scalarmath.c.src'),
            join('src', 'umath', 'ufunc_type_resolution.c')]

    umath_deps = [
            generate_umath_py,
            join('src', 'multiarray', 'common.h'),
            join('src', 'private', 'templ_common.h.src'),
            join('src', 'umath', 'simd.inc.src'),
            join(codegen_dir, 'generate_ufunc_api.py'),
            join('src', 'private', 'ufunc_override.h')] + npymath_sources

    if not ENABLE_SEPARATE_COMPILATION:
        umath_deps.extend(umath_src)
        umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
        umath_src.append(generate_umath_templated_sources)
        umath_src.append(join('src', 'umath', 'funcs.inc.src'))
        umath_src.append(join('src', 'umath', 'simd.inc.src'))

    config.add_extension('umath',
                         sources=umath_src +
                                 [generate_config_h,
                                 generate_numpyconfig_h,
                                 generate_umath_c,
                                 generate_ufunc_api],
                         depends=deps + umath_deps,
                         libraries=['npymath'],
                         )

    #######################################################################
    #                        umath_tests module                           #
    #######################################################################

    config.add_extension('umath_tests',
                    sources=[join('src', 'umath', 'umath_tests.c.src')])

    #######################################################################
    #                   custom rational dtype module                      #
    #######################################################################

    config.add_extension('test_rational',
                    sources=[join('src', 'umath', 'test_rational.c.src')])

    #######################################################################
    #                        struct_ufunc_test module                     #
    #######################################################################

    config.add_extension('struct_ufunc_test',
                    sources=[join('src', 'umath', 'struct_ufunc_test.c.src')])

    #######################################################################
    #                     multiarray_tests module                         #
    #######################################################################

    config.add_extension('multiarray_tests',
                    sources=[join('src', 'multiarray', 'multiarray_tests.c.src')])

    #######################################################################
    #                        operand_flag_tests module                    #
    #######################################################################

    config.add_extension('operand_flag_tests',
                    sources=[join('src', 'umath', 'operand_flag_tests.c.src')])

    config.add_data_dir('tests')
    config.add_data_dir('tests/data')

    config.make_svn_version_py()

    return config
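
All three generated-file helpers above (generate_config_h,
generate_numpyconfig_h and generate_umath_c) repeat the same idiom: take
os.path.dirname of the target file and create that directory before opening
the file for writing. The explicit os.path.exists check is needed on the older
Python versions this setup script supports; on Python 3.2+ the same idiom
collapses into os.makedirs(..., exist_ok=True). A small sketch, with a
hypothetical target path:

import os

def ensure_parent_dir(target):
    d = os.path.dirname(target)
    if d:  # dirname is "" for a bare file name in the current directory
        os.makedirs(d, exist_ok=True)  # no-op if the directory already exists
    return target

path = ensure_parent_dir("build/include/numpy/config.h")  # hypothetical path
with open(path, "w") as fh:
    fh.write("#define EXAMPLE 1\n")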

Example 29

View license
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration, dot_join
    from numpy.distutils.system_info import get_info

    config = Configuration('core', parent_package, top_path)
    local_dir = config.local_path
    codegen_dir = join(local_dir, 'code_generators')

    if is_released(config):
        warnings.simplefilter('error', MismatchCAPIWarning)

    # Check whether we have a mismatch between the set C API VERSION and the
    # actual C API VERSION
    check_api_version(C_API_VERSION, codegen_dir)

    generate_umath_py = join(codegen_dir, 'generate_umath.py')
    n = dot_join(config.name, 'generate_umath')
    generate_umath = imp.load_module('_'.join(n.split('.')),
                                     open(generate_umath_py, 'U'), generate_umath_py,
                                     ('.py', 'U', 1))

    header_dir = 'include/numpy'  # this is relative to config.path_in_package

    cocache = CallOnceOnly()

    def generate_config_h(ext, build_dir):
        target = join(build_dir, header_dir, 'config.h')
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)

        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)

            # Check math library and C99 math funcs availability
            mathlibs = check_mathlib(config_cmd)
            moredefs.append(('MATHLIB', ','.join(mathlibs)))

            check_math_capabilities(config_cmd, moredefs, mathlibs)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])

            # Signal check
            if is_npy_no_signal():
                moredefs.append('__NPY_PRIVATE_NO_SIGNAL')

            # Windows checks
            if sys.platform == 'win32' or os.name == 'nt':
                win32_checks(moredefs)

            # C99 restrict keyword
            moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))

            # Inline check
            inline = config_cmd.check_inline()

            # Check whether we need our own wide character support
            if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
                PYTHON_HAS_UNICODE_WIDE = True
            else:
                PYTHON_HAS_UNICODE_WIDE = False

            if ENABLE_SEPARATE_COMPILATION:
                moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Get long double representation
            if sys.platform != 'darwin':
                rep = check_long_double_representation(config_cmd)
                if rep in ['INTEL_EXTENDED_12_BYTES_LE',
                           'INTEL_EXTENDED_16_BYTES_LE',
                           'MOTOROLA_EXTENDED_12_BYTES_BE',
                           'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
                           'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
                           'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
                    moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
                else:
                    raise ValueError("Unrecognized long double format: %s" % rep)

            # Py3K check
            if sys.version_info[0] == 3:
                moredefs.append(('NPY_PY3K', 1))

            # Generate the config.h file from moredefs
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # define inline to our keyword, or nothing
            target_f.write('#ifndef __cplusplus\n')
            if inline == 'inline':
                target_f.write('/* #undef inline */\n')
            else:
                target_f.write('#define inline %s\n' % inline)
            target_f.write('#endif\n')

            # add the guard to make sure config.h is never included directly,
            # but always through npy_config.h
            target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")

            target_f.close()
            print('File:', target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        else:
            mathlibs = []
            target_f = open(target)
            for line in target_f:
                s = '#define MATHLIB'
                if line.startswith(s):
                    value = line[len(s):].strip()
                    if value:
                        mathlibs.extend(value.split(','))
            target_f.close()

        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attributes (and none is
        # needed).
        if hasattr(ext, 'libraries'):
            ext.libraries.extend(mathlibs)

        incl_dir = os.path.dirname(target)
        if incl_dir not in config.numpy_include_dirs:
            config.numpy_include_dirs.append(incl_dir)

        return target

    def generate_numpyconfig_h(ext, build_dir):
        """Depends on config.h: generate_config_h has to be called before !"""
        # put private include directory in build_dir on search path
        # allows using code generation in headers headers
        config.add_include_dirs(join(build_dir, "src", "private"))

        target = join(build_dir, header_dir, '_numpyconfig.h')
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)

            if is_npy_no_signal():
                moredefs.append(('NPY_NO_SIGNAL', 1))

            if is_npy_no_smp():
                moredefs.append(('NPY_NO_SMP', 1))
            else:
                moredefs.append(('NPY_NO_SMP', 0))

            mathlibs = check_mathlib(config_cmd)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])

            if ENABLE_SEPARATE_COMPILATION:
                moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Check wether we can use inttypes (C99) formats
            if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
                moredefs.append(('NPY_USE_C99_FORMATS', 1))

            # visibility check
            hidden_visibility = visibility_define(config_cmd)
            moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))

            # Add the C API/ABI versions
            moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
            moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))

            # Add moredefs to header
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # Define __STDC_FORMAT_MACROS
            target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
            target_f.close()

            # Dump the numpyconfig.h header to stdout
            print('File: %s' % target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        config.add_data_files((header_dir, target))
        return target

    def generate_api_func(module_name):
        def generate_api(ext, build_dir):
            script = join(codegen_dir, module_name + '.py')
            sys.path.insert(0, codegen_dir)
            try:
                m = __import__(module_name)
                log.info('executing %s', script)
                h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
            finally:
                del sys.path[0]
            config.add_data_files((header_dir, h_file),
                                  (header_dir, doc_file))
            return (h_file,)
        return generate_api

    generate_numpy_api = generate_api_func('generate_numpy_api')
    generate_ufunc_api = generate_api_func('generate_ufunc_api')

    config.add_include_dirs(join(local_dir, "src", "private"))
    config.add_include_dirs(join(local_dir, "src"))
    config.add_include_dirs(join(local_dir))

    config.add_data_files('include/numpy/*.h')
    config.add_include_dirs(join('src', 'npymath'))
    config.add_include_dirs(join('src', 'multiarray'))
    config.add_include_dirs(join('src', 'umath'))
    config.add_include_dirs(join('src', 'npysort'))

    config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
    config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
    config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
    config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])

    config.numpy_include_dirs.extend(config.paths('include'))

    deps = [join('src', 'npymath', '_signbit.c'),
            join('include', 'numpy', '*object.h'),
            join(codegen_dir, 'genapi.py'),
            ]

    #######################################################################
    #                            dummy module                             #
    #######################################################################

    # npymath needs the config.h and numpyconfig.h files to be generated, but
    # build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because clib are generated before extensions, we have to
    # explicitly add an extension which has generate_config_h and
    # generate_numpyconfig_h as sources *before* adding npymath.

    config.add_extension('_dummy',
                         sources=[join('src', 'dummymodule.c'),
                                  generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api]
                         )

    #######################################################################
    #                          npymath library                            #
    #######################################################################

    subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])

    def get_mathlib_info(*args):
        # Another ugly hack: the mathlib info is known once build_src is run,
        # but we cannot use add_installed_pkg_config here either, so we only
        # update the substition dictionary during npymath build
        config_cmd = config.get_config_cmd()

        # Check that the toolchain works, to fail early if it doesn't
        # (avoid late errors with MATHLIB which are confusing if the
        # compiler does not work).
        st = config_cmd.try_link('int main(void) { return 0;}')
        if not st:
            raise RuntimeError("Broken toolchain: cannot link a simple C program")
        mlibs = check_mathlib(config_cmd)

        posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
        msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
        subst_dict["posix_mathlib"] = posix_mlib
        subst_dict["msvc_mathlib"] = msvc_mlib

    npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
                       join('src', 'npymath', 'ieee754.c.src'),
                       join('src', 'npymath', 'npy_math_complex.c.src'),
                       join('src', 'npymath', 'halffloat.c')
                       ]
    config.add_installed_library('npymath',
            sources=npymath_sources + [get_mathlib_info],
            install_dir='lib')
    config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
            subst_dict)
    config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
            subst_dict)

    #######################################################################
    #                         npysort library                             #
    #######################################################################

    # This library is created for the build but it is not installed
    npysort_sources = [join('src', 'npysort', 'quicksort.c.src'),
                       join('src', 'npysort', 'mergesort.c.src'),
                       join('src', 'npysort', 'heapsort.c.src'),
                       join('src', 'private', 'npy_partition.h.src'),
                       join('src', 'npysort', 'selection.c.src'),
                       join('src', 'private', 'npy_binsearch.h.src'),
                       join('src', 'npysort', 'binsearch.c.src'),
                       ]
    config.add_library('npysort',
                       sources=npysort_sources,
                       include_dirs=[])

    #######################################################################
    #                        multiarray module                            #
    #######################################################################

    # Multiarray version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and as such not in the src
    # argument of build_ext command
    def generate_multiarray_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'multiarray')
        sources = [join(local_dir, subpath, 'scalartypes.c.src'),
                   join(local_dir, subpath, 'arraytypes.c.src'),
                   join(local_dir, subpath, 'nditer_templ.c.src'),
                   join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
                   join(local_dir, subpath, 'einsum.c.src'),
                   join(local_dir, 'src', 'private', 'templ_common.h.src')
                   ]

        # numpy.distutils generate .c from .c.src in weird directories, we have
        # to add them there as they depend on the build_dir
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    multiarray_deps = [
            join('src', 'multiarray', 'arrayobject.h'),
            join('src', 'multiarray', 'arraytypes.h'),
            join('src', 'multiarray', 'array_assign.h'),
            join('src', 'multiarray', 'buffer.h'),
            join('src', 'multiarray', 'calculation.h'),
            join('src', 'multiarray', 'cblasfuncs.h'),
            join('src', 'multiarray', 'common.h'),
            join('src', 'multiarray', 'convert_datatype.h'),
            join('src', 'multiarray', 'convert.h'),
            join('src', 'multiarray', 'conversion_utils.h'),
            join('src', 'multiarray', 'ctors.h'),
            join('src', 'multiarray', 'descriptor.h'),
            join('src', 'multiarray', 'getset.h'),
            join('src', 'multiarray', 'hashdescr.h'),
            join('src', 'multiarray', 'iterators.h'),
            join('src', 'multiarray', 'mapping.h'),
            join('src', 'multiarray', 'methods.h'),
            join('src', 'multiarray', 'multiarraymodule.h'),
            join('src', 'multiarray', 'nditer_impl.h'),
            join('src', 'multiarray', 'numpymemoryview.h'),
            join('src', 'multiarray', 'number.h'),
            join('src', 'multiarray', 'numpyos.h'),
            join('src', 'multiarray', 'refcount.h'),
            join('src', 'multiarray', 'scalartypes.h'),
            join('src', 'multiarray', 'sequence.h'),
            join('src', 'multiarray', 'shape.h'),
            join('src', 'multiarray', 'ucsnarrow.h'),
            join('src', 'multiarray', 'usertypes.h'),
            join('src', 'multiarray', 'vdot.h'),
            join('src', 'private', 'npy_config.h'),
            join('src', 'private', 'templ_common.h.src'),
            join('src', 'private', 'lowlevel_strided_loops.h'),
            join('include', 'numpy', 'arrayobject.h'),
            join('include', 'numpy', '_neighborhood_iterator_imp.h'),
            join('include', 'numpy', 'npy_endian.h'),
            join('include', 'numpy', 'arrayscalars.h'),
            join('include', 'numpy', 'noprefix.h'),
            join('include', 'numpy', 'npy_interrupt.h'),
            join('include', 'numpy', 'npy_3kcompat.h'),
            join('include', 'numpy', 'npy_math.h'),
            join('include', 'numpy', 'halffloat.h'),
            join('include', 'numpy', 'npy_common.h'),
            join('include', 'numpy', 'npy_os.h'),
            join('include', 'numpy', 'utils.h'),
            join('include', 'numpy', 'ndarrayobject.h'),
            join('include', 'numpy', 'npy_cpu.h'),
            join('include', 'numpy', 'numpyconfig.h'),
            join('include', 'numpy', 'ndarraytypes.h'),
            join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
            join('include', 'numpy', '_numpyconfig.h.in'),
            # add library sources as distuils does not consider libraries
            # dependencies
            ] + npysort_sources + npymath_sources

    multiarray_src = [
            join('src', 'multiarray', 'alloc.c'),
            join('src', 'multiarray', 'arrayobject.c'),
            join('src', 'multiarray', 'arraytypes.c.src'),
            join('src', 'multiarray', 'array_assign.c'),
            join('src', 'multiarray', 'array_assign_scalar.c'),
            join('src', 'multiarray', 'array_assign_array.c'),
            join('src', 'multiarray', 'buffer.c'),
            join('src', 'multiarray', 'calculation.c'),
            join('src', 'multiarray', 'compiled_base.c'),
            join('src', 'multiarray', 'common.c'),
            join('src', 'multiarray', 'convert.c'),
            join('src', 'multiarray', 'convert_datatype.c'),
            join('src', 'multiarray', 'conversion_utils.c'),
            join('src', 'multiarray', 'ctors.c'),
            join('src', 'multiarray', 'datetime.c'),
            join('src', 'multiarray', 'datetime_strings.c'),
            join('src', 'multiarray', 'datetime_busday.c'),
            join('src', 'multiarray', 'datetime_busdaycal.c'),
            join('src', 'multiarray', 'descriptor.c'),
            join('src', 'multiarray', 'dtype_transfer.c'),
            join('src', 'multiarray', 'einsum.c.src'),
            join('src', 'multiarray', 'flagsobject.c'),
            join('src', 'multiarray', 'getset.c'),
            join('src', 'multiarray', 'hashdescr.c'),
            join('src', 'multiarray', 'item_selection.c'),
            join('src', 'multiarray', 'iterators.c'),
            join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
            join('src', 'multiarray', 'mapping.c'),
            join('src', 'multiarray', 'methods.c'),
            join('src', 'multiarray', 'multiarraymodule.c'),
            join('src', 'multiarray', 'nditer_templ.c.src'),
            join('src', 'multiarray', 'nditer_api.c'),
            join('src', 'multiarray', 'nditer_constr.c'),
            join('src', 'multiarray', 'nditer_pywrap.c'),
            join('src', 'multiarray', 'number.c'),
            join('src', 'multiarray', 'numpymemoryview.c'),
            join('src', 'multiarray', 'numpyos.c'),
            join('src', 'multiarray', 'refcount.c'),
            join('src', 'multiarray', 'sequence.c'),
            join('src', 'multiarray', 'shape.c'),
            join('src', 'multiarray', 'scalarapi.c'),
            join('src', 'multiarray', 'scalartypes.c.src'),
            join('src', 'multiarray', 'usertypes.c'),
            join('src', 'multiarray', 'ucsnarrow.c'),
            join('src', 'multiarray', 'vdot.c'),
            join('src', 'private', 'templ_common.h.src'),
            ]

    blas_info = get_info('blas_opt', 0)
    if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
        extra_info = blas_info
        # These files are also in MANIFEST.in so that they are always in
        # the source distribution independently of HAVE_CBLAS.
        multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
                               join('src', 'multiarray', 'python_xerbla.c'),
                               ])
        if uses_accelerate_framework(blas_info):
            multiarray_src.extend(get_sgemv_fix())
    else:
        extra_info = {}

    if not ENABLE_SEPARATE_COMPILATION:
        multiarray_deps.extend(multiarray_src)
        multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
        multiarray_src.append(generate_multiarray_templated_sources)

    config.add_extension('multiarray',
                         sources=multiarray_src +
                                 [generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api,
                                  join(codegen_dir, 'generate_numpy_api.py'),
                                  join('*.py')],
                         depends=deps + multiarray_deps,
                         libraries=['npymath', 'npysort'],
                         extra_info=extra_info)

    #######################################################################
    #                           umath module                              #
    #######################################################################

    # umath version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and is therefore not in the src
    # argument of the build_ext command
    def generate_umath_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'umath')
        sources = [
            join(local_dir, subpath, 'loops.h.src'),
            join(local_dir, subpath, 'loops.c.src'),
            join(local_dir, subpath, 'scalarmath.c.src'),
            join(local_dir, subpath, 'simd.inc.src')]

        # numpy.distutils generates .c from .c.src in weird directories; we
        # have to add them here as they depend on the build_dir
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    def generate_umath_c(ext, build_dir):
        target = join(build_dir, header_dir, '__umath_generated.c')
        dir = os.path.dirname(target)
        if not os.path.exists(dir):
            os.makedirs(dir)
        script = generate_umath_py
        if newer(script, target):
            f = open(target, 'w')
            f.write(generate_umath.make_code(generate_umath.defdict,
                                             generate_umath.__file__))
            f.close()
        return []

    umath_src = [
            join('src', 'umath', 'umathmodule.c'),
            join('src', 'umath', 'reduction.c'),
            join('src', 'umath', 'funcs.inc.src'),
            join('src', 'umath', 'simd.inc.src'),
            join('src', 'umath', 'loops.h.src'),
            join('src', 'umath', 'loops.c.src'),
            join('src', 'umath', 'ufunc_object.c'),
            join('src', 'umath', 'scalarmath.c.src'),
            join('src', 'umath', 'ufunc_type_resolution.c')]

    umath_deps = [
            generate_umath_py,
            join('src', 'multiarray', 'common.h'),
            join('src', 'private', 'templ_common.h.src'),
            join('src', 'umath', 'simd.inc.src'),
            join(codegen_dir, 'generate_ufunc_api.py'),
            join('src', 'private', 'ufunc_override.h')] + npymath_sources

    if not ENABLE_SEPARATE_COMPILATION:
        umath_deps.extend(umath_src)
        umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
        umath_src.append(generate_umath_templated_sources)
        umath_src.append(join('src', 'umath', 'funcs.inc.src'))
        umath_src.append(join('src', 'umath', 'simd.inc.src'))

    config.add_extension('umath',
                         sources=umath_src +
                                 [generate_config_h,
                                 generate_numpyconfig_h,
                                 generate_umath_c,
                                 generate_ufunc_api],
                         depends=deps + umath_deps,
                         libraries=['npymath'],
                         )

    #######################################################################
    #                        umath_tests module                           #
    #######################################################################

    config.add_extension('umath_tests',
                    sources=[join('src', 'umath', 'umath_tests.c.src')])

    #######################################################################
    #                   custom rational dtype module                      #
    #######################################################################

    config.add_extension('test_rational',
                    sources=[join('src', 'umath', 'test_rational.c.src')])

    #######################################################################
    #                        struct_ufunc_test module                     #
    #######################################################################

    config.add_extension('struct_ufunc_test',
                    sources=[join('src', 'umath', 'struct_ufunc_test.c.src')])

    #######################################################################
    #                     multiarray_tests module                         #
    #######################################################################

    config.add_extension('multiarray_tests',
                    sources=[join('src', 'multiarray', 'multiarray_tests.c.src')])

    #######################################################################
    #                        operand_flag_tests module                    #
    #######################################################################

    config.add_extension('operand_flag_tests',
                    sources=[join('src', 'umath', 'operand_flag_tests.c.src')])

    config.add_data_dir('tests')
    config.add_data_dir('tests/data')

    config.make_svn_version_py()

    return config
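
The generate_umath_c helper above is a compact illustration of this page's topic: it takes os.path.dirname of the target path and creates that directory before writing the generated file. A minimal standalone sketch of the same idiom (file names here are hypothetical):

import os

def write_generated(target, text):
    # create the parent directory of the target, as generate_umath_c does
    target_dir = os.path.dirname(target)
    if target_dir and not os.path.exists(target_dir):
        os.makedirs(target_dir)
    with open(target, 'w') as f:
        f.write(text)

# hypothetical target mirroring the __umath_generated.c step
write_generated(os.path.join('build', 'codegen', '__umath_generated.c'), '/* generated */\n')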

Example 30

Project: seaborn
Source File: plot_directive.py
View license
def run(arguments, content, options, state_machine, state, lineno):
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    keep_context = 'context' in options
    context_opt = None if not keep_context else options['context']

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir) # no problem here for me, but just use built-ins

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code,
                                 source_file_name,
                                 build_dir,
                                 output_base,
                                 keep_context,
                                 function_name,
                                 config,
                                 context_reset=context_opt == 'reset',
                                 close_figs=context_opt == 'close-figs')
        errors = []
    except PlotError as err:
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and not nofigs,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
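
A note on the two dirname calls in this directive: os.path.dirname(rst_file) gives the directory of the document being processed, while os.path.dirname applied to a path made relative to the Sphinx confdir yields the directory fragment that is mirrored under the build tree. A small sketch of that second pattern, with hypothetical POSIX paths:

import os
from os.path import relpath

conf_dir = '/project/doc'                        # hypothetical Sphinx confdir
rst_file = '/project/doc/gallery/intro.rst'      # hypothetical source document

rst_dir = os.path.dirname(rst_file)              # '/project/doc/gallery'

# directory fragment relative to confdir, as in run() above
source_rel_dir = os.path.dirname(relpath(rst_file, conf_dir))   # 'gallery'

# mirror the fragment under the build tree
build_dir = os.path.normpath(
    os.path.join('/project/build', 'plot_directive', source_rel_dir))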

Example 32

Project: dipy
Source File: gqsampling_stats.py
View license
def test_gqiodf():

    #read bvals,gradients and data
    bvals=np.load(opj(os.path.dirname(__file__), \
                          'data','small_64D.bvals.npy'))
    gradients=np.load(opj(os.path.dirname(__file__), \
                              'data','small_64D.gradients.npy'))    
    img =ni.load(os.path.join(os.path.dirname(__file__),\
                                  'data','small_64D.nii'))
    data=img.get_data()    

    #print(bvals.shape)
    #print(gradients.shape)
    #print(data.shape)


    t1=time.clock()
    
    gqs = gq.GeneralizedQSampling(data,bvals,gradients)
    ten = dt.Tensor(data,bvals,gradients,thresh=50)

    
    fa=ten.fa()

    x,y,z,a,b=ten.evecs.shape
    evecs=ten.evecs
    xyz=x*y*z
    evecs = evecs.reshape(xyz,3,3)
    #vs = np.sign(evecs[:,2,:])
    #print vs.shape
    #print np.hstack((vs,vs,vs)).reshape(1000,3,3).shape
    #evecs = np.hstack((vs,vs,vs)).reshape(1000,3,3)
    #print evecs.shape
    evals=ten.evals
    evals = evals.reshape(xyz,3)
    #print evals.shape

    

    t2=time.clock()
    #print('GQS in %d' %(t2-t1))
        
    eds=np.load(opj(os.path.dirname(__file__),\
                        '..','matrices',\
                        'evenly_distributed_sphere_362.npz'))

    
    odf_vertices=eds['vertices']
    odf_faces=eds['faces']

    #Yeh et al., IEEE TMI, 2010
    #calculate the odf using GQI

    scaling=np.sqrt(bvals*0.01506) # 0.01506 = 6*D where D is the free
    #water diffusion coefficient
    #l_values = sqrt(6 D tau); D is the free water diffusion
    #coefficient and tau is included in the b-value

    tmp=np.tile(scaling,(3,1))
    b_vector=gradients.T*tmp
    Lambda = 1.2 # smoothing parameter - diffusion sampling length
    
    q2odf_params=np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi)
    #implements equation no. 9 from Yeh et al.

    S=data.copy()

    x,y,z,g=S.shape
    S=S.reshape(x*y*z,g)
    QA = np.zeros((x*y*z,5))
    IN = np.zeros((x*y*z,5))

    fwd = 0
    
    #Calculate Quantitative Anisotropy and find the peaks and the indices
    #for every voxel

    summary = {}

    summary['vertices'] = odf_vertices
    v = odf_vertices.shape[0]
    summary['faces'] = odf_faces
    f = odf_faces.shape[0]

    '''
    If e = number_of_edges
    the Euler formula says f-e+v = 2 for a mesh on a sphere
    Here, assuming we have a healthy triangulation
    every face is a triangle, all 3 of whose edges should belong to
    exactly two faces, so 2*e = 3*f
    to avoid division we test whether 2*f - 3*f + 2*v == 4
    or equivalently 2*v - f == 4
    '''

    assert_equal(2*v-f, 4,'Direct Euler test fails')
    assert_true(meshes.euler_characteristic_check(odf_vertices, odf_faces,chi=2),'euler_characteristic_check fails')
    
    coarse = meshes.coarseness(odf_faces)
    print 'coarseness: ', coarse

    for (i,s) in enumerate(S):

        #print 'Volume %d' % i

        istr = str(i)

        summary[istr] = {}

        odf = Q2odf(s,q2odf_params)
        peaks,inds=rp.peak_finding(odf,odf_faces)
        fwd=max(np.max(odf),fwd)
        peaks = peaks - np.min(odf)
        l=min(len(peaks),5)
        QA[i][:l] = peaks[:l]
        IN[i][:l] = inds[:l]

        summary[istr]['odf'] = odf
        summary[istr]['peaks'] = peaks
        summary[istr]['inds'] = inds
        summary[istr]['evecs'] = evecs[i,:,:]
        summary[istr]['evals'] = evals[i,:]
   
    QA/=fwd
    QA=QA.reshape(x,y,z,5)    
    IN=IN.reshape(x,y,z,5)
    
    #print('Old %d secs' %(time.clock() - t2))
    # assert_equal((gqs.QA-QA).max(),0.,'Frank QA different than our QA')

    # assert_equal((gqs.QA.shape),QA.shape, 'Frank QA shape is different')
       
    # assert_equal((gqs.QA-QA).max(), 0.)

    #import dipy.core.track_propagation as tp

    #tp.FACT_Delta(QA,IN)

    #return tp.FACT_Delta(QA,IN,seeds_no=10000).tracks

    peaks_1 = [i for i in range(1000) if len(summary[str(i)]['inds'])==1]
    peaks_2 = [i for i in range(1000) if len(summary[str(i)]['inds'])==2]
    peaks_3 = [i for i in range(1000) if len(summary[str(i)]['inds'])==3]

    # correct numbers of voxels with respectively 1,2,3 ODF/QA peaks
    assert_array_equal((len(peaks_1),len(peaks_2),len(peaks_3)), (790,196,14),
                       'error in numbers of QA/ODF peaks')

    # correct indices of odf directions for voxels 0,10,44
    # with respectively 1,2,3 ODF/QA peaks
    assert_array_equal(summary['0']['inds'],[116],
                       'wrong peak indices for voxel 0')
    assert_array_equal(summary['10']['inds'],[105, 78],
                       'wrong peak indices for voxel 10')
    assert_array_equal(summary['44']['inds'],[95, 84, 108],
                       'wrong peak indices for voxel 44')

    assert_equal(np.argmax(summary['0']['odf']), 116)
    assert_equal(np.argmax(summary['10']['odf']), 105)
    assert_equal(np.argmax(summary['44']['odf']), 95)

    pole_1 = summary['vertices'][116]
    #print 'pole_1', pole_1
    pole_2 = summary['vertices'][105]
    #print 'pole_2', pole_2
    pole_3 = summary['vertices'][95]
    #print 'pole_3', pole_3

    vertices = summary['vertices']

    width = 0.02#0.3 #0.05
    
    '''
    print 'pole_1 equator contains:', len([i for i,v in enumerate(vertices) if np.abs(np.dot(v,pole_1)) < width])
    print 'pole_2 equator contains:', len([i for i,v in enumerate(vertices) if np.abs(np.dot(v,pole_2)) < width])
    print 'pole_3 equator contains:', len([i for i,v in enumerate(vertices) if np.abs(np.dot(v,pole_3)) < width])
    '''
    
    #print 'pole_1 equator contains:', len(meshes.equatorial_vertices(vertices,pole_1,width))
    #print 'pole_2 equator contains:', len(meshes.equatorial_vertices(vertices,pole_2,width))
    #print 'pole_3 equator contains:', len(meshes.equatorial_vertices(vertices,pole_3,width))

    #print triple_odf_maxima(vertices,summary['0']['odf'],width)
    #print triple_odf_maxima(vertices,summary['10']['odf'],width)
    #print triple_odf_maxima(vertices,summary['44']['odf'],width)
    #print summary['0']['evals']
    '''

    pole=np.array([0,0,1])

    from dipy.viz import fos
    r=fos.ren()
    fos.add(r,fos.point(pole,fos.green))
    for i,ev in enumerate(vertices):        
        if np.abs(np.dot(ev,pole))<width:
            fos.add(r,fos.point(ev,fos.red))
    fos.show(r)

    '''

    triple = triple_odf_maxima(vertices, summary['10']['odf'], width)
    
    indmax1, odfmax1 = triple[0]
    indmax2, odfmax2 = triple[1]
    indmax3, odfmax3 = triple[2] 

    '''
    from dipy.viz import fos
    r=fos.ren()
    for v in vertices:
        fos.add(r,fos.point(v,fos.cyan))
    fos.add(r,fos.sphere(upper_hemi_map(vertices[indmax1]),radius=0.1,color=fos.red))
    #fos.add(r,fos.line(np.array([0,0,0]),vertices[indmax1]))
    fos.add(r,fos.sphere(upper_hemi_map(vertices[indmax2]),radius=0.05,color=fos.green))
    fos.add(r,fos.sphere(upper_hemi_map(vertices[indmax3]),radius=0.025,color=fos.blue))
    fos.add(r,fos.sphere(upper_hemi_map(summary['0']['evecs'][:,0]),radius=0.1,color=fos.red,opacity=0.7))
    fos.add(r,fos.sphere(upper_hemi_map(summary['0']['evecs'][:,1]),radius=0.05,color=fos.green,opacity=0.7))
    fos.add(r,fos.sphere(upper_hemi_map(summary['0']['evecs'][:,2]),radius=0.025,color=fos.blue,opacity=0.7))
    fos.add(r,fos.sphere([0,0,0],radius=0.01,color=fos.white))
    fos.show(r)
    '''
    
    mat = np.vstack([vertices[indmax1],vertices[indmax2],vertices[indmax3]])

    print np.dot(mat,np.transpose(mat))
    # this is to assess how orthogonal the triple is
    print np.dot(summary['0']['evecs'],np.transpose(mat))
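
This test shows the most common dirname pattern on this page: locating data files relative to the module itself via os.path.dirname(__file__), so the test works regardless of the current working directory. A minimal sketch, assuming a data subdirectory next to the module; wrapping __file__ in os.path.abspath is an addition here that guards against dirname returning an empty string when the module is run from its own directory:

import os

# resolve data files relative to this module, not the working directory
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
bvals_path = os.path.join(DATA_DIR, 'small_64D.bvals.npy')  # file name from the example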

Example 34

Project: AfterQC
Source File: preprocesser.py
View license
    def run(self):
        if self.options.debubble:
            self.loadBubbleCircles()

        #read1_file is required
        read1_file = fastq.Reader(self.options.read1_file)

        #no front trim if sequence is barcoded
        if self.options.barcode:
            self.options.trim_front = 0

        reporter = QCReporter()

        self.r1qc_prefilter = QualityControl(self.options.qc_sample, self.options.qc_kmer)
        self.r2qc_prefilter = QualityControl(self.options.qc_sample, self.options.qc_kmer)
        self.r1qc_prefilter.statFile(self.options.read1_file)
        if self.options.read2_file != None:
            self.r2qc_prefilter.statFile(self.options.read2_file)

        self.r1qc_postfilter = QualityControl(self.options.qc_sample, self.options.qc_kmer)
        self.r2qc_postfilter = QualityControl(self.options.qc_sample, self.options.qc_kmer)

        readLen = self.r1qc_prefilter.readLen
        overlap_histgram = [0 for x in xrange(readLen+1)]
        distance_histgram = [0 for x in xrange(readLen+1)]

        #auto detect trim front and trim tail
        if self.options.trim_front == -1 or self.options.trim_tail == -1:
            #auto trim for read1
            trimFront, trimTail = self.r1qc_prefilter.autoTrim()
            if self.options.trim_front == -1:
                self.options.trim_front = trimFront
            if self.options.trim_tail == -1:
                self.options.trim_tail = trimTail
            #auto trim for read2
            if self.options.read2_file != None:
                # check if we should apply the same trimming to read1/read2 to keep their lengths identical
                # this option is on by default because lots of dedup algorithms require this feature
                if self.options.trim_pair_same:
                    self.options.trim_front2 = self.options.trim_front
                    self.options.trim_tail2 = self.options.trim_tail
                else:
                    trimFront2, trimTail2 = self.r2qc_prefilter.autoTrim()
                    if self.options.trim_front2 == -1:
                        self.options.trim_front2 = trimFront2
                    if self.options.trim_tail2 == -1:
                        self.options.trim_tail2 = trimTail2
                
        print(self.options.read1_file + " options:")
        print(self.options)
        
        #if the good output folder is not specified, use the folder of the read1 file
        good_dir = self.options.good_output_folder
        if good_dir == None:
            good_dir = os.path.dirname(self.options.read1_file)

        #if the bad output folder is not specified, create a "bad" folder next to the good output folder
        bad_dir = self.options.bad_output_folder
        if bad_dir == None:
            bad_dir = os.path.join(os.path.dirname(os.path.dirname(good_dir+"/")), "bad")

        #if the overlap output folder is not specified, create an "overlap" folder next to the good output folder
        overlap_dir = self.options.overlap_output_folder
        if overlap_dir == None:
#            overlap_dir = os.path.dirname(self.options.read1_file)
            overlap_dir = os.path.join(os.path.dirname(os.path.dirname(good_dir+"/")), "overlap")

        #save QC results in a "QC" folder next to the good output folder
        qc_base_folder =  os.path.join(os.path.dirname(os.path.dirname(good_dir+"/")), "QC")
        if not os.path.exists(qc_base_folder):
            os.makedirs(qc_base_folder)
        qc_dir =  os.path.join(qc_base_folder, os.path.basename(self.options.read1_file))
        if not os.path.exists(qc_dir):
            os.makedirs(qc_dir)
            
        if not os.path.exists(good_dir):
            os.makedirs(good_dir)
            
        if not os.path.exists(bad_dir):
            os.makedirs(bad_dir)

        if self.options.store_overlap and self.options.read2_file != None and (not os.path.exists(overlap_dir)):
            os.makedirs(overlap_dir)
        
        good_read1_file = None
        bad_read1_file = None
        overlap_read1_file = None
        if not self.options.qc_only:
            good_read1_file = fastq.Writer(os.path.join(good_dir, getMainName(self.options.read1_file)+".good.fq"))
            bad_read1_file = fastq.Writer(os.path.join(bad_dir, getMainName(self.options.read1_file)+".bad.fq"))

            overlap_read1_file = None
            if self.options.store_overlap:
                overlap_read1_file = fastq.Writer(os.path.join(overlap_dir, getMainName(self.options.read1_file)+".overlap.fq"))
        
        #other files are optional
        read2_file = None
        good_read2_file = None
        bad_read2_file = None
        overlap_read2_file = None

        index1_file = None
        good_index1_file = None
        bad_index1_file = None
        overlap_index1_file = None

        index2_file = None
        good_index2_file = None
        bad_index2_file = None
        overlap_index2_file = None
        
        #if other files are specified, then read them
        if self.options.read2_file != None:
            read2_file = fastq.Reader(self.options.read2_file)
            if not self.options.qc_only:
                good_read2_file = fastq.Writer(os.path.join(good_dir, getMainName(self.options.read2_file)+".good.fq"))
                bad_read2_file = fastq.Writer(os.path.join(bad_dir, getMainName(self.options.read2_file)+".bad.fq"))
                if self.options.store_overlap and self.options.read2_file != None:
                    overlap_read2_file = fastq.Writer(os.path.join(overlap_dir, getMainName(self.options.read2_file)+".overlap.fq"))
        if self.options.index1_file != None:
            index1_file = fastq.Reader(self.options.index1_file)
            if not self.options.qc_only:
                good_index1_file = fastq.Writer(os.path.join(good_dir, getMainName(self.options.index1_file)+".good.fq"))
                bad_index1_file = fastq.Writer(os.path.join(bad_dir, getMainName(self.options.index1_file)+".bad.fq"))
                if self.options.store_overlap and self.options.read2_file != None:
                    overlap_index1_file = fastq.Writer(os.path.join(overlap_dir, getMainName(self.options.index1_file)+".overlap.fq"))
        if self.options.index2_file != None:
            index2_file = fastq.Reader(self.options.index2_file)
            if not self.options.qc_only:
                good_index2_file = fastq.Writer(os.path.join(good_dir, getMainName(self.options.index2_file)+".good.fq"))
                bad_index2_file = fastq.Writer(os.path.join(bad_dir, getMainName(self.options.index2_file)+".bad.fq"))
                if self.options.store_overlap and self.options.read2_file != None:
                    overlap_index2_file = fastq.Writer(os.path.join(overlap_dir, getMainName(self.options.index2_file)+".overlap.fq"))
            
        r1 = None
        r2 = None
        i1 = None
        i2 = None

        # stat numbers
        TOTAL_BASES = 0
        GOOD_BASES = 0
        TOTAL_READS = 0
        GOOD_READS = 0
        BAD_READS = 0
        BADBCD1 = 0
        BADBCD2 = 0
        BADTRIM1 = 0
        BADTRIM2 = 0
        BADBBL = 0
        BADLEN = 0
        BADPOL = 0
        BADLQC = 0
        BADNCT = 0
        BADINDEL = 0
        BADMISMATCH = 0
        READ_CORRECTED = 0
        BASE_CORRECTED = 0
        BASE_ZERO_QUAL_MASKED = 0
        OVERLAPPED = 0
        OVERLAP_LEN_SUM = 0
        OVERLAP_BASE_SUM = 0
        # error profiling by overlap analysis
        OVERLAP_BASE_ERR = 0
        OVERLAP_ERR_MATRIX = init_error_matrix()

        while True:
            r1 = read1_file.nextRead()
            if r1==None:
                break
            else:
                TOTAL_BASES += len(r1[1])
                
            if read2_file != None:
                r2 = read2_file.nextRead()
                if r2==None:
                    break
                else:
                    TOTAL_BASES += len(r2[1])
            if index1_file != None:
                i1 = index1_file.nextRead()
                if i1==None:
                    break
            if index2_file != None:
                i2 = index2_file.nextRead()
                if i2==None:
                    break

            TOTAL_READS += 1
                    
            #barcode processing
            if self.options.barcode:
                barcodeLen1 = barcodeprocesser.detectBarcode(r1[1], self.options.barcode_length, self.options.barcode_verify)
                if barcodeLen1 == 0:
                    self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADBCD1")
                    BADBCD1 += 1
                    continue
                else:
                    if r2 == None:
                        barcodeprocesser.moveBarcodeToName(r1, self.options.barcode_length, self.options.barcode_verify)
                    else:
                        barcodeLen2 = barcodeprocesser.detectBarcode(r2[1], self.options.barcode_length, self.options.barcode_verify)
                        if barcodeLen2 == 0:
                            self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADBCD2")
                            BADBCD2 += 1
                            continue
                        else:
                            barcodeprocesser.moveAndTrimPair(r1, r2, barcodeLen1, barcodeLen2, self.options.barcode_verify)
            
            #trim
            if self.options.trim_front > 0 or self.options.trim_tail > 0:
                r1 = trim(r1, self.options.trim_front, self.options.trim_tail)
                if len(r1[1]) < 5:
                    self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADTRIM1")
                    BADTRIM1 += 1
                    continue
                if r2 != None:
                    r2 = trim(r2, self.options.trim_front2, self.options.trim_tail2)
                    if len(r2[1]) < 5:
                        self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADTRIM2")
                        BADTRIM2 += 1
                        continue

            #filter debubble
            if self.options.debubble:
                if self.isInBubble(r1[0]):
                    self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADBBL")
                    BADBBL += 1
                    continue
            
            #filter sequence length
            if len(r1[1])<self.options.seq_len_req:
                self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADLEN")
                BADLEN += 1
                continue
                    
            #check polyX
            if self.options.poly_size_limit > 0:
                poly1 = hasPolyX(r1[1], self.options.poly_size_limit, self.options.allow_mismatch_in_poly)
                poly2 = None
                if r2!=None:
                    poly2 = hasPolyX(r2[1], self.options.poly_size_limit, self.options.allow_mismatch_in_poly)
                if poly1!=None or poly2!=None:
                    self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADPOL")
                    BADPOL += 1
                    continue
            
            #check low quality count
            if self.options.unqualified_base_limit > 0:
                lowQual1 = lowQualityNum(r1, self.options.qualified_quality_phred)
                lowQual2 = 0
                if r2!=None:
                    lowQual2 = lowQualityNum(r2, self.options.qualified_quality_phred)
                if lowQual1 > self.options.unqualified_base_limit or lowQual2 > self.options.unqualified_base_limit:
                    self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADLQC")
                    BADLQC += 1
                    continue
            
            #check N number
            if self.options.n_base_limit > 0:
                nNum1 = nNumber(r1)
                nNum2 = 0
                if r2!=None:
                    nNum2 = nNumber(r2)
                if nNum1 > self.options.n_base_limit or nNum2 > self.options.n_base_limit:
                    self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADNCT")
                    BADNCT += 1
                    continue

            #check overlap and do error correction
            if r2!=None and (not self.options.no_overlap):
                (offset, overlap_len, distance) = util.overlap(r1[1], r2[1])
                overlap_histgram[overlap_len] += 1
                # deal with the case where the insert DNA is shorter than the read length, which makes the offset negative
                if offset <0 and overlap_len > 30:
                    # shift the junk bases
                    r1[1] = r1[1][0:overlap_len]
                    r1[3] = r1[3][0:overlap_len]
                    r2[1] = r2[1][-offset:-offset+overlap_len]
                    r2[3] = r2[3][-offset:-offset+overlap_len]
                    # then calc overlap again
                    (offset, overlap_len, distance) = util.overlap(r1[1], r2[1])
                if overlap_len>30:
                    OVERLAPPED += 1
                    distance_histgram[distance] += 1
                    OVERLAP_LEN_SUM += overlap_len
                    # we assume the distance is caused by sequencing errors
                    OVERLAP_BASE_SUM += overlap_len * 2
                    OVERLAP_BASE_ERR += distance
                    corrected = 0
                    zero_qual_masked = 0
                    if distance>0:
                        #try to fix low quality base
                        hamming = util.hammingDistance(r1[1][len(r1[1]) - overlap_len:], util.reverseComplement(r2[1][len(r2[1]) - overlap_len:]))
                        if hamming != distance:
                            self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADINDEL")
                            BADINDEL += 1
                            continue
                        #print(r1[1][len(r1[1]) - overlap_len:])
                        #print(util.reverseComplement(r2[1][len(r2[1]) - overlap_len:]))
                        #print(r1[3][len(r1[1]) - overlap_len:])
                        #print(util.reverse(r2[3][len(r2[1]) - overlap_len:]))
                        err_mtx = init_error_matrix()
                        for o in xrange(overlap_len):
                            b1 = r1[1][len(r1[1]) - overlap_len + o]
                            b2 = util.complement(r2[1][-o-1])
                            q1 = r1[3][len(r1[3]) - overlap_len + o]
                            q2 = r2[3][-o-1]
                            if b1 != b2:
                                # print(TOTAL_READS, o, b1, b2, q1, q2)
                                this_is_corrected = False
                                if util.qualNum(q1) >= 30 and util.qualNum(q2) <= 14:
                                    if b1!='N' and b2!='N':
                                        err_mtx[util.complement(b1)][util.complement(b2)] += 1
                                    if not self.options.no_correction:
                                        r2[1] = util.changeString(r2[1], -o-1, util.complement(b1))
                                        r2[3] = util.changeString(r2[3], -o-1, q1)
                                        corrected += 1
                                        this_is_corrected = True
                                elif util.qualNum(q2) >= 30 and util.qualNum(q1) <= 14:
                                    if b1!='N' and b2!='N':
                                        err_mtx[b2][b1] += 1
                                    if not self.options.no_correction:
                                        r1[1]= util.changeString(r1[1], len(r1[1]) - overlap_len + o, b2)
                                        r1[3] = util.changeString(r1[3], len(r1[3]) - overlap_len + o, q2)
                                        corrected += 1
                                        this_is_corrected = True
                                if not this_is_corrected:
                                    # mask the bases with zero quality if the mismatch was not corrected
                                    zero_qual = '!'
                                    r2[3] = util.changeString(r2[3], -o-1, zero_qual)
                                    r1[3] = util.changeString(r1[3], len(r1[3]) - overlap_len + o, zero_qual)
                                    zero_qual_masked += 1

                                if corrected + zero_qual_masked>= distance:
                                    break
                        #print(r1[1][len(r1[1]) - overlap_len:])
                        #print(util.reverseComplement(r2[1][len(r2[1]) - overlap_len:]))
                        #print(r1[3][len(r1[1]) - overlap_len:])
                        #print(util.reverse(r2[3][len(r2[1]) - overlap_len:]))
                        if corrected + zero_qual_masked == distance:
                            merge_error_matrix(OVERLAP_ERR_MATRIX, err_mtx)
                            READ_CORRECTED += 1
                            BASE_CORRECTED += corrected
                            # multiply by 2 since we mask bases by pair
                            BASE_ZERO_QUAL_MASKED += zero_qual_masked * 2
                        else:
                            self.writeReads(r1, r2, i1, i2, bad_read1_file, bad_read2_file, bad_index1_file, bad_index2_file, "BADMISMATCH")
                            BADMISMATCH += 1
                            continue
                    if distance == 0 or distance == corrected:
                        if self.options.store_overlap:
                            self.writeReads(getOverlap(r1, overlap_len), getOverlap(r2, overlap_len), i1, i2, overlap_read1_file, overlap_read2_file, overlap_index1_file, overlap_index2_file, None)

            #write to good       
            self.writeReads(r1, r2, i1, i2, good_read1_file, good_read2_file, good_index1_file, good_index2_file, None)
            GOOD_BASES += len(r1[1])
            if r2 != None:
                GOOD_BASES += len(r2[1])
            if self.options.qc_sample <=0 or TOTAL_READS < self.options.qc_sample:
                self.r1qc_postfilter.statRead(r1)
                if r2 != None:
                    self.r2qc_postfilter.statRead(r2)

            GOOD_READS += 1
            if self.options.qc_only and TOTAL_READS >= self.options.qc_sample:
                break

        self.r1qc_postfilter.qc()
        #self.r1qc_postfilter.plot(qc_dir, "R1-postfilter")
        if self.options.read2_file != None:
            self.r2qc_postfilter.qc()
            #self.r2qc_postfilter.plot(qc_dir, "R2-postfilter")
        
        #close all files
        if not self.options.qc_only:
            good_read1_file.flush()
            bad_read1_file.flush()
            if self.options.read2_file != None:
                good_read2_file.flush()
                bad_read2_file.flush()
            if self.options.index1_file != None:
                good_index1_file.flush()
                bad_index1_file.flush()
            if self.options.index2_file != None:
                good_index2_file.flush()
                bad_index2_file.flush()

        # print stat numbers
        BAD_READS = TOTAL_READS - GOOD_READS
        result = {}
        result['total_bases']=TOTAL_BASES
        result['good_bases']=GOOD_BASES
        result['total_reads']=TOTAL_READS
        result['good_reads']=GOOD_READS
        result['bad_reads']=BAD_READS
        result['bad_reads_with_bad_barcode']= BADBCD1 + BADBCD2
        result['bad_reads_with_reads_in_bubble']= BADBBL
        result['bad_reads_with_bad_read_length']= BADLEN + BADTRIM1 + BADTRIM2
        result['bad_reads_with_polyX']= BADPOL
        result['bad_reads_with_low_quality']=BADLQC
        result['bad_reads_with_too_many_N']= BADNCT
        result['bad_reads_with_bad_overlap']= BADMISMATCH + BADINDEL
        result['readlen'] = readLen

        # plot result bar figure
        labels = ['good reads', 'has_polyX', 'low_quality', 'too_short', 'too_many_N']
        counts = [GOOD_READS, BADPOL, BADLQC, BADLEN + BADTRIM1 + BADTRIM2, BADNCT]
        colors = ['#66BB11', '#FF33AF', '#FFD3F2', '#FFA322', '#FF8899']
        if self.options.read2_file != None:
            labels.append('bad_overlap')
            counts.append(BADMISMATCH + BADINDEL)
            colors.append('#FF6600')
        if self.options.debubble:
            labels.append('in_bubble')
            counts.append(BADBBL)
            colors.append('#EEBB00')
        if self.options.barcode:
            labels.append('bad_barcode')
            counts.append(BADBCD1 + BADBCD2)
            colors.append('#CCDD22')

        for i in xrange(len(counts)):
            type_percent = 0.0
            if TOTAL_READS > 0:
                type_percent = 100.0 * float(counts[i])/TOTAL_READS
            labels[i] = labels[i] + ": " + str(counts[i]) + "(" + str(type_percent) + "%)"

        reporter.addFigure('Good reads and bad reads after filtering', self.r1qc_prefilter.statPlotly(labels, counts, TOTAL_READS, 'filter_stat'), 'filter_stat', "")
        #self.r1qc_prefilter.plotFilterStats(labels, counts, colors, TOTAL_READS, os.path.join(qc_dir, "filter-stat.png"))

        stat={}
        # stat["options"]=self.options
        stat["summary"]=result
        stat["command"]=makeDict(self.options)
        stat["kmer_content"] = {}
        stat["kmer_content"]["read1_prefilter"] = self.r1qc_prefilter.topKmerCount[0:10]
        stat["kmer_content"]["read1_postfilter"] = self.r1qc_postfilter.topKmerCount[0:10]
        if self.options.read2_file != None:
            stat["kmer_content"]["read2_prefilter"] = self.r2qc_prefilter.topKmerCount[0:10]
            stat["kmer_content"]["read2_postfilter"] = self.r2qc_postfilter.topKmerCount[0:10]
            stat["overlap"]={}
            stat["overlap"]['overlapped_pairs']=OVERLAPPED
            if OVERLAPPED > 0:
                stat["overlap"]['average_overlap_length']=float(OVERLAP_LEN_SUM/OVERLAPPED)
            else:
                stat["overlap"]['average_overlap_length']=0.0
            stat["overlap"]['bad_mismatch_reads']=BADMISMATCH
            stat["overlap"]['bad_indel_reads']=BADINDEL
            stat["overlap"]['corrected_reads']=READ_CORRECTED
            stat["overlap"]['corrected_bases']=BASE_CORRECTED
            stat["overlap"]['zero_qual_masked']=BASE_ZERO_QUAL_MASKED
            if OVERLAP_BASE_SUM > 0:
                stat["overlap"]['error_rate']=float(OVERLAP_BASE_ERR)/float(OVERLAP_BASE_SUM)
            else:
                stat["overlap"]['error_rate']=0.0
            stat["overlap"]['error_matrix']=OVERLAP_ERR_MATRIX
            stat["overlap"]['edit_distance_histogram']=distance_histgram[0:10]
            reporter.addFigure('Sequence error distribution', self.r1qc_prefilter.errorPlotly(OVERLAP_ERR_MATRIX, 'error_matrix'), 'error_matrix', "")
            reporter.addFigure('Overlap length distribution', self.r1qc_prefilter.overlapPlotly(overlap_histgram, readLen, TOTAL_READS, 'overlap_stat'), 'overlap_stat', "")
            #self.r1qc_prefilter.plotOverlapHistgram(overlap_histgram, readLen, TOTAL_READS, os.path.join(qc_dir, "overlap.png"))

        stat_file = open(os.path.join(qc_dir, "after.json"), "w")
        stat_json = json.dumps(stat, sort_keys=True,indent=4, separators=(',', ': '))
        stat_file.write(stat_json)
        stat_file.close()

        self.addFiguresToReport(reporter)
        reporter.setStat(stat)
        reporter.output(os.path.join(qc_dir, "report.html"))
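
Note: the stat dump near the end of this example opens and closes the JSON file by hand. A minimal sketch of the same step using a context manager, assuming stat and qc_dir are defined as in the example (dump_stats is a hypothetical helper name):

import json
import os

def dump_stats(stat, qc_dir):
    # Write the collected summary as pretty-printed JSON; the with-block
    # closes the file even if serialization raises.
    with open(os.path.join(qc_dir, "after.json"), "w") as stat_file:
        json.dump(stat, stat_file, sort_keys=True, indent=4,
                  separators=(',', ': '))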

Example 35

Project: OSCAAR
Source File: calculateEphemerides.py
View license
def calculateEphemerides(parFile):
    '''
        :INPUTS:
        parFile	 --	  path to the parameter file
        '''

    #parFile = 'umo.par'

    '''Parse the observatory .par file'''
    parFilePath = os.path.join(os.path.dirname(oscaar.__file__),
                               'extras', 'eph', 'observatories', parFile)
    parFileText = open(parFilePath, 'r').read().splitlines()

    def returnBool(value):
        '''Return booleans from strings'''
        if value.upper().strip() == 'TRUE': return True
        elif value.upper().strip() == 'FALSE': return False
    show_lt = float(0)  # default: report times in UT unless the .par file sets show_lt
    for line in parFileText:
        parameter = line.split(':')[0]
        if len(line.split(':')) > 1:
            value = line.split(':')[1].strip()
            if parameter == 'name': observatory_name = value
            elif parameter == 'latitude': observatory_latitude = value
            elif parameter == 'longitude': observatory_longitude = value
            elif parameter == 'elevation': observatory_elevation = float(value)
            elif parameter == 'temperature': observatory_temperature = float(value)
            elif parameter == 'min_horizon': observatory_minHorizon = value
            elif parameter == 'start_date': startSem = gd2jd(eval(value))
            elif parameter == 'end_date': endSem = gd2jd(eval(value))
            elif parameter == 'mag_limit': mag_limit = float(value)
            elif parameter == 'band': band = value
            elif parameter == 'depth_limit': depth_limit = float(value)
            elif parameter == 'calc_transits': calcTransits = returnBool(value)
            elif parameter == 'calc_eclipses': calcEclipses = returnBool(value)
            elif parameter == 'html_out': htmlOut = returnBool(value)
            elif parameter == 'text_out': textOut = returnBool(value)
            elif parameter == 'twilight': twilightType = value
            elif parameter == 'show_lt': show_lt = float(value)
    from oscaar.extras.knownSystemParameters import getLatestParams
    exoplanetDB = getLatestParams.downloadAndPickle()

    ''' Set up observatory parameters '''
    observatory = ephem.Observer()
    observatory.lat =  observatory_latitude#'38:58:50.16'	## Input format-  deg:min:sec  (type=str)
    observatory.long = observatory_longitude#'-76:56:13.92' ## Input format-  deg:min:sec  (type=str)
    observatory.elevation = observatory_elevation   # m
    observatory.temp = observatory_temperature	  ## Celsius 
    observatory.horizon = observatory_minHorizon	## Input format-  deg:min:sec  (type=str)

    def trunc(f, n):
        '''Truncates a float f to n decimal places without rounding'''
        slen = len('%.*f' % (n, f))
        return str(f)[:slen]

    def RA(planet):
        '''Type: str, Units:  hours:min:sec'''
        return exoplanetDB[planet]['RA_STRING']
    def dec(planet):
        '''Type: str, Units:  deg:min:sec'''
        return exoplanetDB[planet]['DEC_STRING']
    def period(planet):
        '''Units:  days'''
        return np.float64(exoplanetDB[planet]['PER'])
    def epoch(planet):
        '''Tc at mid-transit. Units:  days'''
        if exoplanetDB[planet]['TT'] == '': return 0.0
        else: return np.float64(exoplanetDB[planet]['TT'])
    def duration(planet):
        '''Transit/eclipse duration. Units:  days'''
        if exoplanetDB[planet]['T14'] == '': return 0.0
        else: return float(exoplanetDB[planet]['T14'])
    def V(planet):
        '''V mag'''
        if exoplanetDB[planet]['V'] == '': return 0.0
        else: return float(exoplanetDB[planet]['V'])
    def KS(planet):
        '''KS mag'''
        if exoplanetDB[planet]['KS'] == '': return 0.0
        else: return float(exoplanetDB[planet]['KS'])
    
    def bandMagnitude(planet):
        if band.upper() == 'V':
            return V(planet)
        elif band.upper() == 'K':
            return KS(planet)
    def depth(planet):
        '''Transit depth'''
        if exoplanetDB[planet]['DEPTH'] == '': return 0.0
        else: return float(exoplanetDB[planet]['DEPTH'])

    def transitBool(planet):
        '''True if exoplanet is transiting, False if detected by other means'''
        if exoplanetDB[planet]['TRANSIT'] == '0': return 0
        elif exoplanetDB[planet]['TRANSIT'] == '1': return 1
    ########################################################################################
    ########################################################################################

    def datestr2list(datestr):
        ''' Take strings of the form: "2013/1/18 20:08:18" and return them as a
            tuple of the same parameters'''
        year,month,others = datestr.split('/')
        day, time = others.split(' ')
        hour,minute,sec = time.split(':')
        return (int(year),int(month),int(day),int(hour),int(minute),int(sec))

    def list2datestr(inList):
        '''Converse function to datestr2list'''
        inList = map(str,inList)
        return inList[0]+'/'+inList[1]+'/'+inList[2]+' '+inList[3].zfill(2)+':'+inList[4].zfill(2)+':'+inList[5].zfill(2)

    def list2datestrCSV(inList):
        '''Converse function to datestr2list'''
        inList = map(str,inList)
        print inList
        return inList[0]+'/'+inList[1]+'/'+inList[2]+','+inList[3].zfill(2)+':'+inList[4].zfill(2)+':'+inList[5].zfill(2)


    def list2datestrHTML(inList,alt,direction):
        '''Converse function to datestr2list'''
        inList = map(str,inList)
        #return inList[1].zfill(2)+'/'+inList[2].zfill(2)+'<br />'+inList[3].zfill(2)+':'+inList[4].zfill(2)
        return inList[1].zfill(2)+'/<strong>'+inList[2].zfill(2)+'</strong>, '+inList[3].zfill(2)+':'+inList[4].split('.')[0].zfill(2)+'<br /> '+alt+'&deg; '+direction

    def list2datestrHTML_UTnoaltdir(inList,alt,direction):
        '''Converse function to datestr2list'''
        inList = map(str,inList)
        #return inList[1].zfill(2)+'/'+inList[2].zfill(2)+'<br />'+inList[3].zfill(2)+':'+inList[4].zfill(2)
        return inList[1].zfill(2)+'/<strong>'+inList[2].zfill(2)+'</strong>, '+inList[3].zfill(2)+':'+inList[4].split('.')[0].zfill(2)

    def list2datestrHTML_LT(inList,alt,direction):
        '''Converse function to datestr2list for daylight savings time'''
        #print "original",inList
        tempDate = ephem.Date(inList)
        inList = ephem.Date(ephem.localtime(tempDate)).tuple()
        #print "converted",lt_inList,'\n'
        inList = map(str,inList)
        #return inList[1].zfill(2)+'/'+inList[2].zfill(2)+'<br />'+inList[3].zfill(2)+':'+inList[4].zfill(2)
        return inList[1].zfill(2)+'/<strong>'+inList[2].zfill(2)+'</strong>, '+inList[3].zfill(2)+':'+inList[4].split('.')[0].zfill(2)+'<br /> '+alt+'&deg; '+direction

    def simbadURL(planet):
        if exoplanetDB[planet]['SIMBADURL'] == '': return 'http://simbad.harvard.edu/simbad/'
        else: return exoplanetDB[planet]['SIMBADURL']

    def RADecHTML(planet):
        return '<a href="'+simbadURL(planet)+'">'+RA(planet).split('.')[0]+'<br />'+dec(planet).split('.')[0]+'</a>'

    def constellation(planet):
        return exoplanetDB[planet]['Constellation']

    def orbitReference(planet):
        return exoplanetDB[planet]['TRANSITURL']

    def orbitReferenceYear(planet):
        '''ORBREF returns the citation in the format "<first author> <year>", so parse and return just the year'''
        return exoplanetDB[planet]['ORBREF'].split()[1]

    def nameWithLink(planet):
        return '<a href="'+orbitReference(planet)+'">'+planet+'</a>'

    def mass(planet):
        if exoplanetDB[planet]['MASS'] == '': return '---'
        else: return trunc(float(exoplanetDB[planet]['MASS']),2)

    def semimajorAxis(planet):
        #return trunc(0.004649*float(exoplanetDB[planet]['AR'])*float(exoplanetDB[planet]['RSTAR']),3)   ## Convert from solar radii to AU
        return trunc(float(exoplanetDB[planet]['SEP']),3)

    def radius(planet):
        if exoplanetDB[planet]['R'] == '': return '---'
        else: return trunc(float(exoplanetDB[planet]['R']),2) ## Convert from solar radii to Jupiter radii

    def midTransit(Tc, P, start, end):
        '''Calculate mid-transits between Julian Dates start and end, using a 2500 
            orbital phase kernel since T_c (for 2 day period, 2500 phases is 14 years)
            '''
        Nepochs = np.arange(0,2500,dtype=np.float64)
        transitTimes = Tc + P*Nepochs
        transitTimesInSem = transitTimes[(transitTimes < end)*(transitTimes > start)]
        return transitTimesInSem

    def midEclipse(Tc, P, start, end):
        '''Calculate mid-eclipses between Julian Dates start and end, using a 2500 
            orbital phase kernel since T_c (for 2 day period, 2500 phases is 14 years)
            '''
        Nepochs = np.arange(0,2500,dtype=np.float64)
        transitTimes = Tc + P*(0.5 + Nepochs)
        transitTimesInSem = transitTimes[(transitTimes < end)*(transitTimes > start)]
        return transitTimesInSem

    '''Choose which planets from the database to include in the search, 
        assemble a list of them.'''
    planets = []
    for planet in exoplanetDB:
        if bandMagnitude(planet) != 0.0 and depth(planet) != 0.0 and float(bandMagnitude(planet)) <= mag_limit and \
           float(depth(planet)) >= depth_limit and transitBool(planet):
            planets.append(planet)

    if calcTransits: transits = {}
    if calcEclipses: eclipses = {}
    for day in np.arange(startSem,endSem+1):
        if calcTransits: transits[str(day)] = []
        if calcEclipses: eclipses[str(day)] = []
    planetsNeverUp = []


    def azToDirection(az):
        az = float(az)
        if (az >= 0 and az < 22.5) or (az >= 337.5 and az < 360): return 'N'
        elif az >= 22.5 and az < 67.5:  return 'NE'
        elif az >= 67.5 and az < 112.5:  return 'E'
        elif az >= 112.5 and az < 157.5:  return 'SE'
        elif az >= 157.5 and az < 202.5:  return 'S'
        elif az >= 202.5 and az < 247.5:  return 'SW'
        elif az >= 247.5 and az < 292.5:  return 'W'	
        elif az >= 292.5 and az < 337.5:  return 'NW'

    def ingressEgressAltAz(planet,observatory,ingress,egress):
        altitudes = []
        directions = []
        for time in [ingress,egress]:
            observatory.date = list2datestr(jd2gd(time))
            star = ephem.FixedBody()
            star._ra = ephem.hours(RA(planet))
            star._dec = ephem.degrees(dec(planet))
            star.compute(observatory)
            altitudes.append(str(ephem.degrees(star.alt)).split(":")[0])
            directions.append(azToDirection(str(ephem.degrees(star.az)).split(":")[0]))
        ingressAlt,egressAlt = altitudes
        ingressDir,egressDir = directions
        return ingressAlt,ingressDir,egressAlt,egressDir

    def aboveHorizonForEvent(planet,observatory,ingress,egress):
        altitudes = []
        for time in [ingress,egress]:
            observatory.date = list2datestr(jd2gd(time))
            star = ephem.FixedBody()
            star._ra = ephem.hours(RA(planet))
            star._dec = ephem.degrees(dec(planet))
            star.compute(observatory)
            #altitudes.append(str(ephem.degrees(star.alt)).split(":")[0])
            altitudes.append(float(repr(star.alt))/(2*np.pi) * 360)	## Convert altitudes to degrees
        #if altitudes[0] > 0 and altitudes[1] > 0: return True
        minHorizonDeg = float(ephem.degrees(observatory_minHorizon))*(180/np.pi)	## min horizon in degrees
        if altitudes[0] > minHorizonDeg and altitudes[1] > minHorizonDeg: return True
        else: return False

    def eventAfterTwilight(planet,observatory,ingress,egress,twilightType):
        altitudes = []
        for time in [ingress,egress]:
            observatory.date = list2datestr(jd2gd(time))
            sun = ephem.Sun()
            sun.compute(observatory)
            altitudes.append(float(repr(sun.alt))/(2*np.pi) * 360)	## Convert altitudes to degrees
        if altitudes[0] < float(twilightType) and altitudes[1] < float(twilightType): return True
        else: return False

    for planet in planets:		
        '''Compute all of the coming transits and eclipses for a long time out'''
        allTransitEpochs = midTransit(epoch(planet),period(planet),startSem,endSem)
        allEclipseEpochs = midEclipse(epoch(planet),period(planet),startSem,endSem)
        for day in np.arange(startSem,endSem+1,1.0):
            try:
                '''For each day, gather the transits and eclipses that happen'''
                transitEpochs = allTransitEpochs[(allTransitEpochs <= day+0.5)*(allTransitEpochs > day-0.5)]
                eclipseEpochs = allEclipseEpochs[(allEclipseEpochs <= day+0.5)*(allEclipseEpochs > day-0.5)]
                if calcTransits and len(transitEpochs) != 0:
                    transitEpoch = transitEpochs[0]
                    ingress = transitEpoch-duration(planet)/2
                    egress = transitEpoch+duration(planet)/2
                    
                    ''' Calculate positions of host stars'''
                    star = ephem.FixedBody()
                    star._ra = ephem.hours(RA(planet))
                    star._dec = ephem.degrees(dec(planet))
                    star.compute(observatory)
                    exoplanetDB[planet]['Constellation'] = ephem.constellation(star)[0]
                    
                    '''If star is above horizon and sun is below horizon during transit/eclipse:'''		
                    if aboveHorizonForEvent(planet,observatory,ingress,egress) and eventAfterTwilight(planet,observatory,ingress,egress,twilightType):
                        ingressAlt,ingressDir,egressAlt,egressDir = ingressEgressAltAz(planet,observatory,ingress,egress)
                        transitInfo = [planet,transitEpoch,duration(planet)/2,'transit',ingressAlt,ingressDir,egressAlt,egressDir]
                        transits[str(day)].append(transitInfo)		
                if calcEclipses and len(eclipseEpochs) != 0:
                    eclipseEpoch = eclipseEpochs[0]
                    ingress = eclipseEpoch-duration(planet)/2
                    egress = eclipseEpoch+duration(planet)/2
                    
                    ''' Calculate positions of host stars'''
                    star = ephem.FixedBody()
                    star._ra = ephem.hours(RA(planet))
                    star._dec = ephem.degrees(dec(planet))
                    star.compute(observatory)
                    exoplanetDB[planet]['Constellation'] = ephem.constellation(star)[0]
                    
                    if aboveHorizonForEvent(planet,observatory,ingress,egress) and eventAfterTwilight(planet,observatory,ingress,egress,twilightType):
                        ingressAlt,ingressDir,egressAlt,egressDir = ingressEgressAltAz(planet,observatory,ingress,egress)
                        eclipseInfo = [planet,eclipseEpoch,duration(planet)/2,'eclipse',ingressAlt,ingressDir,egressAlt,egressDir]
                        eclipses[str(day)].append(eclipseInfo)	
            
            except ephem.NeverUpError:
                if str(planet) not in planetsNeverUp:
                    print 'Note: planet %s is never above the horizon at this observing location.' % (planet)
                    planetsNeverUp.append(str(planet))

    def removeEmptySets(dictionary):
        '''Remove days where there were no transits/eclipses from the transit/eclipse list dictionary. 
            Can't iterate through the transits dictionary with a for loop because it would change length 
            as keys get deleted, so loop through with while loop until all entries are not empty sets'''
        dayCounter = startSem
        while any(dictionary[day] == [] for day in dictionary):	
            if dictionary[str(dayCounter)] == []:
                del dictionary[str(dayCounter)]
            dayCounter += 1

    if calcTransits: removeEmptySets(transits)
    if calcEclipses: removeEmptySets(eclipses)

    events = {}
    def mergeDictionaries(eventDict):
        for key in eventDict:
            if key not in events:	## If key does not exist in events,
                if np.shape(eventDict[key])[0] == 1:	## If new event is the only one on that night, add only it
                    events[key] = [eventDict[key][0]]
                else:			## If there were multiple events that night, add them each
                    events[key] = []
                    for event in eventDict[key]:
                        events[key].append(event)
            else:
                if np.shape(eventDict[key])[0] > 1: ## If there are multiple entries to append,
                    for event in eventDict[key]:
                        events[key].append(event)
                else:							## If there is only one to add,
                    events[key].append(eventDict[key][0])
    if calcTransits: mergeDictionaries(transits)
    if calcEclipses: mergeDictionaries(eclipses)

    if textOut: 
        allKeys = events.keys()
        allKeys = np.array(allKeys)[np.argsort(allKeys)]
        report = open(os.path.join(os.path.dirname(oscaar.__file__),'extras','eph','ephOutputs','eventReport.csv'),'w')
        firstLine = 'Planet,Event,Ingress Date, Ingress Time (UT) ,Altitude at Ingress,Azimuth at Ingress,Egress Date, Egress Time (UT) ,Altitude at Egress,Azimuth at Egress,V mag,Depth,Duration,RA,Dec,Const.,Mass,Semimajor Axis (AU),Radius (R_J)\n'
        report.write(firstLine)
        
        for key in allKeys:
            def writeCSVtransit():
                middle = ','.join([planet[0],str(planet[3]),list2datestrCSV(jd2gd(float(planet[1]-planet[2]))),planet[4],planet[5],\
                                   list2datestrCSV(jd2gd(float(planet[1]+planet[2]))),planet[6],planet[7],trunc(bandMagnitude(str(planet[0])),2),\
                                   trunc(depth(planet[0]),4),trunc(24.0*duration(planet[0]),2),RA(planet[0]),dec(planet[0]),constellation(planet[0]),\
                                   mass(planet[0]),semimajorAxis(planet[0]),radius(planet[0])])
                line = middle+'\n'
                report.write(line)
            
            def writeCSVeclipse():
                middle = ','.join([planet[0],str(planet[3]),list2datestrCSV(jd2gd(float(planet[1]-planet[2]))),planet[4],planet[5],\
                                   list2datestrCSV(jd2gd(float(planet[1]+planet[2]))),planet[6],planet[7],trunc(bandMagnitude(str(planet[0])),2),\
                                   trunc(depth(planet[0]),4),trunc(24.0*duration(planet[0]),2),RA(planet[0]),dec(planet[0]),constellation(planet[0]),\
                                   mass(planet[0]),semimajorAxis(planet[0]),radius(planet[0])])
                line = middle+'\n'
                report.write(line)
            
            if np.shape(events[key])[0] > 1:
                elapsedTime = []
                
                for i in range(1,len(events[key])):
                    nextPlanet = events[key][i]
                    planet = events[key][i-1]
                    double = False
                    '''If the other planet's ingress is before this one's egress, then'''
                    if ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]-nextPlanet[2])))) -\
                        ephem.Date(list2datestr(jd2gd(float(planet[1]+planet[2])))) > 0.0:
                            double = True
                            elapsedTime.append(ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]-nextPlanet[2])))) - \
                                               ephem.Date(list2datestr(jd2gd(float(planet[1]+planet[2])))))
                    
                    if ephem.Date(list2datestr(jd2gd(float(planet[1]-planet[2])))) - \
                        ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]+nextPlanet[2])))) > 0.0:
                            '''If the other planet's egress is before this one's ingress, then'''
                            double = True
                            elapsedTime.append(ephem.Date(list2datestr(jd2gd(float(planet[1]-planet[2])))) - \
                                               ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]+nextPlanet[2])))))
                
                for planet in events[key]:
                    if calcTransits and planet[3] == 'transit':
                        writeCSVtransit()
                    if calcEclipses and planet[3] == 'eclipse':
                        writeCSVeclipse()		  
            
            elif np.shape(events[key])[0] == 1:
                planet = events[key][0]
                if calcTransits and planet[3] == 'transit':
                    writeCSVtransit()
                if calcEclipses and planet[3] == 'eclipse':
                    writeCSVeclipse()
        # report.write('\n')
        
        report.close()
    #print exoplanetDB['HD 209458 b']
    print 'calculateEphemerides.py: Done'


    if htmlOut: 
        '''Write out a text report with the transits/eclipses. Write out the time of 
            ingress, egress, whether event is transit/eclipse, elapsed in time between
            ingress/egress of the temporally isolated events'''
        report = open(os.path.join(os.path.dirname(oscaar.__file__),'extras','eph','ephOutputs','eventReport.html'),'w')
        allKeys = events.keys()
        ## http://www.kryogenix.org/code/browser/sorttable/
        htmlheader = '\n'.join([
                                '<!doctype html>',\
                                '<html>',\
                                '	<head>',\
                                '		<meta http-equiv="content-type" content="text/html; charset=UTF-8" />',\
                                '		<title>Ephemeris</title>',\
                                '		<link rel="stylesheet" href="stylesheetEphem.css" type="text/css" />',\
                                '		 <script type="text/javascript">',\
                                '		  function changeCSS(cssFile, cssLinkIndex) {',\
                                '			var oldlink = document.getElementsByTagName("link").item(cssLinkIndex);',\
                                '			var newlink = document.createElement("link")',\
                                '			newlink.setAttribute("rel", "stylesheet");',\
                                '			newlink.setAttribute("type", "text/css");',\
                                '			newlink.setAttribute("href", cssFile);',\
                                
                                '			document.getElementsByTagName("head").item(0).replaceChild(newlink, oldlink);',\
                                '		  }',\
                                '		</script>',\
                                '	   <script src="./sorttable.js"></script>',\
                                '	</head>',\
                                '	<body>',\
                                '		<div id="textDiv">',\
                                '		<h1>Ephemerides for: '+observatory_name+'</h1>',\
                                '		<h2>Observing dates (UT): '+list2datestr(jd2gd(startSem)).split(' ')[0]+' - '+list2datestr(jd2gd(endSem)).split(' ')[0]+'</h2>',\
                                '	   Click the column headers to sort. ',\
                                '		<table class="daynight" id="eph">',\
                                '		<tr><th colspan=2>Toggle Color Scheme</th></tr>',\
                                '		<tr><td><a href="#" onclick="changeCSS(\'stylesheetEphem.css\', 0);">Day</a></td><td><a href="#" onclick="changeCSS(\'stylesheetEphemDark.css\', 0);">Night</a></td></tr>',\
                                '		</table>'])
        
        if show_lt == 0:
            tableheader = '\n'.join([
                                     '\n		<table class="sortable" id="eph">',\
                                     '		<tr> <th>Planet<br /><span class="small">[Link: Orbit ref.]</span></th>	  <th>Event<br /><span class="small">[Transit/<br />Eclipse]</span></th>	<th>Ingress <br /><span class="small">(MM/DD<br />HH:MM, (UT), Alt., Dir.)</span></th> <th>Egress <br /><span class="small">(MM/DD<br />HH:MM, (UT), Alt., Dir.)</span></th>'+\
                                     '<th>'+band.upper()+'</th> <th>Depth<br />(mag)</th> <th>Duration<br />(hrs)</th> <th>RA/Dec<br /><span class="small">[Link: Simbad ref.]</span></th> <th>Const.</th> <th>Mass<br />(M<sub>J</sub>)</th>'+\
                                     '<th>Radius<br />(R<sub>J</sub>)</th> <th>Ref. Year</th></tr>'])
        else:
            tableheader = '\n'.join([
                                     '\n        <table class="sortable" id="eph">',\
                                     '        <tr> <th>Planet<br /><span class="small">[Link: Orbit ref.]</span></th>      <th>Event<br /><span class="small">[Transit/<br />Eclipse]</span></th> <th>Ingress <br /><span class="small">(MM/DD<br />HH:MM (LT), Alt., Dir.)</span></th> <th>Egress <br /><span class="small">(MM/DD<br />HH:MM (LT), Alt., Dir.)</span></th>   '+\
                                     '<th>'+band.upper()+'</th> <th>Depth<br />(mag)</th> <th>Duration<br />(hrs)</th> <th>RA/Dec<br /><span class="small">[Link: Simbad ref.]</span></th> <th>Const.</th> <th>Mass<br />(M<sub>J</sub>)</th>'+\
                                     ' <th>Radius<br />(R<sub>J</sub>)</th> <th>Ref. Year</th> <th>Ingress <br /><span class="small">(MM/DD<br />HH:MM (UT))</span></th> <th>Egress <br /><span class="small">(MM/DD<br />HH:MM, (UT))</span></th></tr>'])
    
        
        tablefooter = '\n'.join([
                                 '\n		</table>',\
                                 '		<br /><br />',])
        htmlfooter = '\n'.join([
                                '\n		<p class="headinfo">',\
                                '		Developed by Brett Morris with great gratitude for the help of <a href="http://rhodesmill.org/pyephem/">PyEphem</a>,<br/>',\
                                '		and for up-to-date exoplanet parameters from <a href="http://www.exoplanets.org/">exoplanets.org</a> (<a href="http://adsabs.harvard.edu/abs/2011PASP..123..412W">Wright et al. 2011</a>).<br />',\
                                '		</p>',\
                                '		</div>',\
                                '	</body>',\
                                '</html>'])
        report.write(htmlheader)
        report.write(tableheader)
        
        allKeys = np.array(allKeys)[np.argsort(allKeys)]
        for key in allKeys:
            def writeHTMLtransit():
                indentation = '		'
                if show_lt != 0: 
                    middle = '</td><td>'.join([nameWithLink(planet[0]),str(planet[3]),list2datestrHTML_LT(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
                                               list2datestrHTML_LT(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7]),trunc(bandMagnitude(str(planet[0])),2),\
                                               trunc(depth(planet[0]),4),trunc(24.0*duration(planet[0]),2),RADecHTML(planet[0]),constellation(planet[0]),\
                                               mass(planet[0]),radius(planet[0]),orbitReferenceYear(planet[0]),list2datestrHTML_UTnoaltdir(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
                                               list2datestrHTML_UTnoaltdir(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7])])
                else:
                    middle = '</td><td>'.join([nameWithLink(planet[0]),str(planet[3]),list2datestrHTML(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
                                               list2datestrHTML(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7]),trunc(bandMagnitude(str(planet[0])),2),\
                                               trunc(depth(planet[0]),4),trunc(24.0*duration(planet[0]),2),RADecHTML(planet[0]),constellation(planet[0]),\
                                               mass(planet[0]),radius(planet[0]),orbitReferenceYear(planet[0])])
                line = indentation+'<tr><td>'+middle+'</td></tr>\n'
                report.write(line)
            
            def writeHTMLeclipse():
                indentation = '		'
                if show_lt != 0:
                    middle = '</td><td>'.join([nameWithLink(planet[0]),str(planet[3]),list2datestrHTML_LT(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
                                               list2datestrHTML_LT(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7]),trunc(bandMagnitude(str(planet[0])),2),\
                                               '---',trunc(24.0*duration(planet[0]),2),RADecHTML(planet[0]),constellation(planet[0]),\
                                               mass(planet[0]),radius(planet[0]),orbitReferenceYear(planet[0]),list2datestrHTML_UTnoaltdir(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
                                               list2datestrHTML_UTnoaltdir(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7])])
                else: 
                    middle = '</td><td>'.join([nameWithLink(planet[0]),str(planet[3]),list2datestrHTML(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
                                               list2datestrHTML(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7]),trunc(bandMagnitude(str(planet[0])),2),\
                                               '---',trunc(24.0*duration(planet[0]),2),RADecHTML(planet[0]),constellation(planet[0]),\
                                               mass(planet[0]),radius(planet[0]),orbitReferenceYear(planet[0])])

                line = indentation+'<tr><td>'+middle+'</td></tr>\n'
                report.write(line)
            
            
            if np.shape(events[key])[0] > 1:
                elapsedTime = []
                
                for i in range(1,len(events[key])):
                    nextPlanet = events[key][i]
                    planet = events[key][i-1]
                    double = False
                    '''If the other planet's ingress is before this one's egress, then'''
                    if ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]-nextPlanet[2])))) -\
                        ephem.Date(list2datestr(jd2gd(float(planet[1]+planet[2])))) > 0.0:
                            double = True
                            elapsedTime.append(ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]-nextPlanet[2])))) - \
                                               ephem.Date(list2datestr(jd2gd(float(planet[1]+planet[2])))))
                    
                    if ephem.Date(list2datestr(jd2gd(float(planet[1]-planet[2])))) - \
                        ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]+nextPlanet[2])))) > 0.0:
                            '''If the other planet's egress is before this one's ingress, then'''
                            double = True
                            elapsedTime.append(ephem.Date(list2datestr(jd2gd(float(planet[1]-planet[2])))) - \
                                               ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]+nextPlanet[2])))))
                
                for planet in events[key]:
                    if calcTransits and planet[3] == 'transit':
                        writeHTMLtransit()
                    if calcEclipses and planet[3] == 'eclipse':
                        writeHTMLeclipse()		  
            elif np.shape(events[key])[0] == 1:
                planet = events[key][0]
                if calcTransits and planet[3] == 'transit':
                    writeHTMLtransit()
                if calcEclipses and planet[3] == 'eclipse':
                    writeHTMLeclipse()
        report.write(tablefooter)
        report.write(htmlfooter)
        report.close()
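
Note: the path lookups in this example all follow one pattern: anchor data files to the installed package with os.path.dirname(oscaar.__file__). A minimal sketch of that pattern as a helper, assuming oscaar is importable as above (package_path is a hypothetical name):

import os
import oscaar  # assumed importable, as in the example

def package_path(*parts):
    # Build a path inside the installed oscaar package, e.g.
    # package_path('extras', 'eph', 'observatories', 'umo.par')
    return os.path.join(os.path.dirname(oscaar.__file__), *parts)

Anchoring on __file__ keeps the lookup independent of the current working directory, which is why both the .par parsing and the report writing above resolve paths this way.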

Example 36

View license
def setUp(self):
	global testenv, agent
	global settableInteger32, settableUnsigned32, settableTimeTicks
	global settableOctetString

	testenv = netsnmpTestEnv()

	# Create a new netsnmpAgent instance which
	# - connects to the net-snmp test environment's snmpd instance
	# - uses its statedir
	# - loads the TEST-MIB from our tests directory
	testMIBPath = os.path.abspath(os.path.dirname(__file__)) + \
				  "/TEST-MIB.txt"
	agent = netsnmpagent.netsnmpAgent(
		AgentName      = "netsnmpAgentTestAgent",
		MasterSocket   = testenv.mastersocket,
		PersistenceDir = testenv.statedir,
		MIBFiles       = [ testMIBPath ],
	)

	# Test OIDs for Integer32 scalar type
	settableInteger32 = agent.Integer32(
		oidstr = "TEST-MIB::testInteger32NoInitval",
	)

	agent.Integer32(
		oidstr  = "TEST-MIB::testInteger32ZeroInitval",
		initval = 0,
	)

	agent.Integer32(
		oidstr  = "TEST-MIB::testInteger32MinusOneInitval",
		initval = -1,
	)

	agent.Integer32(
		oidstr  = "TEST-MIB::testInteger32MinInitval",
		initval = -2147483648,
	)

	agent.Integer32(
		oidstr  = "TEST-MIB::testInteger32MinMinusOneInitval",
		initval = -2147483649,
	)

	agent.Integer32(
		oidstr  = "TEST-MIB::testInteger32OneInitval",
		initval = 1,
	)

	agent.Integer32(
		oidstr  = "TEST-MIB::testInteger32MaxInitval",
		initval = 2147483647,
	)

	agent.Integer32(
		oidstr  = "TEST-MIB::testInteger32MaxPlusOneInitval",
		initval = 2147483648,
	)

	agent.Integer32(
		oidstr   = "TEST-MIB::testInteger32ReadOnly",
		writable = False,
	)

	# Test OIDs for Unsigned32 scalar type
	settableUnsigned32 = agent.Unsigned32(
		oidstr = "TEST-MIB::testUnsigned32NoInitval",
	)

	agent.Unsigned32(
		oidstr  = "TEST-MIB::testUnsigned32ZeroInitval",
		initval = 0,
	)

	agent.Unsigned32(
		oidstr  = "TEST-MIB::testUnsigned32MinusOneInitval",
		initval = -1,
	)

	agent.Unsigned32(
		oidstr  = "TEST-MIB::testUnsigned32OneInitval",
		initval = 1,
	)

	agent.Unsigned32(
		oidstr  = "TEST-MIB::testUnsigned32MaxInitval",
		initval = 4294967295,
	)

	agent.Unsigned32(
		oidstr  = "TEST-MIB::testUnsigned32MaxPlusOneInitval",
		initval = 4294967296,
	)

	agent.Unsigned32(
		oidstr   = "TEST-MIB::testUnsigned32ReadOnly",
		writable = False,
	)

	# Test OIDs for Counter32 scalar type
	agent.Counter32(
		oidstr = "TEST-MIB::testCounter32NoInitval",
	)

	agent.Counter32(
		oidstr  = "TEST-MIB::testCounter32ZeroInitval",
		initval = 0,
	)

	agent.Counter32(
		oidstr  = "TEST-MIB::testCounter32MinusOneInitval",
		initval = -1,
	)

	agent.Counter32(
		oidstr  = "TEST-MIB::testCounter32OneInitval",
		initval = 1,
	)

	agent.Counter32(
		oidstr  = "TEST-MIB::testCounter32MaxInitval",
		initval = 4294967295,
	)

	agent.Counter32(
		oidstr  = "TEST-MIB::testCounter32MaxPlusOneInitval",
		initval = 4294967296,
	)

	# Test OIDs for Counter64 scalar type
	agent.Counter64(
		oidstr = "TEST-MIB::testCounter64NoInitval",
	)

	agent.Counter64(
		oidstr  = "TEST-MIB::testCounter64ZeroInitval",
		initval = 0,
	)

	agent.Counter64(
		oidstr  = "TEST-MIB::testCounter64MinusOneInitval",
		initval = -1,
	)

	agent.Counter64(
		oidstr  = "TEST-MIB::testCounter64OneInitval",
		initval = 1,
	)

	agent.Counter64(
		oidstr  = "TEST-MIB::testCounter64MaxInitval",
		initval = 18446744073709551615,
	)

	agent.Counter64(
		oidstr  = "TEST-MIB::testCounter64MaxPlusOneInitval",
		initval = 18446744073709551616,
	)

	# Test OIDs for TimeTicks scalar type
	settableTimeTicks = agent.TimeTicks(
		oidstr = "TEST-MIB::testTimeTicksNoInitval",
	)

	agent.TimeTicks(
		oidstr  = "TEST-MIB::testTimeTicksZeroInitval",
		initval = 0,
	)

	agent.TimeTicks(
		oidstr  = "TEST-MIB::testTimeTicksMinusOneInitval",
		initval = -1,
	)

	agent.TimeTicks(
		oidstr  = "TEST-MIB::testTimeTicksOneInitval",
		initval = 1,
	)

	agent.TimeTicks(
		oidstr  = "TEST-MIB::testTimeTicksMaxInitval",
		initval = 4294967295,
	)

	agent.TimeTicks(
		oidstr  = "TEST-MIB::testTimeTicksMaxPlusOneInitval",
		initval = 4294967296,
	)

	agent.TimeTicks(
		oidstr   = "TEST-MIB::testTimeTicksReadOnly",
		writable = False,
	)

	# Test OIDs for OctetString scalar type
	settableOctetString = agent.OctetString(
		oidstr = "TEST-MIB::testOctetStringNoInitval",
	)

	agent.OctetString(
		oidstr  = "TEST-MIB::testOctetStringEmptyInitval",
		initval = "",
	)

	agent.OctetString(
		oidstr  = "TEST-MIB::testOctetStringOneASCIICharInitval",
		initval = "A",
	)

	agent.OctetString(
		oidstr  = "TEST-MIB::testOctetStringOneUTF8CharInitval",
		initval = "Ä",
	)

	agent.OctetString(
		oidstr  = "TEST-MIB::testOctetString255ASCIICharsInitval",
		initval = "A" * 255,
	)

	agent.OctetString(
		oidstr  = "TEST-MIB::testOctetString255UTF8CharsInitval",
		initval = "Ä" * 255,
	)

	agent.OctetString(
		oidstr  = "TEST-MIB::testOctetString256ASCIICharsInitval",
		initval = "A" * 256,
	)

	agent.OctetString(
		oidstr  = "TEST-MIB::testOctetString256UTF8CharsInitval",
		initval = "Ä" * 256,
	)

	# Connect to master snmpd instance
	agent.start()

	# Create a separate thread to implement the absolutely most
	# minimalistic possible agent doing nothing but request handling
	agent.loop = True
	def RequestHandler():
		while agent.loop:
			agent.check_and_process(False)

	agent.thread = threading.Thread(target=RequestHandler)
	agent.thread.daemon = True
	agent.thread.start()
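
Note: this example joins the test directory and the MIB filename with string concatenation and a hard-coded "/". A sketch of the same lookup with os.path.join, which supplies the separator portably (HERE and TEST_MIB_PATH are hypothetical names):

import os

# Absolute directory of this test module; abspath makes the result
# independent of the process's current working directory.
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_MIB_PATH = os.path.join(HERE, "TEST-MIB.txt")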

Example 37

Project: GAE-Bulk-Mailer
Source File: loaddata.py
View license
    def handle(self, *fixture_labels, **options):

        ignore = options.get('ignore')
        using = options.get('database')

        connection = connections[using]

        if not len(fixture_labels):
            raise CommandError(
                "No database fixture specified. Please provide the path of at "
                "least one fixture in the command line."
            )

        verbosity = int(options.get('verbosity'))
        show_traceback = options.get('traceback')

        # commit is a stealth option - it isn't really useful as
        # a command line option, but it can be useful when invoking
        # loaddata from within another script.
        # If commit=True, loaddata will use its own transaction;
        # if commit=False, the data load SQL will become part of
        # the transaction in place when loaddata was invoked.
        commit = options.get('commit', True)

        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        loaded_object_count = 0
        fixture_object_count = 0
        models = set()

        humanize = lambda dirname: "'%s'" % dirname if dirname else 'absolute path'

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()

        # Start transaction management. All fixtures are installed in a
        # single transaction to ensure that all references are resolved.
        if commit:
            transaction.commit_unless_managed(using=using)
            transaction.enter_transaction_management(using=using)
            transaction.managed(True, using=using)

        class SingleZipReader(zipfile.ZipFile):
            def __init__(self, *args, **kwargs):
                zipfile.ZipFile.__init__(self, *args, **kwargs)
                if settings.DEBUG:
                    assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."
            def read(self):
                return zipfile.ZipFile.read(self, self.namelist()[0])

        compression_types = {
            None:   open,
            'gz':   gzip.GzipFile,
            'zip':  SingleZipReader
        }
        if has_bz2:
            compression_types['bz2'] = bz2.BZ2File

        app_module_paths = []
        for app in get_apps():
            if hasattr(app, '__path__'):
                # It's a 'models/' subpackage
                for path in app.__path__:
                    app_module_paths.append(upath(path))
            else:
                # It's a models.py module
                app_module_paths.append(upath(app.__file__))

        app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]

        try:
            with connection.constraint_checks_disabled():
                for fixture_label in fixture_labels:
                    parts = fixture_label.split('.')

                    if len(parts) > 1 and parts[-1] in compression_types:
                        compression_formats = [parts[-1]]
                        parts = parts[:-1]
                    else:
                        compression_formats = compression_types.keys()

                    if len(parts) == 1:
                        fixture_name = parts[0]
                        formats = serializers.get_public_serializer_formats()
                    else:
                        fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                        if format in serializers.get_public_serializer_formats():
                            formats = [format]
                        else:
                            formats = []

                    if formats:
                        if verbosity >= 2:
                            self.stdout.write("Loading '%s' fixtures..." % fixture_name)
                    else:
                        raise CommandError(
                            "Problem installing fixture '%s': %s is not a known serialization format." %
                                (fixture_name, format))

                    if os.path.isabs(fixture_name):
                        fixture_dirs = [fixture_name]
                    else:
                        fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']

                    for fixture_dir in fixture_dirs:
                        if verbosity >= 2:
                            self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))

                        label_found = False
                        for combo in product([using, None], formats, compression_formats):
                            database, format, compression_format = combo
                            file_name = '.'.join(
                                p for p in [
                                    fixture_name, database, format, compression_format
                                ]
                                if p
                            )

                            if verbosity >= 3:
                                self.stdout.write("Trying %s for %s fixture '%s'..." % \
                                    (humanize(fixture_dir), file_name, fixture_name))
                            full_path = os.path.join(fixture_dir, file_name)
                            open_method = compression_types[compression_format]
                            try:
                                fixture = open_method(full_path, 'r')
                            except IOError:
                                if verbosity >= 2:
                                    self.stdout.write("No %s fixture '%s' in %s." % \
                                        (format, fixture_name, humanize(fixture_dir)))
                            else:
                                try:
                                    if label_found:
                                        raise CommandError("Multiple fixtures named '%s' in %s. Aborting." %
                                            (fixture_name, humanize(fixture_dir)))

                                    fixture_count += 1
                                    objects_in_fixture = 0
                                    loaded_objects_in_fixture = 0
                                    if verbosity >= 2:
                                        self.stdout.write("Installing %s fixture '%s' from %s." % \
                                            (format, fixture_name, humanize(fixture_dir)))

                                    objects = serializers.deserialize(format, fixture, using=using, ignorenonexistent=ignore)

                                    for obj in objects:
                                        objects_in_fixture += 1
                                        if router.allow_syncdb(using, obj.object.__class__):
                                            loaded_objects_in_fixture += 1
                                            models.add(obj.object.__class__)
                                            try:
                                                obj.save(using=using)
                                            except (DatabaseError, IntegrityError) as e:
                                                e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
                                                        'app_label': obj.object._meta.app_label,
                                                        'object_name': obj.object._meta.object_name,
                                                        'pk': obj.object.pk,
                                                        'error_msg': force_text(e)
                                                    },)
                                                raise

                                    loaded_object_count += loaded_objects_in_fixture
                                    fixture_object_count += objects_in_fixture
                                    label_found = True
                                except Exception as e:
                                    if not isinstance(e, CommandError):
                                        e.args = ("Problem installing fixture '%s': %s" % (full_path, e),)
                                    raise
                                finally:
                                    fixture.close()

                                # If the fixture we loaded contains 0 objects, assume that an
                                # error was encountered during fixture loading.
                                if objects_in_fixture == 0:
                                    raise CommandError(
                                        "No fixture data found for '%s'. (File format may be invalid.)" %
                                            (fixture_name))

            # Since we disabled constraint checks, we must manually check for
            # any invalid keys that might have been added
            table_names = [model._meta.db_table for model in models]
            try:
                connection.check_constraints(table_names=table_names)
            except Exception as e:
                e.args = ("Problem installing fixtures: %s" % e,)
                raise

        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            if commit:
                transaction.rollback(using=using)
                transaction.leave_transaction_management(using=using)
            raise

        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if loaded_object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(no_style(), models)
            if sequence_sql:
                if verbosity >= 2:
                    self.stdout.write("Resetting sequences\n")
                for line in sequence_sql:
                    cursor.execute(line)

        if commit:
            transaction.commit(using=using)
            transaction.leave_transaction_management(using=using)

        if verbosity >= 1:
            if fixture_object_count == loaded_object_count:
                self.stdout.write("Installed %d object(s) from %d fixture(s)" % (
                    loaded_object_count, fixture_count))
            else:
                self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)" % (
                    loaded_object_count, fixture_object_count, fixture_count))

        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        if commit:
            connection.close()
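
Note: the fixture search in this example derives a per-app fixtures directory from each models module path. A minimal sketch of just that derivation, assuming app_module_paths is built as above (fixtures_dirs_for is a hypothetical helper):

import os

def fixtures_dirs_for(app_module_paths):
    # For each app's models module (or package), point at a sibling
    # 'fixtures' directory, as the loaddata command does above.
    return [os.path.join(os.path.dirname(path), 'fixtures')
            for path in app_module_paths]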

Example 38

Project: PokemonGo-Map
Source File: utils.py
View license
@memoize
def get_args():
    # pre-check to see if the -cf or --config flag is used on the command line
    # if not, we'll use the env var or default value.  this prevents layering of
    # config files, and handles missing config.ini as well
    defaultconfigfiles = []
    if '-cf' not in sys.argv and '--config' not in sys.argv:
        defaultconfigfiles = [os.getenv('POGOMAP_CONFIG', os.path.join(os.path.dirname(__file__), '../config/config.ini'))]
    parser = configargparse.ArgParser(default_config_files=defaultconfigfiles, auto_env_var_prefix='POGOMAP_')
    parser.add_argument('-cf', '--config', is_config_file=True, help='Configuration file')
    parser.add_argument('-a', '--auth-service', type=str.lower, action='append', default=[],
                        help='Auth Services, either one for all accounts or one per account: ptc or google. Defaults all to ptc.')
    parser.add_argument('-u', '--username', action='append', default=[],
                        help='Usernames, one per account.')
    parser.add_argument('-p', '--password', action='append', default=[],
                        help='Passwords, either single one for all accounts or one per account.')
    parser.add_argument('-w', '--workers', type=int,
                        help='Number of search worker threads to start. Defaults to the number of accounts specified.')
    parser.add_argument('-asi', '--account-search-interval', type=int, default=0,
                        help='Seconds for accounts to search before switching to a new account. 0 to disable.')
    parser.add_argument('-ari', '--account-rest-interval', type=int, default=7200,
                        help='Seconds for accounts to rest when they fail or are switched out')
    parser.add_argument('-ac', '--accountcsv',
                        help='Load accounts from CSV file containing "auth_service,username,passwd" lines')
    parser.add_argument('-l', '--location', type=parse_unicode,
                        help='Location, can be an address or coordinates')
    parser.add_argument('-j', '--jitter', help='Apply random -9m to +9m jitter to location',
                        action='store_true', default=False)
    parser.add_argument('-st', '--step-limit', help='Steps', type=int,
                        default=12)
    parser.add_argument('-sd', '--scan-delay',
                        help='Time delay between requests in scan threads',
                        type=float, default=10)
    parser.add_argument('-enc', '--encounter',
                        help='Start an encounter to gather IVs and moves',
                        action='store_true', default=False)
    parser.add_argument('-ed', '--encounter-delay',
                        help='Time delay between encounter pokemon in scan threads',
                        type=float, default=1)
    encounter_list = parser.add_mutually_exclusive_group()
    encounter_list.add_argument('-ewht', '--encounter-whitelist', action='append', default=[],
                                help='List of pokemon to encounter for more stats')
    encounter_list.add_argument('-eblk', '--encounter-blacklist', action='append', default=[],
                                help='List of pokemon to NOT encounter for more stats')
    parser.add_argument('-ld', '--login-delay',
                        help='Time delay between each login attempt',
                        type=float, default=5)
    parser.add_argument('-lr', '--login-retries',
                        help='Number of logins attempts before refreshing a thread',
                        type=int, default=3)
    parser.add_argument('-mf', '--max-failures',
                        help='Maximum number of failures to parse locations before an account will go into a two hour sleep',
                        type=int, default=5)
    parser.add_argument('-msl', '--min-seconds-left',
                        help='Time that must be left on a spawn before considering it too late and skipping it. eg. 600 would skip anything with < 10 minutes remaining. Default 0.',
                        type=int, default=0)
    parser.add_argument('-dc', '--display-in-console',
                        help='Display Found Pokemon in Console',
                        action='store_true', default=False)
    parser.add_argument('-H', '--host', help='Set web server listening host',
                        default='127.0.0.1')
    parser.add_argument('-P', '--port', type=int,
                        help='Set web server listening port', default=5000)
    parser.add_argument('-L', '--locale',
                        help='Locale for Pokemon names (default: {},\
                        check {} for more)'.
                        format(config['LOCALE'], config['LOCALES_DIR']), default='en')
    parser.add_argument('-c', '--china',
                        help='Coordinates transformer for China',
                        action='store_true')
    parser.add_argument('-m', '--mock', type=str,
                        help='Mock mode - point to a fpgo endpoint instead of using the real PogoApi, ec: http://127.0.0.1:9090',
                        default='')
    parser.add_argument('-ns', '--no-server',
                        help='No-Server Mode. Starts the searcher but not the Webserver.',
                        action='store_true', default=False)
    parser.add_argument('-os', '--only-server',
                        help='Server-Only Mode. Starts only the Webserver without the searcher.',
                        action='store_true', default=False)
    parser.add_argument('-nsc', '--no-search-control',
                        help='Disables search control',
                        action='store_false', dest='search_control', default=True)
    parser.add_argument('-fl', '--fixed-location',
                        help='Hides the search bar for use in shared maps.',
                        action='store_true', default=False)
    parser.add_argument('-k', '--gmaps-key',
                        help='Google Maps Javascript API Key',
                        required=True)
    parser.add_argument('--skip-empty', help='Enables skipping of empty cells in normal scans - requires previously populated database (not to be used with -ss)',
                        action='store_true', default=False)
    parser.add_argument('-C', '--cors', help='Enable CORS on web server',
                        action='store_true', default=False)
    parser.add_argument('-D', '--db', help='Database filename',
                        default='pogom.db')
    parser.add_argument('-cd', '--clear-db',
                        help='Deletes the existing database before starting the Webserver.',
                        action='store_true', default=False)
    parser.add_argument('-np', '--no-pokemon',
                        help='Disables Pokemon from the map (including parsing them into local db)',
                        action='store_true', default=False)
    parser.add_argument('-ng', '--no-gyms',
                        help='Disables Gyms from the map (including parsing them into local db)',
                        action='store_true', default=False)
    parser.add_argument('-nk', '--no-pokestops',
                        help='Disables PokeStops from the map (including parsing them into local db)',
                        action='store_true', default=False)
    parser.add_argument('-ss', '--spawnpoint-scanning',
                        help='Use spawnpoint scanning (instead of hex grid). Scans in a circle based on step_limit when reading spawn points from the DB', nargs='?', const='nofile', default=False)
    parser.add_argument('--dump-spawnpoints', help='dump the spawnpoints from the db to json (only for use with -ss)',
                        action='store_true', default=False)
    parser.add_argument('-pd', '--purge-data',
                        help='Clear pokemon from database this many hours after they disappear (0 to disable)',
                        type=int, default=0)
    parser.add_argument('-px', '--proxy', help='Proxy url (e.g. socks5://127.0.0.1:9050)', action='append')
    parser.add_argument('-pxsc', '--proxy-skip-check', help='Disable checking of proxies before start', action='store_true', default=False)
    parser.add_argument('-pxt', '--proxy-timeout', help='Timeout for the proxy checker in seconds', type=int, default=5)
    parser.add_argument('-pxd', '--proxy-display', help='Display info on which proxy is being used (index or full). To be used with -ps', type=str, default='index')
    parser.add_argument('--db-type', help='Type of database to be used (default: sqlite)',
                        default='sqlite')
    parser.add_argument('--db-name', help='Name of the database to be used')
    parser.add_argument('--db-user', help='Username for the database')
    parser.add_argument('--db-pass', help='Password for the database')
    parser.add_argument('--db-host', help='IP or hostname for the database')
    parser.add_argument('--db-port', help='Port for the database', type=int, default=3306)
    parser.add_argument('--db-max_connections', help='Max connections (per thread) for the database',
                        type=int, default=5)
    parser.add_argument('--db-threads', help='Number of db threads; increase if the db queue falls behind',
                        type=int, default=1)
    parser.add_argument('-wh', '--webhook', help='Define URL(s) to POST webhook information to',
                        nargs='*', default=False, dest='webhooks')
    parser.add_argument('-gi', '--gym-info', help='Get all details about gyms (causes an additional API hit for every gym)',
                        action='store_true', default=False)
    parser.add_argument('--disable-clean', help='Disable clean db loop',
                        action='store_true', default=False)
    parser.add_argument('--webhook-updates-only', help='Only send updates (pokémon & lured pokéstops)',
                        action='store_true', default=False)
    parser.add_argument('--wh-threads', help='Number of webhook threads; increase if the webhook queue falls behind',
                        type=int, default=1)
    parser.add_argument('--ssl-certificate', help='Path to SSL certificate file')
    parser.add_argument('--ssl-privatekey', help='Path to SSL private key file')
    parser.add_argument('-ps', '--print-status', action='store_true',
                        help='Show a status screen instead of log messages. Can switch between status and logs by pressing enter.', default=False)
    parser.add_argument('-sn', '--status-name', default=None,
                        help='Enable status page database update using STATUS_NAME as main worker name')
    parser.add_argument('-spp', '--status-page-password', default=None,
                        help='Set the status page password')
    parser.add_argument('-el', '--encrypt-lib', help='Path to encrypt lib to be used instead of the shipped ones')
    parser.add_argument('-odt', '--on-demand_timeout', help='Pause searching while web UI is inactive for this timeout (in seconds)', type=int, default=0)
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument('-v', '--verbose', help='Show debug messages from PokemonGo-Map and pgoapi. Optionally specify file to log to.', nargs='?', const='nofile', default=False, metavar='filename.log')
    verbosity.add_argument('-vv', '--very-verbose', help='Like verbose, but show debug messages from all modules as well. Optionally specify file to log to.', nargs='?', const='nofile', default=False, metavar='filename.log')
    parser.set_defaults(DEBUG=False)

    args = parser.parse_args()

    if args.only_server:
        if args.location is None:
            parser.print_usage()
            print(sys.argv[0] + ": error: arguments -l/--location is required")
            sys.exit(1)
    else:
        # If using a CSV file, add the data where needed into the username, password and auth_service arguments.
        # CSV file should have lines like "ptc,username,password", "username,password" or "username".
        if args.accountcsv is not None:
            # Giving num_fields something it would usually not get
            num_fields = -1
            with open(args.accountcsv, 'r') as f:
                for num, line in enumerate(f, 1):

                    fields = []
                    line = line.strip()

                    # Ignore blank lines and comment lines before counting fields.
                    if len(line) == 0 or line.startswith('#'):
                        continue

                    # First time around populate num_fields with current field count.
                    if num_fields < 0:
                        num_fields = line.count(',') + 1

                    csv_input = ['',
                                 '<username>',
                                 '<username>,<password>',
                                 '<ptc/google>,<username>,<password>']

                    # If the number of fields is different, this is not a consistent CSV.
                    if num_fields != line.count(',') + 1:
                        print(sys.argv[0] + ": Error parsing CSV file on line " + str(num) + ". Your file started with the following input, '" + csv_input[num_fields] + "' but now you gave us '" + csv_input[line.count(',') + 1] + "'.")
                        sys.exit(1)

                    field_error = ''

                    # If number of fields is more than 1, split the line into fields and strip them.
                    if num_fields > 1:
                        fields = line.split(",")
                        fields = list(map(str.strip, fields))

                    # If the number of fields is one then assume this is "username". As requested.
                    if num_fields == 1:
                        # Empty lines are already ignored.
                        args.username.append(line)

                    # If the number of fields is two then assume this is "username,password". As requested.
                    if num_fields == 2:
                        # If field length is not longer than 0 something is wrong!
                        if len(fields[0]) > 0:
                            args.username.append(fields[0])
                        else:
                            field_error = 'username'

                        # If field length is not longer than 0 something is wrong!
                        if len(fields[1]) > 0:
                            args.password.append(fields[1])
                        else:
                            field_error = 'password'

                    # If the number of fields is three then assume this is "ptc,username,password". As requested.
                    if num_fields == 3:
                        # If field 0 is not ptc or google something is wrong!
                        if fields[0].lower() == 'ptc' or fields[0].lower() == 'google':
                            args.auth_service.append(fields[0])
                        else:
                            field_error = 'method'

                        # If field length is not longer than 0 something is wrong!
                        if len(fields[1]) > 0:
                            args.username.append(fields[1])
                        else:
                            field_error = 'username'

                        # If field length is not longer than 0 something is wrong!
                        if len(fields[2]) > 0:
                            args.password.append(fields[2])
                        else:
                            field_error = 'password'

                    if num_fields > 3:
                        print('Too many fields in accounts file: max supported are 3 fields. Found {} fields'.format(num_fields))
                        sys.exit(1)

                    # If something is wrong display error.
                    if field_error != '':
                        type_error = 'empty!'
                        if field_error == 'method':
                            type_error = 'not ptc or google; instead we got \'' + fields[0] + '\'!'
                        print(sys.argv[0] + ": Error parsing CSV file on line " + str(num) + ". We found " + str(num_fields) + " fields, so your input should have looked like '" + csv_input[num_fields] + "'\nBut you gave us '" + line + "', your " + field_error + " was " + type_error)
                        sys.exit(1)

        errors = []

        num_auths = len(args.auth_service)
        num_usernames = 0
        num_passwords = 0

        if len(args.username) == 0:
            errors.append('Missing `username` either as -u/--username, csv file using -ac, or in config')
        else:
            num_usernames = len(args.username)

        if args.location is None:
            errors.append('Missing `location` either as -l/--location or in config')

        if len(args.password) == 0:
            errors.append('Missing `password` either as -p/--password, csv file, or in config')
        else:
            num_passwords = len(args.password)

        if args.step_limit is None:
            errors.append('Missing `step_limit` either as -st/--step-limit or in config')

        if num_auths == 0:
            args.auth_service = ['ptc']

        num_auths = len(args.auth_service)

        if num_usernames > 1:
            if num_passwords > 1 and num_usernames != num_passwords:
                errors.append('The number of provided passwords ({}) must match the username count ({})'.format(num_passwords, num_usernames))
            if num_auths > 1 and num_usernames != num_auths:
                errors.append('The number of provided auth services ({}) must match the username count ({})'.format(num_auths, num_usernames))

        if len(errors) > 0:
            parser.print_usage()
            print(sys.argv[0] + ": errors: \n - " + "\n - ".join(errors))
            sys.exit(1)

        # Fill the pass/auth if set to a single value
        if num_passwords == 1:
            args.password = [args.password[0]] * num_usernames
        if num_auths == 1:
            args.auth_service = [args.auth_service[0]] * num_usernames

        # Make our accounts list
        args.accounts = []

        # Make the accounts list
        for i, username in enumerate(args.username):
            args.accounts.append({'username': username, 'password': args.password[i], 'auth_service': args.auth_service[i]})

        # Make max workers equal number of accounts if unspecified, and disable account switching
        if args.workers is None:
            args.workers = len(args.accounts)
            args.account_search_interval = None

        # Disable search interval if 0 specified
        if args.account_search_interval == 0:
            args.account_search_interval = None

        # Make sure we don't have an empty account list after adding command line and CSV accounts
        if len(args.accounts) == 0:
            print(sys.argv[0] + ": Error: no accounts specified. Use -a, -u, and -p or --accountcsv to add accounts")
            sys.exit(1)

        args.encounter_blacklist = [int(i) for i in args.encounter_blacklist]
        args.encounter_whitelist = [int(i) for i in args.encounter_whitelist]

        # Decide which scanning mode to use
        if args.spawnpoint_scanning:
            args.scheduler = 'SpawnScan'
        elif args.skip_empty:
            args.scheduler = 'HexSearchSpawnpoint'
        else:
            args.scheduler = 'HexSearch'

    return args

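A note on the account fan-out at the end of get_args() above: when a single password or auth service is supplied for many usernames, it is replicated to match the username count before the accounts list is zipped together. A minimal standalone sketch of that behaviour (build_accounts is a hypothetical helper, not part of the project):

def build_accounts(usernames, passwords, auth_services):
    # Fan a single password/auth service out across all usernames,
    # mirroring what get_args() does before building args.accounts.
    if len(passwords) == 1:
        passwords = passwords * len(usernames)
    if len(auth_services) == 1:
        auth_services = auth_services * len(usernames)
    return [{'username': u, 'password': p, 'auth_service': a}
            for u, p, a in zip(usernames, passwords, auth_services)]

# build_accounts(['a', 'b'], ['pw'], ['ptc']) yields two accounts
# sharing the same password and auth service.
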
Example 39

Project: PokemonGo-Map
Source File: runserver.py
View license
def main():
    # Patch threading to make exceptions catchable
    install_thread_excepthook()

    # Make sure exceptions get logged
    sys.excepthook = handle_exception

    args = get_args()

    # Add file logging if enabled
    if args.verbose and args.verbose != 'nofile':
        filelog = logging.FileHandler(args.verbose)
        filelog.setFormatter(logging.Formatter('%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s'))
        logging.getLogger('').addHandler(filelog)
    if args.very_verbose and args.very_verbose != 'nofile':
        filelog = logging.FileHandler(args.very_verbose)
        filelog.setFormatter(logging.Formatter('%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s'))
        logging.getLogger('').addHandler(filelog)

    # Check if we have the proper encryption library file and get its path
    encryption_lib_path = get_encryption_lib_path(args)
    if encryption_lib_path == '':
        sys.exit(1)

    if args.verbose or args.very_verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    # Let's not forget to run Grunt / Only needed when running with webserver
    if not args.no_server:
        if not os.path.exists(os.path.join(os.path.dirname(__file__), 'static/dist')):
            log.critical('Missing front-end assets (static/dist) -- please run "npm install && npm run build" before starting the server')
            sys.exit()

    # These are very noisy, let's shush them up a bit
    logging.getLogger('peewee').setLevel(logging.INFO)
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('pgoapi.pgoapi').setLevel(logging.WARNING)
    logging.getLogger('pgoapi.rpc_api').setLevel(logging.INFO)
    logging.getLogger('werkzeug').setLevel(logging.ERROR)

    config['parse_pokemon'] = not args.no_pokemon
    config['parse_pokestops'] = not args.no_pokestops
    config['parse_gyms'] = not args.no_gyms

    # Turn these back up if debugging
    if args.verbose or args.very_verbose:
        logging.getLogger('pgoapi').setLevel(logging.DEBUG)
    if args.very_verbose:
        logging.getLogger('peewee').setLevel(logging.DEBUG)
        logging.getLogger('requests').setLevel(logging.DEBUG)
        logging.getLogger('pgoapi.pgoapi').setLevel(logging.DEBUG)
        logging.getLogger('pgoapi.rpc_api').setLevel(logging.DEBUG)
        logging.getLogger('rpc_api').setLevel(logging.DEBUG)
        logging.getLogger('werkzeug').setLevel(logging.DEBUG)

    # use lat/lng directly if it matches such a pattern
    prog = re.compile(r"^(-?\d+\.\d+),?\s?(-?\d+\.\d+)$")
    res = prog.match(args.location)
    if res:
        log.debug('Using coordinates from CLI directly')
        position = (float(res.group(1)), float(res.group(2)), 0)
    else:
        log.debug('Looking up coordinates in API')
        position = util.get_pos_by_name(args.location)

    # Use the latitude and longitude to get the local altitude from Google
    try:
        url = 'https://maps.googleapis.com/maps/api/elevation/json?locations={},{}'.format(
            str(position[0]), str(position[1]))
        altitude = requests.get(url).json()[u'results'][0][u'elevation']
        log.debug('Local altitude is: %sm', altitude)
        position = (position[0], position[1], altitude)
    except (requests.exceptions.RequestException, IndexError, KeyError):
        log.error('Unable to retrieve altitude from Google APIs; setting to 0')

    if not any(position):
        log.error('Could not get a position by name, aborting')
        sys.exit()

    log.info('Parsed location is: %.4f/%.4f/%.4f (lat/lng/alt)',
             position[0], position[1], position[2])

    if args.no_pokemon:
        log.info('Parsing of Pokemon disabled')
    if args.no_pokestops:
        log.info('Parsing of Pokestops disabled')
    if args.no_gyms:
        log.info('Parsing of Gyms disabled')
    if args.encounter:
        log.info('Encountering pokemon enabled')

    config['LOCALE'] = args.locale
    config['CHINA'] = args.china

    app = Pogom(__name__)
    db = init_database(app)
    if args.clear_db:
        log.info('Clearing database')
        if args.db_type == 'mysql':
            drop_tables(db)
        elif os.path.isfile(args.db):
            os.remove(args.db)
    create_tables(db)

    app.set_current_location(position)

    # Control the search status (running or not) across threads
    pause_bit = Event()
    pause_bit.clear()
    if args.on_demand_timeout > 0:
        pause_bit.set()

    heartbeat = [now()]

    # Setup the location tracking queue and push the first location on
    new_location_queue = Queue()
    new_location_queue.put(position)

    # DB Updates
    db_updates_queue = Queue()

    # Thread(s) to process database updates
    for i in range(args.db_threads):
        log.debug('Starting db-updater worker thread %d', i)
        t = Thread(target=db_updater, name='db-updater-{}'.format(i), args=(args, db_updates_queue))
        t.daemon = True
        t.start()

    # db cleaner; really only need one ever
    if not args.disable_clean:
        t = Thread(target=clean_db_loop, name='db-cleaner', args=(args,))
        t.daemon = True
        t.start()

    # WH Updates
    wh_updates_queue = Queue()

    # Thread to process webhook updates
    for i in range(args.wh_threads):
        log.debug('Starting wh-updater worker thread %d', i)
        t = Thread(target=wh_updater, name='wh-updater-{}'.format(i), args=(args, wh_updates_queue))
        t.daemon = True
        t.start()

    if not args.only_server:

        # Check all proxies before continue so we know they are good
        if args.proxy and not args.proxy_skip_check:

            # Overwrite old args.proxy with new working list
            args.proxy = check_proxies(args)

        # Gather the pokemons!

        # attempt to dump the spawn points (do this before starting threads or endure the woe)
        if args.spawnpoint_scanning and args.spawnpoint_scanning != 'nofile' and args.dump_spawnpoints:
            with open(args.spawnpoint_scanning, 'w+') as spawns_file:
                log.info('Saving spawn points to %s', args.spawnpoint_scanning)
                spawns = Pokemon.get_spawnpoints_in_hex(position, args.step_limit)
                spawns_file.write(json.dumps(spawns))
                log.info('Finished exporting spawn points')

        argset = (args, new_location_queue, pause_bit, heartbeat, encryption_lib_path, db_updates_queue, wh_updates_queue)

        log.debug('Starting a %s search thread', args.scheduler)
        search_thread = Thread(target=search_overseer_thread, name='search-overseer', args=argset)
        search_thread.daemon = True
        search_thread.start()

    if args.cors:
        CORS(app)

    # No more stale JS
    init_cache_busting(app)

    app.set_search_control(pause_bit)
    app.set_heartbeat_control(heartbeat)
    app.set_location_queue(new_location_queue)

    config['ROOT_PATH'] = app.root_path
    config['GMAPS_KEY'] = args.gmaps_key

    if args.no_server:
        # This loop allows for ctrl-c interrupts to work since flask won't be holding the program open
        while search_thread.is_alive():
            time.sleep(60)
    else:
        ssl_context = None
        if args.ssl_certificate and args.ssl_privatekey \
                and os.path.exists(args.ssl_certificate) and os.path.exists(args.ssl_privatekey):
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            ssl_context.load_cert_chain(args.ssl_certificate, args.ssl_privatekey)
            log.info('Web server in SSL mode.')
        if args.verbose or args.very_verbose:
            app.run(threaded=True, use_reloader=False, debug=True, host=args.host, port=args.port, ssl_context=ssl_context)
        else:
            app.run(threaded=True, use_reloader=False, debug=False, host=args.host, port=args.port, ssl_context=ssl_context)

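The os.path.dirname(__file__) call in the asset check above is the pattern this page documents: anchoring a relative path at the running module instead of the process working directory, so the check works no matter where the server is launched from. A minimal sketch of just that check (require_built_assets is a hypothetical name):

import os
import sys

def require_built_assets():
    # Resolve static/dist relative to this module, not the CWD.
    dist = os.path.join(os.path.dirname(__file__), 'static/dist')
    if not os.path.exists(dist):
        sys.exit('Missing front-end assets (static/dist) -- run "npm install && npm run build"')
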
Example 40

Project: mrq
Source File: config.py
View license
def add_parser_args(parser, config_type):

    # General arguments

    parser.add_argument(
        '--trace_greenlets',
        action='store_true',
        default=False,
        help='Collect stats about each greenlet execution time and switches.')

    parser.add_argument(
        '--trace_memory',
        action='store_true',
        default=False,
        help='Collect stats about memory for each task. Incompatible with --greenlets > 1')

    parser.add_argument(
        '--trace_io',
        action='store_true',
        default=False,
        help='Collect stats about all I/O operations')

    parser.add_argument(
        '--print_mongodb',
        action='store_true',
        default=False,
        help='Print all MongoDB requests')

    parser.add_argument(
        '--trace_memory_type',
        action='store',
        default="",
        help='Create a .png object graph in trace_memory_output_dir ' +
             'with a random object of this type.')

    parser.add_argument(
        '--trace_memory_output_dir',
        action='store',
        default="memory_traces",
        help='Directory where to output .pngs with object graphs')

    parser.add_argument(
        '--profile',
        action='store_true',
        default=False,
        help='Run profiling on the whole worker')

    parser.add_argument(
        '--mongodb_jobs', '--mongodb',
        action='store',
        default="mongodb://127.0.0.1:27017/mrq",
        help='MongoDB URI for the jobs, scheduled_jobs & workers database')

    parser.add_argument(
        '--mongodb_logs',
        action='store',
        default="1",
        help='MongoDB URI for the logs database. ' +
             '"0" will disable remote logs, "1" will use main MongoDB.')

    parser.add_argument(
        '--mongodb_logs_size',
        action='store',
        default=16 * 1024 * 1024,
        type=int,
        help='If provided, caps the log collection at that many bytes.')

    parser.add_argument(
        '--no_mongodb_ensure_indexes',
        action='store_true',
        default=False,
        help='If provided, skip the creation of MongoDB indexes at worker startup.')

    parser.add_argument(
        '--redis',
        action='store',
        default="redis://127.0.0.1:6379",
        help='Redis URI')

    parser.add_argument(
        '--redis_prefix',
        action='store',
        default="mrq",
        help='Redis key prefix')

    parser.add_argument(
        '--redis_max_connections',
        action='store',
        type=int,
        default=1000,
        help='Redis max connection pool size')

    parser.add_argument(
        '--redis_timeout',
        action='store',
        type=float,
        default=30,
        help='Redis connection pool timeout to wait for an available connection')

    parser.add_argument(
        '--name',
        default=None,
        action='store',
        help='Specify a different name')

    parser.add_argument(
        '--quiet',
        default=False,
        action='store_true',
        help='Don\'t output task logs')

    parser.add_argument(
        '--config',
        '-c',
        default=None,
        action="store",
        help='Path of a config file')

    parser.add_argument(
        '--worker_class',
        default="mrq.worker.Worker",
        action="store",
        help='Path to a custom worker class')

    parser.add_argument(
        '--version',
        '-v',
        default=False,
        action="store_true",
        help='Prints current MRQ version')

    parser.add_argument(
        '--no_import_patch',
        default=False,
        action='store_true',
        help='(DEPRECATED) Skips patching __import__ to fix gevent bug #108')

    parser.add_argument(
        '--add_network_latency',
        default="0",
        action='store',
        type=str,
        help='Adds random latency to the network calls, zero to N seconds. Can be a range (1-2)')

    parser.add_argument(
        '--default_job_result_ttl',
        default=7 * 24 * 3600,
        action='store',
        type=float,
        help='Seconds the results are kept in MongoDB when status is success')

    parser.add_argument(
        '--default_job_abort_ttl',
        default=24 * 3600,
        action='store',
        type=float,
        help='Seconds the tasks are kept in MongoDB when status is abort')

    parser.add_argument(
        '--default_job_cancel_ttl',
        default=24 * 3600,
        action='store',
        type=float,
        help='Seconds the tasks are kept in MongoDB when status is cancel')

    parser.add_argument(
        '--default_job_timeout',
        default=3600,
        action='store',
        type=float,
        help='In seconds, delay before interrupting the job')

    parser.add_argument(
        '--default_job_max_retries',
        default=3,
        action='store',
        type=int,
        help='Set the status to "maxretries" after retrying that many times')

    parser.add_argument(
        '--default_job_retry_delay',
        default=3,
        action='store',
        type=int,
        help='Seconds before a job in retry status is requeued again')

    parser.add_argument(
        '--use_large_job_ids',
        action='store_true',
        default=False,
        help='Do not use compacted job IDs in Redis. For compatibility with 0.1.x only')

    # mrq-run-specific arguments

    if config_type == "run":

        parser.add_argument(
            '--queue',
            action='store',
            default="",
            help='Queue the task on this queue instead of running it right away')

        parser.add_argument(
            'taskpath',
            action='store',
            help='Task to run')

        parser.add_argument(
            'taskargs',
            action='store',
            default='{}',
            nargs='*',
            help='JSON-encoded arguments, or "key value" pairs')

    # Dashboard-specific arguments

    elif config_type == "dashboard":

        parser.add_argument(
            '--dashboard_httpauth',
            default="",
            action="store",
            help='HTTP Auth for the Dashboard. Format is user:pass')

        parser.add_argument(
            '--dashboard_queue',
            default=None,
            action="store",
            help='Default queue for dashboard actions.')

        parser.add_argument(
            '--dashboard_port',
            default=5555,
            action="store",
            type=int,
            help='Use this port for mrq-dashboard. 5555 by default.')

        parser.add_argument(
            '--dashboard_ip',
            default="0.0.0.0",
            action="store",
            type=str,
            help='Bind the dashboard to this IP. Default is "0.0.0.0", use "127.0.0.1" to restrict access.')

    # Worker-specific args

    elif config_type == "worker":

        parser.add_argument(
            '--max_jobs',
            default=0,
            type=int,
            action='store',
            help='Gevent: max number of jobs to do before quitting.' +
                 ' Temp workaround for memory leaks')

        parser.add_argument(
            '--max_memory',
            default=0,
            type=int,
            action='store',
            help='Max memory (in Mb) after which the process will be shut down. Use with --processes [1-N] ' +
                 'to have supervisord automatically respawn the worker when this happens')

        parser.add_argument(
            '--greenlets',
            '--gevent',  # deprecated
            '-g',
            default=1,
            type=int,
            action='store',
            help='Max number of greenlets to use')

        parser.add_argument(
            '--processes',
            '-p',
            default=0,
            type=int,
            action='store',
            help='Number of processes to launch with supervisord')

        default_template = os.path.abspath(os.path.join(
            os.path.dirname(__file__),
            "supervisord_templates/default.conf"
        ))

        parser.add_argument(
            '--supervisord_template',
            default=default_template,
            action='store',
            help='Path of supervisord template to use')

        parser.add_argument(
            '--scheduler',
            default=False,
            action='store_true',
            help='Run the scheduler')

        parser.add_argument(
            '--scheduler_interval',
            default=60,
            action='store',
            type=float,
            help='Seconds between scheduler checks')

        parser.add_argument(
            '--report_interval',
            default=10,
            action='store',
            type=float,
            help='Seconds between worker reports to MongoDB')

        parser.add_argument(
            '--report_file',
            default="",
            action='store',
            type=unicode,
            help='Filepath of a json dump of the worker status. Disabled if none')

        parser.add_argument(
            'queues',
            nargs='*',
            default=["default"],
            help='The queues to listen on (default: \'default\')')

        parser.add_argument(
            '--subqueues_refresh_interval',
            default=10,
            action='store',
            type=float,
            help="Seconds between worker refreshes of the known subqueues")

        parser.add_argument(
            '--paused_queues_refresh_interval',
            default=10,
            action='store',
            type=float,
            help="Seconds between worker refreshes of the paused queues list")

        parser.add_argument(
            '--subqueues_delimiter',
            default='/',
            help='Delimiter between main queue and subqueue names',
            action=DelimiterArgParser)

        parser.add_argument(
            '--admin_port',
            default=0,
            action="store",
            type=int,
            help='Start an admin server on this port, if provided. Incompatible with --processes')

        parser.add_argument(
            '--admin_ip',
            default="127.0.0.1",
            action="store",
            type=str,
            help='IP for the admin server to listen on. Use "0.0.0.0" to allow access from outside')

        parser.add_argument(
            '--local_ip',
            default=get_local_ip(),
            action="store",
            type=str,
            help='Overwrite the local IP, to be displayed in the dashboard.')

        parser.add_argument(
            '--max_latency',
            default=1.,
            type=float,
            action='store',
            help='Max seconds while worker may sleep waiting for a new job. ' +
                 'Can be < 1.')

        parser.add_argument(
            '--dequeue_strategy',
            default="sequential",
            type=str,
            action='store',
            help='Strategy for dequeuing multiple queues. Default is \'sequential\', ' +
                 'to dequeue them in command-line order.')

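The default --supervisord_template above is built the same way: os.path.dirname(__file__) anchors the packaged template at the module's directory, and os.path.abspath normalizes the result so the default survives later changes of working directory. A compact equivalent (package_default is a hypothetical helper; the relative path is the one from the example):

import os

def package_default(relpath):
    # Absolute path to a data file shipped next to this module.
    return os.path.abspath(os.path.join(os.path.dirname(__file__), relpath))

# e.g. package_default('supervisord_templates/default.conf')
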
Example 41

Project: auto-sklearn
Source File: smbo.py
View license
    def run_smbo(self, max_iters=1000):
        global evaluator

        self.watcher.start_task('SMBO')

        # == first things first: load the datamanager
        self.reset_data_manager()
        
        # == Initialize SMBO stuff
        # first create a scenario
        seed = self.seed # TODO
        num_params = len(self.config_space.get_hyperparameters())
        # allocate a run history
        run_history = RunHistory()
        meta_runhistory = RunHistory()
        meta_runs_dataset_indices = {}
        num_run = self.start_num_run
        instance_id = self.dataset_name + SENTINEL

        # == Train on subset
        #    before doing anything, let us run the default_cfg
        #    on a subset of the available data to ensure that
        #    we at least have some models
        #    we will try three different ratios of decreasing magnitude
        #    in the hope that at least on the last one we will be able
        #    to get a model
        n_data = self.datamanager.data['X_train'].shape[0]
        subset_ratio = 10000. / n_data
        if subset_ratio >= 0.5:
            subset_ratio = 0.33
            subset_ratios = [subset_ratio, subset_ratio * 0.10]
        else:
            subset_ratios = [subset_ratio, 500. / n_data]
        self.logger.info("Training default configurations on a subset of "
                         "%d/%d data points." %
                         (int(n_data * subset_ratio), n_data))

        # the time limit for these function evaluations is rigorously
        # set to only 1/2 of a full function evaluation
        subset_time_limit = max(5, int(self.func_eval_time_limit / 2))
        # the configs we want to run on the data subset are:
        # 1) the default configs
        # 2) a set of configs we selected for training on a subset
        subset_configs = [self.config_space.get_default_configuration()] \
                          + self.collect_additional_subset_defaults()
        subset_config_succesful = [False] * len(subset_configs)
        for subset_config_id, next_config in enumerate(subset_configs):
            for i, ratio in enumerate(subset_ratios):
                self.reset_data_manager()
                n_data_subsample = int(n_data * ratio)

                # run the config, but throw away the result afterwards
                # since this cfg was evaluated only on a subset
                # and we don't want to confuse SMAC
                self.logger.info("Starting to evaluate %d on SUBSET "
                                 "with size %d and time limit %ds.",
                                 num_run, n_data_subsample,
                                 subset_time_limit)
                self.logger.info(next_config)
                _info = eval_with_limits(
                    datamanager=self.datamanager, backend=self.backend,
                    config=next_config, seed=seed, num_run=num_run,
                    resampling_strategy=self.resampling_strategy,
                    resampling_strategy_args=self.resampling_strategy_args,
                    memory_limit=self.memory_limit,
                    func_eval_time_limit=subset_time_limit,
                    subsample=n_data_subsample,
                    logger=self.logger)
                (duration, result, _, additional_run_info, status) = _info
                self.logger.info("Finished evaluating %d. configuration on SUBSET. "
                                 "Duration %f; loss %f; status %s; additional run "
                                 "info: %s ", num_run, duration, result,
                                 str(status), additional_run_info)

                num_run += 1
                if i < len(subset_ratios) - 1:
                    if status != StatusType.SUCCESS:
                        # Do not increase num_run here, because we will try
                        # the same configuration with less data
                        self.logger.info("A CONFIG did not finish "
                                         " for subset ratio %f -> going smaller",
                                         ratio)
                        continue
                    else:
                        self.logger.info("Finished SUBSET training successfully"
                                         " with ratio %f", ratio)
                        subset_config_succesful[subset_config_id] = True
                        break
                else:
                    if status != StatusType.SUCCESS:
                        self.logger.info("A CONFIG did not finish "
                                         " for subset ratio %f.",
                                         ratio)
                        continue
                    else:
                        self.logger.info("Finished SUBSET training successfully"
                                         " with ratio %f", ratio)
                        subset_config_succesful[subset_config_id] = True
                        break

        # Use the first non-failing configuration from the subsets as the new
        #  default configuration -> this guards us against the random forest
        # failing on large, sparse datasets
        default_cfg = None
        for subset_config_id, next_config in enumerate(subset_configs):
            if subset_config_succesful[subset_config_id]:
                default_cfg = next_config
                break
        if default_cfg is None:
            default_cfg = self.config_space.get_default_configuration()

        # == METALEARNING suggestions
        # we start by evaluating the defaults on the full dataset again
        # and add the suggestions from metalearning behind it

        if self.metadata_directory is None:
            metalearning_directory = os.path.dirname(
                autosklearn.metalearning.__file__)
            # There is no multilabel data in OpenML
            if self.task == MULTILABEL_CLASSIFICATION:
                meta_task = BINARY_CLASSIFICATION
            else:
                meta_task = self.task
            metadata_directory = os.path.join(
                metalearning_directory, 'files',
                '%s_%s_%s' % (METRIC_TO_STRING[self.metric],
                              TASK_TYPES_TO_STRING[meta_task],
                              'sparse' if self.datamanager.info['is_sparse']
                              else 'dense'))
            self.metadata_directory = metadata_directory

        self.logger.info('Metadata directory: %s', self.metadata_directory)
        meta_base = MetaBase(self.config_space, self.metadata_directory)

        metafeature_calculation_time_limit = int(
            self.total_walltime_limit / 4)
        metafeature_calculation_start_time = time.time()
        meta_features = self._calculate_metafeatures_with_limits(
            metafeature_calculation_time_limit)
        metafeature_calculation_end_time = time.time()
        metafeature_calculation_time_limit -= (
            metafeature_calculation_end_time -
            metafeature_calculation_start_time)

        if metafeature_calculation_time_limit < 1:
            self.logger.warning('Time limit for metafeature calculation less '
                                'than 1 second (%f). Skipping calculation '
                                'of metafeatures for encoded dataset.',
                                metafeature_calculation_time_limit)
            meta_features_encoded = None
        else:
            with warnings.catch_warnings():
                warnings.showwarning = self._send_warnings_to_log
                self.datamanager.perform1HotEncoding()
            meta_features_encoded = \
                self._calculate_metafeatures_encoded_with_limits(
                    metafeature_calculation_time_limit)

        # In case there is a problem calculating the encoded meta-features
        if meta_features is None:
            if meta_features_encoded is not None:
                meta_features = meta_features_encoded
        else:
            if meta_features_encoded is not None:
                meta_features.metafeature_values.update(
                    meta_features_encoded.metafeature_values)

        if meta_features is not None:
            meta_base.add_dataset(instance_id, meta_features)
            # Do mean imputation of the meta-features - should be done specific
            # for each prediction model!
            all_metafeatures = meta_base.get_metafeatures(
                features=list(meta_features.keys()))
            all_metafeatures.fillna(all_metafeatures.mean(), inplace=True)

            with warnings.catch_warnings():
                warnings.showwarning = self._send_warnings_to_log
                metalearning_configurations = self.collect_metalearning_suggestions(
                    meta_base)
            if metalearning_configurations is None:
                metalearning_configurations = []
            self.reset_data_manager()

            self.logger.info('%s', meta_features)

            # Convert meta-features into a dictionary because the scenario
            # expects a dictionary
            meta_features_dict = {}
            for dataset, series in all_metafeatures.iterrows():
                meta_features_dict[dataset] = series.values
            meta_features_list = []
            for meta_feature_name in all_metafeatures.columns:
                meta_features_list.append(meta_features[meta_feature_name].value)
            meta_features_list = np.array(meta_features_list).reshape((1, -1))
            self.logger.info(list(meta_features_dict.keys()))

            meta_runs = meta_base.get_all_runs(METRIC_TO_STRING[self.metric])
            meta_runs_index = 0
            try:
                meta_durations = meta_base.get_all_runs('runtime')
                read_runtime_data = True
            except KeyError:
                read_runtime_data = False
                self.logger.critical('Cannot read runtime data.')
                if self.acquisition_function == 'EIPS':
                    self.logger.critical('Reverting to acquisition function EI!')
                    self.acquisition_function = 'EI'

            for meta_dataset in meta_runs.index:
                meta_dataset_start_index = meta_runs_index
                for meta_configuration in meta_runs.columns:
                    if np.isfinite(meta_runs.loc[meta_dataset, meta_configuration]):
                        try:
                            config = meta_base.get_configuration_from_algorithm_index(
                                meta_configuration)
                            cost = meta_runs.loc[meta_dataset, meta_configuration]
                            if read_runtime_data:
                                runtime = meta_durations.loc[meta_dataset,
                                                             meta_configuration]
                            else:
                                runtime = 1
                            # TODO read out other status types!
                            meta_runhistory.add(config, cost, runtime,
                                                StatusType.SUCCESS,
                                                instance_id=meta_dataset)
                            meta_runs_index += 1
                        except Exception:
                            # TODO maybe add warning
                            pass

                meta_runs_dataset_indices[meta_dataset] = (
                    meta_dataset_start_index, meta_runs_index)
        else:
            if self.acquisition_function == 'EIPS':
                self.logger.critical('Reverting to acquisition function EI!')
                self.acquisition_function = 'EI'
            meta_features_list = []
            meta_features_dict = {}
            metalearning_configurations = []

        self.scenario = AutoMLScenario(config_space=self.config_space,
                                       limit=self.total_walltime_limit,
                                       cutoff_time=self.func_eval_time_limit,
                                       metafeatures=meta_features_dict,
                                       output_dir=self.backend.temporary_directory,
                                       shared_model=self.shared_mode)

        types = get_types(self.config_space, self.scenario.feature_array)
        if self.acquisition_function == 'EI':
            rh2EPM = RunHistory2EPM4Cost(num_params=num_params,
                                         scenario=self.scenario,
                                         success_states=None,
                                         impute_censored_data=False,
                                         impute_state=None)
            model = RandomForestWithInstances(types,
                                              instance_features=meta_features_list,
                                              seed=1, num_trees=10)
            smac = SMBO(self.scenario, model=model,
                        rng=seed)
        elif self.acquisition_function == 'EIPS':
            rh2EPM = RunHistory2EPM4EIPS(num_params=num_params,
                                         scenario=self.scenario,
                                         success_states=None,
                                         impute_censored_data=False,
                                         impute_state=None)
            model = UncorrelatedMultiObjectiveRandomForestWithInstances(
                ['cost', 'runtime'], types, num_trees=10,
                instance_features=meta_features_list, seed=1)
            acquisition_function = EIPS(model)
            smac = SMBO(self.scenario,
                        acquisition_function=acquisition_function,
                        model=model, runhistory2epm=rh2EPM, rng=seed)
        else:
            raise ValueError('Unknown acquisition function value %s!' %
                             self.acquisition_function)

        # Build a runtime model
        # runtime_rf = RandomForestWithInstances(types,
        #                                        instance_features=meta_features_list,
        #                                        seed=1, num_trees=10)
        # runtime_rh2EPM = RunHistory2EPM4EIPS(num_params=num_params,
        #                                      scenario=self.scenario,
        #                                      success_states=None,
        #                                      impute_censored_data=False,
        #                                      impute_state=None)
        # X_runtime, y_runtime = runtime_rh2EPM.transform(meta_runhistory)
        # runtime_rf.train(X_runtime, y_runtime[:, 1].flatten())
        X_meta, Y_meta = rh2EPM.transform(meta_runhistory)
        # Transform Y_meta on a per-dataset base
        for meta_dataset in meta_runs_dataset_indices:
            start_index, end_index = meta_runs_dataset_indices[meta_dataset]
            end_index += 1  # Python indexing
            Y_meta[start_index:end_index, 0][
                Y_meta[start_index:end_index, 0] > 2.0] = 2.0
            dataset_minimum = np.min(Y_meta[start_index:end_index, 0])
            Y_meta[start_index:end_index, 0] = 1 - (
                (1. - Y_meta[start_index:end_index, 0]) /
                (1. - dataset_minimum))
            Y_meta[start_index:end_index, 0][
                Y_meta[start_index:end_index, 0] > 2] = 2

        # == first, evaluate all metalearning and default configurations
        finished = False
        for i, next_config in enumerate(([default_cfg] +
                                          metalearning_configurations)):
            # Do not evaluate default configurations more than once
            if i >= len([default_cfg]) and next_config in [default_cfg]:
                continue

            config_name = 'meta-learning' if i >= len([default_cfg]) \
                else 'default'

            self.logger.info("Starting to evaluate %d. configuration "
                             "(%s configuration) with time limit %ds.",
                             num_run, config_name, self.func_eval_time_limit)
            self.logger.info(next_config)
            self.reset_data_manager()
            info = eval_with_limits(datamanager=self.datamanager,
                                    backend=self.backend,
                                    config=next_config,
                                    seed=seed, num_run=num_run,
                                    resampling_strategy=self.resampling_strategy,
                                    resampling_strategy_args=self.resampling_strategy_args,
                                    memory_limit=self.memory_limit,
                                    func_eval_time_limit=self.func_eval_time_limit,
                                    logger=self.logger)
            (duration, result, _, additional_run_info, status) = info
            run_history.add(config=next_config, cost=result,
                            time=duration, status=status,
                            instance_id=instance_id, seed=seed,
                            additional_info=additional_run_info)
            run_history.update_cost(next_config, result)
            self.logger.info("Finished evaluating %d. configuration. "
                             "Duration %f; loss %f; status %s; additional run "
                             "info: %s ", num_run, duration, result,
                             str(status), additional_run_info)
            num_run += 1
            if smac.incumbent is None:
                smac.incumbent = next_config
            elif result < run_history.get_cost(smac.incumbent):
                smac.incumbent = next_config

            if self.scenario.shared_model:
                pSMAC.write(run_history=run_history,
                            output_directory=self.scenario.output_dir,
                            num_run=self.seed)

            if self.watcher.wall_elapsed(
                    'SMBO') > self.total_walltime_limit:
                finished = True

            if finished:
                break

        # == after metalearning run SMAC loop
        smac.runhistory = run_history
        smac_iter = 0
        while not finished:
            if self.scenario.shared_model:
                pSMAC.read(run_history=run_history,
                           output_directory=self.scenario.output_dir,
                           configuration_space=self.config_space,
                           logger=self.logger)

            next_configs = []
            time_for_choose_next = -1
            try:
                X_cfg, Y_cfg = rh2EPM.transform(run_history)

                if not run_history.empty():
                    # Update costs by normalization
                    dataset_minimum = np.min(Y_cfg[:, 0])
                    Y_cfg[:, 0] = 1 - ((1. - Y_cfg[:, 0]) /
                                       (1. - dataset_minimum))
                    Y_cfg[:, 0][Y_cfg[:, 0] > 2] = 2

                if len(X_meta) > 0 and len(X_cfg) > 0:
                    pass
                    #X_cfg = np.concatenate((X_meta, X_cfg))
                    #Y_cfg = np.concatenate((Y_meta, Y_cfg))
                elif len(X_meta) > 0:
                    X_cfg = X_meta.copy()
                    Y_cfg = Y_meta.copy()
                elif len(X_cfg) > 0:
                    X_cfg = X_cfg.copy()
                    Y_cfg = Y_cfg.copy()
                else:
                    raise ValueError('No training data for SMAC random forest!')

                self.logger.info('Using %d training points for SMAC.' %
                                 X_cfg.shape[0])
                choose_next_start_time = time.time()
                next_configs_tmp = smac.choose_next(X_cfg, Y_cfg,
                                                    num_interleaved_random=110,
                                                    num_configurations_by_local_search=10,
                                                    num_configurations_by_random_search_sorted=100)
                time_for_choose_next = time.time() - choose_next_start_time
                self.logger.info('Used %g seconds to find next '
                                 'configurations' % (time_for_choose_next))
                next_configs.extend(next_configs_tmp)
            # TODO put Exception here!
            except Exception as e:
                self.logger.error(e)
                self.logger.error("Error in getting next configurations "
                                  "with SMAC. Using random configuration!")
                next_config = self.config_space.sample_configuration()
                next_configs.append(next_config)

            models_fitted_this_iteration = 0
            start_time_this_iteration = time.time()
            for next_config in next_configs:
                x_runtime = impute_inactive_values(next_config)
                x_runtime = impute_inactive_values(x_runtime).get_array()
                # predicted_runtime = runtime_rf.predict_marginalized_over_instances(
                #     x_runtime.reshape((1, -1)))
                # predicted_runtime = np.exp(predicted_runtime[0][0][0]) - 1

                self.logger.info("Starting to evaluate %d. configuration (from "
                                 "SMAC) with time limit %ds.", num_run,
                                 self.func_eval_time_limit)
                self.logger.info(next_config)
                self.reset_data_manager()
                info = eval_with_limits(datamanager=self.datamanager,
                                        backend=self.backend,
                                        config=next_config,
                                        seed=seed, num_run=num_run,
                                        resampling_strategy=self.resampling_strategy,
                                        resampling_strategy_args=self.resampling_strategy_args,
                                        memory_limit=self.memory_limit,
                                        func_eval_time_limit=self.func_eval_time_limit,
                                        logger=self.logger)
                (duration, result, _, additional_run_info, status) = info
                run_history.add(config=next_config, cost=result,
                                time=duration, status=status,
                                instance_id=instance_id, seed=seed,
                                additional_info=additional_run_info)
                run_history.update_cost(next_config, result)

                #self.logger.info('Predicted runtime %g, true runtime %g',
                #                 predicted_runtime, duration)

                # TODO add unittest to make sure everything works fine and
                # this does not get outdated!
                if smac.incumbent is None:
                    smac.incumbent = next_config
                elif result < run_history.get_cost(smac.incumbent):
                    smac.incumbent = next_config

                self.logger.info("Finished evaluating %d. configuration. "
                                 "Duration: %f; loss: %f; status %s; additional "
                                 "run info: %s ", num_run, duration, result,
                                 str(status), additional_run_info)
                smac_iter += 1
                num_run += 1

                models_fitted_this_iteration += 1
                time_used_this_iteration = time.time() - start_time_this_iteration

                if max_iters is not None:
                    finished = (smac_iter >= max_iters)

                if self.watcher.wall_elapsed(
                        'SMBO') > self.total_walltime_limit:
                    finished = True

                if models_fitted_this_iteration >= 2 and \
                        time_for_choose_next > 0 and \
                        time_used_this_iteration > time_for_choose_next:
                    break
                elif time_for_choose_next <= 0 and \
                        models_fitted_this_iteration >= 1:
                    break
                elif models_fitted_this_iteration >= 50:
                    break

                if finished:
                    break

            if self.scenario.shared_model:
                pSMAC.write(run_history=run_history,
                            output_directory=self.scenario.output_dir,
                            num_run=self.seed)

        self.runhistory = run_history

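The incumbent update above keeps whichever configuration has the lowest observed cost so far. A minimal sketch of the same rule, assuming only a run-history object exposing get_cost(); the helper name is illustrative, not part of SMAC's API:

def update_incumbent(incumbent, candidate, cost, run_history):
    # Keep whichever configuration has the lowest observed cost so far.
    if incumbent is None or cost < run_history.get_cost(incumbent):
        return candidate
    return incumbent
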
Example 42

Project: pwn_plug_sources
Source File: targetScanner.py
View license
    def identifyVuln(self, URL, Params, VulnParam, PostData, Language, isPost=False, blindmode=None, isUnix=None):
        xml2config = self.config["XML2CONFIG"]
        
        if (blindmode == None):

            script = None
            scriptpath = None
            pre = None
            
            langClass = xml2config.getAllLangSets()[Language]
            
            if (not isPost):
                self._log("[%s] Identifying Vulnerability '%s' with Parameter '%s'..."%(Language, URL, VulnParam), self.LOG_ALWAYS)
            else:
                self._log("[%s] Identifying Vulnerability '%s' with POST-Parameter '%s'..."%(Language, URL, VulnParam), self.LOG_ALWAYS)

            tmpurl = URL
            PostHax = PostData
            rndStr = self.getRandomStr()

            if (not isPost):
                tmpurl = tmpurl.replace("%s=%s"%(VulnParam,Params[VulnParam]), "%s=%s"%(VulnParam, rndStr))
            else:
                PostHax = PostHax.replace("%s=%s"%(VulnParam,Params[VulnParam]), "%s=%s"%(VulnParam, rndStr))

            RE_SUCCESS_MSG = re.compile(langClass.getSniper()%(rndStr), re.DOTALL)

            code = self.doPostRequest(tmpurl, PostHax)
            if (code == None):
                self._log("Identification of vulnerability failed. (code == None)", self.LOG_ERROR)
                return None
                
            m = RE_SUCCESS_MSG.search(code)
            if (m == None):
                self._log("Identification of vulnerability failed. (m == None)", self.LOG_ERROR)
                return None


            r = report(URL, Params, VulnParam)
            r.setPost(isPost)
            r.setPostData(PostData)
            
            s = None  # guard against an empty detector list
            for sp_err_msg in langClass.getIncludeDetectors():
                RE_SCRIPT_PATH = re.compile(sp_err_msg)
                s = RE_SCRIPT_PATH.search(code)
                if (s != None): break
            if (s == None):
                self._log("Failed to retrieve script path.", self.LOG_WARN)

                print "[MINOR BUG FOUND]"
                print "------------------------------------------------------"
                print "It's possible that fimap was unable to retrieve the scriptpath"
                print "because the regex for this kind of error message is missing."
                a = raw_input("Do you want to help me and send the URL of the site? [y = Print Info/N = Discard]")
                if (a=="y" or a=="Y"):
                    print "-----------SEND THIS TO '[email protected]'-----------"
                    print "SUBJECT: fimap Regex"
                    print "ERROR  : Failed to retrieve script path."
                    print "URL    : " + URL
                    print "-----------------------------------------------------------"
                    raw_input("Copy it and press enter to proceed with scanning...")
                else:
                    print "No problem! I'll continue with your scan..."

                return(None)
            else:
                script = s.group('script')
                if (script != None and script[1] == ":"): # Windows detection quick hack
                    scriptpath = script[:script.rfind("\\")]
                    r.setWindows()
                elif (script != None and script.startswith("\\\\")):
                    scriptpath = script[:script.rfind("\\")]
                    r.setWindows()
                else:
                    scriptpath = os.path.dirname(script)
                    if (scriptpath == None or scriptpath == ""):
                        self._log("Scriptpath is empty! Assuming that we are on toplevel.", self.LOG_WARN)
                        scriptpath = "/"
                        script = "/" + script

                # Check if scriptpath was received correctly.
                if(scriptpath!=""):
                    self._log("Scriptpath received: '%s'" %(scriptpath), self.LOG_INFO)
                    r.setServerPath(scriptpath)
                    r.setServerScript(script)


            if (r.isWindows()):
                self._log("Operating System is 'Windows'.", self.LOG_INFO)
            else:
                self._log("Operating System is 'Unix-Like'.", self.LOG_INFO)


            errmsg = m.group("incname")

            if (errmsg == rndStr):
                r.setPrefix("")
                r.setSurfix("")
            else:
                tokens = errmsg.split(rndStr)
                pre = tokens[0]
                addSlash = False
                if (pre == ""):
                    pre = "/"
                #else:
                #    if pre[-1] != "/":
                #       addSlash = True


                rootdir = None
                
                if (pre[0] != "/"):
                    if (r.isUnix()):
                        pre = posixpath.join(r.getServerPath(), pre)
                        pre = posixpath.normpath(pre)
                        rootdir = "/"
                        pre = self.relpath_unix(rootdir, pre)
                    else:
                        pre = ntpath.join(r.getServerPath(), pre)
                        pre = ntpath.normpath(pre)
                        if (pre[1] == ":"):
                            rootdir = pre[0:3]
                        pre = self.relpath_win(rootdir, pre)
                else:
                    pre = self.relpath_unix("/", pre)
                if addSlash: pre = rootdir + pre
                
                #Quick fix for increasing success :P
                if (pre != "."):
                    pre = "/" + pre
                
                sur = tokens[1]
                if (pre == "."): pre = ""
                r.setPrefix(pre)
                r.setSurfix(sur)

                if (sur != ""):
                    self._log("Trying NULL-Byte Poisoning to get rid of the suffix...", self.LOG_INFO)
                    tmpurl = URL
                    tmpurl = tmpurl.replace("%s=%s"%(VulnParam,Params[VulnParam]), "%s=%s%%00"%(VulnParam, rndStr))
                    code = self.doGetRequest(tmpurl)
                    if (code == None):
                        self._log("NULL-Byte testing failed.", self.LOG_WARN)
                        r.setNullBytePossible(False)
                    elif (code.find("%s\\0%s"%(rndStr, sur)) != -1 or code.find("%s%s"%(rndStr, sur)) != -1):
                        self._log("NULL-Byte Poisoning not possible.", self.LOG_INFO)
                        r.setNullBytePossible(False)
                    else:
                        self._log("NULL-Byte Poisoning successfull!", self.LOG_INFO)
                        r.setSurfix("%00")
                        r.setNullBytePossible(True)


            if (scriptpath == ""):
                # Failed to get scriptpath with easy method :(
                if (pre != ""):
                    self._log("Failed to retrieve path but we are forced to go relative!", self.LOG_WARN)
                    self._log("Go and try it to scan with --enable-blind.", self.LOG_WARN)
                    return(None)
                else:
                    self._log("Failed to retrieve path! It's an absolute injection so I'll fake it to '/'...", self.LOG_WARN)
                    scriptpath = "/"
                    r.setServerPath(scriptpath)
                    r.setServerScript(script)

            return(r)
        
        
        else:
            # Blindmode
            prefix = blindmode[0]
            isNull = blindmode[1]
            self._log("Identifying Vulnerability '%s' with Parameter '%s' blindly..."%(URL, VulnParam), self.LOG_ALWAYS)
            r = report(URL, Params, VulnParam)
            r.setBlindDiscovered(True)
            r.setSurfix("")
            if isNull: r.setSurfix("%00")
            r.setNullBytePossible(isNull)
            if (prefix.strip() == ""):
                r.setServerPath("/noop")
            else:
                r.setServerPath(prefix.replace("..", "a"))
            r.setServerScript("noop")
            r.setPrefix(prefix)
            if (not isUnix):
                r.setWindows()
            return(r)

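The os.path.dirname() call above only handles the POSIX case; Windows drive and UNC paths are split manually. A standalone sketch of that decision using the explicit ntpath/posixpath modules (the function name is illustrative); the empty-dirname fallback mirrors the toplevel handling in the example:

import ntpath
import posixpath

def server_script_dir(script):
    # Windows drive path (C:\...) or UNC path (\\host\share\...)?
    if (len(script) > 1 and script[1] == ":") or script.startswith("\\\\"):
        return ntpath.dirname(script) or "\\"
    # POSIX-style path; an empty dirname means the script is at top level.
    return posixpath.dirname(script) or "/"
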
Example 44

Project: tp-libvirt
Source File: virsh_dump.py
View license
def run(test, params, env):
    """
    Test command: virsh dump.

    This command can dump the core of a domain to a file for analysis.
    1. Positive testing
        1.1 Dump domain with valid options.
        1.2 Avoid file system cache when dumping.
        1.3 Compress the dump images to valid/invalid formats.
    2. Negative testing
        2.1 Dump domain to a non-existent directory.
        2.2 Dump domain with invalid option.
        2.3 Dump a shut-off domain.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    options = params.get("dump_options")
    dump_file = params.get("dump_file", "vm.core")
    dump_dir = params.get("dump_dir", data_dir.get_tmp_dir())
    if os.path.dirname(dump_file) == "":
        dump_file = os.path.join(dump_dir, dump_file)
    dump_image_format = params.get("dump_image_format")
    start_vm = params.get("start_vm") == "yes"
    paused_after_start_vm = params.get("paused_after_start_vm") == "yes"
    status_error = params.get("status_error", "no") == "yes"
    timeout = int(params.get("check_pid_timeout", "5"))
    memory_dump_format = params.get("memory_dump_format", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = 'testacl'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    def check_domstate(actual, options):
        """
        Check the domain status according to dump options.
        """

        if options.find('live') >= 0:
            domstate = "running"
            if options.find('crash') >= 0 or options.find('reset') >= 0:
                domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        elif options.find('crash') >= 0:
            domstate = "shut off"
            if options.find('reset') >= 0:
                domstate = "running"
        elif options.find('reset') >= 0:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        else:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"

        if not start_vm:
            domstate = "shut off"

        logging.debug("Domain should %s after run dump %s", domstate, options)

        return (domstate == actual)

    def check_dump_format(dump_image_format, dump_file):
        """
        Check the format of dumped file.

        If 'dump_image_format' is not specified or is invalid in qemu.conf,
        the file should be a normal raw file; otherwise it should be
        compressed to the specified format. The supported compression
        formats are: lzop, gzip, bzip2, and xz.
        For a memory-only dump, the default dump format is ELF, and a format
        can also be specified with the --format option, so the result could
        be 'elf' or 'data'.
        """

        valid_format = ["lzop", "gzip", "bzip2", "xz", 'elf', 'data']
        if len(dump_image_format) == 0 or dump_image_format not in valid_format:
            logging.debug("No need check the dumped file format")
            return True
        else:
            file_cmd = "file %s" % dump_file
            (status, output) = commands.getstatusoutput(file_cmd)
            if status:
                logging.error("Fail to check dumped file %s", dump_file)
                return False
            logging.debug("Run file %s output: %s", dump_file, output)
            actual_format = output.split(" ")[1]
            if actual_format.lower() != dump_image_format.lower():
                logging.error("Compress dumped file to %s fail: %s" %
                              (dump_image_format, actual_format))
                return False
            else:
                return True

    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with bypass-cache option
    child_pid = 0
    if options.find('bypass-cache') >= 0:
        pid = os.fork()
        if pid:
            # Guarantee check_bypass function has run before dump
            child_pid = pid
            try:
                wait_pid_active(pid, timeout)
            finally:
                os.kill(child_pid, signal.SIGTERM)
        else:
            check_bypass(dump_file)
            # Wait for the parent process to kill us
            while True:
                time.sleep(1)

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            raise error.TestNAError("Current libvirt version doesn't support"
                                    " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            raise error.TestNAError("Unsupported dump format '%s' for"
                                    " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
        raise error.TestError("invalid dumpCore value: %s" % dump_guest_core)
    try:
        # Set dumpCore in guest xml
        if dump_guest_core:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.dumpcore = dump_guest_core
            vmxml.sync()
            vm.start()
            # check qemu-kvm cmdline
            vm_pid = vm.get_pid()
            cmd = "cat /proc/%d/cmdline|xargs -0 echo" % vm_pid
            cmd += "|grep dump-guest-core=%s" % dump_guest_core
            result = utils.run(cmd, ignore_status=True)
            logging.debug("cmdline: %s" % result.stdout)
            if result.exit_status:
                raise error.TestFail("dump-guest-core=%s not found in qemu"
                                     " cmdline" % dump_guest_core)
            else:
                logging.info("Found dump-guest-core=%s in qemu cmdline",
                             dump_guest_core)

        # Run virsh command
        cmd_result = virsh.dump(vm_name, dump_file, options,
                                unprivileged_user=unprivileged_user,
                                uri=uri,
                                ignore_status=True, debug=True)
        status = cmd_result.exit_status

        logging.info("Start check result")
        if not check_domstate(vm.state(), options):
            raise error.TestFail("Domain status check fail.")
        if status_error:
            if not status:
                raise error.TestFail("Expect fail, but run successfully")
        else:
            if status:
                raise error.TestFail("Expect succeed, but run fail")
            if not os.path.exists(dump_file):
                raise error.TestFail("Fail to find domain dumped file.")
            if check_dump_format(dump_image_format, dump_file):
                logging.info("Successfully dump domain to %s", dump_file)
            else:
                raise error.TestFail("The format of dumped file is wrong.")
    finally:
        if child_pid:
            os.kill(child_pid, signal.SIGTERM)
        if os.path.isfile(dump_file):
            os.remove(dump_file)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        qemu_config.restore()
        libvirtd.restart()

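The dump_file handling above is a small os.path.dirname() idiom: a bare file name (empty dirname) is joined onto a default directory, while an explicit path is left alone. A sketch of the idiom (the helper name is illustrative); note the truthiness test, since comparing strings with 'is' is unreliable:

import os

def resolve_output_path(filename, default_dir):
    # A bare file name has an empty dirname; place it under default_dir.
    if not os.path.dirname(filename):
        return os.path.join(default_dir, filename)
    return filename
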
Example 45

Project: tp-libvirt
Source File: virsh_volume.py
View license
def run(test, params, env):
    """
    1. Create a pool
    2. Create n number of volumes(vol-create-as)
    3. Check the volume details from the following commands
       vol-info
       vol-key
       vol-list
       vol-name
       vol-path
       vol-pool
       qemu-img info
    4. Delete the volume and check in vol-list
    5. Repeat the steps for number of volumes given
    6. Delete the pool and target
    TODO: Handle negative testcases
    """

    def delete_volume(expected_vol):
        """
        Deletes Volume
        """
        pool_name = expected_vol['pool_name']
        vol_name = expected_vol['name']
        pv = libvirt_storage.PoolVolume(pool_name)
        if not pv.delete_volume(vol_name):
            raise error.TestFail("Delete volume failed." % vol_name)
        else:
            logging.debug("Volume: %s successfully deleted on pool: %s",
                          vol_name, pool_name)

    def get_vol_list(pool_name, vol_name):
        """
        Parse the volume list
        """
        output = virsh.vol_list(pool_name, "--details")
        rg = re.compile(
            r'^(\S+)\s+(\S+)\s+(\S+)\s+(\d+.\d+\s\S+)\s+(\d+.\d+.*)')
        vol = {}
        vols = []
        volume_detail = None
        for line in output.stdout.splitlines():
            match = re.search(rg, line.lstrip())
            if match is not None:
                vol['name'] = match.group(1)
                vol['path'] = match.group(2)
                vol['type'] = match.group(3)
                vol['capacity'] = match.group(4)
                vol['allocation'] = match.group(5)
                vols.append(vol)
                vol = {}
        for volume in vols:
            if volume['name'] == vol_name:
                volume_detail = volume
        return volume_detail

    def norm_capacity(capacity):
        """
        Normalize the capacity values to bytes
        """
        # Normalize all values to bytes
        norm_capacity = {}
        des = {'B': 'B', 'bytes': 'B', 'b': 'B', 'kib': 'K',
               'KiB': 'K', 'K': 'K', 'k': 'K', 'KB': 'K',
               'mib': 'M', 'MiB': 'M', 'M': 'M', 'm': 'M',
               'MB': 'M', 'gib': 'G', 'GiB': 'G', 'G': 'G',
               'g': 'G', 'GB': 'G', 'Gb': 'G', 'tib': 'T',
               'TiB': 'T', 'TB': 'T', 'T': 'T', 't': 'T'
               }
        val = {'B': 1,
               'K': 1024,
               'M': 1048576,
               'G': 1073741824,
               'T': 1099511627776
               }

        reg_list = re.compile(r'(\S+)\s(\S+)')
        match_list = re.search(reg_list, capacity['list'])
        if match_list is not None:
            mem_value = float(match_list.group(1))
            norm = val[des[match_list.group(2)]]
            norm_capacity['list'] = int(mem_value * norm)
        else:
            raise error.TestFail("Error in parsing capacity value in"
                                 " virsh vol-list")

        match_info = re.search(reg_list, capacity['info'])
        if match_info is not None:
            mem_value = float(match_info.group(1))
            norm = val[des[match_info.group(2)]]
            norm_capacity['info'] = int(mem_value * norm)
        else:
            raise error.TestFail("Error in parsing capacity value "
                                 "in virsh vol-info")

        norm_capacity['qemu_img'] = capacity['qemu_img']
        norm_capacity['xml'] = int(capacity['xml'])

        return norm_capacity

    def check_vol(expected, avail=True):
        """
        Checks the expected volume details against the actual volume details from
        vol-dumpxml
        vol-list
        vol-info
        vol-key
        vol-path
        qemu-img info
        """
        error_count = 0

        pv = libvirt_storage.PoolVolume(expected['pool_name'])
        vol_exists = pv.volume_exists(expected['name'])
        if vol_exists:
            if not avail:
                error_count += 1
                logging.error("Expect volume %s not exists but find it",
                              expected['name'])
                return error_count
        else:
            if avail:
                error_count += 1
                logging.error("Expect volume %s exists but not find it",
                              expected['name'])
                return error_count
            else:
                logging.info("Volume %s checked successfully for deletion",
                             expected['name'])
                return error_count

        actual_list = get_vol_list(expected['pool_name'], expected['name'])
        actual_info = pv.volume_info(expected['name'])
        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(expected['name'],
                                                         expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml.key:
            logging.error("Volume key is mismatch \n%s"
                          "Key from xml: %s\nKey from command: %s",
                          expected['name'], volume_xml.key, vol_key)
            error_count += 1
        else:
            logging.debug("virsh vol-key for volume: %s successfully"
                          " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error("Volume name mismatch\n"
                          "Expected name: %s\nOutput of vol-name: %s",
                          expected['name'], get_vol_name)
            error_count += 1

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected path: %s\nOutput of vol-path: %s\n",
                          expected['name'],
                          expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug("virsh vol-path for volume: %s successfully checked"
                          " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if expected['path'] != actual_list['path']:
            logging.error("Volume path mismatch for volume:%s\n"
                          "Expected Path: %s\nPath from virsh vol-list: %s",
                          expected['name'], expected['path'],
                          actual_list['path'])
            error_count += 1
        else:
            logging.debug("Path of volume: %s from virsh vol-list "
                          "successfully checked against created "
                          "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml.path:
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                          expected['name'], expected['path'], volume_xml.path)
            error_count += 1

        else:
            logging.debug("Path of volume: %s from virsh vol-dumpxml "
                          "successfully checked against created volume path",
                          expected['name'])

        # Check type against virsh vol-list
        if expected['type'] != actual_list['type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-list: %s",
                          expected['name'], expected['type'],
                          actual_list['type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-list "
                          "successfully checked against the created "
                          "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-info: %s",
                          expected['name'], expected['type'],
                          actual_info['Type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-info successfully"
                          " checked against the created volume type",
                          expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error("Volume name mismatch for volume: %s\n"
                          "Expected name: %s\n Name from vol-info: %s",
                          expected['name'],
                          expected['name'], actual_info['Name'])
            error_count += 1
        else:
            logging.debug("Name of volume: %s from virsh vol-info successfully"
                          " checked against the created volume name",
                          expected['name'])

        # Check format against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format']:
            if expected['format'] != img_info['format']:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from qemu-img info: %s",
                              expected['name'], expected['format'],
                              img_info['format'])
                error_count += 1
            else:
                logging.debug("Format of volume: %s from qemu-img info "
                              "checked successfully against the created "
                              "volume format", expected['name'])

        # Check format against vol-dumpxml
        if expected['format']:
            if expected['format'] != volume_xml.format:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from vol-dumpxml: %s",
                              expected['name'], expected['format'],
                              volume_xml.format)
                error_count += 1
            else:
                logging.debug("Format of volume: %s from virsh vol-dumpxml "
                              "checked successfully against the created"
                              " volume format", expected['name'])

        logging.info(expected['encrypt_format'])
        # Check encrypt against vol-dumpxml
        if expected['encrypt_format']:
            # As the 'default' format will be resolved to a specific value
            # (qcow), just output it here
            logging.debug("Encryption format of volume '%s' is: %s",
                          expected['name'], volume_xml.encryption.format)
            # And also output encryption secret uuid
            secret_uuid = volume_xml.encryption.secret['uuid']
            logging.debug("Encryption secret of volume '%s' is: %s",
                          expected['name'], secret_uuid)
            if expected['encrypt_secret']:
                if expected['encrypt_secret'] != secret_uuid:
                    logging.error("Encryption secret mismatch for volume: %s\n"
                                  "Expected secret uuid: %s\n"
                                  "Secret uuid from vol-dumpxml: %s",
                                  expected['name'], expected['encrypt_secret'],
                                  secret_uuid)
                    error_count += 1
                else:
                    # If no encryption secret value was set, one is generated
                    # automatically at volume creation time
                    logging.debug("Volume encryption secret is %s", secret_uuid)

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error("Pool name mismatch for volume: %s against"
                          "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug("Pool name of volume: %s checked successfully"
                          " against the virsh vol-pool", expected['name'])

        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml.capacity
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        delta_size = params.get('delta_size', "1024")
        if abs(expected['capacity'] - norm_cap['list']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-list\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['list'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-list for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['info']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['info'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-info for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-dumpxml\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['xml'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-dumpxml for volume: %s",
                          expected['name'])

        if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against "
                          "qemu-img info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " qemu-img info for volume: %s",
                          expected['name'])
        return error_count

    def get_all_secrets():
        """
        Return the UUIDs of all existing libvirt secrets in a list
        """
        secret_list = []
        secrets = virsh.secret_list().stdout.strip()
        for secret in secrets.splitlines()[2:]:
            secret_list.append(secret.strip().split()[0])
        return secret_list

    # Initialize the variables
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("volume_name")
    vol_number = int(params.get("number_of_volumes", "2"))
    capacity = params.get("volume_size", "1048576")
    allocation = params.get("volume_allocation", "1048576")
    vol_format = params.get("volume_format")
    source_name = params.get("gluster_source_name", "gluster-vol1")
    source_path = params.get("gluster_source_path", "/")
    encrypt_format = params.get("vol_encrypt_format")
    encrypt_secret = params.get("encrypt_secret")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    try:
        str_capa = utils_misc.normalize_data_size(capacity, "B")
        int_capa = int(str(str_capa).split('.')[0])
    except ValueError:
        raise error.TestError("Translate size %s to 'B' failed" % capacity)
    try:
        str_capa = utils_misc.normalize_data_size(allocation, "B")
        int_allo = int(str(str_capa).split('.')[0])
    except ValueError:
        raise error.TestError("Translate size %s to 'B' failed" % allocation)

    # Stop multipathd to avoid pool-start failures (for fs-like pools, the
    # newly added disk may be in use by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Get existing libvirt secrets before the test
    ori_secrets = get_all_secrets()
    expected_vol = {}
    vol_type = 'file'
    if pool_type in ['disk', 'logical']:
        vol_type = 'block'
    if pool_type == 'gluster':
        vol_type = 'network'
    logging.debug("Debug:\npool_name:%s\npool_type:%s\npool_target:%s\n"
                  "vol_name:%s\nvol_number:%s\ncapacity:%s\nallocation:%s\n"
                  "vol_format:%s", pool_name, pool_type, pool_target,
                  vol_name, vol_number, capacity, allocation, vol_format)

    libv_pvt = utlv.PoolVolumeTest(test, params)
    # Run Testcase
    total_err_count = 0
    try:
        # Create a new pool
        libv_pvt.pre_pool(pool_name=pool_name,
                          pool_type=pool_type,
                          pool_target=pool_target,
                          emulated_image=emulated_image,
                          image_size=emulated_image_size,
                          source_name=source_name,
                          source_path=source_path)
        for i in range(vol_number):
            volume_name = "%s_%d" % (vol_name, i)
            expected_vol['pool_name'] = pool_name
            expected_vol['pool_type'] = pool_type
            expected_vol['pool_target'] = pool_target
            expected_vol['capacity'] = int_capa
            expected_vol['allocation'] = int_allo
            expected_vol['format'] = vol_format
            expected_vol['name'] = volume_name
            expected_vol['type'] = vol_type
            expected_vol['encrypt_format'] = encrypt_format
            expected_vol['encrypt_secret'] = encrypt_secret
            # Creates volume
            if pool_type != "gluster":
                expected_vol['path'] = pool_target + '/' + volume_name
                new_volxml = vol_xml.VolXML()
                new_volxml.name = volume_name
                new_volxml.capacity = int_capa
                new_volxml.allocation = int_allo
                if vol_format:
                    new_volxml.format = vol_format
                encrypt_dict = {}
                if encrypt_format:
                    encrypt_dict.update({"format": encrypt_format})
                if encrypt_secret:
                    encrypt_dict.update({"secret": {'uuid': encrypt_secret}})
                if encrypt_dict:
                    new_volxml.encryption = new_volxml.new_encryption(**encrypt_dict)
                logging.debug("Volume XML for creation:\n%s", str(new_volxml))
                virsh.vol_create(pool_name, new_volxml.xml, debug=True)
            else:
                ip_addr = utlv.get_host_ipv4_addr()
                expected_vol['path'] = "gluster://%s/%s/%s" % (ip_addr,
                                                               source_name,
                                                               volume_name)
                utils.run("qemu-img create -f %s %s %s" % (vol_format,
                                                           expected_vol['path'],
                                                           capacity))
            virsh.pool_refresh(pool_name)
            # Check volumes
            total_err_count += check_vol(expected_vol)
            # Delete volume and check for results
            delete_volume(expected_vol)
            total_err_count += check_vol(expected_vol, False)
        if total_err_count > 0:
            raise error.TestFail("Get %s errors when checking volume" % total_err_count)
    finally:
        # Clean up
        for sec in get_all_secrets():
            if sec not in ori_secrets:
                virsh.secret_undefine(sec)
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image, source_name=source_name)
        except error.TestFail, detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()

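The norm_capacity() helper above reduces to parsing a '<number> <unit>' string and multiplying by a binary (1024-based) factor. A compact sketch with a deliberately abbreviated unit table; real virsh output uses more unit spellings than shown here:

import re

_UNITS = {'B': 1, 'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4}

def to_bytes(text):
    # Parse strings such as '1.00 GiB' or '512.00 K' into a byte count,
    # e.g. to_bytes('1.00 GiB') == 1073741824.
    match = re.search(r'(\d+(?:\.\d+)?)\s*([BKMGT])', text)
    if match is None:
        raise ValueError("cannot parse capacity: %r" % text)
    return int(float(match.group(1)) * _UNITS[match.group(2)])
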
Example 46

Project: numexpr
Source File: setup.py
View license
def setup_package():
    metadata = dict(
                      description='Fast numerical expression evaluator for NumPy',
                      author='David M. Cooke, Francesc Alted and others',
                      author_email='[email protected], [email protected]',
                      url='https://github.com/pydata/numexpr',
                      license='MIT',
                      packages=['numexpr'],
                      install_requires=requirements,
                      setup_requires=requirements
    )
    if (len(sys.argv) >= 2 and
        ('--help' in sys.argv[1:] or
         (sys.argv[1] in (
             '--help-commands', 'egg_info', '--version', 'clean', '--name')))):

        # For these actions, NumPy is not required.
        #
        # They are required to succeed without Numpy for example when
        # pip is used to install Numexpr when Numpy is not yet present in
        # the system.
        # (via https://github.com/abhirk/scikit-learn/blob/master/setup.py)
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup

        metadata['name']    = 'numexpr'
        metadata['version'] = version
    else:
        from numpy.distutils.core import setup
        from numpy.distutils.command.build_ext import build_ext as numpy_build_ext

        try:  # Python 3
            # Code taken from numpy/distutils/command/build_py.py
            # XXX: update LICENSES
            from distutils.command.build_py import build_py_2to3 as old_build_py
            from numpy.distutils.misc_util import is_string

            class build_py(old_build_py):

                def run(self):
                    build_src = self.get_finalized_command('build_src')
                    if build_src.py_modules_dict and self.packages is None:
                        self.packages = list(build_src.py_modules_dict.keys())
                    old_build_py.run(self)

                def find_package_modules(self, package, package_dir):
                    modules = old_build_py.find_package_modules(
                        self, package, package_dir)

                    # Find build_src generated *.py files.
                    build_src = self.get_finalized_command('build_src')
                    modules += build_src.py_modules_dict.get(package, [])

                    return modules

                def find_modules(self):
                    old_py_modules = self.py_modules[:]
                    new_py_modules = list(filter(is_string, self.py_modules))
                    self.py_modules[:] = new_py_modules
                    modules = old_build_py.find_modules(self)
                    self.py_modules[:] = old_py_modules

                    return modules

        except ImportError:  # Python 2
            from numpy.distutils.command.build_py import build_py

        DEBUG = False

        def localpath(*args):
            return op.abspath(op.join(*((op.dirname(__file__),) + args)))

        def debug(instring):
            if DEBUG:
                print(" DEBUG: " + instring)


        def configuration():
            from numpy.distutils.misc_util import Configuration, dict_append
            from numpy.distutils.system_info import system_info

            config = Configuration('numexpr')

            #try to find configuration for MKL, either from environment or site.cfg
            if op.exists('site.cfg'):
                mkl_config_data = config.get_info('mkl')
                # Some version of MKL needs to be linked with libgfortran.
                # For this, use entries of DEFAULT section in site.cfg.
                default_config = system_info()
                dict_append(mkl_config_data,
                            libraries=default_config.get_libraries(),
                            library_dirs=default_config.get_lib_dirs())
            else:
                mkl_config_data = {}

            # setup information for C extension
            if os.name == 'nt':
                pthread_win = ['numexpr/win32/pthread.c']
            else:
                pthread_win = []
            extension_config_data = {
                'sources': ['numexpr/interpreter.cpp',
                            'numexpr/module.cpp',
                            'numexpr/numexpr_object.cpp'] + pthread_win,
                'depends': ['numexpr/interp_body.cpp',
                            'numexpr/complex_functions.hpp',
                            'numexpr/interpreter.hpp',
                            'numexpr/module.hpp',
                            'numexpr/msvc_function_stubs.hpp',
                            'numexpr/numexpr_config.hpp',
                            'numexpr/numexpr_object.hpp'],
                'libraries': ['m'],
                'extra_compile_args': ['-funroll-all-loops', ],
            }
            dict_append(extension_config_data, **mkl_config_data)
            if 'library_dirs' in mkl_config_data:
                library_dirs = ':'.join(mkl_config_data['library_dirs'])
            config.add_extension('interpreter', **extension_config_data)
            config.set_options(quiet=True)

            config.make_config_py()
            config.add_subpackage('tests', 'numexpr/tests')

            #version handling
            config.get_version('numexpr/version.py')
            return config


        class cleaner(clean):

            def run(self):
                # Recursive deletion of build/ directory
                path = localpath("build")
                try:
                    shutil.rmtree(path)
                except Exception:
                    debug("Failed to remove directory %s" % path)
                else:
                    debug("Cleaned up %s" % path)

                # Now, the extension and other files
                try:
                    import imp
                except ImportError:
                    if os.name == 'posix':
                        paths = [localpath("numexpr/interpreter.so")]
                    else:
                        paths = [localpath("numexpr/interpreter.pyd")]
                else:
                    paths = []
                    for suffix, _, _ in imp.get_suffixes():
                        if suffix == '.py':
                            continue
                        paths.append(localpath("numexpr", "interpreter" + suffix))
                paths.append(localpath("numexpr/__config__.py"))
                paths.append(localpath("numexpr/__config__.pyc"))
                for path in paths:
                    try:
                        os.remove(path)
                    except Exception:
                        debug("Failed to clean up file %s" % path)
                    else:
                        debug("Cleaning up %s" % path)

                clean.run(self)

        class build_ext(numpy_build_ext):
            def build_extension(self, ext):
                # at this point we know what the C compiler is.
                if self.compiler.compiler_type == 'msvc' or self.compiler.compiler_type == 'intelemw':
                    ext.extra_compile_args = []
                    # also remove extra linker arguments msvc doesn't understand
                    ext.extra_link_args = []
                    # also remove gcc math library
                    ext.libraries.remove('m')
                numpy_build_ext.build_extension(self, ext)

        if setuptools:
            metadata['zip_safe'] = False

        metadata['cmdclass'] = {
            'build_ext': build_ext,
            'clean': cleaner,
            'build_py': build_py,
        }
        metadata['configuration'] = configuration

    setup(**metadata)

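The localpath() helper in this example is the canonical use of dirname(__file__): anchoring paths to the directory of the current source file instead of the process working directory. A minimal standalone version:

import os

def here(*parts):
    # Absolute path relative to the directory containing this source file.
    return os.path.abspath(os.path.join(os.path.dirname(__file__), *parts))
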
Example 47

Project: pyqtgraph
Source File: PlotItem.py
View license
    def __init__(self, parent=None, name=None, labels=None, title=None, viewBox=None, axisItems=None, enableMenu=True, **kargs):
        """
        Create a new PlotItem. All arguments are optional.
        Any extra keyword arguments are passed to PlotItem.plot().
        
        ==============  ==========================================================================================
        **Arguments:**
        *title*         Title to display at the top of the item. Html is allowed.
        *labels*        A dictionary specifying the axis labels to display::
                   
                            {'left': (args), 'bottom': (args), ...}
                     
                        The name of each axis and the corresponding arguments are passed to 
                        :func:`PlotItem.setLabel() <pyqtgraph.PlotItem.setLabel>`
                        Optionally, PlotItem may also be initialized with the keyword arguments left,
                        right, top, or bottom to achieve the same effect.
        *name*          Registers a name for this view so that others may link to it
        *viewBox*       If specified, the PlotItem will be constructed with this as its ViewBox.
        *axisItems*     Optional dictionary instructing the PlotItem to use pre-constructed items
                        for its axes. The dict keys must be axis names ('left', 'bottom', 'right', 'top')
                        and the values must be instances of AxisItem (or at least compatible with AxisItem).
        ==============  ==========================================================================================
        """
        
        GraphicsWidget.__init__(self, parent)
        
        self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        
        ## Set up control buttons
        path = os.path.dirname(__file__)
        #self.autoImageFile = os.path.join(path, 'auto.png')
        #self.lockImageFile = os.path.join(path, 'lock.png')
        self.autoBtn = ButtonItem(pixmaps.getPixmap('auto'), 14, self)
        self.autoBtn.mode = 'auto'
        self.autoBtn.clicked.connect(self.autoBtnClicked)
        #self.autoBtn.hide()
        self.buttonsHidden = False ## whether the user has requested buttons to be hidden
        self.mouseHovering = False
        
        self.layout = QtGui.QGraphicsGridLayout()
        self.layout.setContentsMargins(1,1,1,1)
        self.setLayout(self.layout)
        self.layout.setHorizontalSpacing(0)
        self.layout.setVerticalSpacing(0)
        
        if viewBox is None:
            viewBox = ViewBox(parent=self)
        self.vb = viewBox
        self.vb.sigStateChanged.connect(self.viewStateChanged)
        self.setMenuEnabled(enableMenu, enableMenu) ## en/disable plotitem and viewbox menus
        
        if name is not None:
            self.vb.register(name)
        self.vb.sigRangeChanged.connect(self.sigRangeChanged)
        self.vb.sigXRangeChanged.connect(self.sigXRangeChanged)
        self.vb.sigYRangeChanged.connect(self.sigYRangeChanged)
        
        self.layout.addItem(self.vb, 2, 1)
        self.alpha = 1.0
        self.autoAlpha = True
        self.spectrumMode = False
        
        self.legend = None
        
        ## Create and place axis items
        if axisItems is None:
            axisItems = {}
        self.axes = {}
        for k, pos in (('top', (1,1)), ('bottom', (3,1)), ('left', (2,0)), ('right', (2,2))):
            if k in axisItems:
                axis = axisItems[k]
            else:
                axis = AxisItem(orientation=k, parent=self)
            axis.linkToView(self.vb)
            self.axes[k] = {'item': axis, 'pos': pos}
            self.layout.addItem(axis, *pos)
            axis.setZValue(-1000)
            axis.setFlag(axis.ItemNegativeZStacksBehindParent)
        
        self.titleLabel = LabelItem('', size='11pt', parent=self)
        self.layout.addItem(self.titleLabel, 0, 1)
        self.setTitle(None)  ## hide
        
        
        for i in range(4):
            self.layout.setRowPreferredHeight(i, 0)
            self.layout.setRowMinimumHeight(i, 0)
            self.layout.setRowSpacing(i, 0)
            self.layout.setRowStretchFactor(i, 1)
            
        for i in range(3):
            self.layout.setColumnPreferredWidth(i, 0)
            self.layout.setColumnMinimumWidth(i, 0)
            self.layout.setColumnSpacing(i, 0)
            self.layout.setColumnStretchFactor(i, 1)
        self.layout.setRowStretchFactor(2, 100)
        self.layout.setColumnStretchFactor(1, 100)
        

        self.items = []
        self.curves = []
        self.itemMeta = weakref.WeakKeyDictionary()
        self.dataItems = []
        self.paramList = {}
        self.avgCurves = {}
        
        ### Set up context menu
        
        w = QtGui.QWidget()
        self.ctrl = c = Ui_Form()
        c.setupUi(w)
        dv = QtGui.QDoubleValidator(self)
        
        menuItems = [
            ('Transforms', c.transformGroup),
            ('Downsample', c.decimateGroup),
            ('Average', c.averageGroup),
            ('Alpha', c.alphaGroup),
            ('Grid', c.gridGroup),
            ('Points', c.pointsGroup),
        ]
        
        
        self.ctrlMenu = QtGui.QMenu()
        
        self.ctrlMenu.setTitle('Plot Options')
        self.subMenus = []
        for name, grp in menuItems:
            sm = QtGui.QMenu(name)
            act = QtGui.QWidgetAction(self)
            act.setDefaultWidget(grp)
            sm.addAction(act)
            self.subMenus.append(sm)
            self.ctrlMenu.addMenu(sm)
        
        self.stateGroup = WidgetGroup()
        for name, w in menuItems:
            self.stateGroup.autoAdd(w)
        
        self.fileDialog = None
        
        c.alphaGroup.toggled.connect(self.updateAlpha)
        c.alphaSlider.valueChanged.connect(self.updateAlpha)
        c.autoAlphaCheck.toggled.connect(self.updateAlpha)

        c.xGridCheck.toggled.connect(self.updateGrid)
        c.yGridCheck.toggled.connect(self.updateGrid)
        c.gridAlphaSlider.valueChanged.connect(self.updateGrid)

        c.fftCheck.toggled.connect(self.updateSpectrumMode)
        c.logXCheck.toggled.connect(self.updateLogMode)
        c.logYCheck.toggled.connect(self.updateLogMode)

        c.downsampleSpin.valueChanged.connect(self.updateDownsampling)
        c.downsampleCheck.toggled.connect(self.updateDownsampling)
        c.autoDownsampleCheck.toggled.connect(self.updateDownsampling)
        c.subsampleRadio.toggled.connect(self.updateDownsampling)
        c.meanRadio.toggled.connect(self.updateDownsampling)
        c.clipToViewCheck.toggled.connect(self.updateDownsampling)

        self.ctrl.avgParamList.itemClicked.connect(self.avgParamListClicked)
        self.ctrl.averageGroup.toggled.connect(self.avgToggled)
        
        self.ctrl.maxTracesCheck.toggled.connect(self.updateDecimation)
        self.ctrl.maxTracesSpin.valueChanged.connect(self.updateDecimation)
        
        self.hideAxis('right')
        self.hideAxis('top')
        self.showAxis('left')
        self.showAxis('bottom')
        
        if labels is None:
            labels = {}
        for label in list(self.axes.keys()):
            if label in kargs:
                labels[label] = kargs[label]
                del kargs[label]
        for k in labels:
            if isinstance(labels[k], basestring):
                labels[k] = (labels[k],)
            self.setLabel(k, *labels[k])
                
        if title is not None:
            self.setTitle(title)
        
        if len(kargs) > 0:
            self.plot(**kargs)

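The dirname call in this example is the classic module-relative resource lookup: os.path.dirname(__file__) yields the directory containing PlotItem.py, so bundled files can be located regardless of the process's working directory. A minimal sketch of the pattern; resource_path is a hypothetical helper, and 'auto.png' merely echoes the commented-out file name above:

import os

_MODULE_DIR = os.path.dirname(os.path.abspath(__file__))

def resource_path(name):
    # Resolve a data file shipped next to this module, independent
    # of whatever os.getcwd() happens to be at call time.
    return os.path.join(_MODULE_DIR, name)

pixmap_file = resource_path('auto.png')
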
Example 48

Project: django-pyodbc
Source File: ss_loaddata.py
View license
    def handle(self, *fixture_labels, **options):
        from django.db.models import get_apps
        from django.core import serializers
        from django.db import connection, transaction
        from django.conf import settings

        self.style = no_style()

        verbosity = int(options.get('verbosity', 1))
        show_traceback = options.get('traceback', False)

        # commit is a stealth option - it isn't really useful as
        # a command line option, but it can be useful when invoking
        # loaddata from within another script.
        # If commit=True, loaddata will use its own transaction;
        # if commit=False, the data load SQL will become part of
        # the transaction in place when loaddata was invoked.
        commit = options.get('commit', True)

        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        object_count = 0
        models = set()

        humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()

        # Start transaction management. All fixtures are installed in a
        # single transaction to ensure that all references are resolved.
        if commit:
            transaction.commit_unless_managed()
            transaction.enter_transaction_management()
            transaction.managed(True)

        self.disable_forward_ref_checks()

        class SingleZipReader(zipfile.ZipFile):
            def __init__(self, *args, **kwargs):
                zipfile.ZipFile.__init__(self, *args, **kwargs)
                if settings.DEBUG:
                    assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."
            def read(self):
                return zipfile.ZipFile.read(self, self.namelist()[0])

        compression_types = {
            None:   file,
            'gz':   gzip.GzipFile,
            'zip':  SingleZipReader
        }
        if has_bz2:
            compression_types['bz2'] = bz2.BZ2File

        app_fixtures = [os.path.join(os.path.dirname(app.__file__), 'fixtures') for app in get_apps()]
        for fixture_label in fixture_labels:
            parts = fixture_label.split('.')

            if len(parts) > 1 and parts[-1] in compression_types:
                compression_formats = [parts[-1]]
                parts = parts[:-1]
            else:
                compression_formats = compression_types.keys()

            if len(parts) == 1:
                fixture_name = parts[0]
                formats = serializers.get_public_serializer_formats()
            else:
                fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                if format in serializers.get_public_serializer_formats():
                    formats = [format]
                else:
                    formats = []

            if formats:
                if verbosity > 1:
                    print "Loading '%s' fixtures..." % fixture_name
            else:
                self.enable_forward_ref_checks(cursor)
                sys.stderr.write(
                    self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format." %
                        (fixture_name, format)))
                transaction.rollback()
                transaction.leave_transaction_management()
                return

            if os.path.isabs(fixture_name):
                fixture_dirs = [fixture_name]
            else:
                fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']

            for fixture_dir in fixture_dirs:
                if verbosity > 1:
                    print "Checking %s for fixtures..." % humanize(fixture_dir)

                label_found = False
                for format in formats:
                    for compression_format in compression_formats:
                        if compression_format:
                            file_name = '.'.join([fixture_name, format,
                                                  compression_format])
                        else:
                            file_name = '.'.join([fixture_name, format])

                        if verbosity > 1:
                            print "Trying %s for %s fixture '%s'..." % \
                                (humanize(fixture_dir), file_name, fixture_name)
                        full_path = os.path.join(fixture_dir, file_name)
                        open_method = compression_types[compression_format]
                        try:
                            fixture = open_method(full_path, 'r')
                            if label_found:
                                fixture.close()
                                self.enable_forward_ref_checks(cursor)
                                print self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting." %
                                    (fixture_name, humanize(fixture_dir)))
                                transaction.rollback()
                                transaction.leave_transaction_management()
                                return
                            else:
                                fixture_count += 1
                                objects_in_fixture = 0
                                if verbosity > 0:
                                    print "Installing %s fixture '%s' from %s." % \
                                        (format, fixture_name, humanize(fixture_dir))
                                try:
                                    objects = serializers.deserialize(format, fixture)
                                    for obj in objects:
                                        objects_in_fixture += 1
                                        self.handle_ref_checks(cursor, obj)
                                        models.add(obj.object.__class__)
                                        obj.save()
                                    object_count += objects_in_fixture
                                    label_found = True
                                except (SystemExit, KeyboardInterrupt):
                                    self.enable_forward_ref_checks(cursor)
                                    raise
                                except Exception:
                                    import traceback
                                    fixture.close()
                                    self.enable_forward_ref_checks(cursor)
                                    transaction.rollback()
                                    transaction.leave_transaction_management()
                                    if show_traceback:
                                        traceback.print_exc()
                                    else:
                                        sys.stderr.write(
                                            self.style.ERROR("Problem installing fixture '%s': %s\n" %
                                                 (full_path, ''.join(traceback.format_exception(sys.exc_type,
                                                     sys.exc_value, sys.exc_traceback)))))
                                    return
                                fixture.close()

                                # If the fixture we loaded contains 0 objects, assume that an
                                # error was encountered during fixture loading.
                                if objects_in_fixture == 0:
                                    self.enable_forward_ref_checks(cursor)
                                    sys.stderr.write(
                                        self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)" %
                                            (fixture_name)))
                                    transaction.rollback()
                                    transaction.leave_transaction_management()
                                    return

                        except Exception, e:
                            if verbosity > 1:
                                print "No %s fixture '%s' in %s." % \
                                    (format, fixture_name, humanize(fixture_dir))

        self.enable_forward_ref_checks(cursor)

        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
            if sequence_sql:
                if verbosity > 1:
                    print "Resetting sequences"
                for line in sequence_sql:
                    cursor.execute(line)

        if commit:
            transaction.commit()
            transaction.leave_transaction_management()

        if object_count == 0:
            if verbosity > 1:
                print "No fixtures found."
        else:
            if verbosity > 0:
                print "Installed %d object(s) from %d fixture(s)" % (object_count, fixture_count)

        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        if commit:
            connection.close()

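Here dirname is applied to another module's __file__ attribute: os.path.join(os.path.dirname(app.__file__), 'fixtures') turns each installed app's module object into the path of its fixtures directory. A short sketch of the same per-package lookup; package_data_dir is a hypothetical helper:

import os

def package_data_dir(module, subdir):
    # Locate a data directory that ships inside an imported package,
    # e.g. the per-app 'fixtures' directories built in the example above.
    return os.path.join(os.path.dirname(module.__file__), subdir)
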
Example 49

View license
    def test_public_worksheets_visible_readonly_and_copiable_for_others(self):
        # * Harold logs in and creates a new sheet
        sheet_id = self.login_and_create_new_sheet()

        # * He gives the sheet a catchy name
        self.set_sheet_name('spaceshuttle')

        # * He enters some formulae n stuff
        self.enter_cell_text(2, 3, '23')
        self.enter_cell_text(2, 4, '=my_add_function(B3)')
        self.prepend_usercode('my_add_function = lambda x : x + 2')
        self.wait_for_cell_value(2, 4, '25')

        # * He notes that the tooltip for the security icon indicates that the
        # sheet is private
        self.waitForButtonToIndicateSheetIsPublic(False)

        # * He clicks on the security icon
        self.selenium.click('id=id_security_button')

        # He sees a tickbox, currently unticked, saying make worksheet public
        self.wait_for_element_visibility(
                'id=id_security_form', True)
        self.wait_for_element_visibility(
                'id=id_security_form_public_sheet_checkbox', True)

        self.assertEquals(
            self.selenium.get_value('id=id_security_form_public_sheet_checkbox'),
            'off'
        )
        # He ticks it and dismisses the dialog
        self.selenium.click('id=id_security_form_public_sheet_checkbox')
        self.selenium.click('id=id_security_form_ok_button')

        # * He notes that the tooltip for the security icon indicates that the
        # sheet is public
        self.waitForButtonToIndicateSheetIsPublic(True)

        # He notes down the URL and emails it to his colleague Harriet
        harolds_url = self.browser.current_url

        # He logs out
        self.logout()

        # * Later on, Harriet logs into teh Dirigible and heads on over to
        #   Harold's spreadsheet
        self.login(self.get_my_usernames()[1])
        self.go_to_url(harolds_url)

        # She sees the values n stuff
        self.wait_for_grid_to_appear()
        self.wait_for_cell_value(2, 4, '25')

        # * She notices that all toolbar icons are missing,
        # apart from download-as-csv
        map(
            lambda e: self.wait_for_element_presence(e, False),
            [
                'id=id_import_button',
                'id=id_cut_button',
                'id=id_copy_button',
                'id=id_paste_button',
                'id=id_security_button',
            ]
        )
        self.wait_for_element_visibility('id=id_export_button', True)

        # * She tries to edit some formulae, but can't
        self.selenium.double_click(
                self.get_cell_locator(1, 1)
        )
        self.selenium.focus(
                self.get_cell_locator(1, 1)
        )
        time.sleep(1)
        self.wait_for_element_presence(
                self.get_active_cell_editor_locator(),
                False
        )

        # * she tries to edit the cell again, using the formula bar, but cannot
        self.assertEquals(
            self.selenium.get_attribute(self.get_formula_bar_locator() + '@readonly'),
            'true'
        )

        # * She tries to edit some usercode, but can't
        original_code = self.get_usercode()
        self.selenium.get_eval('window.editor.focus()')
        self.human_key_press(key_codes.LETTER_A)
        time.sleep(1)
        self.wait_for_usercode_editor_content(original_code)

        # * She tries to edit the sheet name, but can't

        # * mouses over the sheet name and notes that the appearance
        #   does not change to indicate that it's editable
        self.selenium.mouse_over('id=id_sheet_name')
        time.sleep(1)
        self.wait_for(
            lambda: self.get_css_property('#id_sheet_name', 'background-color') == 'transparent',
            lambda: 'ensure sheet name background stays normal')

        # * He clicks on the sheet name, the sheetname edit textarea does
        #   not appear,
        self.selenium.click('id=id_sheet_name')
        time.sleep(1)
        self.wait_for(
            lambda: not self.is_element_present('id=edit-id_sheet_name'),
            lambda: 'ensure editable sheetname does not appear')

        def download_as_csv():
            self.selenium.click('id=id_export_button')
            self.wait_for_element_visibility('id=id_export_dialog', True)
            download_url = self.selenium.get_attribute('[email protected]')  # locator string mangled by the source page's email obfuscation; originally a selenium 'locator@href' expression
            download_url = urljoin(self.browser.current_url, download_url)

            stream = self.get_url_with_session_cookie(download_url)
            self.assertEquals(stream.info().gettype(), "text/csv")
            self.assertEquals(
                    stream.info()['Content-Disposition'],
                    'attachment; filename=spaceshuttle.csv'
            )

            expected_file_name = path.join(
                    path.dirname(__file__),
                    "test_data", "public_sheet_csv_file.csv"
            )
            with open(expected_file_name) as expected_file:
                self.assertEquals(
                    stream.read().replace("\r\n", "\n"),
                    expected_file.read().replace("\r\n", "\n")
                )

        # * She confirms that she can download a csv of the sheet
        download_as_csv()

        # * She uses some l33t haxx0ring skillz to try and send a
        #   setcellformula Ajax call directly
        # It doesn't work.
        with self.assertRaises(HTTPError):
            response = self.get_url_with_session_cookie(
                    urljoin(harolds_url, '/set_cell_formula/'),
                    data={'column':3, 'row': 4, 'formula': '=jeffk'}
            )

        # * "Aha!" she says, as she notices a link allowing her to copy the sheet,
        self.wait_for_element_visibility('id_copy_sheet_link', True)
        # which she then clicks
        self.selenium.click('id=id_copy_sheet_link')

        # She is taken to a sheet of her own
        self.selenium.wait_for_page_to_load(PAGE_LOAD_TIMEOUT)
        self.wait_for_grid_to_appear()

        # It looks a lot like Harold's but has a different url
        harriets_url = self.browser.current_url
        self.assertFalse(harriets_url == harolds_url)
        self.wait_for_cell_value(2, 4, '25')

        # And she is able to change cell formulae
        self.enter_cell_text(2, 3, '123')
        self.wait_for_cell_value(2, 4, '125')

        # And she is able to change usercode
        self.append_usercode('worksheet[2, 4].value += 100')
        self.wait_for_cell_value(2, 4, '225')

        # And she is well pleased. So much so that she emails two
        # friends about these two sheets (and they tell two
        # friends, and they tell two friends, and so on, and so
        # on.  $$$$)
        self.logout()

        # * Helga is a Dirigible user, but she isn't logged in.
        #   She goes to Harold's page, and sees that it is good.
        self.go_to_url(harolds_url)
        self.wait_for_grid_to_appear()
        self.wait_for_cell_value(2, 4, '25')

        # She clicks on the big copy button, and is taken to the
        # login form
        self.selenium.click('id=id_copy_sheet_link')
        self.selenium.wait_for_page_to_load(PAGE_LOAD_TIMEOUT)
        self.wait_for_element_visibility('id_login_form_wrap', True)

        # She logs in, and is taken straight to her new copy of
        # Harold's sheet
        self.login(
                self.get_my_usernames()[2],
                already_on_login_page=True
        )
        self.wait_for_grid_to_appear()

        helgas_url = self.browser.current_url
        self.assertFalse(helgas_url == harolds_url)
        self.assertFalse(helgas_url == harriets_url)
        self.wait_for_cell_value(2, 4, '25')

        # Helga makes some edits, which she considers superior to
        # Harriet's
        self.enter_cell_text(2, 3, '1000')
        self.append_usercode('worksheet[2, 4].value += 1000')
        self.wait_for_cell_value(2, 4, '2002')

        # Helga now decides to go and see Harriet's sheet, to
        # laugh at the inferiority of Harriet's fork
        # Her access is denied.
        self.assert_HTTP_error(harriets_url, 403)

        # * Harriet's other friend, Hugh, is not a Dirigible user.... yet.
        # He goes to Harold's sheet and sees that it is good
        self.logout()
        self.go_to_url(harolds_url)
        self.wait_for_grid_to_appear()
        self.wait_for_cell_value(2, 4, '25')

        # So good that he clicks the copy button too, despite never
        # having heard of this Dirigible thingy
        self.selenium.click('id=id_copy_sheet_link')
        self.selenium.wait_for_page_to_load(PAGE_LOAD_TIMEOUT)

        # He is taken to the login form,
        self.wait_for_element_visibility('id_login_form_wrap', True)

        # on which he spots a nice friendly link inviting him to register.
        # It says 'free' and everyfink.
        self.wait_for_element_to_appear('id=id_login_signup_link')
        self.wait_for_element_to_appear('id=id_login_signup_blurb')
        self.assertTrue("free" in self.get_text('id=id_login_signup_blurb'))

        # Hugh goes through the whole registration rigmarole,
        self.selenium.click('id=id_login_signup_link')
        self.selenium.wait_for_page_to_load(PAGE_LOAD_TIMEOUT)
        username = self.get_my_username() + "_x"
        self.email_address = 'harold.testuser-%s@example.com' % (username,)  # the %s is required by the format call; the real domain was lost to the source page's email obfuscation, example.com is a stand-in
        password = "p4ssw0rd"
        self.selenium.type('id=id_username', username)
        self.selenium.type('id=id_email', self.email_address)
        self.selenium.type('id=id_password1', password)
        self.selenium.type('id=id_password2', password)
        self.click_link('id_signup_button')

        email_from, email_to, subject, message = self.pop_email_for_client(self.email_address)
        self.assertEquals(subject, 'Dirigible Beta Sign-up')
        confirm_url_re = re.compile(
            r'<(http://projectdirigible\.com/signup/activate/[^>]+)>'
        )
        match = confirm_url_re.search(message)
        self.assertTrue(match)
        confirmation_url = match.group(1).replace('projectdirigible.com', SERVER_IP)

        # * Hugh then logs in
        self.go_to_url(confirmation_url)
        self.login(username, password, already_on_login_page=True)

        # and has his socks knocked off by the presence of the copy of Harold's
        # sheet in his dashboard
        self.selenium.click('link=spaceshuttle')

        # and it has the copied content
        self.selenium.wait_for_page_to_load(PAGE_LOAD_TIMEOUT)
        self.wait_for_grid_to_appear()
        self.wait_for_cell_value(2, 4, '25')

        # Harold logs in and sees that his original sheet is unharmed by all of
        # the other users editing theirs
        self.login(self.get_my_usernames()[0])
        self.go_to_url(harolds_url)
        self.wait_for_grid_to_appear()
        self.wait_for_cell_value(2, 4, '25')

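The dirname usage in this test sits inside download_as_csv: path.join(path.dirname(__file__), 'test_data', ...) anchors the expected-output file next to the test module, keeping the assertion independent of where the test runner was launched. A minimal sketch of that fixture idiom; read_expected is a hypothetical helper:

from os import path

def read_expected(filename):
    # Expected-output files live in a test_data/ directory beside this module.
    full_path = path.join(path.dirname(__file__), 'test_data', filename)
    with open(full_path) as f:
        return f.read().replace('\r\n', '\n')  # normalize line endings, as the test does
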
Example 50

Project: RMG-Py
Source File: statmech.py
View license
    def load(self):
        """
        Load the statistical mechanics parameters for each conformer from
        the associated files on disk. Creates :class:`Conformer` objects for
        each conformer and appends them to the list of conformers on the
        species object.
        """
        logging.info('Loading statistical mechanics parameters for {0}...'.format(self.species.label))
        
        path = self.path
        
        TS = isinstance(self.species, TransitionState)
    
        global_context = {
            '__builtins__': None,
        }
        local_context = {
            '__builtins__': None,
            'True': True,
            'False': False,
            'HinderedRotor': hinderedRotor,
            # File formats
            'GaussianLog': GaussianLog,
            'QchemLog': QchemLog,
            'MoleProLog': MoleProLog,
            'ScanLog': ScanLog,
        }
    
        directory = os.path.abspath(os.path.dirname(path))
    
        with open(path, 'r') as f:
            try:
                exec f in global_context, local_context
            except (NameError, TypeError, SyntaxError), e:
                logging.error('The species file {0} was invalid:'.format(path))
                raise
        
        try:
            atoms = local_context['atoms']
        except KeyError:
            raise InputError('Required attribute "atoms" not found in species file {0!r}.'.format(path))
        
        try:
            bonds = local_context['bonds']
        except KeyError:
            bonds = {}
            
        try:
            linear = local_context['linear']
        except KeyError:
            raise InputError('Required attribute "linear" not found in species file {0!r}.'.format(path))
        
        try:
            externalSymmetry = local_context['externalSymmetry']
        except KeyError:
            raise InputError('Required attribute "externalSymmetry" not found in species file {0!r}.'.format(path))
        
        try:
            spinMultiplicity = local_context['spinMultiplicity']
        except KeyError:
            raise InputError('Required attribute "spinMultiplicity" not found in species file {0!r}.'.format(path))
       
        try:
            opticalIsomers = local_context['opticalIsomers']
        except KeyError:
            raise InputError('Required attribute "opticalIsomers" not found in species file {0!r}.'.format(path))
        
        try:
            energy = local_context['energy']
        except KeyError:
            raise InputError('Required attribute "energy" not found in species file {0!r}.'.format(path))
        if isinstance(energy, dict):
            try:
                energy = energy[self.modelChemistry]
            except KeyError:
                raise InputError('Model chemistry {0!r} not found in dictionary of energy values in species file {1!r}.'.format(self.modelChemistry, path))
        if isinstance(energy, (GaussianLog, QchemLog, MoleProLog)):
            energyLog = energy
            E0 = None
            energyLog.path = os.path.join(directory, energyLog.path)
        elif isinstance(energy, float):
            energyLog = None
            E0 = energy
        
        try:
            geomLog = local_context['geometry']
        except KeyError:
            raise InputError('Required attribute "geometry" not found in species file {0!r}.'.format(path))
        geomLog.path = os.path.join(directory, geomLog.path)
    
        try:
            statmechLog = local_context['frequencies']
        except KeyError:
            raise InputError('Required attribute "frequencies" not found in species file {0!r}.'.format(path))
        statmechLog.path = os.path.join(directory, statmechLog.path)
        
        if 'frequencyScaleFactor' in local_context:
            logging.warning('Ignoring frequency scale factor in species file {0!r}.'.format(path))
        
        try:
            rotors = local_context['rotors']
        except KeyError:
            rotors = []
        
        # But don't consider hindered rotors if flag is not set
        if not self.includeHinderedRotors:
            rotors = []
        
        logging.debug('    Reading molecular degrees of freedom...')
        conformer = statmechLog.loadConformer(symmetry=externalSymmetry, spinMultiplicity=spinMultiplicity, opticalIsomers=opticalIsomers)
        
        logging.debug('    Reading optimized geometry...')
        coordinates, number, mass = geomLog.loadGeometry()
        conformer.coordinates = (coordinates,"angstroms") 
        conformer.number = number
        conformer.mass = (mass,"amu")
        
        logging.debug('    Reading energy...')
        # The E0 that is read from the log file is without the ZPE and corresponds to E_elec
        if E0 is None:
            E0 = energyLog.loadEnergy(self.frequencyScaleFactor)
        else:
            E0 = E0 * constants.E_h * constants.Na         # Hartree/particle to J/mol
        E0 = applyEnergyCorrections(E0, self.modelChemistry, atoms, bonds if self.applyBondEnergyCorrections else {})
        ZPE = statmechLog.loadZeroPointEnergy() * self.frequencyScaleFactor
        
        # The E0_withZPE at this stage contains the ZPE
        E0_withZPE = E0 + ZPE
        
        logging.debug('         Scaling factor used = {0:g}'.format(self.frequencyScaleFactor))
        logging.debug('         ZPE (0 K) = {0:g} kcal/mol'.format(ZPE / 4184.))
        logging.debug('         E0 (0 K) = {0:g} kcal/mol'.format(E0_withZPE / 4184.))
       
        conformer.E0 = (E0_withZPE*0.001,"kJ/mol")
        
        # If loading a transition state, also read the imaginary frequency
        if TS:
            self.species.frequency = (statmechLog.loadNegativeFrequency() * self.frequencyScaleFactor, "cm^-1")

        # Read and fit the 1D hindered rotors if applicable
        # If rotors are found, the vibrational frequencies are also
        # recomputed with the torsional modes removed
        F = statmechLog.loadForceConstantMatrix()
        if F is not None and len(mass) > 1 and len(rotors) > 0:
            
            logging.debug('    Fitting {0} hindered rotors...'.format(len(rotors)))
            rotorCount = 0
            for scanLog, pivots, top, symmetry, fit in rotors:
                
                # Load the hindered rotor scan energies
                if isinstance(scanLog, (GaussianLog, QchemLog)):
                    scanLog.path = os.path.join(directory, scanLog.path)
                    Vlist, angle = scanLog.loadScanEnergies()
                    scanLogOutput = ScanLog(os.path.join(directory, '{0}_rotor_{1}.txt'.format(self.species.label, rotorCount+1)))
                    scanLogOutput.save(angle, Vlist)
                elif isinstance(scanLog, ScanLog):
                    scanLog.path = os.path.join(directory, scanLog.path)
                    angle, Vlist = scanLog.load()
                else:
                    raise Exception('Invalid log file type {0} for scan log.'.format(scanLog.__class__))
                    
                inertia = conformer.getInternalReducedMomentOfInertia(pivots, top) * constants.Na * 1e23
                
                cosineRotor = HinderedRotor(inertia=(inertia,"amu*angstrom^2"), symmetry=symmetry)
                cosineRotor.fitCosinePotentialToData(angle, Vlist)
                fourierRotor = HinderedRotor(inertia=(inertia,"amu*angstrom^2"), symmetry=symmetry)
                fourierRotor.fitFourierPotentialToData(angle, Vlist)
                
                Vlist_cosine = numpy.zeros_like(angle)
                Vlist_fourier = numpy.zeros_like(angle)
                for i in range(angle.shape[0]):
                    Vlist_cosine[i] = cosineRotor.getPotential(angle[i])
                    Vlist_fourier[i] = fourierRotor.getPotential(angle[i])
                
                if fit == 'cosine':
                    rotor = cosineRotor
                elif fit == 'fourier':
                    rotor = fourierRotor
                elif fit == 'best':
                    rms_cosine = numpy.sqrt(numpy.sum((Vlist_cosine - Vlist) * (Vlist_cosine - Vlist)) / (len(Vlist) - 1)) / 4184.
                    rms_fourier = numpy.sqrt(numpy.sum((Vlist_fourier - Vlist) * (Vlist_fourier - Vlist)) / (len(Vlist) - 1)) / 4184.

                    # Keep the rotor with the most accurate potential
                    rotor = cosineRotor if rms_cosine < rms_fourier else fourierRotor
                    # However, keep the cosine rotor if it is accurate enough, the
                    # Fourier rotor is not significantly more accurate, and the cosine
                    # rotor has the correct symmetry
                    if rms_cosine < 0.05 and rms_cosine / rms_fourier < 2.0 and symmetry == cosineRotor.symmetry:
                        rotor = cosineRotor

                conformer.modes.append(rotor)

                self.plotHinderedRotor(angle, Vlist, cosineRotor, fourierRotor, rotor, rotorCount, directory)

                rotorCount += 1
                       
            logging.debug('    Determining frequencies from reduced force constant matrix...')
            frequencies = numpy.array(projectRotors(conformer, F, rotors, linear, TS))
            
            # The frequencies have changed after projection, hence we need to recompute the ZPE
            # We may also need to apply the frequency scaling factor here
            ZPE = self.getZPEfromfrequencies(frequencies)
            E0_withZPE = E0 + ZPE
            # Reset the E0 of the conformer
            conformer.E0 = (E0_withZPE*0.001,"kJ/mol")

        elif len(conformer.modes) > 2:
            frequencies = conformer.modes[2].frequencies.value_si
            rotors = numpy.array([])
        else:
            frequencies = numpy.array([])
            rotors = numpy.array([])
    
        for mode in conformer.modes:
            if isinstance(mode, HarmonicOscillator):
                mode.frequencies = (frequencies * self.frequencyScaleFactor,"cm^-1")
        
        self.species.conformer = conformer
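
This final example uses dirname for input-relative path resolution: os.path.abspath(os.path.dirname(path)) captures the directory of the species input file, and every log file named inside that file is then joined onto it. A short sketch of the pattern; resolve_relative is a hypothetical helper:

import os

def resolve_relative(input_file, referenced_path):
    # Interpret a path mentioned inside input_file relative to the
    # directory containing input_file, not the current working directory.
    base_dir = os.path.abspath(os.path.dirname(input_file))
    return os.path.join(base_dir, referenced_path)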