shutil.rmtree

Here are examples of the Python API shutil.rmtree, taken from open source projects. Each example shows how a real project removes a directory tree.
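Before the project examples, here is a minimal, self-contained sketch of the call patterns they rely on: a guarded delete, a best-effort delete with ignore_errors=True, and an onerror handler. The paths and the helper name below are hypothetical and exist only for illustration.

import os
import shutil
import stat
import tempfile

# Create a scratch tree so the calls below have something real to delete.
scratch = tempfile.mkdtemp(prefix="rmtree-demo-")
os.makedirs(os.path.join(scratch, "sub"))

# Guarded delete: only call rmtree if the directory actually exists.
if os.path.isdir(scratch):
    shutil.rmtree(scratch)

# Best-effort delete: ignore_errors=True swallows failures, e.g. when the
# tree is already gone (as it is here after the call above).
shutil.rmtree(scratch, ignore_errors=True)

# onerror handler: clear the read-only bit and retry the failed operation,
# a common workaround for read-only files on Windows.
def make_writable_and_retry(func, path, exc_info):
    os.chmod(path, stat.S_IWRITE)
    func(path)

locked_dir = tempfile.mkdtemp(prefix="rmtree-demo-")
locked_file = os.path.join(locked_dir, "locked.txt")
with open(locked_file, "w") as fh:
    fh.write("data")
os.chmod(locked_file, stat.S_IREAD)

shutil.rmtree(locked_dir, onerror=make_writable_and_retry)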

200 Examples

Example 1

Project: pymo
Source File: build.py
def build(iface, directory, commands):

    # Are we doing a Ren'Py build?

    global RENPY
    RENPY = os.path.exists("renpy")

    if not os.path.isdir(directory):
        iface.fail("{} is not a directory.".format(directory))

    if RENPY and not os.path.isdir(os.path.join(directory, "game")):
        iface.fail("{} does not contain a Ren'Py game.".format(directory))

    
    config = configure.Configuration(directory)
    if config.package is None:
        iface.fail("Run configure before attempting to build the app.")


    global blacklist
    global whitelist
    
    blacklist = PatternList("blacklist.txt")
    whitelist = PatternList("whitelist.txt")
        
    if RENPY:
        manifest_extra = None        
        default_icon = "templates/renpy-icon.png"
        default_presplash = "templates/renpy-presplash.jpg"

        public_dir = None
        private_dir = None
        assets_dir = directory
    
    else:
        manifest_extra = ""
        default_icon = "templates/pygame-icon.png"
        default_presplash = "templates/pygame-presplash.jpg"
        
        if config.layout == "internal":
            private_dir = directory
            public_dir = None
            assets_dir = None
        elif config.layout == "external":
            private_dir = None
            public_dir = directory
            assets_dir = None
        elif config.layout == "split":
            private_dir = join_and_check(directory, "internal")
            public_dir = join_and_check(directory, "external")
            assets_dir = join_and_check(directory, "assets")
        
    versioned_name = config.name.replace(" ", "").replace("'", "") + "-" + config.version

    # Annoying fixups.
    config.name = config.name.replace("'", "\\'")
    config.icon_name = config.icon_name.replace("'", "\\'")
    
    # Figure out versions of the private and public data.
    private_version = str(time.time())

    if public_dir:
        public_version = private_version
    else:
        public_version = None
            
    # Render the various templates into control files.
    render(
        "AndroidManifest.tmpl.xml",
        "AndroidManifest.xml", 
        config = config,
        manifest_extra = manifest_extra,
        )

    render(
        "strings.xml",
        "res/values/strings.xml",
        public_version = public_version,
        private_version = private_version,
        config = config)

    try:
        os.unlink("build.xml")
    except:
        pass
        
    iface.info("Updating source code.")
    
    edit_file("src/org/renpy/android/DownloaderActivity.java", r'import .*\.R;', 'import {}.R;'.format(config.package))
    
    iface.info("Updating build files.")
        
    # Update the project to a recent version.
    subprocess.call([plat.android, "update", "project", "-p", '.', '-t', 'android-19', '-n', versioned_name,
        # "--library", "android-sdk/extras/google/play_licensing/library",
        "--library", "android-sdk/extras/google/play_apk_expansion/downloader_library",
        ])


    iface.info("Creating assets directory.")

    if os.path.isdir("assets"):
        shutil.rmtree("assets")
    
    if assets_dir is not None:
        make_tree(assets_dir, "assets")
    else:
        os.mkdir("assets")

    # Copy in the Ren'Py common assets.
    if os.path.exists("renpy/common"):

        if os.path.isdir("assets/common"):
            shutil.rmtree("assets/common")
        
        make_tree("renpy/common", "assets/common")

        # Ren'Py uses a lot of names that don't work as assets. Auto-rename
        # them.
        for dirpath, dirnames, filenames in os.walk("assets", topdown=False):
            
            for fn in filenames + dirnames:
                if fn[0] == ".":
                    continue
                
                old = os.path.join(dirpath, fn)
                new = os.path.join(dirpath, "x-" + fn)
                
                os.rename(old, new)


    if config.expansion:
        iface.info("Creating expansion file.")
        expansion_file = "main.{}.{}.obb".format(config.numeric_version, config.package)

        zf = zipfile.ZipFile(expansion_file, "w", zipfile.ZIP_STORED)
        zip_directory(zf, "assets")
        zf.close()

        # Delete and re-make the assets directory.
        shutil.rmtree("assets")
        os.mkdir("assets")
        
        # Write the file size into DownloaderActivity.
        file_size = os.path.getsize(expansion_file)
        
        edit_file("src/org/renpy/android/DownloaderActivity.java", 
            r'    private int fileVersion =', 
            '    private int fileVersion = {};'.format(config.numeric_version))

        edit_file("src/org/renpy/android/DownloaderActivity.java", 
            r'    private int fileSize =', 
            '    private int fileSize = {};'.format(file_size))
        
    else:
        expansion_file = None

    iface.info("Packaging internal data.")

    private_dirs = [ 'private' ]

    if private_dir is not None:
        private_dirs.append(private_dir)
        
    if os.path.exists("engine-private"):
        private_dirs.append("engine-private")

    make_tar("assets/private.mp3", private_dirs)
    
    if public_dir is not None:
        iface.info("Packaging external data.")
        make_tar("assets/public.mp3", [ public_dir ])

    # Copy over the icon and presplash files.
    shutil.copy(join_and_check(directory, "android-icon.png") or default_icon, "res/drawable/icon.png")
    shutil.copy(join_and_check(directory, "android-presplash.jpg") or default_presplash, "res/drawable/presplash.jpg")

    # Build.
    iface.info("I'm using Ant to build the package.")

    # Clean is required 
    try:   
        subprocess.check_call([plat.ant, "clean"] +  commands)
        iface.success("It looks like the build succeeded.")
    except:
        iface.fail("The build seems to have failed.")


    if (expansion_file is not None) and ("install" in commands):
        iface.info("Uploading expansion file.")
        
        dest = "/mnt/sdcard/{}".format(expansion_file)

        subprocess.check_call([ plat.adb, "push", expansion_file, dest ])
        
        iface.success("Uploaded the expansion file.")

    if expansion_file is not None:
        os.rename(expansion_file, "bin/" + expansion_file)

Example 2

Project: pagure
Source File: dev-data.py
def insert_data(session, username, user_email):
    pagure.APP.config['EMAIL_SEND'] = False
    pagure.APP.config['TESTING'] = True

    ######################################
    # tags
    item = pagure.lib.model.Tag(
        tag='tag1',
    )
    session.add(item)
    session.commit()

    ######################################
    # Users
    # Create a couple of users
    item = pagure.lib.model.User(
        user='pingou',
        fullname='PY C',
        password='foo',
        default_email='[email protected]',
    )
    session.add(item)
    session.commit()

    item = pagure.lib.model.User(
        user='foo',
        fullname='foo bar',
        password='foo',
        default_email='[email protected]',
    )
    session.add(item)
    session.commit()

    item = pagure.lib.model.User(
        user=username,
        fullname=username,
        password='foo',
        default_email=user_email,
    )
    session.add(item)
    session.commit()

    ######################################
    # pagure_group
    item = pagure.lib.model.PagureGroup(
        group_name='admin',
        user_id=1,
        display_name='admin',
        description='Admin Group',
    )
    session.add(item)
    session.commit()

    # Add a couple of groups so that we can list them
    item = pagure.lib.model.PagureGroup(
        group_name='group',
        group_type='user',
        user_id=1,  # pingou
        display_name='group group',
        description='this is a group group',
    )
    session.add(item)
    session.commit()

    item = pagure.lib.model.PagureGroup(
        group_name='rel-eng',
        group_type='user',
        user_id=1,  # pingou
        display_name='Release Engineering',
        description='The group of release engineers',
    )
    session.add(item)
    session.commit()
    ######################################
    # projects

    import shutil
    # delete folder from local instance to start from a clean slate
    if os.path.exists(pagure.APP.config['GIT_FOLDER']):
        shutil.rmtree(pagure.APP.config['GIT_FOLDER'])

    tests.create_projects(session)
    tests.create_projects_git(pagure.APP.config['GIT_FOLDER'], bare=True)
    tests.add_content_git_repo(
        os.path.join(pagure.APP.config['GIT_FOLDER'], 'test.git'))
    tests.add_readme_git_repo(
        os.path.join(pagure.APP.config['GIT_FOLDER'], 'test.git'))

    # Add some content to the git repo
    tests.add_content_git_repo(
        os.path.join(pagure.APP.config['GIT_FOLDER'], 'forks', 'pingou',
                     'test.git'))
    tests.add_readme_git_repo(
        os.path.join(pagure.APP.config['GIT_FOLDER'], 'forks', 'pingou',
                     'test.git'))
    tests.add_commit_git_repo(
        os.path.join(pagure.APP.config['GIT_FOLDER'], 'forks', 'pingou',
                     'test.git'), ncommits=10)

    ######################################
    # user_emails
    item = pagure.lib.model.UserEmail(
        user_id=1,
        email='[email protected]')
    session.add(item)

    item = pagure.lib.model.UserEmail(
        user_id=1,
        email='[email protected]')
    session.add(item)

    item = pagure.lib.model.UserEmail(
        user_id=2,
        email='[email protected]')
    session.add(item)

    item = pagure.lib.model.UserEmail(
        user_id=3,
        email=user_email)
    session.add(item)

    session.commit()

    ######################################
    # user_emails_pending
    user = pagure.lib.search_user(session, username='pingou')
    email_pend = pagure.lib.model.UserEmailPending(
        user_id=user.id,
        email='[email protected]',
        token='abcdef',
    )
    session.add(email_pend)
    session.commit()

    ######################################
    # issues
    # Add an issue and tag it so that we can list them
    item = pagure.lib.model.Issue(
        id=1,
        uid='foobar',
        project_id=1,
        title='Problem with jenkins build',
        content='For some reason the tests fail at line:24',
        user_id=1,  # pingou
    )
    session.add(item)
    session.commit()

    item = pagure.lib.model.Issue(
        id=2,
        uid='foobar2',
        project_id=1,
        title='Unit tests failing',
        content='Need to fix code for the unit tests to '
                'pass so jenkins build can complete.',
        user_id=1,  # pingou
    )
    session.add(item)
    session.commit()

    user = pagure.lib.search_user(session, username=username)
    item = pagure.lib.model.Issue(
        id=3,
        uid='foobar3',
        project_id=1,
        title='Segfault during execution',
        content='Index out of bounds for variable i?',
        user_id=user.id,  # current user
    )
    session.add(item)
    session.commit()

    ######################################
    # pagure_user_group
    group = pagure.lib.search_groups(session, pattern=None,
                                     group_name="rel-eng", group_type=None)
    user = pagure.lib.search_user(session, username='pingou')
    item = pagure.lib.model.PagureUserGroup(
        user_id=user.id,
        group_id=group.id
    )
    session.add(item)
    session.commit()

    user = pagure.lib.search_user(session, username=username)
    group = pagure.lib.search_groups(session, pattern=None,
                                     group_name="admin", group_type=None)

    item = pagure.lib.model.PagureUserGroup(
        user_id=user.id,
        group_id=group.id
    )
    session.add(item)
    session.commit()

    user = pagure.lib.search_user(session, username='foo')
    group = pagure.lib.search_groups(session, pattern=None,
                                     group_name="group", group_type=None)

    item = pagure.lib.model.PagureUserGroup(
        user_id=user.id,
        group_id=group.id
    )
    session.add(item)
    session.commit()

    ######################################
    # projects_groups
    group = pagure.lib.search_groups(session, pattern=None,
                                     group_name="rel-eng", group_type=None)
    repo = pagure.lib.get_project(session, 'test')
    item = pagure.lib.model.ProjectGroup(
        project_id=repo.id,
        group_id=group.id
    )
    session.add(item)
    session.commit()

    group = pagure.lib.search_groups(session, pattern=None,
                                     group_name="admin", group_type=None)
    repo = pagure.lib.get_project(session, 'test2')
    item = pagure.lib.model.ProjectGroup(
        project_id=repo.id,
        group_id=group.id
    )
    session.add(item)
    session.commit()

    ######################################
    # pull_requests
    repo = pagure.lib.get_project(session, 'test')
    forked_repo = pagure.lib.get_project(session, 'test')
    req = pagure.lib.new_pull_request(
        session=session,
        repo_from=forked_repo,
        branch_from='master',
        repo_to=repo,
        branch_to='master',
        title='Fixing code for unittest',
        user=username,
        requestfolder=None,
    )
    session.commit()

    ######################################
    # tokens
    tests.create_tokens(session)

    ######################################
    # user_projects
    user = pagure.lib.search_user(session, username='foo')
    repo = pagure.lib.get_project(session, 'test')
    item = pagure.lib.model.ProjectUser(
        project_id=repo.id,
        user_id=user.id
    )
    session.add(item)
    session.commit()

    user = pagure.lib.search_user(session, username=username)
    repo = pagure.lib.get_project(session, 'test2')
    item = pagure.lib.model.ProjectUser(
        project_id=repo.id,
        user_id=user.id
    )
    session.add(item)
    session.commit()

    ######################################
    # issue_comments
    item = pagure.lib.model.IssueComment(
        user_id=1,
        issue_uid='foobar',
        comment='We may need to adjust the unittests instead of the code.',
    )
    session.add(item)
    session.commit()

    ######################################
    # issue_to_issue
    repo = pagure.lib.get_project(session, 'test')

    all_issues = pagure.lib.search_issues(session, repo)
    pagure.lib.add_issue_dependency(session, all_issues[0],
                                    all_issues[1], 'pingou',
                                    pagure.APP.config['GIT_FOLDER'])

    ######################################
    # pull_request_comments
    user = pagure.lib.search_user(session, username='pingou')
    # only 1 pull request available atm
    pr = pagure.lib.get_pull_request_of_user(session, "pingou")[0]
    item = pagure.lib.model.PullRequestComment(
        pull_request_uid=pr.uid,
        user_id=user.id,
        comment="+1 for me. Btw, could you rebase before you merge?",
        notification=0
    )
    session.add(item)
    session.commit()

    ######################################
    # pull_request_flags
    user = pagure.lib.search_user(session, username='pingou')
    # only 1 pull request available atm
    pr = pagure.lib.get_pull_request_of_user(session, "pingou")[0]
    item = pagure.lib.model.PullRequestFlag(
        uid="random_pr_flag_uid",
        pull_request_uid=pr.uid,
        user_id=user.id,
        username=user.user,
        percent=80,
        comment="Jenkins build passes",
        url=str(pr.id)
    )
    session.add(item)
    session.commit()

    ######################################
    # tags_issues
    repo = pagure.lib.get_project(session, 'test')
    issues = pagure.lib.search_issues(session, repo)
    item = pagure.lib.model.TagIssue(
        issue_uid=issues[0].uid,
        tag='Blocker',
    )
    session.add(item)
    session.commit()

    ######################################
    # tokens_acls
    tests.create_tokens_acl(session)

    ######################################
    # Fork a project
    # delete fork data
    fork_proj_location = "forks/foo/test.git"
    try:
        shutil.rmtree(os.path.join(pagure.APP.config['GIT_FOLDER'],
                                   fork_proj_location))
    except:
        print('git folder already deleted')

    try:
        shutil.rmtree(os.path.join(pagure.APP.config['DOCS_FOLDER'],
                                   fork_proj_location))
    except:
        print('docs folder already deleted')

    try:
        shutil.rmtree(os.path.join(pagure.APP.config['TICKETS_FOLDER'],
                                   fork_proj_location))
    except:
        print('tickets folder already deleted')

    try:
        shutil.rmtree(os.path.join(pagure.APP.config['REQUESTS_FOLDER'],
                                   fork_proj_location))
    except:
        print('requests folder already deleted')

    repo = pagure.lib.get_project(session, 'test')
    result = pagure.lib.fork_project(session, 'foo', repo,
                                     pagure.APP.config['GIT_FOLDER'],
                                     pagure.APP.config['DOCS_FOLDER'],
                                     pagure.APP.config['TICKETS_FOLDER'],
                                     pagure.APP.config['REQUESTS_FOLDER'])
    if result == 'Repo "test" cloned to "foo/test"':
        session.commit()

Example 3

Project: pyspace
Source File: trainer.py
    def prepare_training(self, training_files, potentials, operation, nullmarker_stride_ms = None):
        """ Prepares pyspace live for training.

        Prepares everything for training of pyspace live,
        i.e. creates flows based on the dataflow specs
        and configures them.
        """
        online_logger.info( "Preparing Training")
        self.potentials = potentials
        self.operation = operation
        self.nullmarker_stride_ms = nullmarker_stride_ms
        if self.nullmarker_stride_ms == None:
            online_logger.warn( 'Nullmarker stride interval is %s. You can specify it in your parameter file.' % self.nullmarker_stride_ms)
        else:
            online_logger.info( 'Nullmarker stride interval is set to %s ms ' % self.nullmarker_stride_ms)

        online_logger.info( "Creating flows..")
        for key in self.potentials.keys():
            spec_base = self.potentials[key]["configuration"].spec_dir
            if self.operation == "train":
                self.potentials[key]["node_chain"] = os.path.join(spec_base, self.potentials[key]["node_chain"])
                online_logger.info( "node_chain_spec:" + self.potentials[key]["node_chain"])

            elif self.operation in ("prewindowing", "prewindowing_offline"):
                self.potentials[key]["prewindowing_flow"] = os.path.join(spec_base, self.potentials[key]["prewindowing_flow"])
                online_logger.info( "prewindowing_dataflow_spec: " + self.potentials[key]["prewindowing_flow"])

            elif self.operation == "prewindowed_train":
                self.potentials[key]["postprocess_flow"] = os.path.join(spec_base, self.potentials[key]["postprocess_flow"])
                online_logger.info( "postprocessing_dataflow_spec: " + self.potentials[key]["postprocess_flow"])

            self.training_active_potential[key] = multiprocessing.Value("b",False)

        online_logger.info("Path variables set for NodeChains")

        # check if multiple potentials are given for training
        if isinstance(training_files, list):
            self.training_data = training_files
        else:
            self.training_data = [training_files]

        # Training is done in separate processes, we send the time series
        # windows to these threads via two queues
        online_logger.info( "Initializing Queues")
        for key in self.potentials.keys():
            self.queue[key] = multiprocessing.Queue()


        def flow_generator(key):
            """create a generator to yield all the abri flow windows"""
            # Yield all windows until a None item is found in the queue
            while True:
                window = self.queue[key].get(block = True, timeout = None)
                if window == None: break
                yield window

        # Create the actual data flows
        for key in self.potentials.keys():

            if self.operation == "train":
                self.node_chains[key] = NodeChainFactory.flow_from_yaml(Flow_Class = NodeChain,
                                                         flow_spec = file(self.potentials[key]["node_chain"]))
                self.node_chains[key][0].set_generator(flow_generator(key))
                flow = open(self.potentials[key]["node_chain"])
            elif self.operation in ("prewindowing", "prewindowing_offline"):
                online_logger.info("loading prewindowing flow..")
                online_logger.info("file: " + str(self.potentials[key]["prewindowing_flow"]))

                self.node_chains[key] = NodeChainFactory.flow_from_yaml(Flow_Class = NodeChain,
                                                             flow_spec = file(self.potentials[key]["prewindowing_flow"]))
                self.node_chains[key][0].set_generator(flow_generator(key))
                flow = open(self.potentials[key]["prewindowing_flow"])
            elif self.operation == "prewindowed_train":
                self.node_chains[key] = NodeChainFactory.flow_from_yaml(Flow_Class = NodeChain, flow_spec = file(self.potentials[key]["postprocess_flow"]))
                replace_start_and_end_markers = False

                final_collection = TimeSeriesDataset()
                final_collection_path = os.path.join(self.prewindowed_data_directory, key, "all_train_data")
                # delete previous training collection
                if os.path.exists(final_collection_path):
                    online_logger.info("deleting old training data collection for " + key)
                    shutil.rmtree(final_collection_path)

                # load all prewindowed collections and
                # append data to the final collection
                prewindowed_sets = \
                    glob.glob(os.path.join(self.prewindowed_data_directory, key, "*"))
                if len(prewindowed_sets) == 0:
                    online_logger.error("Couldn't find data, please do prewindowing first!")
                    raise Exception
                online_logger.info("concatenating prewindowed data from " + str(prewindowed_sets))

                for s,d in enumerate(prewindowed_sets):
                    collection = BaseDataset.load(d)
                    data = collection.get_data(0, 0, "train")
                    for d,(sample,label) in enumerate(data):
                        if replace_start_and_end_markers:
                            # in case we concatenate multiple 'Window' labeled
                            # sets we have to remove every start- and endmarker
                            for k in sample.marker_name.keys():
                                # find '{S,s}  8' or '{S,s}  9'
                                m = re.match("^s\s{0,2}[8,9]{1}$", k, re.IGNORECASE)
                                if m is not None:
                                    online_logger.info(str("remove %s from %d %d" % (m.group(), s, d)))
                                    del(sample.marker_name[m.group()])

                            if s == len(prewindowed_sets)-1 and \
                                d == len(data)-1:
                                # insert endmarker
                                sample.marker_name["S  9"] = [0.0]
                                online_logger.info("added endmarker" + str(s) + " " + str(d))

                            if s == 0 and d == 0:
                                # insert startmarker
                                sample.marker_name["S  8"] = [0.0]
                                online_logger.info("added startmarker" + str(s) + " " + str(d))

                        final_collection.add_sample(sample, label, True)

                # save final collection (just for debugging)
                os.mkdir(final_collection_path)
                final_collection.store(final_collection_path)

                online_logger.info("stored final collection at " + final_collection_path)

                # load final collection again for training
                online_logger.info("loading data from " + final_collection_path)
                self.prewindowed_data[key] =  BaseDataset.load(final_collection_path)
                self.node_chains[key][0].set_input_dataset(self.prewindowed_data[key])

                flow = open(self.potentials[key]["postprocess_flow"])

            # create window_stream for every potential

            if self.operation in ("prewindowing"):
                window_spec_file = os.path.join(spec_base,"node_chains","windower",
                             self.potentials[key]["windower_spec_path_train"])

                self.window_stream[key] = \
                        self.stream_manager.request_window_stream(window_spec_file,
                                                              nullmarker_stride_ms = self.nullmarker_stride_ms)
            elif self.operation in ("prewindowing_offline"):
                pass
            elif self.operation in ("train"):
                pass

            self.node_chain_definitions[key] = yaml.load(flow)
            flow.close()

        # TODO: check if the prewindowing flow is still needed when using the stream mode!
        if self.operation in ("train"):
            online_logger.info( "Removing old flows...")
            try:
                shutil.rmtree(self.flow_storage)
            except:
                online_logger.info("Could not delete flow storage directory")
            os.mkdir(self.flow_storage)
        elif self.operation in ("prewindowing", "prewindowing_offline"):
            # follow this policy:
            # - delete prewindowed data older than 12 hours
            # - always delete trained/stored flows
            now = datetime.datetime.now()
            then = now - datetime.timedelta(hours=12)

            if not os.path.exists(self.prewindowed_data_directory):
                os.mkdir(self.prewindowed_data_directory)
            if not os.path.exists(self.flow_storage):
                os.mkdir(self.flow_storage)

            for key in self.potentials.keys():
                found = self.find_files_older_than(then, \
                        os.path.join(self.prewindowed_data_directory, key))
                if found is not None:
                    for f in found:
                        online_logger.info(str("recursively deleting files in \'%s\'" % f))
                        try:
                            shutil.rmtree(os.path.abspath(f))
                        except Exception as e:
                            # TODO: find a smart solution for this!
                            pass # dir was probably already deleted..

                if os.path.exists(os.path.join(self.prewindowed_data_directory, key, "all_train_data")):
                    shutil.rmtree(os.path.join(self.prewindowed_data_directory, key, "all_train_data"))
                    online_logger.info("deleted concatenated training data for " + key)


        online_logger.info( "Training preparations finished")
        return 0

Example 4

Project: bleachbit
Source File: TestWinapp.py
    def test_fake(self):
        """Test with fake file"""

        # reuse this path to store a winapp2.ini file in
        (ini_h, self.ini_fn) = tempfile.mkstemp(
            suffix='.ini', prefix='winapp2')
        os.close(ini_h)

        # a set of tests
        # this map explains what each position in the test tuple means
        # 0=line to write directly to winapp2.ini
        # 1=filename1 to place in fake environment (default=deleteme.log)
        # 2=auto-hide before cleaning
        # 3=dirname exists after cleaning
        # 4=filename1 (.\deleteme.log) exists after cleaning
        # 5=sub\deleteme.log exists after cleaning
        # 6=.\deleteme.bak exists after cleaning
        # 7=auto-hide after cleaning
        tests = [
            # single file
            ('FileKey1=%s|deleteme.log', None,
                False, True, False, True, True, True),
            # single file, case matching should be insensitive
            ('FileKey1=%s|dEleteme.LOG', None,
                False, True, False, True, True, True),
            # special characters for XML
            ('FileKey1=%s|special_chars_&-\'.txt', 'special_chars_&-\'.txt',
             False, True, False, True, True, True),
            # *.log
            ('FileKey1=%s|*.LOG', None, False, True, False, True, True, True),
            # semicolon separates different file types
            ('FileKey1=%s|*.log;*.bak', None,
             False, True, False, True, False, True),
            # *.*
            ('FileKey1=%s|*.*', None, False, True, False, True, False, True),
            # recurse *.*
            ('FileKey1=%s|*.*|RECURSE', None, False,
             True, False, False, False, True),
            # recurse *.log
            ('FileKey1=%s|*.log|RECURSE', None, False,
             True, False, False, True, True),
            # remove self *.*, this removes the directory
            ('FileKey1=%s|*.*|REMOVESELF', None,
             False, False, False, False, False, True),
        ]

        # Add positive detection, where the detection believes the application is present,
        # to all the tests, which are also positive.
        new_tests = []
        for test in tests:
            for detect in (
                "\nDetectFile=%%APPDATA%%\\Microsoft",
                "\nDetectFile1=%%APPDATA%%\\Microsoft\nDetectFile2=%%APPDATA%%\\does_not_exist",
                "\nDetectFile1=%%APPDATA%%\\does_not_exist\nDetectFile2=%%APPDATA%%\\Microsoft",
                "\nDetect=HKCU\\Software\\Microsoft",
                "\nDetect1=HKCU\\Software\\Microsoft\nDetect2=HKCU\\Software\\does_not_exist",
                    "\nDetect1=HKCU\\Software\\does_not_exist\nDetect2=HKCU\\Software\\Microsoft"):
                new_ini = test[0] + detect
                new_test = [new_ini, ] + [x for x in test[1:]]
                new_tests.append(new_test)
        positive_tests = tests + new_tests

        # execute positive tests
        for test in positive_tests:
            print 'positive test: ', test
            (dirname, f1, f2, fbak) = self.setup_fake(test[1])
            cleaner = self.ini2cleaner(test[0] % dirname)
            self.assertEqual(test[2], cleaner.auto_hide())
            self.run_all(cleaner, False)
            self.run_all(cleaner, True)
            self.assertCondExists(test[3], dirname)
            self.assertCondExists(test[4], f1)
            self.assertCondExists(test[5], f2)
            self.assertCondExists(test[6], fbak)
            self.assertEqual(test[7], cleaner.auto_hide())
            shutil.rmtree(dirname, True)

        # negative tests where the application detect believes the application
        # is absent
        for test in tests:
            for detect in (
                "\nDetectFile=c:\\does_not_exist",
                # special characters for XML
                "\nDetectFile=c:\\does_not_exist_special_chars_&'",
                "\nDetectFile1=c:\\does_not_exist1\nDetectFile2=c:\\does_not_exist2",
                "\nDetect=HKCU\\Software\\does_not_exist",
                "\nDetect=HKCU\\Software\\does_not_exist_&'",
                    "\nDetect1=HKCU\\Software\\does_not_exist1\nDetect2=HKCU\\Software\\does_not_exist1"):
                new_ini = test[0] + detect
                t = [new_ini, ] + [x for x in test[1:]]
                print 'negative test', t
                # execute the test
                (dirname, f1, f2, fbak) = self.setup_fake()
                cleaner = self.ini2cleaner(t[0] % dirname, False)
                self.assertRaises(StopIteration, cleaner.next)
                shutil.rmtree(dirname, True)

        # registry key, basic
        (dirname, f1, f2, fbak) = self.setup_fake()
        cleaner = self.ini2cleaner('RegKey1=%s' % keyfull)
        self.run_all(cleaner, False)
        self.assertTrue(detect_registry_key(keyfull))
        self.run_all(cleaner, True)
        self.assertFalse(detect_registry_key(keyfull))
        shutil.rmtree(dirname, True)

        # check for parse error with ampersand
        (dirname, f1, f2, fbak) = self.setup_fake()
        cleaner = self.ini2cleaner(
            'RegKey1=HKCU\\Software\\PeanutButter&Jelly')
        self.run_all(cleaner, False)
        self.run_all(cleaner, True)
        shutil.rmtree(dirname, True)

Example 5

Project: cassandra-dtest
Source File: snapshot_test.py
    def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False, archive_command='cp'):
        """
        Run archive commit log restoration test
        """

        cluster = self.cluster
        cluster.populate(1)
        (node1,) = cluster.nodelist()

        # Create a temp directory for storing commitlog archives:
        tmp_commitlog = safe_mkdtemp()
        debug("tmp_commitlog: " + tmp_commitlog)

        # Edit commitlog_archiving.properties and set an archive
        # command:
        replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                        [(r'^archive_command=.*$', 'archive_command={archive_command} %path {tmp_commitlog}/%name'.format(
                            tmp_commitlog=tmp_commitlog, archive_command=archive_command))])

        cluster.start()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)

        # Write until we get a new CL segment. This avoids replaying
        # initialization mutations from startup into system tables when
        # restoring snapshots. See CASSANDRA-11811.
        advance_to_next_cl_segment(
            session=session,
            commitlog_dir=os.path.join(node1.get_path(), 'commitlogs')
        )

        session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
        debug("Writing first 30,000 rows...")
        self.insert_rows(session, 0, 30000)
        # Record when this first set of inserts finished:
        insert_cutoff_times = [time.gmtime()]

        # Delete all commitlog backups so far:
        for f in glob.glob(tmp_commitlog + "/*"):
            debug('Removing {}'.format(f))
            os.remove(f)

        snapshot_dirs = self.make_snapshot(node1, 'ks', 'cf', 'basic')

        if self.cluster.version() >= '3.0':
            system_ks_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'keyspaces', 'keyspaces')
        else:
            system_ks_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_keyspaces', 'keyspaces')

        if self.cluster.version() >= '3.0':
            system_col_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'columns', 'columns')
        else:
            system_col_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_columns', 'columns')

        if self.cluster.version() >= '3.0':
            system_ut_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'types', 'usertypes')
        else:
            system_ut_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_usertypes', 'usertypes')

        if self.cluster.version() >= '3.0':
            system_cfs_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'tables', 'cfs')
        else:
            system_cfs_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_columnfamilies', 'cfs')

        try:
            # Write more data:
            debug("Writing second 30,000 rows...")
            self.insert_rows(session, 30000, 60000)
            node1.flush()
            time.sleep(10)
            # Record when this second set of inserts finished:
            insert_cutoff_times.append(time.gmtime())

            debug("Writing final 5,000 rows...")
            self.insert_rows(session, 60000, 65000)
            # Record when the third set of inserts finished:
            insert_cutoff_times.append(time.gmtime())

            # Flush so we get an accurate view of commitlogs
            node1.flush()

            rows = session.execute('SELECT count(*) from ks.cf')
            # Make sure we have the same amount of rows as when we snapshotted:
            self.assertEqual(rows[0][0], 65000)

            # Check that there are at least one commit log backed up that
            # is not one of the active commit logs:
            commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
            debug("node1 commitlog dir: " + commitlog_dir)
            debug("node1 commitlog dir contents: " + str(os.listdir(commitlog_dir)))
            debug("tmp_commitlog contents: " + str(os.listdir(tmp_commitlog)))

            self.assertNotEqual(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir)),
                                set())

            cluster.flush()
            cluster.compact()
            node1.drain()

            # Destroy the cluster
            cluster.stop()
            debug("node1 commitlog dir contents after stopping: " + str(os.listdir(commitlog_dir)))
            debug("tmp_commitlog contents after stopping: " + str(os.listdir(tmp_commitlog)))

            self.copy_logs(self.cluster, name=self.id().split(".")[0] + "_pre-restore")
            cleanup_cluster(self.cluster, self.test_path)
            self.test_path = get_test_path()
            cluster = self.cluster = create_ccm_cluster(self.test_path, name='test')
            cluster.populate(1)
            node1, = cluster.nodelist()

            # Restore schema from snapshots:
            for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_ks_snapshot_dir, node1, 'system_schema', 'keyspaces', 'keyspaces')
                else:
                    self.restore_snapshot(system_ks_snapshot_dir, node1, 'system', 'schema_keyspaces', 'keyspaces')
            for system_col_snapshot_dir in system_col_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_col_snapshot_dir, node1, 'system_schema', 'columns', 'columns')
                else:
                    self.restore_snapshot(system_col_snapshot_dir, node1, 'system', 'schema_columns', 'columns')
            for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_ut_snapshot_dir, node1, 'system_schema', 'types', 'usertypes')
                else:
                    self.restore_snapshot(system_ut_snapshot_dir, node1, 'system', 'schema_usertypes', 'usertypes')

            for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system_schema', 'tables', 'cfs')
                else:
                    self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system', 'schema_columnfamilies', 'cfs')
            for snapshot_dir in snapshot_dirs:
                self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf', 'basic')

            cluster.start(wait_for_binary_proto=True)

            session = self.patient_cql_connection(node1)
            node1.nodetool('refresh ks cf')

            rows = session.execute('SELECT count(*) from ks.cf')
            # Make sure we have the same amount of rows as when we snapshotted:
            self.assertEqual(rows[0][0], 30000)

            # Edit commitlog_archiving.properties. Remove the archive
            # command  and set a restore command and restore_directories:
            if restore_archived_commitlog:
                replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                                [(r'^archive_command=.*$', 'archive_command='),
                                 (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
                                 (r'^restore_directories=.*$', 'restore_directories={tmp_commitlog}'.format(
                                     tmp_commitlog=tmp_commitlog))])

                if restore_point_in_time:
                    restore_time = time.strftime("%Y:%m:%d %H:%M:%S", insert_cutoff_times[1])
                    replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                                    [(r'^restore_point_in_time=.*$', 'restore_point_in_time={restore_time}'.format(restore_time=restore_time))])

            debug("Restarting node1..")
            node1.stop()
            node1.start(wait_for_binary_proto=True)

            node1.nodetool('flush')
            node1.nodetool('compact')

            session = self.patient_cql_connection(node1)
            rows = session.execute('SELECT count(*) from ks.cf')
            # Now we should have 30000 rows from the snapshot + 30000 rows
            # from the commitlog backups:
            if not restore_archived_commitlog:
                self.assertEqual(rows[0][0], 30000)
            elif restore_point_in_time:
                self.assertEqual(rows[0][0], 60000)
            else:
                self.assertEqual(rows[0][0], 65000)

        finally:
            # clean up
            debug("removing snapshot_dir: " + ",".join(snapshot_dirs))
            for snapshot_dir in snapshot_dirs:
                shutil.rmtree(snapshot_dir)
            debug("removing snapshot_dir: " + ",".join(system_ks_snapshot_dirs))
            for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                shutil.rmtree(system_ks_snapshot_dir)
            debug("removing snapshot_dir: " + ",".join(system_cfs_snapshot_dirs))
            for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                shutil.rmtree(system_cfs_snapshot_dir)
            debug("removing snapshot_dir: " + ",".join(system_ut_snapshot_dirs))
            for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                shutil.rmtree(system_ut_snapshot_dir)
            debug("removing snapshot_dir: " + ",".join(system_col_snapshot_dirs))
            for system_col_snapshot_dir in system_col_snapshot_dirs:
                shutil.rmtree(system_col_snapshot_dir)

            debug("removing tmp_commitlog: " + tmp_commitlog)
            shutil.rmtree(tmp_commitlog)

Example 6

Project: pymt
Source File: build.py
    def run(self):
        print "---------------------------------"
        print "Building PyMT Portable for OSX"
        print "---------------------------------"

        print "\nPreparing Build..."
        print "---------------------------------------"
        if os.path.exists(self.build_dir):
            print "*Cleaning old build dir"
            shutil.rmtree(self.build_dir, ignore_errors=True)
        print "*Creating build directory:"
        print " "+self.build_dir
        os.makedirs(self.build_dir)


        print "\nGetting binary dependencies..."
        print "---------------------------------------"
        print "*Downloading:", self.deps_url
        #report_hook is called every time a piece of teh file is downloaded to print progress
        def report_hook(block_count, block_size, total_size):
            p = block_count*block_size*100.0/total_size
            print "\b\b\b\b\b\b\b\b\b", "%06.2f"%p +"%",
        print " Progress: 000.00%",
        urlretrieve(self.deps_url, #location of binary dependencioes needed for portable pymt
                    os.path.join(self.build_dir,'deps.zip'), #tmp file to store teh archive
                    reporthook=report_hook)
        print " [Done]"


        print "*Extracting binary dependencies..."
        #using osx sysetm command, becasue python zipfile cant handle the hidden files in teh archive
        Popen(['unzip', os.path.join(self.build_dir,'deps.zip')], cwd=self.build_dir, stdout=PIPE).communicate()

        print "\nPutting pymt into portable environment"
        print "---------------------------------------"
        print "*Building pymt source distribution"
        sdist_cmd = [sys.executable, #path to python.exe
                     os.path.join(self.src_dir,'setup.py'), #path to setup.py
                     'sdist', #make setup.py create a src distribution
                     '--dist-dir=%s'%self.build_dir] #put it into build folder
        Popen(sdist_cmd, stdout=PIPE).communicate()


        print "*Placing pymt source distribution in portable context"
        src_dist = os.path.join(self.build_dir,self.dist_name)
        #using osx sysetm command, becasue python zipfile cant handle the hidden files in teh archive
        Popen(['tar', 'xfv', src_dist+'.tar.gz'], cwd=self.build_dir, stdout=PIPE, stderr=PIPE).communicate()
        if self.no_cext:
            print "*Skipping C Extension build (either --no_cext or --no_mingw option set)"
        else:
            print "*Compiling C Extensions inplace for portable distribution"
            cext_cmd = [sys.executable, #path to python.exe
                        'setup.py',
                        'build_ext', #make setup.py create a src distribution
                        '--inplace'] #do it inplace
            #this time it runs teh setup.py inside the source distribution
            #thats has been generated inside the build dir (to generate ext
            #for teh target, instead of the source were building from)
            Popen(cext_cmd, cwd=src_dist, stdout=PIPE, stderr=PIPE).communicate()



        print "\nFinalizing Application Bundle"
        print "---------------------------------------"
        print "*Copying launcher script into the app bundle"
        script_target = os.path.join(self.build_dir, 'portable-deps-osx', 'PyMT.app', 'Contents', 'Resources', 'script')
        script = os.path.join(src_dist,'pymt','tools','packaging','osx', 'pymt.sh')
        shutil.copy(script, script_target)

        print "*Moving examples out of app bundle to be included in disk image"
        examples_target = os.path.join(self.build_dir, 'portable-deps-osx', 'examples')
        examples = os.path.join(src_dist,'examples')
        shutil.move(examples, examples_target)

        print "*Moving newly build pymt distribution into app bundle"
        pymt_target = os.path.join(self.build_dir, 'portable-deps-osx', 'PyMT.app', 'Contents', 'Resources', 'pymt')
        shutil.move(src_dist, pymt_target)

        print "*Removing intermediate file"
        os.remove(os.path.join(self.build_dir,'deps.zip'))
        os.remove(os.path.join(self.build_dir,src_dist+'.tar.gz'))
        shutil.rmtree(os.path.join(self.build_dir,'__MACOSX'), ignore_errors=True)


        #contents of portable-deps-osx, are now ready to go into teh disk image
        dmg_dir = os.path.join(self.build_dir, 'portable-deps-osx')
        vol_name = "PyMT"

        print "\nCreating disk image for distribution"
        print "---------------------------------------"
        print "\nCreating intermediate DMG disk image: temp.dmg"
        print "*checking how much space is needed for disk image..."
        du_cmd = 'du -sh %s'%dmg_dir
        du_out = Popen(shlex.split(du_cmd), stdout=PIPE).communicate()[0]
        size, unit = re.search('(\d+)(.*)\s+/.*', du_out).group(1,2)
        print "  build needs at least %s%s." % (size, unit)

        size = int(size)+10
        print "*allocating %d%s for temp.dmg (volume name:%s)" % (size, unit, vol_name)
        create_dmg_cmd = 'hdiutil create -srcfolder %s -volname %s -fs HFS+ \
                         -fsargs "-c c=64,a=16,e=16" -format UDRW -size %d%s temp.dmg' \
                         % (dmg_dir, vol_name, size+10, unit)
        Popen(shlex.split(create_dmg_cmd), cwd=self.build_dir).communicate()

        print "*mounting intermediate disk image:"
        mount_cmd = 'hdiutil attach -readwrite -noverify -noautoopen "temp.dmg"'
        Popen(shlex.split(mount_cmd), cwd=self.build_dir, stdout=PIPE).communicate()

        print "*running Apple Script to configure DMG layout properties:"
        dmg_config_script = """
           tell application "Finder"
             tell disk "%s"
                   open

                   set current view of container window to icon view
                   set toolbar visible of container window to false
                   set statusbar visible of container window to false
                   set the bounds of container window to {300,100,942,582}
                   set theViewOptions to the icon view options of container window
                   set arrangement of theViewOptions to not arranged
                   set icon size of theViewOptions to 72
                   set background picture of theViewOptions to file ".background:pymtdmg.png"
                   make new alias file at container window to POSIX file "/Applications" with properties {name:"Applications"}
                   set position of item "PyMT" of container window to {150, 130}
                   set position of item "Applications" of container window to {500, 130}
                   set position of item "examples" of container window to {575, 400}
                   set position of item "Readme.txt" of container window to {475, 400}
                   set position of item "make-symlinks" of container window to {375, 400}
                   close
                   open
                   update without registering applications
                   delay 2
                   eject
             end tell
           end tell
        """ % vol_name
        print Popen(['osascript'], cwd=self.build_dir, stdin=PIPE, stdout=PIPE).communicate(dmg_config_script)[0]


        print "\nCreating final disk image"

        print "*unmounting intermediate disk image"
        umount_cmd = 'hdiutil detach /Volumes/%s' % vol_name
        Popen(shlex.split(umount_cmd), cwd=self.build_dir, stdout=PIPE).communicate()

        print "*compressing and finalizing disk image"
        convert_cmd = 'hdiutil convert "temp.dmg" -format UDZO -imagekey zlib-level=9 -o %s.dmg' % os.path.join(self.dist_dir,vol_name)
        Popen(shlex.split(convert_cmd), cwd=self.build_dir, stdout=PIPE).communicate()

        print "*Writing disk image, and cleaning build directory"
        shutil.rmtree(self.build_dir, ignore_errors=True)

Example 7

Project: rPGA
Source File: mapping.py
def main(args):
  ## main function for mapping reads to personal genomes
  
  helpStr = "Help!\n"
  if not args.o:
    sys.stderr.write('rPGA2 ERROR: must provide output directory \n\n')
    sys.exit()
  if not args.s:
    sys.stderr.write('rPGA ERROR: must provide read sequence files \n\n')
    sys.exit()
#  if not args.g:
#    sys.stderr.write('rPGA ERROR: must provide gtf file\n')
  

  command = args.command
  if args.T:
    threads = int(args.T)
  else:
    threads = 8

  if args.N:
    mismatches = int(args.N)
  else:
    mismatches = 3

  if args.M:
    multimapped = int(args.M)
  else:
    multimapped = 20

  gzipped = args.gz
  outDir = args.o
  nmask = args.nmask
  if not os.path.exists(outDir):
    os.makedirs(outDir)
  ref = args.r
  vcf = args.v
  gtf = args.g
  hap = args.hap
  if args.r1:
    hap1Ref = args.r1
  else:
    hap1Ref = os.path.join(outDir, "hap1.fa")
  if args.r2:
    hap2Ref = args.r2
  else:
    hap2Ref = os.path.join(outDir, "hap2.fa")
  nRef = os.path.join(outDir, "mask.fa")  
  if args.readlength:
    readlength = int(args.readlength)-1
  else:
    readlength = 99
  if len(command)>1:
    sys.stderr.write(helpStr + "\n\n")
    sys.exit()

  seqs = ' '.join((args.s).split(','))
  if len((args.s).split(','))==0 or len((args.s).split(','))>2:
    sys.stderr.write("ERROR: Sequence parameter -s input is  not correct\n Example: rPGA run mappng alleles -s reads_1.fq,reads_2.fq -o rPGA\n")
    sys.exit()
  
  
  if nmask:
    if not os.path.exists(os.path.join(outDir,"MASK/STARindex")):
      os.makedirs(os.path.join(outDir, "MASK/STARindex"))
    if not os.path.exists(os.path.join(outDir,"MASK/STARalign")):
      os.makedirs(os.path.join(outDir, "MASK/STARalign"))
  ##create genome index
    STAR_create_genome(outDir, nRef, "MASK",threads,gtf,readlength)
    genomeDir = outDir + '/MASK/STARindex'
  # map reads
    STAR_perform_mapping(genomeDir,outDir, "MASK", seqs,threads,mismatches,gzipped,multimapped)   
  ## sort bam file
    sam_to_sorted_bam(os.path.join(outDir,'MASK/STARalign/Aligned.out'))
  ## remove unnecessary files
    os.remove(os.path.join(outDir,'MASK/STARalign/Aligned.out.bam'))
    os.remove(os.path.join(outDir,'MASK/STARalign/SJ.out.tab'))
    shutil.rmtree(os.path.join(outDir, "MASK/STARindex"))

  elif hap: # map to hap and hap2 personal genomes
    if not os.path.exists(os.path.join(outDir, "HAP1/STARindex")):
      os.makedirs(os.path.join(outDir, "HAP1/STARindex"))
    if not os.path.exists(os.path.join(outDir, "HAP2/STARindex")):
      os.makedirs(os.path.join(outDir, "HAP2/STARindex"))
    if not os.path.exists(os.path.join(outDir, "HAP1/STARalign")):
      os.makedirs(os.path.join(outDir, "HAP1/STARalign"))
    if not os.path.exists(os.path.join(outDir, "HAP2/STARalign")):
      os.makedirs(os.path.join(outDir, "HAP2/STARalign"))
    if not args.genomedir:
      if not args.g:
        sys.stderr.write('rPGA ERROR: must provide gtf file\n') 
        sys.exit()
      STAR_create_genome(outDir, hap1Ref, "HAP1",threads,gtf,readlength)
      STAR_create_genome(outDir, hap2Ref, "HAP2",threads,gtf,readlength)
      genomeDir1 = os.path.join(outDir, 'HAP1/STARindex')
      genomeDir2 = os.path.join(outDir, 'HAP2/STARindex')
    else:
      genomeDir1, genomeDir2 = (args.genomedir).split(',')
    STAR_perform_mapping(genomeDir1, outDir, "HAP1", seqs,threads,mismatches,gzipped,multimapped)
    STAR_perform_mapping(genomeDir2, outDir, "HAP2", seqs,threads,mismatches,gzipped,multimapped)
    sam_to_sorted_bam(os.path.join(outDir,'HAP1/STARalign/Aligned.out'))
    sam_to_sorted_bam(os.path.join(outDir,'HAP2/STARalign/Aligned.out'))
    os.remove(os.path.join(outDir,'HAP1/STARalign/Aligned.out.bam'))
    os.remove(os.path.join(outDir,'HAP2/STARalign/Aligned.out.bam'))
    os.remove(os.path.join(outDir,'HAP1/STARalign/SJ.out.tab'))
    os.remove(os.path.join(outDir,'HAP2/STARalign/SJ.out.tab'))
    shutil.rmtree(os.path.join(outDir, "HAP1/STARindex"))
    shutil.rmtree(os.path.join(outDir, "HAP2/STARindex"))
  else:
    if not args.r:
      sys.stderr.write("ERROR: rPGA run mapping command requires -r parameter \nExample: rPGA run mapping -r reference.fa -s reads_1.fq,reads_.fq -o rPGA \n")
      sys.exit()
    
    if not os.path.exists(os.path.join(outDir, "HAP1/STARindex")):
      os.makedirs(os.path.join(outDir, "HAP1/STARindex"))
    if not os.path.exists(os.path.join(outDir, "HAP2/STARindex")):
      os.makedirs(os.path.join(outDir, "HAP2/STARindex"))
    if not os.path.exists(os.path.join(outDir, "REF/STARindex")):
      os.makedirs(os.path.join(outDir, "REF/STARindex"))
    if not os.path.exists(os.path.join(outDir, "HAP1/STARalign")):
      os.makedirs(os.path.join(outDir, "HAP1/STARalign"))
    if not os.path.exists(os.path.join(outDir, "HAP2/STARalign")):
      os.makedirs(os.path.join(outDir, "HAP2/STARalign"))
    if not os.path.exists(os.path.join(outDir, "REF/STARalign")):
      os.makedirs(os.path.join(outDir, "REF/STARalign"))

    print "creating STAR genome indicies"
    if not args.genomedir:
      STAR_create_genome(outDir, ref, "REF",threads,gtf,readlength)
      STAR_create_genome(outDir, hap1Ref, "HAP1",threads,gtf,readlength)
      STAR_create_genome(outDir, hap2Ref, "HAP2",threads,gtf,readlength)

    print "perform STAR mapping"
    if args.genomedir:
      genomeDir1, genomeDir2, genomeDirR = (args.genomedir).split(',')
    else:
      genomeDir1 = os.path.join(outDir, 'HAP1/STARindex')
      genomeDir2 = os.path.join(outDir, 'HAP2/STARindex')
      genomeDirR = os.path.join(outDir, 'REF/STARindex')
    if not args.g:
      sys.stderr.write('rPGA ERROR: must provide gtf file\n')
      sys.exit()
    STAR_create_genome(outDir, hap1Ref, "HAP1",threads,gtf,readlength)
    STAR_create_genome(outDir, hap2Ref, "HAP2",threads,gtf,readlength)
    STAR_create_genome(outDir, ref, "REF",threads,gtf,readlength)
    STAR_perform_mapping(genomeDir1, outDir, "HAP1", seqs,threads,mismatches,gzipped,multimapped)
    STAR_perform_mapping(genomeDir2, outDir, "HAP2", seqs,threads,mismatches,gzipped,multimapped)
    STAR_perform_mapping(genomeDirR, outDir, "REF", seqs,threads,mismatches,gzipped,multimapped)
    sam_to_sorted_bam(os.path.join(outDir,'HAP1/STARalign/Aligned.out'))
    sam_to_sorted_bam(os.path.join(outDir,'HAP2/STARalign/Aligned.out'))
    sam_to_sorted_bam(os.path.join(outDir,'REF/STARalign/Aligned.out'))
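
The mapping step above finishes by deleting the per-genome STARindex directories with shutil.rmtree once the sorted BAM files exist, so only the alignments are kept. A minimal sketch of that cleanup step using just the standard library; the directory layout mirrors the example, but the helper name is illustrative:

import os
import shutil

def remove_star_index(out_dir, subdir):
    """Delete a STARindex directory once its alignments have been saved."""
    index_dir = os.path.join(out_dir, subdir, "STARindex")
    if os.path.isdir(index_dir):
        shutil.rmtree(index_dir)

# e.g. after sam_to_sorted_bam() has produced the sorted BAMs:
# remove_star_index(outDir, "HAP1")
# remove_star_index(outDir, "HAP2")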

Example 8

Project: zorna
Source File: views.py
View license
@login_required()
def edit_story(request, story):
    allowed_objects = get_allowed_objects(
            request.user, ArticleCategory, 'manager')
    try:
        story = ArticleStory.objects.select_related().get(pk=story)
        categories = story.categories.all()
        intersect = set(allowed_objects).intersection( set([category.pk for category in categories]))
        if story.owner != request.user and not intersect:
            return HttpResponseRedirect('/')
    except Exception as e:
        print e
        return HttpResponseRedirect('/')

    attachments = story.articleattachments_set.all()
    if request.method == 'POST':
        if 'bdelstory' in request.POST:
            story.articleattachments_set.all().delete()
            pk = story.pk
            story.delete()
            try:
                shutil.rmtree(u"%s/%s" % (get_upload_articles_images(), pk))
            except:
                pass
            try:
                shutil.rmtree(u"%s/%s" % (get_upload_articles_files(), pk))
            except:
                pass
            return HttpResponseRedirect(reverse('writer_stories_list', args=[]))

        form_story = ArticleStoryForm(
            request.POST, request.FILES, instance=story, request=request)
        if form_story.is_valid():
            if 'selected_image' in request.POST:
                story.image.delete()
                try:
                    shutil.rmtree(u"%s/%s" % (get_upload_articles_images(), story.pk))
                except:
                    pass
            if 'image' in request.FILES:
                story.mimetype = request.FILES['image'].content_type
            else:
                image_file = None

            story.modifier = request.user
            story.save()

            story.categories.clear()
            selected_categories = request.POST.getlist('_selected_action')
            story.categories = selected_categories

        form_story = ArticleStoryForm(instance=story, request=request)

        if len(attachments) < 2:
            fa_set = formset_factory(
                ArticleAttachmentsForm, extra=2 - len(attachments))
            form_attachments_set = fa_set(request.POST, request.FILES)
            if form_attachments_set.is_valid():
                for i in range(0, form_attachments_set.total_form_count()):
                    form = form_attachments_set.forms[i]
                    try:
                        file = request.FILES['form-' + str(
                            i) + '-attached_file']
                        attachment = ArticleAttachments(description=form.cleaned_data[
                                                        'description'], mimetype=file.content_type)
                        attachment.article = story
                        attachment.save()
                        attachment.attached_file.save(file.name, file)
                    except:
                        pass

        if 'selected_attachments' in request.POST:
            att = request.POST.getlist('selected_attachments')
            ArticleAttachments.objects.filter(pk__in=att).delete()
        attachments = story.articleattachments_set.all()
        extra = len(attachments)
        if extra < 2:
            fa_set = formset_factory(ArticleAttachmentsForm, extra=2 - extra)
            form_attachments_set = fa_set()
        else:
            form_attachments_set = None

        tags = map(int, request.POST.getlist('article_tags[]'))
        story.tags.clear()
        tags = ArticleTags.objects.filter(pk__in=tags)
        story.tags.add(*tags)
        if story.categories:
            notify_users(request, story, story.categories.all(), False)

    else:
        form_story = ArticleStoryForm(instance=story, request=request)
        extra = len(attachments)
        if extra < 2:
            fa_set = formset_factory(ArticleAttachmentsForm, extra=2 - extra)
            form_attachments_set = fa_set()
        else:
            form_attachments_set = None

    tags = ArticleTags.objects.all()
    story_tags = story.tags.all()
    for tag in tags:
        if tag in story_tags:
            tag.checked = True
    context = RequestContext(request)
    extra_context = {'form_story': form_story,
                     'story': story,
                     'tags': tags,
                     'form_attachments': form_attachments_set,
                     'attachments': attachments,
                     'categories': [c.pk for c in categories],
                     }
    return render_to_response('articles/edit_article.html', extra_context, context_instance=context)
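
Example 8 wraps each shutil.rmtree call in a bare try/except so a missing or already-deleted upload directory does not abort the request. shutil.rmtree(path, ignore_errors=True) expresses the same best-effort intent in a single call. A small sketch, with a hypothetical upload_root argument standing in for get_upload_articles_images() or get_upload_articles_files():

import os
import shutil

def remove_story_uploads(upload_root, story_pk):
    """Best-effort removal of a story's upload directory; a missing path is not an error."""
    shutil.rmtree(os.path.join(upload_root, str(story_pk)), ignore_errors=True)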

Example 9

Project: jasy
Source File: Git.py
View license
def update(url, version, path, update=True, submodules=True):
    """Clones the given repository URL (optionally with overriding/update features)"""

    # Prepend git+ so that user knows that we identified the URL as git repository
    if not url.startswith("git+"):
        url = "git+%s" % url

    old = os.getcwd()

    if os.path.exists(path) and os.path.exists(os.path.join(path, ".git")):
        
        if not os.path.exists(os.path.join(path, ".git", "HEAD")):
            Console.error("Invalid git clone. Cleaning up...")
            shutil.rmtree(path)

        else:
            os.chdir(path)
            revision = executeCommand(["git", "rev-parse", "HEAD"], "Could not detect current revision")
            
            if version == "master" or "refs/heads/" in version:
                if update:
                    Console.info("Updating %s", Console.colorize("%s @ " % url, "bold") + Console.colorize(version, "magenta"))
                    Console.indent()
                    
                    try:
                        executeCommand(["git", "fetch", "-q", "--depth", "1", "origin", version], "Could not fetch updated revision!")
                        executeCommand(["git", "reset", "-q", "--hard", "FETCH_HEAD"], "Could not update checkout!")
                        newRevision = executeCommand(["git", "rev-parse", "HEAD"], "Could not detect current revision")
                        
                        if revision != newRevision:
                            Console.info("Updated from %s to %s", revision[:10], newRevision[:10])
                            revision = newRevision

                            if submodules and os.path.exists(".gitmodules"):
                                Console.info("Updating sub modules (this might take some time)...")
                                executeCommand("git submodule update --recursive", "Could not initialize sub modules")

                    except Exception:
                        Console.error("Error during git transaction! Could not update clone.")
                        Console.error("Please verify that the host is reachable or disable automatic branch updates.")
                        Console.outdent()

                        os.chdir(old)
                        return
                        
                    except KeyboardInterrupt:
                        print()
                        Console.error("Git transaction was aborted by user!")
                        Console.outdent()
                        
                        os.chdir(old)
                        return                            

                    Console.outdent()
                    
                else:
                    Console.debug("Updates disabled")
                
            else:
                Console.debug("Using existing clone")

            os.chdir(old)
            return revision

    Console.info("Cloning %s", Console.colorize("%s @ " % url, "bold") + Console.colorize(version, "magenta"))
    Console.indent()

    os.makedirs(path)
    os.chdir(path)
    
    try:
        # cut of "git+" prefix
        remoteurl = url[4:]

        executeCommand(["git", "init", "."], "Could not initialize GIT repository!")
        executeCommand(["git", "remote", "add", "origin", remoteurl], "Could not register remote repository!")
        executeCommand(["git", "fetch", "-q", "--depth", "1", "origin", version], "Could not fetch revision!")
        executeCommand(["git", "reset", "-q", "--hard", "FETCH_HEAD"], "Could not update checkout!")
        revision = executeCommand(["git", "rev-parse", "HEAD"], "Could not detect current revision")

        if submodules and os.path.exists(".gitmodules"):
            Console.info("Updating sub modules (this might take some time)...")
            executeCommand("git submodule update --init --recursive", "Could not initialize sub modules")
        
    except Exception:
        Console.error("Error during git transaction! Intitial clone required for continuing!")
        Console.error("Please verify that the host is reachable.")

        Console.error("Cleaning up...")
        os.chdir(old)
        shutil.rmtree(path)

        Console.outdent()
        return
        
    except KeyboardInterrupt:
        print()
        Console.error("Git transaction was aborted by user!")
        
        Console.error("Cleaning up...")
        os.chdir(old)
        shutil.rmtree(path)

        Console.outdent()
        return
    
    os.chdir(old)
    Console.outdent()

    return revision
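
In Example 9, shutil.rmtree plays two roles: it discards an invalid existing clone before re-cloning, and it rolls back a half-finished checkout when the fetch fails or the user interrupts it. A compact sketch of the rollback pattern, assuming a plain git clone rather than jasy's executeCommand helper:

import shutil
import subprocess

def clone_or_cleanup(url, path):
    """Clone url into path; remove the partial checkout if anything goes wrong."""
    try:
        subprocess.check_call(["git", "clone", "--depth", "1", url, path])
    except (Exception, KeyboardInterrupt):
        # Remove whatever git managed to create before failing.
        shutil.rmtree(path, ignore_errors=True)
        raise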

Example 10

Project: faf
Source File: core.py
View license
    def retrace(self, db, task):
        new_symbols = {}
        new_symbolsources = {}

        for bin_pkg, db_ssources in task.binary_packages.items():
            self.log_info("Retracing symbols from package {0}"
                          .format(bin_pkg.nvra))

            i = 0
            for db_ssource in db_ssources:
                i += 1

                self.log_debug("[{0} / {1}] Processing '{2}' @ '{3}'"
                               .format(i, len(db_ssources),
                                       ssource2funcname(db_ssource),
                                       db_ssource.path))

                norm_path = get_libname(db_ssource.path)
                binary = os.path.join(bin_pkg.unpacked_path,
                                      db_ssource.path[1:])

                try:
                    address = get_base_address(binary) + db_ssource.offset
                except FafError as ex:
                    self.log_debug("get_base_address failed: {0}"
                                   .format(str(ex)))
                    db_ssource.retrace_fail_count += 1
                    continue

                try:
                    debug_path = os.path.join(task.debuginfo.unpacked_path,
                                              "usr", "lib", "debug")
                    results = addr2line(binary, address, debug_path)
                    results.reverse()
                except Exception as ex:
                    self.log_debug("addr2line failed: {0}".format(str(ex)))
                    db_ssource.retrace_fail_count += 1
                    continue

                inl_id = 0
                while len(results) > 1:
                    inl_id += 1

                    funcname, srcfile, srcline = results.pop()
                    self.log_debug("Unwinding inlined function '{0}'"
                                   .format(funcname))
                    # hack - we have no offset for inlined symbols
                    # let's use minus source line to avoid collisions
                    offset = -srcline

                    db_ssource_inl = get_ssource_by_bpo(db, db_ssource.build_id,
                                                        db_ssource.path, offset)
                    if db_ssource_inl is None:
                        key = (db_ssource.build_id, db_ssource.path, offset)
                        if key in new_symbolsources:
                            db_ssource_inl = new_symbolsources[key]
                        else:
                            db_symbol_inl = get_symbol_by_name_path(db,
                                                                    funcname,
                                                                    norm_path)
                            if db_symbol_inl is None:
                                sym_key = (funcname, norm_path)
                                if sym_key in new_symbols:
                                    db_symbol_inl = new_symbols[sym_key]
                                else:
                                    db_symbol_inl = Symbol()
                                    db_symbol_inl.name = funcname
                                    db_symbol_inl.normalized_path = norm_path
                                    db.session.add(db_symbol_inl)
                                    new_symbols[sym_key] = db_symbol_inl

                            db_ssource_inl = SymbolSource()
                            db_ssource_inl.symbol = db_symbol_inl
                            db_ssource_inl.build_id = db_ssource.build_id
                            db_ssource_inl.path = db_ssource.path
                            db_ssource_inl.offset = offset
                            db_ssource_inl.source_path = srcfile
                            db_ssource_inl.line_number = srcline
                            db.session.add(db_ssource_inl)
                            new_symbolsources[key] = db_ssource_inl

                    for db_frame in db_ssource.frames:
                        db_frames = sorted(db_frame.thread.frames,
                                           key=lambda f: f.order)
                        idx = db_frames.index(db_frame)
                        if idx > 0:
                            prevframe = db_frame.thread.frames[idx - 1]
                            if (prevframe.inlined and
                                    prevframe.symbolsource == db_ssource_inl):

                                continue

                        db_newframe = ReportBtFrame()
                        db_newframe.symbolsource = db_ssource_inl
                        db_newframe.thread = db_frame.thread
                        db_newframe.inlined = True
                        db_newframe.order = db_frame.order - inl_id
                        db.session.add(db_newframe)

                funcname, srcfile, srcline = results.pop()
                self.log_debug("Result: {0}".format(funcname))
                db_symbol = get_symbol_by_name_path(db, funcname, norm_path)
                if db_symbol is None:
                    key = (funcname, norm_path)
                    if key in new_symbols:
                        db_symbol = new_symbols[key]
                    else:
                        self.log_debug("Creating new symbol '{0}' @ '{1}'"
                                       .format(funcname, db_ssource.path))
                        db_symbol = Symbol()
                        db_symbol.name = funcname
                        db_symbol.normalized_path = norm_path
                        db.session.add(db_symbol)

                        new_symbols[key] = db_symbol

                if db_symbol.nice_name is None:
                    db_symbol.nice_name = demangle(funcname)

                db_ssource.symbol = db_symbol
                db_ssource.source_path = srcfile
                db_ssource.line_number = srcline

        if task.debuginfo.unpacked_path is not None:
            self.log_debug("Removing {0}".format(task.debuginfo.unpacked_path))
            shutil.rmtree(task.debuginfo.unpacked_path, ignore_errors=True)

        if task.source is not None and task.source.unpacked_path is not None:
            self.log_debug("Removing {0}".format(task.source.unpacked_path))
            shutil.rmtree(task.source.unpacked_path, ignore_errors=True)

        for bin_pkg in task.binary_packages.keys():
            if bin_pkg.unpacked_path is not None:
                self.log_debug("Removing {0}".format(bin_pkg.unpacked_path))
                shutil.rmtree(bin_pkg.unpacked_path, ignore_errors=True)
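
Example 10 finishes the retrace by removing the unpacked debuginfo, source and binary package trees with ignore_errors=True, after first checking that an unpacked_path was actually set. A minimal sketch of that guard-then-remove pattern; the list of paths is illustrative:

import shutil

def cleanup_unpacked(unpacked_paths):
    """Remove the unpacked package directories that were actually created."""
    for path in unpacked_paths:
        if path is not None:
            shutil.rmtree(path, ignore_errors=True)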

Example 11

Project: vent
Source File: plugin_parser.py
View license
def add_plugins(path_dirs, plugin_url, user=None, pw=None):
    try:
        if not ".git" in plugin_url:
            plugin_url = plugin_url + ".git"
        plugin_name = plugin_url.split("/")[-1].split(".git")[0]
        if plugin_name == "":
            print("No plugins added, url is not formatted correctly")
            print("Please use a git url, e.g. https://github.com/CyberReboot/vent-plugins.git")
            return
        # check to see if plugin already exists in filesystem
        if os.path.isdir(path_dirs.plugin_repos+"/"+plugin_name):
            print(plugin_name+" already exists. Not installing.")
            return
        os.system("git config --global http.sslVerify false")
        if not user and not pw:
            os.system("cd "+path_dirs.plugin_repos+"/ && git clone --recursive "+plugin_url)
        else:
            new_plugin_url = plugin_url.split("https://")[-1]
            os.system("cd "+path_dirs.plugin_repos+"/ && git clone --recursive https://"+user+":"+pw+"@"+new_plugin_url)
        # check to see if repo was cloned correctly
        if not os.path.isdir(path_dirs.plugin_repos+"/"+plugin_name):
            print(plugin_name+" did not install. Is this a git repository?")
            return

        subdirs = [x[0] for x in os.walk(path_dirs.plugin_repos+"/"+plugin_name)]
        check_modes = True
        for subdir in subdirs:
            try:
                if subdir.startswith(path_dirs.plugin_repos+"/"+plugin_name+"/collectors/"):
                    recdir = subdir.split(path_dirs.plugin_repos+"/"+plugin_name+"/collectors/")[-1]
                    # only go one level deep, and copy recursively below that
                    if not "/" in recdir:
                        dest = path_dirs.collectors_dir+"/"+recdir
                        if os.path.exists(dest):
                            shutil.rmtree(dest)
                        shutil.copytree(subdir, dest)
                elif subdir.startswith(path_dirs.plugin_repos+"/"+plugin_name+"/plugins/"):
                    recdir = subdir.split(path_dirs.plugin_repos+"/"+plugin_name+"/plugins/")[-1]
                    # only go one level deep, and copy recursively below that
                    if not "/" in recdir:
                        dest = path_dirs.plugins_dir+"/"+recdir
                        if os.path.exists(dest):
                            shutil.rmtree(dest)
                        shutil.copytree(subdir, dest)
                    else:
                        # makes sure that every namespace has a corresponding template file
                        namespace = recdir.split("/")[0]
                        if not os.path.isfile(path_dirs.plugin_repos+"/"+plugin_name+"/templates/"+namespace+".template"):
                            print("Warning! Plugin namespace has no template. Not installing "+namespace)
                            shutil.rmtree(path_dirs.plugins_dir+namespace)
                elif subdir.startswith(path_dirs.plugin_repos+"/"+plugin_name+"/visualization/"):
                    recdir = subdir.split(path_dirs.plugin_repos+"/"+plugin_name+"/visualization/")[-1]
                    # only go one level deep, and copy recursively below that
                    if not "/" in recdir:
                        dest = path_dirs.vis_dir+"/"+recdir
                        if os.path.exists(dest):
                            shutil.rmtree(dest)
                        shutil.copytree(subdir, dest)
                # elif subdir == path_dirs.plugin_repos+"/"+plugin_name+"/visualization":
                #     # only files, not dirs
                #     dest = path_dirs.vis_dir+"/"
                #     for (dirpath, dirnames, filenames) in os.walk(subdir):
                #         for filename in filenames:
                #             shutil.copyfile(subdir+"/"+filename, dest+filename)
                elif subdir == path_dirs.plugin_repos+"/"+plugin_name+"/templates":
                    # only files, not dirs
                    dest = path_dirs.template_dir
                    for (dirpath, dirnames, filenames) in os.walk(subdir):
                        for filename in filenames:
                            if filename == "modes.template":
                                check_modes = False
                                shutil.copyfile(subdir+"/"+filename, dest+filename)
                            elif filename == "collectors.template":
                                shutil.copyfile(subdir+"/"+filename, dest+filename)
                            elif filename == "visualization.template":
                                shutil.copyfile(subdir+"/"+filename, dest+filename)
                            elif filename == "core.template":
                                read_config = ConfigParser.RawConfigParser()
                                # needed to preserve case sensitive options
                                read_config.optionxform=str
                                read_config.read(path_dirs.template_dir + 'core.template')
                                write_config = ConfigParser.RawConfigParser()
                                # needed to preserve case sensitive options
                                write_config.optionxform=str
                                write_config.read(subdir+"/"+filename)
                                write_sections = write_config.sections()
                                for section in write_sections:
                                    read_config.remove_section(section)
                                    read_config.add_section(section)
                                    recdir = path_dirs.plugin_repos+"/"+plugin_name+"/core/"+section
                                    dest1 = path_dirs.core_dir+"/"+section
                                    if os.path.exists(dest1):
                                        shutil.rmtree(dest1)
                                    shutil.copytree(recdir, dest1)
                                with open(path_dirs.template_dir + 'core.template', 'w') as configfile:
                                    read_config.write(configfile)
                            else:
                                # makes sure that every template file has a corresponding namespace in filesystem
                                namespace = filename.split(".")[0]
                                if os.path.isdir(path_dirs.plugin_repos+"/"+plugin_name+"/plugins/"+namespace):
                                    shutil.copyfile(subdir+"/"+filename, dest+filename)
                                else:
                                    print("Warning! Plugin template with no corresponding plugins to install. Not installing "+namespace+".template")
                                    os.remove(path_dirs.plugin_repos+"/"+plugin_name+"/templates/"+filename)
            except Exception as e:
                pass
        # update modes.template if it wasn't copied up to include new plugins
        if check_modes:
            files = [x[2] for x in os.walk(path_dirs.base_dir + "templates")][0]
            config = ConfigParser.RawConfigParser()
            # needed to preserve case sensitive options
            config.optionxform=str
            config.read(path_dirs.template_dir + 'modes.template')
            plugin_array = config.options("plugins")
            plugins = {}
            for f in files:
                f_name = f.split(".template")[0]
                if f_name != "README.md" and not f_name in plugin_array and f_name != "modes" and (os.path.isdir(path_dirs.plugins_dir+f_name) or os.path.isdir(path_dirs.base_dir+f_name)):
                    config.set("plugins", f_name, "all")
            with open(path_dirs.template_dir + 'modes.template', 'w') as configfile:
                config.write(configfile)
        # check if files copied over correctly
        for subdir in subdirs:
            if os.path.isdir(subdir):
                directory = subdir.split(path_dirs.plugin_repos+"/"+plugin_name+"/")[0]
                if subdir == path_dirs.plugin_repos+"/"+plugin_name:
                    continue
                if not os.path.isdir(path_dirs.base_dir + directory):
                    print("Failed to install "+plugin_name+" resource: "+directory)
                    os.system("sudo rm -rf "+path_dirs.plugin_repos+"/"+plugin_name)
                    return
        # resources installed correctly. Building...
        os.system("/bin/sh "+path_dirs.scripts_dir+"build_images.sh --basedir "+path_dirs.base_dir[:-1])
    except Exception as e:
        pass
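
Several branches in Example 11 call shutil.rmtree on the destination right before shutil.copytree, because copytree (prior to Python 3.8) refuses to write into a directory that already exists. A short sketch of that replace-the-tree pattern; on Python 3.8+ copytree(..., dirs_exist_ok=True) can merge into an existing directory instead:

import os
import shutil

def replace_tree(src, dest):
    """Copy src to dest, discarding whatever was previously at dest."""
    if os.path.exists(dest):
        shutil.rmtree(dest)
    shutil.copytree(src, dest)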

Example 12

Project: cstar_perf
Source File: client.py
View license
    def perform_job(self, job):
        """Perform a job the server gave us, stream output and artifacts to the given websocket."""
        job = copy.deepcopy(job['test_definition'])
        # Cleanup the job structure according to what stress_compare needs:
        for operation in job['operations']:
            operation['type'] = operation['operation']
            del operation['operation']

        job_dir = os.path.join(os.path.expanduser('~'),'.cstar_perf','jobs',job['test_id'])
        mkpath(job_dir)
        stats_path = os.path.join(job_dir,'stats.{test_id}.json'.format(test_id=job['test_id']))
        summary_path = os.path.join(job_dir,'stats_summary.{test_id}.json'.format(test_id=job['test_id']))
        stress_log_path = os.path.join(job_dir,'stress_compare.{test_id}.log'.format(test_id=job['test_id']))

        stress_json = json.dumps(dict(revisions=job['revisions'],
                                      operations=job['operations'],
                                      title=job['title'],
                                      leave_data=job.get('leave_data', False),
                                      log=stats_path))

        # Create a temporary location to store the stress_compare json file:
        stress_json_path = os.path.join(job_dir, 'test.{test_id}.json'.format(test_id=job['test_id']))
        with open(stress_json_path, 'w') as f:
            f.write(stress_json)

        # Inform the server we will be streaming the console output to them:
        command = Command.new(self.__ws_client.socket(), action='stream', test_id=job['test_id'],
                              kind='console', name="stress_compare.{test_id}.log".format(test_id=job['test_id']),
                              eof=EOF_MARKER, keepalive=KEEPALIVE_MARKER)
        response = self.__ws_client.send(command, assertions={'message':'ready'})

        # Start a status checking thread.
        # If a user cancels the job after it's marked in_progress, we
        # need to periodically check for that state change and kill
        # our test:
        cancel_checker = JobCancellationTracker(urlparse.urlparse(self.ws_endpoint).netloc, job['test_id'])
        cancel_checker.start()

        # stats file observer
        # looks for changes to update server with status progress message
        observer = Observer()
        observer.schedule(UpdateServerProgressMessageHandler(job, urlparse.urlparse(self.ws_endpoint).netloc),
                          os.path.join(os.path.expanduser("~"), '.cstar_perf', 'jobs'),
                          recursive=True)
        observer.start()

        # Run stress_compare in a separate process, collecting the
        # output as an artifact:
        try:
            # Run stress_compare with pexpect. subprocess.Popen didn't
            # work due to some kind of tty issue when invoking
            # nodetool.
            stress_proc = pexpect.spawn('cstar_perf_stress {stress_json_path}'.format(stress_json_path=stress_json_path), timeout=None)
            with open(stress_log_path, 'w') as stress_log:
                while True:
                    try:
                        with timeout(25):
                            line = stress_proc.readline()
                            if line == '':
                                break
                            stress_log.write(line)
                            sys.stdout.write(line)
                            self.__ws_client.send(base64.b64encode(line))
                    except TimeoutError:
                        self.__ws_client.send(base64.b64encode(KEEPALIVE_MARKER))
        finally:
            cancel_checker.stop()
            observer.stop()
            self.__ws_client.send(base64.b64encode(EOF_MARKER))

        response = self.__ws_client.receive(response, assertions={'message': 'stream_received', 'done': True})

        # Find the log tarball for each revision by introspecting the stats json:
        system_logs = []
        flamegraph_logs = []
        yourkit_logs = []
        log_dir = CSTAR_PERF_LOGS_DIR
        flamegraph_dir = os.path.join(os.path.expanduser("~"), '.cstar_perf', 'flamegraph')
        yourkit_dir = os.path.join(os.path.expanduser("~"), '.cstar_perf', 'yourkit')
        #Create a stats summary file without voluminous interval data
        if os.path.isfile(stats_path):
            with open(stats_path) as stats:
                stats = json.loads(stats.read())
                for rev in stats['revisions']:
                    last_log_rev_id = rev.get('last_log')
                    if last_log_rev_id:
                        system_logs.append(os.path.join(log_dir, "{name}.tar.gz".format(name=last_log_rev_id)))
                        fg_path = os.path.join(flamegraph_dir, "{name}.tar.gz".format(name=last_log_rev_id))
                        yourkit_path = os.path.join(yourkit_dir, "{name}.tar.gz".format(name=last_log_rev_id))
                        if os.path.exists(fg_path):
                            flamegraph_logs.append(fg_path)
                        if os.path.exists(yourkit_path):
                            yourkit_logs.append(yourkit_path)
                with open(summary_path, 'w') as summary:
                    hadStats = False
                    for op in stats['stats']:
                        if op['type'] == 'stress':
                            try:
                                del op['intervals']
                                hadStats = True
                            except KeyError:
                                pass
                        try:
                            del op['output']
                        except KeyError:
                            pass
                    if hadStats:
                        json.dump(obj=stats, fp=summary, sort_keys=True, indent=4, separators=(',', ': '))

        # Make a new tarball containing all the revision logs:
        tmptardir = tempfile.mkdtemp()
        try:
            startup_log_tarball = self._maybe_get_startup_log_tarball(job['test_id'], log_dir)
            if startup_log_tarball:
                system_logs.append(startup_log_tarball)
            job_log_dir = os.path.join(tmptardir, 'cassandra_logs.{test_id}'.format(test_id=job['test_id']))
            os.mkdir(job_log_dir)
            for x, syslog in enumerate(system_logs, 1):
                with tarfile.open(syslog) as tar:
                    tar.extractall(job_log_dir)
                    os.rename(os.path.join(job_log_dir, tar.getnames()[0]), os.path.join(job_log_dir, 'revision_{x:02d}'.format(x=x)))
            system_logs_path = os.path.join(job_dir, 'cassandra_logs.{test_id}.tar.gz'.format(test_id=job['test_id']))
            with tarfile.open(system_logs_path, 'w:gz') as tar:
                with cd(tmptardir):
                    tar.add('cassandra_logs.{test_id}'.format(test_id=job['test_id']))
            assert os.path.exists(system_logs_path)
        finally:
            shutil.rmtree(tmptardir)

        # Make a new tarball containing all the flamegraph and data
        if flamegraph_logs:
            tmptardir = tempfile.mkdtemp()
            try:
                flamegraph_tmp_dir = os.path.join(tmptardir, 'flamegraph_logs.{test_id}'.format(test_id=job['test_id']))
                os.mkdir(flamegraph_tmp_dir)
                for x, flamegraph in enumerate(flamegraph_logs, 1):
                    with tarfile.open(flamegraph) as tar:
                        tar.extractall(flamegraph_tmp_dir)
                        tmp_dir = os.path.join(flamegraph_tmp_dir, tar.getnames()[0])

                        # Copy all flamegraph as artifacts
                        for node_dir in os.listdir(tmp_dir):
                            glob_match = os.path.join(os.path.join(tmp_dir, node_dir), '*.svg')
                            graphs = glob.glob(glob_match)
                            for graph in graphs:
                                graph_name = os.path.basename(graph).replace(
                                    'flamegraph_', 'flamegraph_{}_{}_'.format(job['test_id'], node_dir))
                                graph_dst_filename = os.path.join(job_dir, graph_name)
                                shutil.copyfile(graph, graph_dst_filename)

                        os.rename(tmp_dir, os.path.join(flamegraph_tmp_dir, 'revision_{x:02d}'.format(x=x)))

                flamegraph_job_path = os.path.join(job_dir, 'flamegraph_logs.{test_id}.tar.gz'.format(test_id=job['test_id']))
                with tarfile.open(flamegraph_job_path, 'w:gz') as tar:
                    with cd(tmptardir):
                        tar.add('flamegraph_logs.{test_id}'.format(test_id=job['test_id']))
                assert os.path.exists(flamegraph_job_path)
            finally:
                shutil.rmtree(tmptardir)

        # Make a new tarball containing all the yourkit data
        if yourkit_logs:
            tmptardir = tempfile.mkdtemp()
            try:
                yourkit_tmp_dir = os.path.join(tmptardir, 'yourkit.{test_id}'.format(test_id=job['test_id']))
                os.mkdir(yourkit_tmp_dir)
                for x, yourkit in enumerate(yourkit_logs, 1):
                    with tarfile.open(yourkit) as tar:
                        tar.extractall(yourkit_tmp_dir)
                        tmp_dir = os.path.join(yourkit_tmp_dir, tar.getnames()[0])
                        os.rename(tmp_dir, os.path.join(yourkit_tmp_dir, 'revision_{x:02d}'.format(x=x)))

                yourkit_job_path = os.path.join(job_dir, 'yourkit.{test_id}.tar.gz'.format(test_id=job['test_id']))
                with tarfile.open(yourkit_job_path, 'w:gz') as tar:
                    with cd(tmptardir):
                        tar.add('yourkit.{test_id}'.format(test_id=job['test_id']))
                assert os.path.exists(yourkit_job_path)
            finally:
                shutil.rmtree(tmptardir)

        ## Stream artifacts
        ## Write final job status to 0.job_status file
        final_status = 'local_complete'
        try:
            # Stream artifacts:
            self.stream_artifacts(job['test_id'])
            if self.__ws_client.in_sync():
                final_status = 'server_complete'

            # Spot check stats to ensure it has the data it should
            # contain. Raises JobFailure if something's amiss.
            try:
                self.__spot_check_stats(job, stats_path)
            except JobFailure, e:
                if final_status == 'server_complete':
                    final_status = 'server_fail'
                else:
                    final_status = 'local_fail'
                raise
        finally:
            with open(os.path.join(job_dir, '0.job_status'), 'w') as f:
                f.write(final_status)
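
Example 12 stages each tarball inside a directory created with tempfile.mkdtemp() and deletes it with shutil.rmtree in a finally block, so the scratch space disappears whether the archiving succeeds or raises. A self-contained sketch of the same try/finally pattern with hypothetical source and destination paths:

import os
import shutil
import tarfile
import tempfile

def archive_logs(src_dir, dest_tarball):
    """Stage src_dir under a scratch directory and pack it into a .tar.gz."""
    tmpdir = tempfile.mkdtemp()
    try:
        staged = os.path.join(tmpdir, "logs")
        shutil.copytree(src_dir, staged)
        with tarfile.open(dest_tarball, "w:gz") as tar:
            tar.add(staged, arcname="logs")
    finally:
        # Always remove the scratch directory, even if tarring failed.
        shutil.rmtree(tmpdir)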

Example 13

Project: DIRAC
Source File: GridPilotDirector.py
View license
  def _submitPilots( self, workDir, taskQueueDict, pilotOptions, pilotsToSubmit,
                     ceMask, submitPrivatePilot, privateTQ, proxy, pilotsPerJob ):
    """
      This method does the actual pilot submission to the Grid RB
      The logic is as follows:
      - If there are no available RB it return error
      - If there is no VOMS extension in the proxy, return error
      - It creates a temp directory
      - Prepare a JDL
        it has some part common to gLite and LCG (the payload description)
        it has some part specific to each middleware
    """
    taskQueueID = taskQueueDict['TaskQueueID']
    # ownerDN = taskQueueDict['OwnerDN']
    credDict = proxy.getCredentials()['Value']
    ownerDN = credDict['identity']
    ownerGroup = credDict[ 'group' ]

    if not self.resourceBrokers:
      # Since we can exclude RBs from the list, it may become empty
      return S_ERROR( ERROR_RB )

    # Need to get VOMS extension for the later interactions with WMS
    ret = gProxyManager.getVOMSAttributes( proxy )
    if not ret['OK']:
      self.log.error( ERROR_VOMS, ret['Message'] )
      return S_ERROR( ERROR_VOMS )
    if not ret['Value']:
      return S_ERROR( ERROR_VOMS )

    workingDirectory = tempfile.mkdtemp( prefix = 'TQ_%s_' % taskQueueID, dir = workDir )
    self.log.verbose( 'Using working Directory:', workingDirectory )

    # Write JDL
    retDict = self._prepareJDL( taskQueueDict, workingDirectory, pilotOptions, pilotsPerJob,
                                ceMask, submitPrivatePilot, privateTQ )
    jdl = retDict['JDL']
    pilotRequirements = retDict['Requirements']
    rb = retDict['RB']
    if not jdl:
      try:
        shutil.rmtree( workingDirectory )
      except:
        pass
      return S_ERROR( ERROR_JDL )

    # Check that there are available queues for the Job:
    if self.enableListMatch:
      availableCEs = []
      now = Time.dateTime()
      availableCEs = self.listMatchCache.get( pilotRequirements )
      if availableCEs is None:
        availableCEs = self._listMatch( proxy, jdl, taskQueueID, rb )
        if availableCEs != False:
          self.log.verbose( 'LastListMatch', now )
          self.log.verbose( 'AvailableCEs ', availableCEs )
          self.listMatchCache.add( pilotRequirements, self.listMatchDelay * 60,
                                   value = availableCEs )                      # it is given in minutes
      if not availableCEs:
        try:
          shutil.rmtree( workingDirectory )
        except:
          pass
        return S_ERROR( ERROR_CE + ' TQ: %d' % taskQueueID )

    # Now we are ready for the actual submission, so

    self.log.verbose( 'Submitting Pilots for TaskQueue', taskQueueID )

    # FIXME: what is this?? If it goes on the super class, it is doomed
    submitRet = self._submitPilot( proxy, pilotsPerJob, jdl, taskQueueID, rb )
    try:
      shutil.rmtree( workingDirectory )
    except:
      pass
    if not submitRet:
      return S_ERROR( 'Pilot Submission Failed for TQ %d ' % taskQueueID )
    # pilotReference, resourceBroker = submitRet

    submittedPilots = 0

    if pilotsPerJob != 1 and len( submitRet ) != pilotsPerJob:
      # Parametric jobs are used
      for pilotReference, resourceBroker in submitRet:
        pilotReference = self._getChildrenReferences( proxy, pilotReference, taskQueueID )
        submittedPilots += len( pilotReference )
        pilotAgentsDB.addPilotTQReference( pilotReference, taskQueueID, ownerDN,
                      ownerGroup, resourceBroker, self.gridMiddleware,
                      pilotRequirements )
    else:
      for pilotReference, resourceBroker in submitRet:
        pilotReference = [pilotReference]
        submittedPilots += len( pilotReference )
        pilotAgentsDB.addPilotTQReference( pilotReference, taskQueueID, ownerDN,
                      ownerGroup, resourceBroker, self.gridMiddleware, pilotRequirements )

    # add some sleep here
    time.sleep( 0.1 * submittedPilots )

    if pilotsToSubmit > pilotsPerJob:
      # Additional submissions are necessary, need to get a new token and iterate.
      pilotsToSubmit -= pilotsPerJob
      result = gProxyManager.requestToken( ownerDN, ownerGroup, max( pilotsToSubmit, self.maxJobsInFillMode ) )
      if not result[ 'OK' ]:
        self.log.error( ERROR_TOKEN, result['Message'] )
        result = S_ERROR( ERROR_TOKEN )
        result['Value'] = submittedPilots
        return result
      ( token, numberOfUses ) = result[ 'Value' ]
      for option in pilotOptions:
        if option.find( '-o /Security/ProxyToken=' ) == 0:
          pilotOptions.remove( option )
      pilotOptions.append( '-o /Security/ProxyToken=%s' % token )
      pilotsPerJob = max( 1, min( pilotsPerJob, int( numberOfUses / self.maxJobsInFillMode ) ) )
      result = self._submitPilots( workDir, taskQueueDict, pilotOptions,
                                   pilotsToSubmit, ceMask,
                                   submitPrivatePilot, privateTQ,
                                   proxy, pilotsPerJob )
      if not result['OK']:
        if 'Value' not in result:
          result['Value'] = 0
        result['Value'] += submittedPilots
        return result
      submittedPilots += result['Value']

    return S_OK( submittedPilots )
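
Example 13 creates a per-task working directory with tempfile.mkdtemp and removes it with shutil.rmtree on every exit path, wrapping each call in try/except because the directory may already be gone. A condensed sketch of that idea as a single helper; the work callable is a stand-in for the JDL preparation and submission steps:

import shutil
import tempfile

def with_scratch_dir(work, prefix="TQ_"):
    """Run work(path) inside a throwaway directory and always try to remove it."""
    path = tempfile.mkdtemp(prefix=prefix)
    try:
        return work(path)
    finally:
        shutil.rmtree(path, ignore_errors=True)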

Example 14

Project: ete
Source File: phylobuild.py
View license
def main(args):
    """ Read and parse all configuration and command line options,
    setup global variables and data, and initialize the master task of
    all workflows. """

    global log
    log = logging.getLogger("main")

    base_dir = GLOBALS["basedir"]

    # -------------------------------------
    # READ CONFIG FILE AND PARSE WORKFLOWS
    # -------------------------------------

    # Load and check config file


    if args.custom_config:
        concat_config = open(args.base_config).readlines()
        concat_config += open(args.custom_config).readlines()
        base_config = check_config(concat_config)
    else:
        base_config = check_config(args.base_config)
        
    # Check for config file overwriting
    clearname = os.path.basename(args.base_config)
    local_conf_file = pjoin(base_dir, "ete_build.cfg")
    if pexist(base_dir):
        if hascontent(local_conf_file):
            if not args.clearall and not args.resume:
                raise ConfigError("Output directory seems to contain"
                                  " data from a previous run."
                                  " Use --clearall to restart the analysis or --resume to continue.")

    # Creates a tree splitter config block on the fly. In the future this
    # options should be more accessible by users.
    base_config['default_tree_splitter'] = {
        '_app' : 'treesplitter',
        '_max_outgroup_size' : '10%', # dynamic or fixed selection of out seqs.
        '_min_outgroup_support' : 0.9, # avoids fixing labile nodes as monophyletic
        '_outgroup_topology_dist' : False}


    # prepare workflow config dictionaries
    workflow_types = defaultdict(list)
    TARGET_CLADES = set()
    VALID_WORKFLOW_TYPES = set(['genetree', 'supermatrix'])
    # extract workflow filters


    def parse_workflows(names, target_wtype, parse_filters=False):
        parsed_workflows = []
        if not names:
            return parsed_workflows

        for wkname in names:
            if parse_filters:
                wfilters = {}
                fields = [_f.strip() for _f in wkname.split(",")]
                if len(fields) == 1:
                    wkname = fields[0]
                else:
                    wkname = fields[-1]
                    for f in fields[:-1]:
                        if f.startswith("size-range:"): # size filter
                            f = f.replace("size-range:",'')
                            try:
                                min_size, max_size = list(map(int, f.split('-')))
                                if min_size < 0 or min_size > max_size:
                                    raise ValueError

                            except ValueError:
                                raise ConfigError('size filter should consist of two integer numbers (i.e. 50-100). Found [%s] instead' %f)
                            wfilters["max_size"] = max_size
                            wfilters["min_size"] = min_size
                        elif f.startswith("seq-sim-range:"):
                            f = f.replace("seq-sim-range:",'')
                            try:
                                min_seq_sim, max_seq_sim  = map(float, f.split('-'))
                                if min_seq_sim > 1 or min_seq_sim < 0:
                                    raise ValueError
                                if max_seq_sim > 1 or max_seq_sim < 0:
                                    raise ValueError
                                if min_seq_sim > max_seq_sim:
                                    raise ValueError
                            except ValueError:
                                raise ConfigError('sequence similarity filter should consist of two float numbers between 0 and 1 (i.e. 0-0.95). Found [%s] instead' %f)
                            wfilters["min_seq_sim"] = min_seq_sim
                            wfilters["max_seq_sim"] = max_seq_sim
                        else:
                            raise ConfigError('Unknown workflow filter [%s]' %f)

            if target_wtype == "genetree" and wkname in base_config.get('genetree_meta_workflow', {}):
                temp_workflows = [x.lstrip('@') for x in base_config['genetree_meta_workflow'][wkname]]
            elif target_wtype == "supermatrix" and wkname in base_config.get('supermatrix_meta_workflow', {}):
                temp_workflows = [x.lstrip('@') for x in base_config['supermatrix_meta_workflow'][wkname]]
            else:
                temp_workflows = [wkname]

            # if wkname not in base_config and wkname in base_config.get('meta_workflow', {}):
            #     temp_workflows = [x.lstrip('@') for x in base_config['meta_workflow'][wkname]]
            # else:
            #     temp_workflows = [wkname]

            for _w in temp_workflows:
                if target_wtype == "genetree":
                    base_config.update(build_genetree_workflow(_w))
                elif target_wtype == "supermatrix":
                    base_config.update(build_supermatrix_workflow(_w))
                parse_block(_w, base_config)
                
                if _w not in base_config:
                    list_workflows(base_config)
                    raise ConfigError('[%s] workflow or meta-workflow name is not found in the config file.' %_w)
                wtype = base_config[_w]['_app']
                if wtype not in VALID_WORKFLOW_TYPES:
                    raise ConfigError('[%s] is not a valid workflow: %s?' %(_w, wtype))
                if wtype != target_wtype:
                    raise ConfigError('[%s] is not a valid %s workflow' %(wkname, target_wtype))

            if parse_filters:
                if len(temp_workflows) == 1:
                    parsed_workflows.extend([(temp_workflows[0], wfilters)])
                else:
                    raise ConfigError('Meta-workflows with multiple threads are not allowed as recursive workflows [%s]' %wkname)
            else:
                parsed_workflows.extend(temp_workflows)
        return parsed_workflows

    genetree_workflows = parse_workflows(args.workflow, "genetree")
    supermatrix_workflows = parse_workflows(args.supermatrix_workflow, "supermatrix")

    # Stop if mixing types of meta-workflows
    if supermatrix_workflows and len(genetree_workflows) > 1:
        raise ConfigError("A single genetree workflow must be specified when used in combination with super-matrix workflows.")

    # Sets master workflow type
    if supermatrix_workflows:
        WORKFLOW_TYPE = "supermatrix"
        master_workflows = supermatrix_workflows
    else:
        WORKFLOW_TYPE = "genetree"
        master_workflows = genetree_workflows

    # Parse npr workflows and filters
    npr_workflows = []
    use_npr = False
    if args.npr_workflows is not None:
        use_npr = True
        npr_workflows = parse_workflows(args.npr_workflows, WORKFLOW_TYPE, parse_filters=True)

    # setup workflows and create a separate config dictionary for each of them
    run2config = {}
    for wkname in master_workflows:
        config = dict(base_config)
        run2config[wkname] = config

        appset = config[config[wkname]['_appset'][1:]]

        # Initialized application command line commands for this workflow
        config['app'] = {}
        config['threading'] = {}

        apps_to_test = {}
        for k, (appsrc, cores) in six.iteritems(appset):
            cores = int(cores)
            if appsrc == "built-in":
                #cores = int(config["threading"].get(k, args.maxcores))
                cores = min(args.maxcores, cores)
                config["threading"][k] = cores
                cmd = apps.get_call(k, APPSPATH, base_dir, str(cores))
                config["app"][k] = cmd
                apps_to_test[k] = cmd

        # Copy config file
        config["_outpath"] = pjoin(base_dir, wkname)
        config["_nodeinfo"] = defaultdict(dict)
        try:
            os.makedirs(config["_outpath"])
        except OSError:
            pass

        # setup genetree workflow as the processor of concat alignment jobs
        if WORKFLOW_TYPE == "supermatrix":
            concatenator = config[wkname]["_alg_concatenator"][1:]
            config[concatenator]["_workflow"] = '@%s' % genetree_workflows[0]

        # setup npr options for master workflows
        if use_npr:
            config['_npr'] = {
                # register root workflow as the main workflow if the contrary not said
                "wf_type": WORKFLOW_TYPE,
                "workflows": npr_workflows if npr_workflows else [(wkname, {})],
                'nt_switch_thr': args.nt_switch_thr,
                'max_iters': args.max_iters,
                }

            #config[wkname]['_npr'] = '@'+npr_config
            #target_levels = config[npr_config].get('_target_levels', [])
            #target_dict = config['_optimized_levels'] = {}
            #for tg in target_levels:
                # If target level name starts with ~, we allow para and
                # poly-phyletic grouping of the species in such level
                #strict_monophyly = True
                #if tg.startswith("~"):
                    #tg = target_level.lstrip("~")
                    #strict_monophyly = False
                #tg = tg.lower()
                # We add the level as non-optimized
                #target_dict[target_level] = [False, strict_monophyly]
            #TARGET_CLADES.update(target_levels)
        else:
            config['_npr'] = {
                'nt_switch_thr': args.nt_switch_thr,
            }


    # dump log config file
    with open(local_conf_file, "w") as OUTPUT:
        with open(args.base_config) as INPUT:
            OUTPUT.write(INPUT.read()) # replace by simple copy?

    TARGET_CLADES.discard('')

    if WORKFLOW_TYPE == 'genetree':
        from .phylobuild_lib.workflow.genetree import pipeline
    elif WORKFLOW_TYPE == 'supermatrix':
        from .phylobuild_lib.workflow.supermatrix import pipeline

    #if args.arch == "auto":
    #    arch = "64 " if sys.maxsize > 2**32 else "32"
    #else:
    #    arch = args.arch

    arch = "64 " if sys.maxsize > 2**32 else "32"

    print(__DESCRIPTION__)

    # check application binary files
    if not args.nochecks:
        log.log(28, "Testing x86-%s portable applications..." % arch)
        apps.test_apps(apps_to_test)

    log.log(28, "Starting ETE-build execution at %s" %(ctime()))
    log.log(28, "Output directory %s" %(GLOBALS["output_dir"]))


    # -------------------------------------
    # PATH CONFIGs
    # -------------------------------------

    # Set up paths
    gallery_dir = os.path.join(base_dir, "gallery")
    sge_dir = pjoin(base_dir, "sge_jobs")
    tmp_dir = pjoin(base_dir, "tmp")
    tasks_dir = os.path.realpath(args.tasks_dir) if args.tasks_dir else  pjoin(base_dir, "tasks")
    input_dir = pjoin(base_dir, "input")
    db_dir = os.path.realpath(args.db_dir) if args.db_dir else  pjoin(base_dir, "db")

    GLOBALS["db_dir"] = db_dir
    GLOBALS["sge_dir"] = sge_dir
    GLOBALS["tmp"] = tmp_dir
    GLOBALS["gallery_dir"] = gallery_dir
    GLOBALS["tasks_dir"] = tasks_dir
    GLOBALS["input_dir"] = input_dir

    GLOBALS["nprdb_file"]  = pjoin(db_dir, "npr.db")
    GLOBALS["datadb_file"]  = pjoin(db_dir, "data.db")
    
    GLOBALS["seqdb_file"]  = pjoin(db_dir, "seq.db") if not args.seqdb else args.seqdb

    # Clear databases if necessary
    if args.clearall:
        log.log(28, "Erasing all existing npr data...")
        shutil.rmtree(GLOBALS["tasks_dir"]) if pexist(GLOBALS["tasks_dir"]) else None
        shutil.rmtree(GLOBALS["tmp"]) if pexist(GLOBALS["tmp"]) else None
        shutil.rmtree(GLOBALS["input_dir"]) if pexist(GLOBALS["input_dir"]) else None

        if not args.seqdb:
            silent_remove(GLOBALS["seqdb_file"])

        silent_remove(GLOBALS["datadb_file"])
        silent_remove(pjoin(base_dir, "nprdata.tar"))
        silent_remove(pjoin(base_dir, "nprdata.tar.gz"))
        #silent_remove(pjoin(base_dir, "npr.log"))
        silent_remove(pjoin(base_dir, "npr.log.gz"))
    else:
        if args.softclear:
            log.log(28, "Erasing precomputed data (reusing task directory)")
            shutil.rmtree(GLOBALS["tmp"]) if pexist(GLOBALS["tmp"]) else None
            shutil.rmtree(GLOBALS["input_dir"]) if pexist(GLOBALS["input_dir"]) else None
            os.remove(GLOBALS["datadb_file"]) if pexist(GLOBALS["datadb_file"]) else None
        if args.clearseqs and pexist(GLOBALS["seqdb_file"]) and not args.seqdb:
            log.log(28, "Erasing existing sequence database...")
            os.remove(GLOBALS["seqdb_file"])

    if not args.clearall and base_dir != GLOBALS["output_dir"]:
        log.log(24, "Copying previous output files to scratch directory: %s..." %base_dir)
        try:
            shutil.copytree(pjoin(GLOBALS["output_dir"], "db"), db_dir)
        except IOError as e:
            print(e)
            pass

        try:
            shutil.copytree(pjoin(GLOBALS["output_dir"], "tasks/"), pjoin(base_dir, "tasks/"))
        except IOError as e:
            try:
                shutil.copy(pjoin(GLOBALS["output_dir"], "nprdata.tar.gz"), base_dir)
            except IOError as e:
                pass

        # try: os.system("cp -a %s/* %s/" %(GLOBALS["output_dir"],  base_dir))
        # except Exception: pass


    # UnCompress packed execution data
    if pexist(os.path.join(base_dir,"nprdata.tar.gz")):
        log.warning("Compressed data found. Extracting content to start execution...")
        cmd = "cd %s && gunzip -f nprdata.tar.gz && tar -xf nprdata.tar && rm nprdata.tar" % base_dir
        os.system(cmd)

    # Create dir structure
    for dirname in [tmp_dir, tasks_dir, input_dir, db_dir]:
        try:
            os.makedirs(dirname)
        except OSError:
            log.warning("Using existing dir: %s", dirname)


    # -------------------------------------
    # DATA READING AND CHECKING
    # -------------------------------------

    # Set number of CPUs available

    if WORKFLOW_TYPE == "supermatrix" and not args.cogs_file:
        raise ConfigError("Species tree workflow requires a list of COGS"
                          " to be supplied through the --cogs"
                          " argument.")
    elif WORKFLOW_TYPE == "supermatrix":
        GLOBALS["cogs_file"] = os.path.abspath(args.cogs_file)

    GLOBALS["seqtypes"] = set()
    if args.nt_seed_file:
        GLOBALS["seqtypes"].add("nt")
        GLOBALS["inputname"] = os.path.split(args.nt_seed_file)[-1]

    if args.aa_seed_file:
        GLOBALS["seqtypes"].add("aa")
        GLOBALS["inputname"] = os.path.split(args.aa_seed_file)[-1]

    # Initialize db if necessary, otherwise extract basic info
    db.init_nprdb(GLOBALS["nprdb_file"])
    db.init_datadb(GLOBALS["datadb_file"])

    # Species filter
    if args.spfile:
        target_species = set([line.strip() for line in open(args.spfile)])
        target_species.discard("")
        log.log(28, "Enabling %d species", len(target_species))
    else:
        target_species = None
    
    # Load supermatrix data
    if WORKFLOW_TYPE == "supermatrix":
        observed_species= set()
        target_seqs = set()
        for cog_number, seq_cogs in iter_cog_seqs(args.cogs_file, args.spname_delimiter):
            for seqname, spcode, seqcode in seq_cogs:
                if target_species is None or spcode in target_species:
                    observed_species.add(spcode)
                    target_seqs.add(seqname)            
                
        if target_species is not None:
            if target_species - observed_species:
                raise DataError("The following target_species could not be found in COGs file: %s" %(','.join(target_species-observed_species)))
        else:
            target_species = observed_species
        log.warning("COG file restriction: %d sequences from %s species " %(len(target_seqs), len(target_species)))
    else:
        target_seqs = None

    GLOBALS["target_species"] = target_species
    
    # Check and load data
    ERROR = ""
    if not pexist(GLOBALS["seqdb_file"]):
        db.init_seqdb(GLOBALS["seqdb_file"])
        seqname2seqid = None
        if args.aa_seed_file:
            seqname2seqid = seqio.load_sequences(args, "aa", target_seqs, target_species, seqname2seqid)
            if not target_seqs:
                target_seqs = list(seqname2seqid.keys())
                
        if args.nt_seed_file:
            seqname2seqid = seqio.load_sequences(args, "nt", target_seqs, target_species, seqname2seqid)
        # Integrity checks?
        pass
            
    else:
        db.init_seqdb(GLOBALS["seqdb_file"])
        log.warning("Reusing sequences from existing database!")
        if target_seqs is None:
            seqname2seqid = db.get_seq_name_dict()
        else:
            seqname2seqid = db.get_seq_name_dict()
            if target_seqs - set(seqname2seqid.keys()):
                raise DataError("The following sequence names in COGs file"
                                " are not found in current database: %s" %(
                                    ','.join(target_seqs - set(seqname2seqid.keys()))))
                      
    log.warning("%d target sequences" %len(seqname2seqid))
    GLOBALS["target_sequences"] = seqname2seqid.values()
        
    if ERROR:
        with open(pjoin(base_dir, "error.log"), "w") as OUTPUT:
            OUTPUT.write(' '.join(arguments) + "\n\n" + ERROR)
        raise DataError("Errors were found while loading data. Please"
                        " check error file for details")

    # Prepare target taxa levels, if any
    if WORKFLOW_TYPE == "supermatrix" and args.lineages_file and TARGET_CLADES:
        sp2lin = {}
        lin2sp = defaultdict(set)
        all_sorted_levels = []
        for line in open(args.lineages_file):
            sp, lineage = line.split("\t")
            sp = sp.strip()
            if sp in target_species:
                sp2lin[sp] = [x.strip().lower() for x in lineage.split(",")]
                for lin in sp2lin[sp]:
                    if lin not in lin2sp:
                        all_sorted_levels.append(lin)
                    lin2sp[lin].add(sp)
        # any target species without lineage information?
        if target_species - set(sp2lin):
            missing = target_species - set(sp2lin)
            log.warning("%d species not found in lineages file" %len(missing))

        # So, the following levels (with at least 2 species) could be optimized
        avail_levels = [(lin, len(lin2sp[lin])) for lin in all_sorted_levels if len(lin2sp[lin])>=2]
        log.log(26, "Available levels for NPR optimization:\n%s", '\n'.join(["% 30s (%d spcs)"%x for x in avail_levels]))
        avail_levels = set([lv[0] for lv in avail_levels])
        GLOBALS["lineages"] = (sp2lin, lin2sp)
        
    # if no lineages file, raise an error
    elif WORKFLOW_TYPE == "supermatrix" and TARGET_CLADES:
        raise ConfigError("The use of target_levels requires a species lineage file provided through the --lineages option")

    # -------------------------------------
    # MISC
    # -------------------------------------

    GLOBALS["_max_cores"] = args.maxcores
    log.debug("Enabling %d CPU cores" %args.maxcores)


    # how task will be executed
    if args.no_execute:
        execution = (None, False)
    # elif args.sge_execute:
    #     execution = ("sge", False)
    else:
        if args.monitor:
            execution =("insitu", True) # True is for run-detached flag
        else:
            execution = ("insitu", False)

    # Scheduling starts here
    log.log(28, "ETE build starts now!")

    # This initialises all pipelines
    pending_tasks = []
    start_time = ctime()
    for wkname, config in six.iteritems(run2config):
        # Feeds pending task with the first task of the workflow
        config["_name"] = wkname
        new_tasks = pipeline(None, wkname, config)
        if not new_tasks:
            continue # skips pipelines not fitting workflow filters
        thread_id = new_tasks[0].threadid
        config["_configid"] = thread_id
        GLOBALS[thread_id] = config
        pending_tasks.extend(new_tasks)

        # Clear info from previous runs
        open(os.path.join(config["_outpath"], "runid"), "a").write('\t'.join([thread_id, GLOBALS["nprdb_file"]+"\n"]))
        # Write command line info
        cmd_info = '\t'.join([start_time, thread_id, str(args.monitor), GLOBALS["cmdline"]])
        open(pjoin(config["_outpath"], "command_lines"), "a").write(cmd_info+"\n")

    thread_errors = schedule(pipeline, pending_tasks, args.schedule_time,
                             execution, args.debug, args.noimg)
    db.close()

    if not thread_errors:
        if GLOBALS.get('_background_scheduler', None):
            GLOBALS['_background_scheduler'].terminate()

        if args.compress:
            log.log(28, "Compressing intermediate data...")
            cmd = "cd %s && tar --remove-files -cf nprdata.tar tasks/ && gzip -f nprdata.tar; if [ -e npr.log ]; then gzip -f npr.log; fi;" %\
              GLOBALS["basedir"]
            os.system(cmd)
        log.log(28, "Deleting temporary data...")
        cmd = "cd %s && rm -rf tmp/" %GLOBALS["basedir"]
        os.system(cmd)
        cmd = "cd %s && rm -rf input/" %GLOBALS["basedir"]
        os.system(cmd)
        GLOBALS["citator"].show()
    else:
        raise DataError("Errors found in some tasks")

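Note: the soft-clear branch above uses conditional expressions (shutil.rmtree(path) if pexist(path) else None) purely for their side effects. A minimal sketch of the same cleanup wrapped in a small helper is shown below; the helper name is illustrative and not part of the original project, which uses pexist()/silent_remove().

import os
import shutil

def remove_path(path):
    # Remove a file or an entire directory tree, ignoring missing paths.
    # Illustrative helper only.
    if os.path.isdir(path):
        shutil.rmtree(path, ignore_errors=True)
    elif os.path.exists(path):
        os.remove(path)

# e.g. remove_path(GLOBALS["tmp"]); remove_path(GLOBALS["datadb_file"])
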
Example 15

Project: geonode
Source File: restore.py
View license
    def handle(self, **options):
        # ignore_errors = options.get('ignore_errors')
        force_exec = options.get('force_exec')
        backup_file = options.get('backup_file')

        if not backup_file or len(backup_file) == 0:
            raise CommandError("Backup archive '--backup-file' is mandatory")

        print "Before proceeding with the Restore, please ensure that:"
        print " 1. The backend (DB or whatever) is accessible and you have rights"
        print " 2. The GeoServer is up and running and reachable from this machine"
        message = 'WARNING: The restore will overwrite ALL GeoNode data. You want to proceed?'
        if force_exec or helpers.confirm(prompt=message, resp=False):
            # Create Target Folder
            restore_folder = os.path.join(tempfile.gettempdir(), 'restore')
            if not os.path.exists(restore_folder):
                os.makedirs(restore_folder)

            # Extract ZIP Archive to Target Folder
            target_folder = helpers.unzip_file(backup_file, restore_folder)

            # Restore GeoServer Catalog
            url = settings.OGC_SERVER['default']['PUBLIC_LOCATION']
            user = settings.OGC_SERVER['default']['USER']
            passwd = settings.OGC_SERVER['default']['PASSWORD']
            geoserver_bk_file = os.path.join(target_folder, 'geoserver_catalog.zip')

            print "Restoring 'GeoServer Catalog ["+url+"]' into '"+geoserver_bk_file+"'."
            if not os.path.exists(geoserver_bk_file):
                raise ValueError('Could not find GeoServer Backup file [' + geoserver_bk_file + ']')

            # Best Effort Restore: 'options': {'option': ['BK_BEST_EFFORT=true']}
            data = {'restore': {'archiveFile': geoserver_bk_file, 'options': {}}}
            headers = {'Content-type': 'application/json'}
            r = requests.post(url + 'rest/br/restore/', data=json.dumps(data),
                              headers=headers, auth=HTTPBasicAuth(user, passwd))
            if (r.status_code > 201):
                gs_backup = r.json()
                gs_bk_exec_id = gs_backup['restore']['execution']['id']
                r = requests.get(url + 'rest/br/restore/' + str(gs_bk_exec_id) + '.json',
                                 auth=HTTPBasicAuth(user, passwd))
                if (r.status_code == 200):
                    gs_backup = r.json()
                    gs_bk_progress = gs_backup['restore']['execution']['progress']
                    print gs_bk_progress

                raise ValueError('Could not successfully restore GeoServer catalog [' + url +
                                 'rest/br/restore/]: ' + str(r.status_code) + ' - ' + str(r.text))
            else:
                gs_backup = r.json()
                gs_bk_exec_id = gs_backup['restore']['execution']['id']
                r = requests.get(url + 'rest/br/restore/' + str(gs_bk_exec_id) + '.json',
                                 auth=HTTPBasicAuth(user, passwd))
                if (r.status_code == 200):
                    gs_bk_exec_status = gs_backup['restore']['execution']['status']
                    gs_bk_exec_progress = gs_backup['restore']['execution']['progress']
                    gs_bk_exec_progress_updated = '0/0'
                    while (gs_bk_exec_status != 'COMPLETED' and gs_bk_exec_status != 'FAILED'):
                        if (gs_bk_exec_progress != gs_bk_exec_progress_updated):
                            gs_bk_exec_progress_updated = gs_bk_exec_progress
                        r = requests.get(url + 'rest/br/restore/' + str(gs_bk_exec_id) + '.json',
                                         auth=HTTPBasicAuth(user, passwd))
                        if (r.status_code == 200):
                            gs_backup = r.json()
                            gs_bk_exec_status = gs_backup['restore']['execution']['status']
                            gs_bk_exec_progress = gs_backup['restore']['execution']['progress']
                            print str(gs_bk_exec_status) + ' - ' + gs_bk_exec_progress
                            time.sleep(3)
                        else:
                            raise ValueError('Could not successfully restore GeoServer catalog [' + url +
                                             'rest/br/restore/]: ' + str(r.status_code) + ' - ' + str(r.text))
                else:
                    raise ValueError('Could not successfully restore GeoServer catalog [' + url +
                                     'rest/br/restore/]: ' + str(r.status_code) + ' - ' + str(r.text))

            # Restore GeoServer Data
            if (helpers.GS_DATA_DIR):
                if (helpers.GS_DUMP_RASTER_DATA):
                    # Restore '$GS_DATA_DIR/data/geonode'
                    gs_data_root = os.path.join(helpers.GS_DATA_DIR, 'data', 'geonode')
                    gs_data_folder = os.path.join(target_folder, 'gs_data_dir', 'data', 'geonode')

                    try:
                        shutil.rmtree(gs_data_root)
                    except:
                        pass

                    if not os.path.exists(gs_data_root):
                        os.makedirs(gs_data_root)

                    helpers.copy_tree(gs_data_folder, gs_data_root)
                    helpers.chmod_tree(gs_data_root)
                    print "GeoServer Uploaded Data Restored to '"+gs_data_root+"'."

            if (helpers.GS_DUMP_VECTOR_DATA):
                # Restore Vectorial Data from DB
                datastore = settings.OGC_SERVER['default']['DATASTORE']
                if (datastore):
                    ogc_db_name = settings.DATABASES[datastore]['NAME']
                    ogc_db_user = settings.DATABASES[datastore]['USER']
                    ogc_db_passwd = settings.DATABASES[datastore]['PASSWORD']
                    ogc_db_host = settings.DATABASES[datastore]['HOST']
                    ogc_db_port = settings.DATABASES[datastore]['PORT']

                    gs_data_folder = os.path.join(target_folder, 'gs_data_dir', 'data', 'geonode')

                    helpers.restore_db(ogc_db_name, ogc_db_user, ogc_db_port, ogc_db_host,
                                       ogc_db_passwd, gs_data_folder)

            # Prepare Target DB
            try:
                call_command('syncdb', interactive=False, load_initial_data=False)
                call_command('flush', interactive=False, load_initial_data=False)

                db_name = settings.DATABASES['default']['NAME']
                db_user = settings.DATABASES['default']['USER']
                db_port = settings.DATABASES['default']['PORT']
                db_host = settings.DATABASES['default']['HOST']
                db_passwd = settings.DATABASES['default']['PASSWORD']

                helpers.patch_db(db_name, db_user, db_port, db_host, db_passwd)
            except:
                traceback.print_exc()

            # Restore Fixtures
            for app_name, dump_name in zip(helpers.app_names, helpers.dump_names):
                fixture_file = os.path.join(target_folder, dump_name+'.json')

                print "Deserializing "+fixture_file
                try:
                    call_command('loaddata', fixture_file, app_label=app_name)
                except:
                    traceback.print_exc()
                    print "WARNING: No valid fixture data found for '"+dump_name+"'."
                    # helpers.load_fixture(app_name, fixture_file)

            # Restore Media Root
            media_root = settings.MEDIA_ROOT
            media_folder = os.path.join(target_folder, helpers.MEDIA_ROOT)

            try:
                shutil.rmtree(media_root)
            except:
                pass

            if not os.path.exists(media_root):
                os.makedirs(media_root)

            helpers.copy_tree(media_folder, media_root)
            helpers.chmod_tree(media_root)
            print "Media Files Restored into '"+media_root+"'."

            # Restore Static Root
            static_root = settings.STATIC_ROOT
            static_folder = os.path.join(target_folder, helpers.STATIC_ROOT)

            try:
                shutil.rmtree(static_root)
            except:
                pass

            if not os.path.exists(static_root):
                os.makedirs(static_root)

            helpers.copy_tree(static_folder, static_root)
            helpers.chmod_tree(static_root)
            print "Static Root Restored into '"+static_root+"'."

            # Restore Static Folders
            static_folders = settings.STATICFILES_DIRS
            static_files_folders = os.path.join(target_folder, helpers.STATICFILES_DIRS)

            for static_files_folder in static_folders:

                try:
                    shutil.rmtree(static_files_folder)
                except:
                    pass

                if not os.path.exists(static_files_folder):
                    os.makedirs(static_files_folder)

                helpers.copy_tree(os.path.join(static_files_folders,
                                               os.path.basename(os.path.normpath(static_files_folder))),
                                  static_files_folder)
                helpers.chmod_tree(static_files_folder)
                print "Static Files Restored into '"+static_files_folder+"'."

            # Restore Template Folders
            template_folders = settings.TEMPLATE_DIRS
            template_files_folders = os.path.join(target_folder, helpers.TEMPLATE_DIRS)

            for template_files_folder in template_folders:

                try:
                    shutil.rmtree(template_files_folder)
                except:
                    pass

                if not os.path.exists(template_files_folder):
                    os.makedirs(template_files_folder)

                helpers.copy_tree(os.path.join(template_files_folders,
                                               os.path.basename(os.path.normpath(template_files_folder))),
                                  template_files_folder)
                helpers.chmod_tree(template_files_folder)
                print "Template Files Restored into '"+template_files_folder+"'."

            # Restore Locale Folders
            locale_folders = settings.LOCALE_PATHS
            locale_files_folders = os.path.join(target_folder, helpers.LOCALE_PATHS)

            for locale_files_folder in locale_folders:

                try:
                    shutil.rmtree(locale_files_folder)
                except:
                    pass

                if not os.path.exists(locale_files_folder):
                    os.makedirs(locale_files_folder)

                helpers.copy_tree(os.path.join(locale_files_folders,
                                               os.path.basename(os.path.normpath(locale_files_folder))),
                                  locale_files_folder)
                helpers.chmod_tree(locale_files_folder)
                print "Locale Files Restored into '"+locale_files_folder+"'."

            # Cleanup DB
            try:
                db_name = settings.DATABASES['default']['NAME']
                db_user = settings.DATABASES['default']['USER']
                db_port = settings.DATABASES['default']['PORT']
                db_host = settings.DATABASES['default']['HOST']
                db_passwd = settings.DATABASES['default']['PASSWORD']

                helpers.cleanup_db(db_name, db_user, db_port, db_host, db_passwd)
            except:
                traceback.print_exc()

            print "Restore finished. Please find restored files and dumps into:"

            return str(target_folder)

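Note: the restore command above repeats the same wipe-and-recreate step (try: shutil.rmtree(...) / except: pass, followed by os.makedirs) for the media root, static root and several folder lists. Below is a minimal sketch of that step as a reusable helper, assuming Python 3 for exist_ok; the name reset_dir is illustrative and not part of GeoNode.

import os
import shutil

def reset_dir(path):
    # Remove the directory tree if it exists, then recreate it empty.
    shutil.rmtree(path, ignore_errors=True)   # tolerate a missing directory
    os.makedirs(path, exist_ok=True)          # Python 3; on Python 2 catch OSError instead

# e.g. reset_dir(settings.MEDIA_ROOT) before helpers.copy_tree(media_folder, media_root)
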
Example 16

Project: geonode
Source File: tests.py
View license
    def test_get_files(self):

        # Check that a well-formed Shapefile has its components all picked up
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.shp", "foo.shx", "foo.prj", "foo.dbf"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            gotten_files = get_files(os.path.join(d, "foo.shp"))
            gotten_files = dict((k, v[len(d) + 1:])
                                for k, v in gotten_files.iteritems())
            self.assertEquals(gotten_files, dict(shp="foo.shp", shx="foo.shx",
                                                 prj="foo.prj", dbf="foo.dbf"))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that a Shapefile missing required components raises an
        # exception
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.shp", "foo.shx", "foo.prj"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            self.assertRaises(
                GeoNodeException,
                lambda: get_files(
                    os.path.join(
                        d,
                        "foo.shp")))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that including an SLD with a valid shapefile results in the SLD
        # getting picked up
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.shp", "foo.shx", "foo.prj", "foo.dbf", "foo.sld"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            gotten_files = get_files(os.path.join(d, "foo.shp"))
            gotten_files = dict((k, v[len(d) + 1:])
                                for k, v in gotten_files.iteritems())
            self.assertEquals(
                gotten_files,
                dict(
                    shp="foo.shp",
                    shx="foo.shx",
                    prj="foo.prj",
                    dbf="foo.dbf",
                    sld="foo.sld"))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that capitalized extensions are ok
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.SHP", "foo.SHX", "foo.PRJ", "foo.DBF"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            gotten_files = get_files(os.path.join(d, "foo.SHP"))
            gotten_files = dict((k, v[len(d) + 1:])
                                for k, v in gotten_files.iteritems())
            self.assertEquals(gotten_files, dict(shp="foo.SHP", shx="foo.SHX",
                                                 prj="foo.PRJ", dbf="foo.DBF"))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that mixed capital and lowercase extensions are ok
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.SHP", "foo.shx", "foo.pRJ", "foo.DBF"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            gotten_files = get_files(os.path.join(d, "foo.SHP"))
            gotten_files = dict((k, v[len(d) + 1:])
                                for k, v in gotten_files.iteritems())
            self.assertEquals(gotten_files, dict(shp="foo.SHP", shx="foo.shx",
                                                 prj="foo.pRJ", dbf="foo.DBF"))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that including both capital and lowercase extensions raises an
        # exception
        d = None
        try:
            d = tempfile.mkdtemp()
            files = (
                "foo.SHP",
                "foo.SHX",
                "foo.PRJ",
                "foo.DBF",
                "foo.shp",
                "foo.shx",
                "foo.prj",
                "foo.dbf")
            for f in files:
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            # Only run the tests if this is a case sensitive OS
            if len(os.listdir(d)) == len(files):
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.SHP")))
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.shp")))

        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that including both capital and lowercase PRJ raises an
        # exception (this case is special-cased in the implementation)
        d = None
        try:
            d = tempfile.mkdtemp()
            files = ("foo.SHP", "foo.SHX", "foo.PRJ", "foo.DBF", "foo.prj")
            for f in files:
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            # Only run the tests if this is a case sensitive OS
            if len(os.listdir(d)) == len(files):
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.SHP")))
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.shp")))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that including both capital and lowercase SLD raises an
        # exception (this case is special-cased in the implementation)
        d = None
        try:
            d = tempfile.mkdtemp()
            files = (
                "foo.SHP",
                "foo.SHX",
                "foo.PRJ",
                "foo.DBF",
                "foo.SLD",
                "foo.sld")
            for f in files:
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            # Only run the tests if this is a case sensitive OS
            if len(os.listdir(d)) == len(files):
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.SHP")))
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.shp")))
        finally:
            if d is not None:
                shutil.rmtree(d)

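Note: each block in the test above pairs tempfile.mkdtemp() with shutil.rmtree() in a finally clause. On Python 3, tempfile.TemporaryDirectory wraps that pairing (it calls shutil.rmtree on exit); a minimal sketch of one case rewritten that way is shown below, assuming get_files and the assertions from the example above.

import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    for name in ("foo.shp", "foo.shx", "foo.prj", "foo.dbf"):
        open(os.path.join(d, name), "w").close()   # create empty component files
    # gotten_files = get_files(os.path.join(d, "foo.shp"))
    # ... assertions as in the original test ...
# the temporary directory is removed here, even if an assertion fails
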
Example 17

Project: hydroshare
Source File: receivers.py
View license
@receiver(pre_add_files_to_resource, sender=GeographicFeatureResource)
def geofeature_pre_add_files_to_resource(sender, **kwargs):

    res_obj = kwargs['resource']
    res_id = res_obj.short_id
    files = kwargs['files']
    fed_res_fnames = kwargs['fed_res_file_names']

    validate_files_dict = kwargs['validate_files']
    if files and fed_res_fnames:
        validate_files_dict['are_files_valid'] = False
        validate_files_dict['message'] = 'Please upload files from ' \
                                         'either local disk or irods, not both.'
        return

    ori_file_info = res_obj.metadata.originalfileinfo.all().first()
    some_new_files_added = True

    file_info_list = []  # [[full_name1, full_path1], [full_name2, full_path2], ...]
    for f in files:
        f_info = [f.name, f.file.name]
        file_info_list.append(f_info)
    if fed_res_fnames:
        # copy all irods files to django server to extract metadata
        irods_file_path_list = utils.get_fed_zone_files(fed_res_fnames)
        fed_tmpfile_name_list = []
        for file_path in irods_file_path_list:
            fed_tmpfile_name_list.append(file_path)
            file_full_name = file_path[file_path.rfind('/')+1:]
            f_info = [file_full_name, file_path]
            file_info_list.append(f_info)

    try:
        if ori_file_info and ResourceFile.objects.filter(object_id=res_obj.id).count() > 0:
            # just add non-required files (not shp, shx or dbf)
            crt_f_str = ori_file_info.filenameString
            for f_info in file_info_list:
                new_f_fullname = f_info[0].lower()
                new_f_name, new_f_ext = os.path.splitext(new_f_fullname)

                if new_f_ext in [".shp", ".shx", ".dbf"]:
                    validate_files_dict['are_files_valid'] = False
                    validate_files_dict['message'] = "No more shp, shx, dbf files can be added."
                    some_new_files_added = False
                    break
                elif (new_f_name != ori_file_info.baseFilename) and \
                        (not (new_f_name == ori_file_info.
                         baseFilename + ".shp" and new_f_ext == ".xml")):
                    # need to check whether it is ShapefileBaseName.shp.xml
                    validate_files_dict['are_files_valid'] = False
                    validate_files_dict['message'] = "At least one file does not " \
                                                     "follow the ESRI Shapefile naming " \
                                                     "convention."
                    some_new_files_added = False
                    break
                elif crt_f_str.find(new_f_fullname) != -1:
                    validate_files_dict['are_files_valid'] = False
                    validate_files_dict['message'] = "At least one file already exists."
                    some_new_files_added = False
                    break
            if some_new_files_added:
                ori_fn_dict = json.loads(ori_file_info.filenameString)
                for f_info in file_info_list:
                    new_f_fullname = f_info[0].lower()
                    ori_fn_dict[new_f_fullname] = "new"
                res_obj.metadata.update_element('OriginalFileInfo', element_id=ori_file_info.id,
                                                filenameString=json.dumps(ori_fn_dict))
        else:  # all files have been removed, start it over
            files_type = check_uploaded_files_type(file_info_list)
            tmp_dir = None
            uploaded_file_type = None
            baseFilename = None
            uploadedFileCount = 0
            uploadedFilenameString = None
            shp_full_path = None
            validate_files_dict['are_files_valid'] = files_type['are_files_valid']
            validate_files_dict['message'] = files_type['message']

            if validate_files_dict['are_files_valid']:

                res_obj.metadata.originalfileinfo.all().delete()
                res_obj.metadata.geometryinformation.all().delete()
                res_obj.metadata.fieldinformation.all().delete()
                res_obj.metadata.originalcoverage.all().delete()
                res_obj.metadata.coverages.all().delete()

                tmp_dir = files_type['tmp_dir']
                baseFilename = files_type['baseFilename']
                uploaded_file_type = files_type['uploaded_file_type']
                uploadedFileCount = files_type['uploadedFileCount']
                uploadedFilenameString = files_type['uploadedFilenameString']
                shp_full_path = files_type['shp_full_path']
                shp_xml_full_path = files_type['shp_xml_full_path']
                if uploaded_file_type == "shp" or uploaded_file_type == "zipped_shp":
                    meta_array, meta_dict = parse_shp_zshp(uploaded_file_type,
                                                           baseFilename,
                                                           uploadedFileCount,
                                                           uploadedFilenameString,
                                                           shp_full_path)

                # create metadata objects
                if "coverage" in meta_dict.keys():
                    coverage_dict = meta_dict["coverage"]['Coverage']
                    res_obj.metadata.create_element('Coverage', type=coverage_dict['type'],
                                                    value=coverage_dict['value'])

                originalfileinfo_dict = meta_dict["originalfileinfo"]
                res_obj.metadata.\
                    create_element('OriginalFileInfo',
                                   fileType=originalfileinfo_dict['fileType'],
                                   baseFilename=originalfileinfo_dict['baseFilename'],
                                   fileCount=originalfileinfo_dict['fileCount'],
                                   filenameString=originalfileinfo_dict['filenameString'])

                originalcoverage_dict = meta_dict["originalcoverage"]['originalcoverage']
                res_obj.metadata.\
                    create_element('OriginalCoverage',
                                   northlimit=originalcoverage_dict['northlimit'],
                                   southlimit=originalcoverage_dict['southlimit'],
                                   westlimit=originalcoverage_dict['westlimit'],
                                   eastlimit=originalcoverage_dict['eastlimit'],
                                   projection_string=originalcoverage_dict['projection_string'],
                                   projection_name=originalcoverage_dict['projection_name'],
                                   datum=originalcoverage_dict['datum'],
                                   unit=originalcoverage_dict['unit'])

                field_info_array = meta_dict["field_info_array"]
                for field_info in field_info_array:
                    field_info_dict = field_info["fieldinformation"]
                    res_obj.metadata.\
                        create_element('FieldInformation',
                                       fieldName=field_info_dict['fieldName'],
                                       fieldType=field_info_dict['fieldType'],
                                       fieldTypeCode=field_info_dict['fieldTypeCode'],
                                       fieldWidth=field_info_dict['fieldWidth'],
                                       fieldPrecision=field_info_dict['fieldPrecision'])

                geometryinformation_dict = meta_dict["geometryinformation"]
                res_obj.metadata.\
                    create_element('GeometryInformation',
                                   featureCount=geometryinformation_dict['featureCount'],
                                   geometryType=geometryinformation_dict['geometryType'])

                shp_xml_metadata_list = parse_shp_xml(shp_xml_full_path)
                for shp_xml_metadata in shp_xml_metadata_list:
                    if 'description' in shp_xml_metadata:
                        # overwrite existing description metadata
                        if res_obj.metadata.description:
                            res_obj.metadata.description.delete()
                        res_obj.metadata.create_element('description',
                                                        abstract=shp_xml_metadata
                                                        ['description']['abstract'])
                    elif 'title' in shp_xml_metadata:
                        # overwrite existing title metadata
                        if res_obj.metadata.title:
                            res_obj.metadata.title.delete()
                        res_obj.metadata.create_element('title',
                                                        value=shp_xml_metadata
                                                        ['title']['value'])
                    elif 'subject' in shp_xml_metadata:
                        # append new keywords to existing keywords
                        existing_keywords = [subject.value for
                                             subject in res_obj.metadata.subjects.all()]
                        if shp_xml_metadata['subject']['value'] not in existing_keywords:
                            res_obj.metadata.create_element('subject',
                                                            value=shp_xml_metadata
                                                            ['subject']['value'])

                if uploaded_file_type == "zipped_shp":
                        if fed_res_fnames:
                            # remove the temp zip file retrieved from federated zone
                            if fed_tmpfile_name_list:
                                shutil.rmtree(os.path.dirname(fed_tmpfile_name_list[0]))
                            del kwargs['fed_res_file_names'][:]
                        del kwargs['files'][:]
                        kwargs['files'].extend(files_type["files_new"])

            else:
                validate_files_dict['are_files_valid'] = False
                validate_files_dict['message'] = "Invalid files uploaded. " \
                                                 "Please note the three mandatory files " \
                                                 "(.shp, .shx, .dbf) of ESRI Shapefiles " \
                                                 "should be uploaded at the same time " \
                                                 "(or in a zip file)."
            if tmp_dir is not None:
                shutil.rmtree(tmp_dir)
            # remove all temp files retrieved from federated zone
            if fed_res_fnames and fed_tmpfile_name_list:
                for file_path in fed_tmpfile_name_list:
                    shutil.rmtree(os.path.dirname(file_path))
    except Exception as ex:
        logger.exception("geofeature_pre_add_files_to_resource: {0}. Error:{1} ".
                         format(res_id, ex.message))
        validate_files_dict['are_files_valid'] = False
        validate_files_dict['message'] = "Invalid files uploaded. " \
                                         "Please note the three mandatory files " \
                                         "(.shp, .shx, .dbf) of ESRI Shapefiles should " \
                                         "be uploaded at the same time (or in a zip file)."

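Note: the cleanup loop at the end of the receiver above calls shutil.rmtree(os.path.dirname(file_path)) once per temporary file; if two files share a parent directory, the second call would raise. Below is a minimal sketch that deduplicates the parents first; the helper name is illustrative and not part of HydroShare.

import os
import shutil

def remove_temp_parents(file_paths):
    # Remove each distinct parent directory of the given temporary files.
    for parent in {os.path.dirname(p) for p in file_paths}:
        shutil.rmtree(parent, ignore_errors=True)

# e.g. remove_temp_parents(fed_tmpfile_name_list)
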
Example 18

Project: aym-cms
Source File: build.py
View license
def main():
    # retrieving default context dictionary from settings
    context = settings.CONTEXT
    deploy_dir = settings.DEPLOY_DIR
    tmp_dir = settings.TMP_DIR
    
    print u"Removing existing deploy dir, if any..."
    shutil.rmtree(deploy_dir,ignore_errors=True)
    
    print u"Removing existing temp dir, if any.."
    shutil.rmtree(tmp_dir,ignore_errors=True)

    print u"Creating deploy/ dir..."
    os.mkdir(deploy_dir)

    print u"Creating temp directory at '%s'" % tmp_dir
    os.mkdir(tmp_dir)

    print u"Copying contents of static/ into deploy/static..."
    deploy_static_dir = os.path.join(deploy_dir,'static')
    os.mkdir(deploy_static_dir)

    static_dir = settings.STATIC_DIR
    compress = settings.YUI_COMPRESSOR
    hss = settings.HSS_PATH

    for filename in os.listdir(static_dir):
        before_ext, ext = os.path.splitext(filename)
        if filename.startswith(".") or filename.endswith("~"):
            print u"Ignored '%s'" % filename
        elif hss and ext in (".hss",):
            in_path = os.path.join(static_dir, filename)
            out_path = os.path.join(deploy_static_dir, filename)
            filename = u"%s.css" % os.path.splitext(filename)[0]
            print u"Compiling HSS to CSS, compressing and copying %s to deploy/static" % filename
            s,o=commands.getstatusoutput(u"%s %s -output %s/" % (hss, in_path, deploy_static_dir))
            if s > 0: print o
            if compress:
                print u"Compressing %s" % filename
                s,o=commands.getstatusoutput(u"java -jar %s %s > %s" % (compress, in_path, out_path))
                if s > 0: print o
                
        elif settings.USE_CLEVER_CSS and ext in (settings.CLEVER_CSS_EXT):
            print u"Compiling via CleverCSS, and copying '%s' to deploy/static/" % filename
            import clevercss
            in_path = os.path.join(static_dir, filename)
            tmp_path = os.path.join(tmp_dir, u"%s.css" % before_ext)
            fin = open(in_path, 'r')
            data = fin.read()
            fin.close()
            fout = open(tmp_path,'w')
            fout.write(clevercss.convert(data))
            fout.close()
            out_path = os.path.join(deploy_static_dir, filename)
            if compress:
                commands.getoutput(u"java -jar %s %s > %s" % (compress, tmp_path, out_path))
            else:
                commands.getoutput(u"mv %s %s" % (tmp_path, out_path))
        elif compress and ext in (".js",".css"):
            print u"Compressing and copying '%s' to deploy/static/" % filename
            in_path = os.path.join(static_dir, filename)
            out_path = os.path.join(deploy_static_dir, filename)
            commands.getoutput(u"java -jar %s %s > %s" % (compress, in_path, out_path))
        else:
            print u"Copying '%s' to deploy/static/" % filename
            in_path = os.path.join(static_dir, filename)
            out_path = os.path.join(deploy_static_dir, filename)
            commands.getoutput(u"cp %s %s" % (in_path, out_path))
            
    print u"Copying and creating thumbnails for files in images/..."
    deploy_thumb_path = os.path.join(deploy_static_dir,'thumbnail')
    deploy_image_path = os.path.join(deploy_static_dir,'image')
    os.mkdir(deploy_thumb_path)
    os.mkdir(deploy_image_path)

    images = []
    images_dict = {}
    images_dir = settings.IMAGES_DIR
    thumb_format = settings.STATIC_THUMBNAIL_FORMAT
    image_format = settings.STATIC_IMAGE_FORMAT
    thumbnail_dimensions = settings.THUMBNAIL_SIZE

    for filename in os.listdir(images_dir):
        # only process if ends with image file extension
        before_ext,ext = os.path.splitext(filename)
        if ext not in (".png",".jpg",".jpeg"):
            continue

        print u"Copying and thumbnailing %s..." % filename
        filepath = os.path.join(images_dir,filename)
        im = Image.open(filepath)
        im.save(os.path.join(deploy_image_path, filename),ext[1:].upper())
        im.thumbnail(thumbnail_dimensions, Image.ANTIALIAS)
        im.save(os.path.join(deploy_thumb_path, filename), ext[1:].upper())

        # create dict with image data 
        image_dict = {}
        image_dict['filename'] = filename
        image_dict['thumbnail'] = thumb_format % filename
        image_dict['image'] = image_format % filename

        images.append(image_dict)
        # before_ext is 'hello' in 'hello.png'
        images_dict[before_ext] = image_dict

    context['images'] = images
    context['images_dict'] = images_dict

    print u"Rendering pages..."
    pages = settings.PAGES_TO_RENDER
    for page in pages:
        print u"Rendering %s..." % page
        rendered = render_to_string(page,context)
        page_path = os.path.join(deploy_dir,page)
        fout = open(page_path,'w')
        fout.write(rendered)
        fout.close()

    # removing temp directory
    print u"Removing temp directory..."
    shutil.rmtree(tmp_dir,ignore_errors=True)

    # completed build script
    print u"Done running build.py."

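Note: the build script above passes ignore_errors=True so that a missing deploy or temp directory is not an error. When rmtree fails for other reasons (typically read-only files on Windows), an onerror callback can clear the read-only bit and retry instead of silently skipping. A minimal sketch follows; the deploy path is illustrative, and on Python 3.12+ the onexc parameter supersedes onerror.

import os
import shutil
import stat

def _force_remove(func, path, exc_info):
    # onerror handler: clear the read-only bit and retry the failed removal.
    os.chmod(path, stat.S_IWRITE)
    func(path)

deploy_dir = "deploy"              # illustrative path
if os.path.isdir(deploy_dir):
    shutil.rmtree(deploy_dir, onerror=_force_remove)
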
Example 19

Project: shinken
Source File: cli.py
View license
def install_package(pname, raw, update_only=False):
    if update_only:
        logger.debug('UPDATE ONLY ENABLED')
    logger.debug("Installing the package %s (size:%d)", pname, len(raw))
    if len(raw) == 0:
        logger.error('The package %s cannot be found', pname)
        sys.exit(2)
        return
    tmpdir = os.path.join(tempfile.gettempdir(), pname)
    logger.debug("Unpacking the package into %s", tmpdir)

    if os.path.exists(tmpdir):
        logger.debug("Removing previous tmp dir %s", tmpdir)
        shutil.rmtree(tmpdir)
    logger.debug("Creating temporary dir %s", tmpdir)
    os.mkdir(tmpdir)

    package_content = []

    # open a file with the content
    f = StringIO(raw)
    tar_file = tarfile.open(fileobj=f, mode="r")
    logger.debug("Tar file contents:")
    for i in tar_file.getmembers():
        path = i.name
        if path == '.':
            continue
        if path.startswith('/') or '..' in path:
            logger.error("SECURITY: the path %s seems dangerous!", path)
            sys.exit(2)
            return
        # Adding all files into the package_content list
        package_content.append( {'name':i.name, 'mode':i.mode, 'type':i.type, 'size':i.size} )
        logger.debug("\t%s", path)
    # Extract all in the tmpdir
    tar_file.extractall(tmpdir)
    tar_file.close()

    # Now we look at the package.json that will give us our name and co
    package_json_p = os.path.join(tmpdir, 'package.json')
    if not os.path.exists(package_json_p):
        logger.error("Error : bad archive : Missing file %s", package_json_p)
        sys.exit(2)
        return None
    package_json = read_package_json(open(package_json_p))
    logger.debug("Package.json content %s ", package_json)

    modules_dir = CONFIG['paths']['modules']
    share_dir   = CONFIG['paths']['share']
    packs_dir   = CONFIG['paths']['packs']
    etc_dir     = CONFIG['paths']['etc']
    doc_dir     = CONFIG['paths']['doc']
    inventory_dir     = CONFIG['paths']['inventory']
    libexec_dir     = CONFIG['paths'].get('libexec', os.path.join(CONFIG['paths']['lib'], 'libexec'))
    test_dir   = CONFIG['paths'].get('test', '/__DONOTEXISTS__')
    for d in (modules_dir, share_dir, packs_dir, doc_dir, inventory_dir):
        if not os.path.exists(d):
            logger.error("The installation directory %s is missing!", d)
            sys.exit(2)
            return

    # Now install the package from $TMP$/share/* to $SHARE$/*
    p_share  = os.path.join(tmpdir, 'share')
    logger.debug("TMPDIR:%s share_dir:%s pname:%s", tmpdir, share_dir, pname)
    if os.path.exists(p_share):
        logger.info("Installing the share package data")
        # shutil will do the create dir
        _copytree(p_share, share_dir)
        logger.info("Copy done in the share directory %s", share_dir)


    logger.debug("TMPDIR:%s modules_dir:%s pname:%s", tmpdir, modules_dir, pname)
    # Now install the package from $TMP$/module/* to $MODULES$/pname/*
    p_module = os.path.join(tmpdir, 'module')
    if os.path.exists(p_module):
        logger.info("Installing the module package data")
        mod_dest = os.path.join(modules_dir, pname)
        if os.path.exists(mod_dest):
            logger.info("Removing previous module install at %s", mod_dest)

            shutil.rmtree(mod_dest)
        # shutil will do the create dir
        shutil.copytree(p_module, mod_dest)
        logger.info("Copy done in the module directory %s", mod_dest)


    p_doc  = os.path.join(tmpdir, 'doc')
    logger.debug("TMPDIR:%s doc_dir:%s pname:%s", tmpdir, doc_dir, pname)
    # Now install the package from $TMP$/doc/* to $MODULES$/doc/source/89_packages/pname/*
    if os.path.exists(p_doc):
        logger.info("Installing the doc package data")
        doc_dest = os.path.join(doc_dir, 'source', '89_packages', pname)
        if os.path.exists(doc_dest):
            logger.info("Removing previous doc install at %s", doc_dest)

            shutil.rmtree(doc_dest)
        # shutil will do the create dir
        shutil.copytree(p_doc, doc_dest)
        logger.info("Copy done in the doc directory %s", doc_dest)

        
    if not update_only:
        # Now install the pack from $TMP$/pack/* to $PACKS$/pname/*
        p_pack = os.path.join(tmpdir, 'pack')
        if os.path.exists(p_pack):
            logger.info("Installing the pack package data")
            pack_dest = os.path.join(packs_dir, pname)
            if os.path.exists(pack_dest):
                logger.info("Removing previous pack install at %s", pack_dest)
                shutil.rmtree(pack_dest)
            # shutil will do the create dir
            shutil.copytree(p_pack, pack_dest)
            logger.info("Copy done in the pack directory %s", pack_dest)

        # Now install the etc from $TMP$/etc/* to $ETC$/etc/*
        p_etc = os.path.join(tmpdir, 'etc')
        if os.path.exists(p_etc):
            logger.info("Merging the etc package data into your etc directory")
            # We don't use shutil.copytree here because it requires etc_dir to be
            # non-existent, so we merge the trees with our own _copytree instead.
            _copytree(p_etc, etc_dir)
            logger.info("Copy done in the etc directory %s", etc_dir)

    # Now install the tests from $TMP$/tests/* to $TESTS$/tests/*
    # if the test directory is specified in the configuration file (optional)
    p_tests = os.path.join(tmpdir, 'test')
    if os.path.exists(p_tests) and os.path.exists(test_dir):
        logger.info("Merging the test package data into your test directory")
        # As above, we merge with our own _copytree because shutil.copytree
        # requires the destination directory to be non-existent.
        logger.debug("COPYING %s into %s", p_tests, test_dir)
        _copytree(p_tests, test_dir)
        logger.info("Copy done in the test directory %s", test_dir)

    # Now install the libexec things from $TMP$/libexec/* to $LIBEXEC$/*
    # but also chmod a+x the plugins copied
    p_libexec = os.path.join(tmpdir, 'libexec')
    if os.path.exists(p_libexec) and os.path.exists(libexec_dir):
        logger.info("Merging the libexec package data into your libexec directory")
        logger.debug("COPYING %s into %s", p_libexec, libexec_dir)
        # Before be sure all files in there are +x
        _chmodplusx(p_libexec)
        _copytree(p_libexec, libexec_dir)
        logger.info("Copy done in the libexec directory %s", libexec_dir)


    # then save the package.json into the inventory dir
    p_inv = os.path.join(inventory_dir, pname)
    if not os.path.exists(p_inv):
        os.mkdir(p_inv)
    shutil.copy2(package_json_p, os.path.join(p_inv, 'package.json'))
    # and the package content
    cont = open(os.path.join(p_inv, 'content.json'), 'w')
    cont.write(json.dumps(package_content))
    cont.close()
    
    # We now clean (rm) the tmpdir we don't need any more
    try:
        shutil.rmtree(tmpdir, ignore_errors=True)
        # cannot remove? not a crime
    except OSError:
        pass

    # THE END, output all is OK :D
    cprint('OK ', 'green', end='')
    cprint('%s' % pname)

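Note: install_package above removes any stale tmpdir up front and cleans it again at the very end, but the early sys.exit calls can leave a partially extracted tmpdir behind. Below is a minimal sketch of the same flow with the cleanup in a finally clause; install_from stands in for the body of the function and is not part of the shinken CLI.

import os
import shutil
import tempfile

def install_with_cleanup(pname, raw, install_from):
    tmpdir = os.path.join(tempfile.gettempdir(), pname)
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)
    os.mkdir(tmpdir)
    try:
        install_from(tmpdir, raw)   # unpack the archive and copy files into place
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)
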
Example 20

Project: shinken
Source File: cli.py
View license
def install_package(pname, raw, update_only=False):
    if update_only:
        logger.debug('UPDATE ONLY ENABLED')
    logger.debug("Installing the package %s (size:%d)", pname, len(raw))
    if len(raw) == 0:
        logger.error('The package %s cannot be found', pname)
        sys.exit(2)
        return
    tmpdir = os.path.join(tempfile.gettempdir(), pname)
    logger.debug("Unpacking the package into %s", tmpdir)

    if os.path.exists(tmpdir):
        logger.debug("Removing previous tmp dir %s", tmpdir)
        shutil.rmtree(tmpdir)
    logger.debug("Creating temporary dir %s", tmpdir)
    os.mkdir(tmpdir)

    package_content = []

    # open a file with the content
    f = StringIO(raw)
    tar_file = tarfile.open(fileobj=f, mode="r")
    logger.debug("Tar file contents:")
    for i in tar_file.getmembers():
        path = i.name
        if path == '.':
            continue
        if path.startswith('/') or '..' in path:
            logger.error("SECURITY: the path %s seems dangerous!", path)
            sys.exit(2)
            return
        # Adding all files into the package_content list
        package_content.append( {'name':i.name, 'mode':i.mode, 'type':i.type, 'size':i.size} )
        logger.debug("\t%s", path)
    # Extract all in the tmpdir
    tar_file.extractall(tmpdir)
    tar_file.close()

    # Now we look at the package.json that will give us our name and co
    package_json_p = os.path.join(tmpdir, 'package.json')
    if not os.path.exists(package_json_p):
        logger.error("Error : bad archive : Missing file %s", package_json_p)
        sys.exit(2)
        return None
    package_json = read_package_json(open(package_json_p))
    logger.debug("Package.json content %s ", package_json)

    modules_dir = CONFIG['paths']['modules']
    share_dir   = CONFIG['paths']['share']
    packs_dir   = CONFIG['paths']['packs']
    etc_dir     = CONFIG['paths']['etc']
    doc_dir     = CONFIG['paths']['doc']
    inventory_dir     = CONFIG['paths']['inventory']
    libexec_dir     = CONFIG['paths'].get('libexec', os.path.join(CONFIG['paths']['lib'], 'libexec'))
    test_dir   = CONFIG['paths'].get('test', '/__DONOTEXISTS__')
    for d in (modules_dir, share_dir, packs_dir, doc_dir, inventory_dir):
        if not os.path.exists(d):
            logger.error("The installation directory %s is missing!", d)
            sys.exit(2)
            return

    # Now install the package from $TMP$/share/* to $SHARE$/*
    p_share  = os.path.join(tmpdir, 'share')
    logger.debug("TMPDIR:%s aahre_dir:%s pname:%s", tmpdir, share_dir, pname)
    if os.path.exists(p_share):
        logger.info("Installing the share package data")
        # shutil will do the create dir
        _copytree(p_share, share_dir)
        logger.info("Copy done in the share directory %s", share_dir)


    logger.debug("TMPDIR:%s modules_dir:%s pname:%s", tmpdir, modules_dir, pname)
    # Now install the package from $TMP$/module/* to $MODULES$/pname/*
    p_module = os.path.join(tmpdir, 'module')
    if os.path.exists(p_module):
        logger.info("Installing the module package data")
        mod_dest = os.path.join(modules_dir, pname)
        if os.path.exists(mod_dest):
            logger.info("Removing previous module install at %s", mod_dest)

            shutil.rmtree(mod_dest)
        # shutil will do the create dir
        shutil.copytree(p_module, mod_dest)
        logger.info("Copy done in the module directory %s", mod_dest)


    p_doc  = os.path.join(tmpdir, 'doc')
    logger.debug("TMPDIR:%s doc_dir:%s pname:%s", tmpdir, doc_dir, pname)
    # Now install the package from $TMP$/doc/* to $MODULES$/doc/source/89_packages/pname/*
    if os.path.exists(p_doc):
        logger.info("Installing the doc package data")
        doc_dest = os.path.join(doc_dir, 'source', '89_packages', pname)
        if os.path.exists(doc_dest):
            logger.info("Removing previous doc install at %s", doc_dest)

            shutil.rmtree(doc_dest)
        # shutil will do the create dir
        shutil.copytree(p_doc, doc_dest)
        logger.info("Copy done in the doc directory %s", doc_dest)

        
    if not update_only:
        # Now install the pack from $TMP$/pack/* to $PACKS$/pname/*
        p_pack = os.path.join(tmpdir, 'pack')
        if os.path.exists(p_pack):
            logger.info("Installing the pack package data")
            pack_dest = os.path.join(packs_dir, pname)
            if os.path.exists(pack_dest):
                logger.info("Removing previous pack install at %s", pack_dest)
                shutil.rmtree(pack_dest)
            # shutil will do the create dir
            shutil.copytree(p_pack, pack_dest)
            logger.info("Copy done in the pack directory %s", pack_dest)

        # Now install the etc from $TMP$/etc/* to $ETC$/etc/*
        p_etc = os.path.join(tmpdir, 'etc')
        if os.path.exists(p_etc):
            logger.info("Merging the etc package data into your etc directory")
            # We don't use shutils because it NEED etc_dir to be non existant...
            # Come one guys..... cp is not as terrible as this...
            _copytree(p_etc, etc_dir)
            logger.info("Copy done in the etc directory %s", etc_dir)

    # Now install the tests from $TMP$/tests/* to $TESTS$/tests/*
    # if the last one is specified on the configuration file (optionnal)
    p_tests = os.path.join(tmpdir, 'test')
    if os.path.exists(p_tests) and os.path.exists(test_dir):
        logger.info("Merging the test package data into your test directory")
        # We don't use shutils because it NEED etc_dir to be non existant...
        # Come one guys..... cp is not as terrible as this...
        logger.debug("COPYING %s into %s", p_tests, test_dir)
        _copytree(p_tests, test_dir)
        logger.info("Copy done in the test directory %s", test_dir)

    # Now install the libexec things from $TMP$/libexec/* to $LIBEXEC$/*
    # but also chmod a+x the plugins copied
    p_libexec = os.path.join(tmpdir, 'libexec')
    if os.path.exists(p_libexec) and os.path.exists(libexec_dir):
        logger.info("Merging the libexec package data into your libexec directory")
        logger.debug("COPYING %s into %s", p_libexec, libexec_dir)
        # Before be sure all files in there are +x
        _chmodplusx(p_libexec)
        _copytree(p_libexec, libexec_dir)
        logger.info("Copy done in the libexec directory %s", libexec_dir)


    # then samve the package.json into the inventory dir
    p_inv = os.path.join(inventory_dir, pname)
    if not os.path.exists(p_inv):
        os.mkdir(p_inv)
    shutil.copy2(package_json_p, os.path.join(p_inv, 'package.json'))
    # and the package content
    cont = open(os.path.join(p_inv, 'content.json'), 'w')
    cont.write(json.dumps(package_content))
    cont.close()
    
    # We now clean (rm) the tmpdir we don't need any more
    try:
        shutil.rmtree(tmpdir, ignore_errors=True)
        # cannot remove? not a crime
    except OSError:
        pass

    # THE END, output all is OK :D
    cprint('OK ', 'green', end='')
    cprint('%s' % pname)
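
The _copytree and _chmodplusx helpers referenced in the comments above exist because, before Python 3.8, shutil.copytree refuses to copy into a directory that already exists, which is exactly what a merge into the etc, test or libexec trees needs. Below is a minimal sketch of such a merging copy; it assumes nothing about the project's own helpers, and merge_copytree is an illustrative name:

import os
import shutil

def merge_copytree(src, dst):
    # Copy src into dst, creating dst if needed and overwriting files,
    # instead of requiring dst to be absent like classic shutil.copytree.
    if not os.path.isdir(dst):
        os.makedirs(dst)
    for name in os.listdir(src):
        s = os.path.join(src, name)
        d = os.path.join(dst, name)
        if os.path.isdir(s):
            merge_copytree(s, d)
        else:
            shutil.copy2(s, d)

On Python 3.8+ the same merge can be written directly as shutil.copytree(src, dst, dirs_exist_ok=True).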

Example 21

Project: nupic
Source File: opf_checkpoint_test.py
View license
  def _testSamePredictions(self, experiment, predSteps, checkpointAt,
                           predictionsFilename, additionalFields=None,
                           newSerialization=False):
    """ Test that we get the same predictions out from the following two
    scenarios:

    a_plus_b: Run the network for 'a' iterations followed by 'b' iterations
    a, followed by b: Run the network for 'a' iterations, save it, load it
                      back in, then run for 'b' iterations.

    Parameters:
    -----------------------------------------------------------------------
    experiment:   base directory of the experiment. This directory should
                    contain the following:
                        base.py
                        a_plus_b/description.py
                        a/description.py
                        b/description.py
                    The sub-directory description files should import the
                    base.py and only change the first and last record used
                    from the data file.
    predSteps:   Number of steps ahead predictions are for
    checkpointAt: Number of iterations that 'a' runs for.
                 IMPORTANT: This must match the number of records that
                 a/description.py runs for - it is NOT dynamically stuffed into
                 the a/description.py.
    predictionsFilename: The name of the predictions file that the OPF
                  generates for this experiment (for example
                  'DefaulTask.NontemporalMultiStep.predictionLog.csv')
    newSerialization: Whether to use new capnproto serialization.
    """

    # Get the 3 sub-experiment directories
    aPlusBExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a_plus_b")
    aExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a")
    bExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "b")

    # Run a+b
    args = self._createExperimentArgs(aPlusBExpDir,
                                      newSerialization=newSerialization)
    _aPlusBExp = runExperiment(args)

    # Run a, then copy the saved checkpoint into the b directory
    args = self._createExperimentArgs(aExpDir,
                                      newSerialization=newSerialization)
    _aExp = runExperiment(args)
    if os.path.exists(os.path.join(bExpDir, 'savedmodels')):
      shutil.rmtree(os.path.join(bExpDir, 'savedmodels'))
    shutil.copytree(src=os.path.join(aExpDir, 'savedmodels'),
                    dst=os.path.join(bExpDir, 'savedmodels'))

    args = self._createExperimentArgs(bExpDir,
                                      newSerialization=newSerialization,
                                      additionalArgs=['--load=DefaultTask'])
    _bExp = runExperiment(args)

    # Now, compare the predictions at the end of a+b to those in b.
    aPlusBPred = FileRecordStream(os.path.join(aPlusBExpDir, 'inference',
                                   predictionsFilename))
    bPred = FileRecordStream(os.path.join(bExpDir, 'inference',
                                   predictionsFilename))

    colNames = [x[0] for x in aPlusBPred.getFields()]
    actValueColIdx = colNames.index('multiStepPredictions.actual')
    predValueColIdx = colNames.index('multiStepPredictions.%d' % (predSteps))

    # Skip past the 'a' records in aPlusB
    for i in range(checkpointAt):
      aPlusBPred.next()

    # Now, read through the records that don't have predictions yet
    for i in range(predSteps):
      aPlusBPred.next()
      bPred.next()

    # Now, compare predictions in the two files
    rowIdx = checkpointAt + predSteps + 4 - 1
    epsilon = 0.0001
    while True:
      rowIdx += 1
      try:
        rowAPB = aPlusBPred.next()
        rowB = bPred.next()

        # Compare actuals
        self.assertEqual(rowAPB[actValueColIdx], rowB[actValueColIdx],
              "Mismatch in actual values: row %d of a+b has %s and row %d of "
              "b has %s" % (rowIdx, rowAPB[actValueColIdx], rowIdx-checkpointAt,
                            rowB[actValueColIdx]))

        # Compare predictions, within nearest epsilon
        predAPB = eval(rowAPB[predValueColIdx])
        predB = eval(rowB[predValueColIdx])

        # Sort with highest probabilities first
        predAPB = [(a, b) for b, a in predAPB.items()]
        predB = [(a, b) for b, a in predB.items()]
        predAPB.sort(reverse=True)
        predB.sort(reverse=True)

        if additionalFields is not None:
          for additionalField in additionalFields:
            fieldIdx = colNames.index(additionalField)
            self.assertEqual(rowAPB[fieldIdx], rowB[fieldIdx],
              "Mismatch in field \'%s\' values: row %d of a+b has value: (%s)\n"
              " and row %d of b has value: %s" % \
              (additionalField, rowIdx, rowAPB[fieldIdx],
                rowIdx-checkpointAt, rowB[fieldIdx]))

        self.assertEqual(len(predAPB), len(predB),
              "Mismatch in predicted values: row %d of a+b has %d predictions: "
              "\n  (%s) and row %d of b has %d predictions:\n  (%s)" % \
              (rowIdx, len(predAPB), predAPB, rowIdx-checkpointAt, len(predB),
               predB))

        for i in range(len(predAPB)):
          (aProb, aValue) = predAPB[i]
          (bProb, bValue) = predB[i]
          self.assertLess(abs(aValue-bValue), epsilon,
              "Mismatch in predicted values: row %d of a+b predicts value %s "
              "and row %d of b predicts %s" % (rowIdx, aValue,
                                               rowIdx-checkpointAt, bValue))
          self.assertLess(abs(aProb-bProb), epsilon,
              "Mismatch in probabilities: row %d of a+b predicts %s with "
              "probability %s and row %d of b predicts %s with probability %s" \
               % (rowIdx, aValue, aProb, rowIdx-checkpointAt, bValue, bProb))

      except StopIteration:
        break

    # clean up model checkpoint directories
    shutil.rmtree(getCheckpointParentDir(aExpDir))
    shutil.rmtree(getCheckpointParentDir(bExpDir))
    shutil.rmtree(getCheckpointParentDir(aPlusBExpDir))

    print "Predictions match!"

Example 22

Project: stdeb
Source File: cli_runner.py
View license
def runit(cmd,usage):
    if cmd not in ['sdist_dsc','bdist_deb']:
        raise ValueError('unknown command %r'%cmd)
    # process command-line options
    bool_opts = map(translate_longopt, stdeb_cmd_bool_opts)
    parser = FancyGetopt(stdeb_cmdline_opts+[
        ('help', 'h', "show detailed help message"),
        ])
    optobj = OptObj()
    args = parser.getopt(object=optobj)
    for option in optobj.__dict__:
        value = getattr(optobj,option)
        is_string = type(value) == str
        if option in bool_opts and is_string:
            setattr(optobj, option, strtobool(value))

    if hasattr(optobj,'help'):
        print(usage)
        parser.set_option_table(stdeb_cmdline_opts)
        parser.print_help("Options:")
        return 0

    if len(args)!=1:
        log.error('not given single argument (distfile), args=%r', args)
        print(usage)
        return 1

    sdist_file = args[0]

    final_dist_dir = optobj.__dict__.get('dist_dir','deb_dist')
    tmp_dist_dir = os.path.join(final_dist_dir,'tmp_py2dsc')
    if os.path.exists(tmp_dist_dir):
        shutil.rmtree(tmp_dist_dir)
    os.makedirs(tmp_dist_dir)

    if not os.path.isfile(sdist_file):
        log.error("Package %s not found."%sdist_file)
        sys.exit(1)

    patch_file = optobj.__dict__.get('patch_file',None)
    patch_level = int(optobj.__dict__.get('patch_level',0))
    patch_posix = int(optobj.__dict__.get('patch_posix',0))

    expand_dir = os.path.join(tmp_dist_dir,'stdeb_tmp')
    if os.path.exists(expand_dir):
        shutil.rmtree(expand_dir)
    if not os.path.exists(tmp_dist_dir):
        os.mkdir(tmp_dist_dir)
    os.mkdir(expand_dir)

    expand_sdist_file(os.path.abspath(sdist_file),cwd=expand_dir)



    # now the sdist package is expanded in expand_dir
    expanded_root_files = os.listdir(expand_dir)
    assert len(expanded_root_files)==1
    repackaged_dirname = expanded_root_files[0]
    fullpath_repackaged_dirname = os.path.join(tmp_dist_dir,repackaged_dirname)
    base_dir = os.path.join(expand_dir,expanded_root_files[0])
    if os.path.exists(fullpath_repackaged_dirname):
        # prevent weird build errors if this dir exists
        shutil.rmtree(fullpath_repackaged_dirname)
    os.renames(base_dir, fullpath_repackaged_dirname)
    del base_dir # no longer useful

    ##############################################
    if patch_file is not None:
        log.info('py2dsc applying patch %s', patch_file)
        apply_patch(patch_file,
                    posix=patch_posix,
                    level=patch_level,
                    cwd=fullpath_repackaged_dirname)
        patch_already_applied = 1
    else:
        patch_already_applied = 0
    ##############################################


    abs_dist_dir = os.path.abspath(final_dist_dir)

    extra_args = []
    for long in parser.long_opts:
        if long in ['dist-dir=','patch-file=']:
            continue # dealt with by this invocation
        attr = parser.get_attr_name(long).rstrip('=')
        if hasattr(optobj,attr):
            val = getattr(optobj,attr)
            if attr=='extra_cfg_file':
                val = os.path.abspath(val)
            if long in bool_opts or long.replace('-', '_') in bool_opts:
                extra_args.append('--%s' % long)
            else:
                extra_args.append('--'+long+str(val))

    if patch_already_applied == 1:
        extra_args.append('--patch-already-applied')

    if cmd=='bdist_deb':
        extra_args.append('bdist_deb')

    args = [sys.executable,'setup.py','--command-packages','stdeb.command',
            'sdist_dsc','--dist-dir=%s'%abs_dist_dir,
            '--use-premade-distfile=%s'%os.path.abspath(sdist_file)]+extra_args

    log.info('-='*35 + '-')
#    print >> sys.stderr, '-='*20
#    print >> sys.stderr, "Note that the .cfg file(s), if present, have not "\
#          "been read at this stage. If options are necessary, pass them from "\
#          "the command line"
    log.info("running the following command in directory: %s\n%s",
             fullpath_repackaged_dirname, ' '.join(args))
    log.info('-='*35 + '-')

    try:
        returncode = subprocess.call(
            args,cwd=fullpath_repackaged_dirname,
            )
    except:
        log.error('ERROR running: %s', ' '.join(args))
        log.error('ERROR in %s', fullpath_repackaged_dirname)
        raise

    if returncode:
        log.error('ERROR running: %s', ' '.join(args))
        log.error('ERROR in %s', fullpath_repackaged_dirname)
        #log.error('   stderr: %s'res.stderr.read())
        #print >> sys.stderr, 'ERROR running: %s'%(' '.join(args),)
        #print >> sys.stderr, res.stderr.read()
        return returncode
        #raise RuntimeError('returncode %d'%returncode)
    #result = res.stdout.read().strip()

    shutil.rmtree(tmp_dist_dir)
    return returncode
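
Both tmp_dist_dir and expand_dir above follow the same "start from an empty directory" idiom: rmtree any leftovers from a previous run, then recreate the directory. A minimal sketch of that idiom (fresh_dir is an illustrative name, not part of stdeb):

import os
import shutil

def fresh_dir(path):
    # Guarantee an empty directory at `path`, removing anything a previous
    # run may have left behind.
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)
    return path

Note that the final shutil.rmtree(tmp_dist_dir) above only runs when the setup.py subprocess returns zero, so a failed build leaves the scratch tree in place for inspection.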

Example 23

Project: PHEnix
Source File: vcf2fasta.py
View license
def main(args):
    """
    Process VCF files and merge them into a single fasta file.
    """

    contigs = list()

    empty_tree = FastRBTree()

    exclude = {}
    include = {}

    if args["tmp"]:
        out_dir = os.path.join(args["tmp"])
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
    else:
        out_dir = tempfile.gettempdir()

    if args["reference"]:
        ref_seq = OrderedDict()
        with open(args["reference"]) as fp:
            for record in SeqIO.parse(fp, "fasta"):
                ref_seq[record.id] = list(record.seq)

        args["reference"] = ref_seq

    if args["exclude"] or args["include"]:
        pos = {}
        chr_pos = []
        bed_file = args["include"] if args["include"] is not None else args["exclude"]

        with open(bed_file) as fp:
            for line in fp:
                data = line.strip().split("\t")

                chr_pos += [ (i, False,) for i in xrange(int(data[1]), int(data[2]) + 1)]

                if data[0] not in pos:
                    pos[data[0]] = []

                pos[data[0]] += chr_pos

        pos = {chrom: FastRBTree(l) for chrom, l in pos.items()}

        if args["include"]:
            include = pos
        else:
            exclude = pos


    if args["directory"] is not None and args["input"] is None:
        regexp = args["regexp"] if args["regexp"] else "*.vcf"
        args["input"] = glob.glob(os.path.join(args["directory"], regexp))

    if not args["input"]:
        logging.warn("No VCFs found.")
        return 0


    # If stats were requested, open the output file and write the header
    if args["with_stats"] is not None:
        args["with_stats"] = open(args["with_stats"], "wb")
        args["with_stats"].write("contig,position,mutations,n_frac,n_gaps\n")


    parallel_reader = ParallelVCFReader(args["input"])

    sample_seqs = { sample_name: tempfile.NamedTemporaryFile(prefix=sample_name, dir=out_dir) for sample_name in parallel_reader.get_samples() }
    sample_seqs["reference"] = tempfile.NamedTemporaryFile(prefix="reference", dir=out_dir)

    samples = parallel_reader.get_samples() + ["reference"]
    sample_stats = {sample: BaseStats() for sample in samples }
    last_base = 0

    total_records = 0
    guesstimate_records = guess_total_records(args["input"])

    for chrom, pos, records in parallel_reader:
        total_records += 1

        log_progress(total_records, guesstimate_records)

        final_records = pick_best_records(records)
        reference = [ record.REF for record in final_records.itervalues() if record.REF != "N"]
        valid = not reference or reference.count(reference[0]) == len(reference)

        # Make sure reference is the same across all samples.
        assert valid, "Position %s is not valid as multiple references found: %s" % (pos, reference)

        if not reference:
            continue
        else:
            reference = reference[0]

        # SKIP (or include) any pre-specified regions.
        if include and pos not in include.get(chrom, empty_tree) or exclude and pos in exclude.get(chrom, empty_tree):
            continue

        position_data = {"reference": str(reference), "stats": BaseStats()}

        for sample_name, record in final_records.iteritems():

            position_data["stats"].total += 1

            # IF this is uncallable genotype, add gap "-"
            if record.is_uncallable:
                # TODO: Mentioned in issue: #7(gitlab)
                position_data[sample_name] = "-"

                # Update stats
                position_data["stats"].gap += 1


            elif not record.FILTER:
                # If filter PASSED!
                # Make sure the reference base is the same. Maybe a vcf from different species snuck in here?!
                assert str(record.REF) == position_data["reference"] or str(record.REF) == 'N' or position_data["reference"] == 'N', "SOMETHING IS REALLY WRONG because reference for the same position is DIFFERENT! %s in %s (%s, %s)" % (record.POS, sample_name, str(record.REF), position_data["reference"])
                # update position_data['reference'] to a real base if possible
                if position_data['reference'] == 'N' and str(record.REF) != 'N':
                    position_data['reference'] = str(record.REF)
                if record.is_snp:
                    if len(record.ALT) > 1:
                        logging.info("POS %s passed filters but has multiple alleles REF: %s, ALT: %s. Inserting N", record.POS, str(record.REF), str(record.ALT))
                        position_data[sample_name] = "N"
                        position_data["stats"].N += 1

                    else:
                        position_data[sample_name] = str(record.ALT[0])

                        position_data["stats"].mut += 1

            # Filter(s) failed
            elif record.is_snp and is_above_min_depth(record):
                if args["with_mixtures"]:
                    extended_code = get_mixture(record, args["with_mixtures"])
                else:
                    extended_code = "N"

                if extended_code == "N":
                    position_data["stats"].N += 1
                elif extended_code in ["A", "C", "G", "T"]:
                    position_data["stats"].mut += 1
                else:
                    position_data["stats"].mix += 1

                position_data[sample_name] = extended_code

            else:
                # filter fail; code as N for consistency
                position_data[sample_name] = "N"
                position_data["stats"].N += 1

            # Filter columns when the threshold reaches the user-specified value.
            if isinstance(args["column_Ns"], float) and float(position_data["stats"].N) / len(args["input"]) > args["column_Ns"]:
                break
#                 del position_data[sample_name]

            if isinstance(args["column_gaps"], float) and float(position_data["stats"].gap) / len(args["input"]) > args["column_gaps"]:
                break
#                 del position_data[sample_name]

        # this is not an if-else it's a for-else, it really is!
        else:
            if args["reference"]:
                seq = _make_ref_insert(last_base, pos, args["reference"][chrom], exclude.get(chrom, empty_tree))
                for sample in samples:
#                     sample_seqs[sample] += seq
                    sample_seqs[sample].write(''.join(seq))

            for i, sample_name in enumerate(samples):
                sample_base = position_data.get(sample_name, reference)

#                 sample_seqs[sample_name] += [sample_base]
                sample_seqs[sample_name].write(sample_base)
                sample_stats[sample_name].update(position_data, sample_name, reference)

            if args["with_stats"] is not None:
                args["with_stats"].write("%s,%i,%0.5f,%0.5f,%0.5f\n" % (chrom,
                                             pos,
                                             float(position_data["stats"].mut) / len(args["input"]),
                                             float(position_data["stats"].N) / len(args["input"]),
                                             float(position_data["stats"].gap) / len(args["input"]))
                         )

            last_base = pos

    # Fill from last snp to the end of reference.
    # FIXME: A little naughty to use chrom outside the loop!
    if args["reference"]:
        seq = _make_ref_insert(last_base, None, args["reference"][chrom], exclude.get(chrom, empty_tree))
        for sample in samples:
#             sample_seqs[sample] += seq
            sample_seqs[sample].write(''.join(seq))

    sample_seqs["reference"].seek(0)
    reference = sample_seqs["reference"].next()
    sample_seqs["reference"].close()
    del sample_seqs["reference"]

    bSamplesExcluded = False

    # Exclude any samples with high Ns or gaps
    if isinstance(args["sample_Ns"], float):
        for sample_name in samples:
            if sample_name == "reference":
                continue
            n_fraction = float(sample_stats[sample_name].N) / sample_stats[sample_name].total
            if n_fraction > args["sample_Ns"]:
                logging.info("Removing %s due to high sample Ns fraction %s", sample_name, n_fraction)

                sample_seqs[sample_name].close()
                del sample_seqs[sample_name]
                del sample_stats[sample_name]
                bSamplesExcluded = True

    # Exclude any samples with high gap fraction.
    if isinstance(args["sample_gaps"], float):
        for sample_name in samples:
            if sample_name == "reference" or sample_name not in sample_stats:
                continue

            gap_fractoin = float(sample_stats[sample_name].gap) / sample_stats[sample_name].total
            if gap_fractoin > args["sample_gaps"]:
                logging.info("Removing %s due to high sample gaps fraction %s", sample_name, gap_fractoin)

                sample_seqs[sample_name].close()
                del sample_seqs[sample_name]
                del sample_stats[sample_name]
                bSamplesExcluded = True

    try:
        assert len(sample_seqs) > 0, "All samples have been filtered out."

        reference_length = len(reference)

        dAlign = {}
        dAlign['reference'] = reference
        for sample_name, tmp_iter in sample_seqs.iteritems():
            tmp_iter.seek(0)
            # These are dumped as a single long string of data. Calling next() should read it all.
            snp_sequence = tmp_iter.next()
            assert len(snp_sequence) == reference_length, "Sample %s has length %s, but should be %s (reference)" % (sample_name, len(snp_sequence), reference_length)

            dAlign[sample_name] = snp_sequence

        # if samples were excluded we need to filter the alignment for all equal positions,
        # because we might just have removed the sequence with the difference
        while bSamplesExcluded:
            dFinalAlign = {} #  this is for the new alignment
            # initialise those as empty
            for sample_name in dAlign.keys():
                dFinalAlign[sample_name] = ''
                sample_stats[sample_name] = BaseStats()
            # for all positions in the current alignment
            for i in range(len(dAlign['reference'])):
                # initialise empty stats for this position
                pos_stats = BaseStats()
                # get list of all nucs at this position
                ith_nucs = [seq[i] for seq in dAlign.values()]
                # check if all elements in the list are the same
                if ith_nucs.count(ith_nucs[0]) != len(ith_nucs):
                    # they are not all the same
                    # for all samples and seqs update position stats
                    for sample_name, seq in dAlign.iteritems():
                        if seq[i] == 'N':
                            pos_stats.N +=1
                        elif seq[i] == '-':
                            pos_stats.gap +=1
                        elif seq[i] != dAlign['reference'][i]:
                            pos_stats.mut +=1
                        else:
                            pass
                        pos_stats.total += 1

                    # check if we need to remove this column
                    bRmCol = False
                    if isinstance(args["column_gaps"], float):
                        gap_fractoin = float(pos_stats.gap) / pos_stats.total
                        if gap_fractoin > args["column_gaps"]:
                            bRmCol = True
                    if isinstance(args["column_Ns"], float):
                        n_fraction = float(pos_stats.N) / pos_stats.total
                        if n_fraction > args["column_Ns"]:
                            bRmCol = True

                    # remove col if necessary
                    if bRmCol == False:
                        # we don't remove it
                        for sample_name, seq in dAlign.iteritems():
                            dFinalAlign[sample_name] += seq[i]
                            # only update sample stats now that we have decided to keep the column
                            sample_stats[sample_name].total += 1
                            if seq[i] == 'N':
                                sample_stats[sample_name].N += 1
                            elif seq[i] == '-':
                                sample_stats[sample_name].gap += 1
                            elif seq[i] != dAlign['reference'][i]:
                                sample_stats[sample_name].mut += 1
                            else:
                                pass
                    else:
                        # we are removing it
                        logging.info("Removing column %i due to high Ns or gaps fraction, gaps: %s, Ns: %s", i, gap_fractoin, n_fraction)
                else:
                    # all nucleotides at this position are the same
                    pass

            # check all seqs are of the same lengths still
            seq_lens = [len(seq) for seq in dFinalAlign.values()]
            assert seq_lens.count(seq_lens[0]) == len(seq_lens), "ERROR: Not all samples in final alignment are equally long!"

            # check if additional samples need to be removed
            bSamplesExcluded = False
            for sample_name in dFinalAlign.keys():
                n_fraction = float(sample_stats[sample_name].N) / seq_lens[0]
                if n_fraction > args["sample_Ns"]:
                    logging.info("Removing %s due to high sample Ns fraction %s", sample_name, n_fraction)
                    bSamplesExcluded = True
                    del dFinalAlign[sample_name]
                    del sample_stats[sample_name]

            for sample_name in dFinalAlign.keys():
                gap_fractoin = float(sample_stats[sample_name].gap) / seq_lens[0]
                if gap_fractoin > args["sample_gaps"]:
                    logging.info("Removing %s due to high sample gaps fraction %s", sample_name, gap_fractoin)
                    bSamplesExcluded = True
                    del dFinalAlign[sample_name]
                    del sample_stats[sample_name]

            # in case we need to go again ...
            dAlign = dFinalAlign

        with open(args["out"], "w") as fp:
            # write seqs to file
            for name, seq in dAlign.iteritems():
                fp.write(">%s\n%s\n" % (name, seq))

    except AssertionError as e:
        logging.error(e.message)

        # Need to delete the malformed file.
        os.unlink(args["out"])

    finally:
        # Close all the tmp handles.
        for tmp_iter in sample_seqs.itervalues():
            tmp_iter.close()

        # Only remove tmp if it was specified.
        if args["tmp"]:
            shutil.rmtree(out_dir)

        if args["with_stats"] is not None:
            args["with_stats"].close()

    # Compute the stats.
    for sample in sample_stats:
        if sample != "reference":
            print "%s\t%s" % (sample, str(sample_stats[sample]))

#         if CAN_STATS:
#             plot_stats(avail_pos, len(samples) - 1, plots_dir=os.path.abspath(args["plots_dir"]))

    return 0
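
Here shutil.rmtree only runs in the finally block, and only when the caller supplied a tmp directory; temporary files created under tempfile.gettempdir() are otherwise left to the NamedTemporaryFile handles to delete on close. A minimal sketch of the general "scratch directory that is always removed" pattern, independent of this script's argument handling (with_scratch_dir and the prefix are illustrative):

import shutil
import tempfile

def with_scratch_dir(work):
    # Run `work(out_dir)` with a private scratch directory and always remove
    # the directory afterwards, even if `work` raises.
    out_dir = tempfile.mkdtemp(prefix="vcf2fasta_")
    try:
        return work(out_dir)
    finally:
        shutil.rmtree(out_dir, ignore_errors=True)

On Python 3.2+ the same thing is usually spelled with tempfile.TemporaryDirectory() as a context manager, which calls shutil.rmtree on exit.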

Example 24

Project: script.extendedinfo
Source File: process.py
View license
def start_info_actions(info, params):
    if "artistname" in params:
        params["artistname"] = params.get("artistname", "").split(" feat. ")[0].strip()
        params["artist_mbid"] = utils.fetch_musicbrainz_id(params["artistname"])
    utils.log(info)
    utils.pp(params)
    if "prefix" in params and not params["prefix"].endswith('.'):
        params["prefix"] = params["prefix"] + '.'

    # Audio
    if info == 'discography':
        discography = AudioDB.get_artist_discography(params["artistname"])
        if not discography:
            discography = LastFM.get_artist_albums(params.get("artist_mbid"))
        return discography
    elif info == 'mostlovedtracks':
        return AudioDB.get_most_loved_tracks(params["artistname"])
    elif info == 'trackdetails':
        return AudioDB.get_track_details(params.get("id", ""))
    elif info == 'topartists':
        return LastFM.get_top_artists()
    #  The MovieDB
    elif info == 'incinemamovies':
        return tmdb.get_movies("now_playing")
    elif info == 'upcomingmovies':
        return tmdb.get_movies("upcoming")
    elif info == 'topratedmovies':
        return tmdb.get_movies("top_rated")
    elif info == 'popularmovies':
        return tmdb.get_movies("popular")
    elif info == 'ratedmovies':
        return tmdb.get_rated_media_items("movies")
    elif info == 'starredmovies':
        return tmdb.get_fav_items("movies")
    elif info == 'accountlists':
        account_lists = tmdb.handle_lists(tmdb.get_account_lists())
        for item in account_lists:
            item.set_property("directory", True)
        return account_lists
    elif info == 'listmovies':
        return tmdb.get_movies_from_list(params["id"])
    elif info == 'airingtodaytvshows':
        return tmdb.get_tvshows("airing_today")
    elif info == 'onairtvshows':
        return tmdb.get_tvshows("on_the_air")
    elif info == 'topratedtvshows':
        return tmdb.get_tvshows("top_rated")
    elif info == 'populartvshows':
        return tmdb.get_tvshows("popular")
    elif info == 'ratedtvshows':
        return tmdb.get_rated_media_items("tv")
    elif info == 'ratedepisodes':
        return tmdb.get_rated_media_items("tv/episodes")
    elif info == 'starredtvshows':
        return tmdb.get_fav_items("tv")
    elif info == 'similarmovies':
        movie_id = params.get("id")
        if not movie_id:
            movie_id = tmdb.get_movie_tmdb_id(imdb_id=params.get("imdb_id"),
                                              dbid=params.get("dbid"))
        if movie_id:
            return tmdb.get_similar_movies(movie_id)
    elif info == 'similartvshows':
        tvshow_id = None
        dbid = params.get("dbid")
        name = params.get("name")
        tmdb_id = params.get("tmdb_id")
        tvdb_id = params.get("tvdb_id")
        imdb_id = params.get("imdb_id")
        if tmdb_id:
            tvshow_id = tmdb_id
        elif dbid and int(dbid) > 0:
            tvdb_id = local_db.get_imdb_id("tvshow", dbid)
            if tvdb_id:
                tvshow_id = tmdb.get_show_tmdb_id(tvdb_id)
        elif tvdb_id:
            tvshow_id = tmdb.get_show_tmdb_id(tvdb_id)
        elif imdb_id:
            tvshow_id = tmdb.get_show_tmdb_id(imdb_id, "imdb_id")
        elif name:
            tvshow_id = tmdb.search_media(media_name=name,
                                          year="",
                                          media_type="tv")
        if tvshow_id:
            return tmdb.get_similar_tvshows(tvshow_id)
    elif info == 'studio':
        if params.get("id"):
            return tmdb.get_company_data(params["id"])
        elif params.get("studio"):
            company_data = tmdb.search_companies(params["studio"])
            if company_data:
                return tmdb.get_company_data(company_data[0]["id"])
    elif info == 'set':
        if params.get("dbid"):
            name = local_db.get_set_name(params["dbid"])
            if name:
                params["setid"] = tmdb.get_set_id(name)
        if params.get("setid"):
            set_data, _ = tmdb.get_set_movies(params["setid"])
            return set_data
    elif info == 'movielists':
        movie_id = params.get("id")
        if not movie_id:
            movie_id = tmdb.get_movie_tmdb_id(imdb_id=params.get("imdb_id"),
                                              dbid=params.get("dbid"))
        if movie_id:
            return tmdb.get_movie_lists(movie_id)
    elif info == 'keywords':
        movie_id = params.get("id")
        if not movie_id:
            movie_id = tmdb.get_movie_tmdb_id(imdb_id=params.get("imdb_id"),
                                              dbid=params.get("dbid"))
        if movie_id:
            return tmdb.get_keywords(movie_id)
    elif info == 'trailers':
        movie_id = params.get("id")
        if not movie_id:
            movie_id = tmdb.get_movie_tmdb_id(imdb_id=params.get("imdb_id"),
                                              dbid=params.get("dbid"))
        if movie_id:
            return tmdb.handle_videos(tmdb.get_movie_videos(movie_id))
    elif info == 'popularpeople':
        return tmdb.get_popular_actors()
    elif info == 'personmovies':
        person = tmdb.get_person_info(person_label=params.get("person"),
                                      skip_dialog=True)
        if person and person.get("id"):
            movies = tmdb.get_person_movies(person["id"])
            if not movies:
                return None
            for item in movies:
                del item["credit_id"]
            return movies.reduce(key="department")
    elif info == 'traktsimilarmovies':
        if params.get("id") or params.get("dbid"):
            if params.get("dbid"):
                movie_id = local_db.get_imdb_id("movie", params["dbid"])
            else:
                movie_id = params["id"]
            return Trakt.get_similar("movie", movie_id)
    elif info == 'traktsimilartvshows':
        if params.get("id") or params.get("dbid"):
            if params.get("dbid"):
                if params.get("type") == "episode":
                    tvshow_id = local_db.get_tvshow_id_by_episode(params["dbid"])
                else:
                    tvshow_id = local_db.get_imdb_id(media_type="tvshow",
                                                     dbid=params["dbid"])
            else:
                tvshow_id = params["id"]
            return Trakt.get_similar("show", tvshow_id)
    elif info == 'airingepisodes':
        return Trakt.get_episodes("shows")
    elif info == 'premiereepisodes':
        return Trakt.get_episodes("premieres")
    elif info == 'trendingshows':
        return Trakt.get_shows("trending")
    elif info == 'popularshows':
        return Trakt.get_shows("popular")
    elif info == 'anticipatedshows':
        return Trakt.get_shows("anticipated")
    elif info == 'mostcollectedshows':
        return Trakt.get_shows_from_time("collected")
    elif info == 'mostplayedshows':
        return Trakt.get_shows_from_time("played")
    elif info == 'mostwatchedshows':
        return Trakt.get_shows_from_time("watched")
    elif info == 'trendingmovies':
        return Trakt.get_movies("trending")
    elif info == 'traktpopularmovies':
        return Trakt.get_movies("popular")
    elif info == 'mostplayedmovies':
        return Trakt.get_movies_from_time("played")
    elif info == 'mostwatchedmovies':
        return Trakt.get_movies_from_time("watched")
    elif info == 'mostcollectedmovies':
        return Trakt.get_movies_from_time("collected")
    elif info == 'mostanticipatedmovies':
        return Trakt.get_movies("anticipated")
    elif info == 'traktboxofficemovies':
        return Trakt.get_movies("boxoffice")
    elif info == 'similarartistsinlibrary':
        return local_db.get_similar_artists(params.get("artist_mbid"))
    elif info == 'trackinfo':
        addon.clear_global('%sSummary' % params.get("prefix", ""))
        if params["artistname"] and params["trackname"]:
            track_info = LastFM.get_track_info(artist_name=params["artistname"],
                                               track=params["trackname"])
            addon.set_global('%sSummary' % params.get("prefix", ""), track_info["summary"])
    elif info == 'topartistsnearevents':
        artists = local_db.get_artists()
        import BandsInTown
        return BandsInTown.get_near_events(artists[0:49])
    elif info == 'youtubesearchvideos':
        addon.set_global('%sSearchValue' % params.get("prefix", ""), params.get("id", ""))
        if params.get("id"):
            return youtube.search(search_str=params.get("id", ""),
                                  hd=params.get("hd"),
                                  orderby=params.get("orderby", "relevance"))
    elif info == 'youtubeplaylistvideos':
        return youtube.get_playlist_videos(params.get("id", ""))
    elif info == 'youtubeusersearchvideos':
        user_name = params.get("id")
        if user_name:
            playlists = youtube.get_user_playlists(user_name)
            return youtube.get_playlist_videos(playlists["uploads"])
    elif info == 'favourites':
        if params.get("id"):
            items = favs.get_favs_by_type(params["id"])
        else:
            items = favs.get_favs()
            addon.set_global('favourite.count', str(len(items)))
            if items:
                addon.set_global('favourite.1.name', items[-1]["label"])
        return items
    elif info == 'similarlocalmovies' and "dbid" in params:
        return local_db.get_similar_movies(params["dbid"])
    elif info == 'iconpanel':
        return favs.get_icon_panel(int(params["id"])), "IconPanel" + str(params["id"])
    # ACTIONS
    if params.get("handle"):
        xbmcplugin.setResolvedUrl(handle=int(params.get("handle")),
                                  succeeded=False,
                                  listitem=xbmcgui.ListItem())
    if info in ['playmovie', 'playepisode', 'playmusicvideo', 'playalbum', 'playsong']:
        kodijson.play_media(media_type=info.replace("play", ""),
                            dbid=params.get("dbid"),
                            resume=params.get("resume", "true"))
    elif info == "openinfodialog":
        if xbmc.getCondVisibility("System.HasModalDialog"):
            container_id = ""
        else:
            container_id = "Container(%s)" % utils.get_infolabel("System.CurrentControlId")
        dbid = utils.get_infolabel("%sListItem.DBID" % container_id)
        db_type = utils.get_infolabel("%sListItem.DBType" % container_id)
        if db_type == "movie":
            params = {"dbid": dbid,
                      "id": utils.get_infolabel("%sListItem.Property(id)" % container_id),
                      "name": utils.get_infolabel("%sListItem.Title" % container_id)}
            start_info_actions("extendedinfo", params)
        elif db_type == "tvshow":
            params = {"dbid": dbid,
                      "tvdb_id": utils.get_infolabel("%sListItem.Property(tvdb_id)" % container_id),
                      "id": utils.get_infolabel("%sListItem.Property(id)" % container_id),
                      "name": utils.get_infolabel("%sListItem.Title" % container_id)}
            start_info_actions("extendedtvinfo", params)
        elif db_type == "season":
            params = {"tvshow": utils.get_infolabel("%sListItem.TVShowTitle" % container_id),
                      "season": utils.get_infolabel("%sListItem.Season" % container_id)}
            start_info_actions("seasoninfo", params)
        elif db_type == "episode":
            params = {"tvshow": utils.get_infolabel("%sListItem.TVShowTitle" % container_id),
                      "season": utils.get_infolabel("%sListItem.Season" % container_id),
                      "episode": utils.get_infolabel("%sListItem.Episode" % container_id)}
            start_info_actions("extendedepisodeinfo", params)
        elif db_type in ["actor", "director"]:
            params = {"name": utils.get_infolabel("%sListItem.Label" % container_id)}
            start_info_actions("extendedactorinfo", params)
        else:
            utils.notify("Error", "Could not find valid content type")
    elif info == "ratedialog":
        if xbmc.getCondVisibility("System.HasModalDialog"):
            container_id = ""
        else:
            container_id = "Container(%s)" % utils.get_infolabel("System.CurrentControlId")
        dbid = utils.get_infolabel("%sListItem.DBID" % container_id)
        db_type = utils.get_infolabel("%sListItem.DBType" % container_id)
        if db_type == "movie":
            params = {"dbid": dbid,
                      "id": utils.get_infolabel("%sListItem.Property(id)" % container_id),
                      "type": "movie"}
            start_info_actions("ratemedia", params)
        elif db_type == "tvshow":
            params = {"dbid": dbid,
                      "id": utils.get_infolabel("%sListItem.Property(id)" % container_id),
                      "type": "tv"}
            start_info_actions("ratemedia", params)
        if db_type == "episode":
            params = {"tvshow": utils.get_infolabel("%sListItem.TVShowTitle" % container_id),
                      "season": utils.get_infolabel("%sListItem.Season" % container_id),
                      "type": "episode"}
            start_info_actions("ratemedia", params)
    elif info == 'youtubebrowser':
        wm.open_youtube_list(search_str=params.get("id", ""))
    elif info == 'moviedbbrowser':
        if addon.get_global('infodialogs.active'):
            return None
        addon.set_global('infodialogs.active', "true")
        search_str = params.get("id", "")
        if not search_str and params.get("search"):
            result = xbmcgui.Dialog().input(heading=addon.LANG(16017),
                                            type=xbmcgui.INPUT_ALPHANUM)
            if result and result > -1:
                search_str = result
            else:
                return None
        wm.open_video_list(search_str=search_str,
                           mode="search")
        addon.clear_global('infodialogs.active')
    elif info == 'extendedinfo':
        if addon.get_global('infodialogs.active'):
            return None
        addon.set_global('infodialogs.active', "true")
        wm.open_movie_info(movie_id=params.get("id"),
                           dbid=params.get("dbid"),
                           imdb_id=params.get("imdb_id"),
                           name=params.get("name"))
        addon.clear_global('infodialogs.active')
    elif info == 'extendedactorinfo':
        if addon.get_global('infodialogs.active'):
            return None
        addon.set_global('infodialogs.active', "true")
        wm.open_actor_info(actor_id=params.get("id"),
                           name=params.get("name"))
        addon.clear_global('infodialogs.active')
    elif info == 'extendedtvinfo':
        if addon.get_global('infodialogs.active'):
            return None
        addon.set_global('infodialogs.active', "true")
        wm.open_tvshow_info(tmdb_id=params.get("id"),
                            tvdb_id=params.get("tvdb_id"),
                            dbid=params.get("dbid"),
                            imdb_id=params.get("imdb_id"),
                            name=params.get("name"))
        addon.clear_global('infodialogs.active')
    elif info == 'seasoninfo':
        if addon.get_global('infodialogs.active'):
            return None
        addon.set_global('infodialogs.active', "true")
        wm.open_season_info(tvshow=params.get("tvshow"),
                            dbid=params.get("dbid"),
                            season=params.get("season"))
        addon.clear_global('infodialogs.active')
    elif info == 'extendedepisodeinfo':
        if addon.get_global('infodialogs.active'):
            return None
        addon.set_global('infodialogs.active', "true")
        wm.open_episode_info(tvshow=params.get("tvshow"),
                             tvshow_id=params.get("tvshow_id"),
                             dbid=params.get("dbid"),
                             episode=params.get("episode"),
                             season=params.get("season"))
        addon.clear_global('infodialogs.active')
    elif info == 'albuminfo':
        if params.get("id"):
            album_details = AudioDB.get_album_details(params.get("id"))
            utils.dict_to_windowprops(album_details, params.get("prefix", ""))
    elif info == 'artistdetails':
        artist_details = AudioDB.get_artist_details(params["artistname"])
        utils.dict_to_windowprops(artist_details, params.get("prefix", ""))
    elif info == 'ratemedia':
        media_type = params.get("type")
        if not media_type:
            return None
        if params.get("id"):
            tmdb_id = params["id"]
        elif media_type == "movie":
            tmdb_id = tmdb.get_movie_tmdb_id(imdb_id=params.get("imdb_id"),
                                             dbid=params.get("dbid"),
                                             name=params.get("name"))
        elif media_type == "tv" and params.get("dbid"):
            tvdb_id = local_db.get_imdb_id(media_type="tvshow",
                                           dbid=params["dbid"])
            tmdb_id = tmdb.get_show_tmdb_id(tvdb_id=tvdb_id)
        else:
            return False
        rating = utils.input_userrating()
        if rating == -1:
            return None
        tmdb.set_rating(media_type=media_type,
                        media_id=tmdb_id,
                        rating=rating,
                        dbid=params.get("dbid"))
    elif info == 'action':
        for builtin in params.get("id", "").split("$$"):
            xbmc.executebuiltin(builtin)
    elif info == "youtubevideo":
        xbmc.executebuiltin("Dialog.Close(all,true)")
        wm.play_youtube_video(params.get("id", ""))
    elif info == 'playtrailer':
        busy.show_busy()
        if params.get("id"):
            movie_id = params["id"]
        elif int(params.get("dbid", -1)) > 0:
            movie_id = local_db.get_imdb_id(media_type="movie",
                                            dbid=params["dbid"])
        elif params.get("imdb_id"):
            movie_id = tmdb.get_movie_tmdb_id(params["imdb_id"])
        else:
            movie_id = ""
        if movie_id:
            trailers = tmdb.get_movie_videos(movie_id)
            busy.hide_busy()
            time.sleep(0.1)
            if trailers:
                wm.play_youtube_video(trailers[0]["key"])
            elif params.get("title"):
                wm.open_youtube_list(search_str=params["title"])
            else:
                busy.hide_busy()
    elif info == 'deletecache':
        addon.clear_globals()
        for rel_path in os.listdir(addon.DATA_PATH):
            path = os.path.join(addon.DATA_PATH, rel_path)
            try:
                if os.path.isdir(path):
                    shutil.rmtree(path)
            except Exception as e:
                utils.log(e)
        utils.notify("Cache deleted")
    elif info == 'syncwatchlist':
        pass
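
The 'deletecache' branch walks the add-on's data directory and removes each subdirectory with shutil.rmtree, logging any failure rather than aborting, so a single locked folder does not stop the rest of the cache from being cleared. A minimal standalone sketch of that loop (clear_cache, data_path and the logging setup are illustrative stand-ins for the add-on's own utilities):

import logging
import os
import shutil

logger = logging.getLogger(__name__)

def clear_cache(data_path):
    # Remove every subdirectory below data_path; plain files are left alone,
    # and failures are logged instead of raised.
    for rel_path in os.listdir(data_path):
        path = os.path.join(data_path, rel_path)
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
        except OSError as exc:
            logger.warning("could not remove %s: %s", path, exc)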

Example 25

Project: quality-assessment-protocol
Source File: cli.py
View license
def _run_workflow(args):

    # build pipeline for each subject, individually
    # ~ 5 min 20 sec per subject
    # (roughly 320 seconds)

    import os
    import os.path as op
    import sys

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    import nipype.interfaces.utility as util
    import nipype.interfaces.fsl.maths as fsl

    import glob

    import time
    from time import strftime
    from nipype import config as nyconfig

    resource_pool, config, subject_info, run_name, site_name = args
    sub_id = str(subject_info[0])

    qap_type = config['qap_type']

    if subject_info[1]:
        session_id = subject_info[1]
    else:
        session_id = "session_0"

    if subject_info[2]:
        scan_id = subject_info[2]
    else:
        scan_id = "scan_0"

    # Read and apply general settings in config
    keep_outputs = config.get('write_all_outputs', False)
    output_dir = op.join(config["output_directory"], run_name,
                         sub_id, session_id, scan_id)

    try:
        os.makedirs(output_dir)
    except:
        if not op.isdir(output_dir):
            err = "[!] Output directory unable to be created.\n" \
                  "Path: %s\n\n" % output_dir
            raise Exception(err)
        else:
            pass

    log_dir = output_dir

    # set up logging
    nyconfig.update_config(
        {'logging': {'log_directory': log_dir, 'log_to_file': True}})
    logging.update_logging(nyconfig)

    # take date+time stamp for run identification purposes
    unique_pipeline_id = strftime("%Y%m%d%H%M%S")
    pipeline_start_stamp = strftime("%Y-%m-%d_%H:%M:%S")

    pipeline_start_time = time.time()

    logger.info("Pipeline start time: %s" % pipeline_start_stamp)
    logger.info("Contents of resource pool:\n" + str(resource_pool))
    logger.info("Configuration settings:\n" + str(config))

    # for QAP spreadsheet generation only
    config.update({"subject_id": sub_id, "session_id": session_id,
                   "scan_id": scan_id, "run_name": run_name})

    if site_name:
        config["site_name"] = site_name

    workflow = pe.Workflow(name=scan_id)
    workflow.base_dir = op.join(config["working_directory"], sub_id,
                                session_id)

    # set up crash directory
    workflow.config['execution'] = \
        {'crashdump_dir': config["output_directory"]}

    # update that resource pool with what's already in the output directory
    for resource in os.listdir(output_dir):
        if (op.isdir(op.join(output_dir, resource)) and
                resource not in resource_pool.keys()):
            resource_pool[resource] = glob.glob(op.join(output_dir,
                                                        resource, "*"))[0]

    # resource pool check
    invalid_paths = []

    for resource in resource_pool.keys():
        if not op.isfile(resource_pool[resource]):
            invalid_paths.append((resource, resource_pool[resource]))

    if len(invalid_paths) > 0:
        err = "\n\n[!] The paths provided in the subject list to the " \
              "following resources are not valid:\n"

        for path_tuple in invalid_paths:
            err = err + path_tuple[0] + ": " + path_tuple[1] + "\n"

        err = err + "\n\n"
        raise Exception(err)

    # start connecting the pipeline
    if 'qap_' + qap_type not in resource_pool.keys():
        from qap import qap_workflows as qw
        wf_builder = getattr(qw, 'qap_' + qap_type + '_workflow')
        workflow, resource_pool = wf_builder(workflow, resource_pool, config)

    # set up the datasinks
    new_outputs = 0

    out_list = set(['qap_' + qap_type])

    # Save reports to out_dir if necessary
    if config.get('write_report', False):
        out_list.add('qap_mosaic')
        # The functional temporal also has an FD plot
        if 'functional_temporal' in qap_type:
            out_list.add('qap_fd')

    if keep_outputs:
        for k in resource_pool.keys():
            out_list.add(k)

    for output in list(out_list):
        # we use a check for len()==2 here to select those items in the
        # resource pool which are tuples of (node, node_output), instead
        # of the items which are straight paths to files

        # resource pool items which are in the tuple format are the
        # outputs that have been created in this workflow because they
        # were not present in the subject list YML (the starting resource
        # pool) and had to be generated
        if len(resource_pool[output]) == 2:
            ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
            ds.inputs.base_directory = output_dir
            node, out_file = resource_pool[output]
            workflow.connect(node, out_file, ds, output)
            new_outputs += 1

    rt = {'id': sub_id, 'session': session_id, 'scan': scan_id,
          'status': 'started'}
    # run the pipeline (if there is anything to do)
    if new_outputs > 0:
        if config.get('write_graph', False):
            workflow.write_graph(
                dotfilename=op.join(output_dir, run_name + ".dot"),
                simple_form=False)

        nc_per_subject = config.get('num_cores_per_subject', 1)
        runargs = {'plugin': 'Linear', 'plugin_args': {}}
        if nc_per_subject > 1:
            runargs['plugin'] = 'MultiProc'
            runargs['plugin_args'] = {'n_procs': nc_per_subject}

        try:
            workflow.run(**runargs)
            rt['status'] = 'finished'
        except Exception as e:
            # ... however this is run inside a pool.map: do not raise Exception
            etype, evalue, etrace = sys.exc_info()
            tb = format_exception(etype, evalue, etrace)
            rt.update({'status': 'failed', 'msg': '%s' % e, 'traceback': tb})
            logger.error('An error occurred processing subject %s. '
                         'Runtime dict: %s\n%s' %
                         (rt['id'], rt, '\n'.join(rt['traceback'])))
    else:
        rt['status'] = 'cached'
        logger.info("\nEverything is already done for subject %s." % sub_id)

    # Remove working directory when done
    if not keep_outputs:
        try:
            work_dir = op.join(workflow.base_dir, scan_id)

            if op.exists(work_dir):
                import shutil
                shutil.rmtree(work_dir)
        except:
            logger.warn("Couldn\'t remove the working directory!")
            pass

    pipeline_end_stamp = strftime("%Y-%m-%d_%H:%M:%S")
    pipeline_end_time = time.time()
    logger.info("Elapsed time (minutes) since last start: %s"
                % ((pipeline_end_time - pipeline_start_time) / 60))
    logger.info("Pipeline end time: %s" % pipeline_end_stamp)
    return rt
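
The working-directory cleanup at the end wraps shutil.rmtree in a bare try/except so a failed removal only produces a warning. shutil.rmtree can express the same policy on its own through its onerror callback, which is invoked once per path that could not be removed. A sketch of that variant (remove_working_dir is an illustrative name, not part of the QAP code):

import logging
import os
import shutil

logger = logging.getLogger(__name__)

def remove_working_dir(base_dir, scan_id):
    # Best-effort removal of the per-scan working directory; every path that
    # cannot be removed is logged, and nothing is raised to the caller.
    work_dir = os.path.join(base_dir, scan_id)

    def _log_failure(func, path, exc_info):
        logger.warning("could not remove %s (%s)", path, exc_info[1])

    if os.path.exists(work_dir):
        shutil.rmtree(work_dir, onerror=_log_failure)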

Example 26

Project: ochopod
Source File: marathon.py
View license
    def boot(self, lifecycle, model=Reactive, tools=None, local=False):

        #
        # - quick check to make sure we get the right implementations
        #
        assert issubclass(model, Model), 'model must derive from ochopod.api.Model'
        assert issubclass(lifecycle, LifeCycle), 'lifecycle must derive from ochopod.api.LifeCycle'

        #
        # - instantiate our flask endpoint
        # - default to a json handler for all HTTP errors (including an unexpected 500)
        #
        def _handler(error):
            http = error.code if isinstance(error, HTTPException) else 500
            return '{}', http, {'Content-Type': 'application/json; charset=utf-8'}

        web = Flask(__name__)
        for code in default_exceptions.iterkeys():
            web.error_handler_spec[None][code] = _handler

        #
        # - default presets in case we run outside of marathon (local vm testing)
        # - any environment variable prefixed with "ochopod." is of interest for us (e.g this is what the user puts
        #   in the marathon application configuration for instance)
        # - the other settings come from marathon (namely the port bindings & application/task identifiers)
        # - the MESOS_TASK_ID is important to keep around to enable task deletion via the marathon REST API
        #
        env = \
            {
                'ochopod_application':  '',
                'ochopod_cluster':      'default',
                'ochopod_debug':        'true',
                'ochopod_local':        'false',
                'ochopod_namespace':    'marathon',
                'ochopod_port':         '8080',
                'ochopod_start':        'true',
                'ochopod_task':         '',
                'ochopod_zk':           '',
                'PORT_8080':            '8080'
            }

        env.update(os.environ)
        ochopod.enable_cli_log(debug=env['ochopod_debug'] == 'true')
        try:

            #
            # - grab our environment variables (which are set by the marathon executor)
            # - extract the mesos PORT_* bindings and construct a small remapping dict
            #
            ports = {}
            logger.debug('environment ->\n%s' % '\n'.join(['\t%s -> %s' % (k, v) for k, v in env.items()]))
            for key, val in env.items():
                if key.startswith('PORT_'):
                    ports[key[5:]] = int(val)

            #
            # - keep any "ochopod_" environment variable & trim its prefix
            # - default all our settings, especially the mandatory ones
            # - the ip and zookeeper are defaulted to localhost to enable easy testing
            #
            hints = {k[8:]: v for k, v in env.items() if k.startswith('ochopod_')}
            if local or hints['local'] == 'true':

                #
                # - we are running in local mode (e.g on a dev workstation)
                # - default everything to localhost
                #
                logger.info('running in local mode (make sure you run a standalone zookeeper)')
                hints.update(
                    {
                        'fwk':          'marathon (debug)',
                        'ip':           '127.0.0.1',
                        'node':         'local',
                        'ports':        ports,
                        'public':       '127.0.0.1',
                        'zk':           '127.0.0.1:2181'
                    })
            else:

                #
                # - extend our hints
                # - add the application + task
                #
                hints.update(
                    {
                        'application':  env['MARATHON_APP_ID'][1:],
                        'fwk':          'marathon',
                        'ip':           '',
                        'node':         '',
                        'ports':        ports,
                        'public':       '',
                        'task':         env['MESOS_TASK_ID'],
                        'zk':           ''
                    })

                #
                # - use whatever subclass is implementing us to infer 'ip', 'node' and 'public'
                #
                hints.update(self.get_node_details())

                #
                # - lookup for the zookeeper connection string from environment variable or on disk
                # - we have to look into different places depending on how mesos was installed
                #
                def _1():

                    #
                    # - most recent DCOS release
                    # - $MESOS_MASTER is located in /opt/mesosphere/etc/mesos-slave-common
                    # - the snippet in there is prefixed by MESOS_MASTER=zk://<ip:port>/mesos
                    #
                    logger.debug('checking /opt/mesosphere/etc/mesos-slave-common...')
                    _, lines = shell("grep MESOS_MASTER /opt/mesosphere/etc/mesos-slave-common")
                    return lines[0][13:]

                def _2():

                    #
                    # - same as above except for slightly older DCOS releases
                    # - $MESOS_MASTER is located in /opt/mesosphere/etc/mesos-slave
                    #
                    logger.debug('checking /opt/mesosphere/etc/mesos-slave...')
                    _, lines = shell("grep MESOS_MASTER /opt/mesosphere/etc/mesos-slave")
                    return lines[0][13:]

                def _3():

                    #
                    # - a regular package install will write the slave settings under /etc/mesos/zk (the snippet in
                    #   there looks like zk://10.0.0.56:2181/mesos)
                    #
                    logger.debug('checking /etc/mesos/zk...')
                    _, lines = shell("cat /etc/mesos/zk")
                    return lines[0]

                def _4():

                    #
                    # - look for ZK from environment variables
                    # - user can pass down ZK using $ochopod_zk
                    # - this last-resort situation is used mostly for debugging
                    #
                    logger.debug('checking $ochopod_zk environment variable...')
                    return env['ochopod_zk']

                #
                # - depending on how the slave has been installed we might have to look in various places
                #   to find out what our zookeeper connection string is
                # - use urlparse to keep the host:port part of the URL (possibly including a login+password)
                #
                for method in [_1, _2, _3, _4]:
                    try:
                        hints['zk'] = urlparse(method()).netloc
                        break

                    except Exception:
                        pass

            #
            # - the cluster must be fully qualified with a namespace (which is defaulted anyway)
            #
            assert hints['zk'], 'unable to determine where zookeeper is located (unsupported/bogus mesos setup ?)'
            assert hints['cluster'] and hints['namespace'], 'no cluster and/or namespace defined (user error ?)'

            #
            # - load the tools
            #
            if tools:
                tools = {tool.tag: tool for tool in [clz() for clz in tools if issubclass(clz, Tool)] if tool.tag}
                logger.info('supporting tools %s' % ', '.join(tools.keys()))

            #
            # - start the life-cycle actor which will pass our hints (as a json object) to its underlying sub-process
            # - start our coordinator which will connect to zookeeper and attempt to lead the cluster
            # - upon grabbing the lock the model actor will start and implement the configuration process
            # - the hints are a convenient bag for any data that may change at runtime and needs to be returned (via
            #   the HTTP POST /info request)
            # - what's being registered in zookeeper is immutable though and decorated with additional details by
            #   the coordinator (especially the pod index which is derived from zookeeper)
            #
            latch = ThreadingFuture()
            logger.info('starting %s.%s (marathon) @ %s' % (hints['namespace'], hints['cluster'], hints['node']))
            breadcrumbs = deepcopy(hints)
            hints['metrics'] = {}
            hints['dependencies'] = model.depends_on
            env.update({'ochopod': json.dumps(hints)})
            executor = lifecycle.start(env, latch, hints)
            coordinator = Coordinator.start(
                hints['zk'].split(','),
                hints['namespace'],
                hints['cluster'],
                int(hints['port']),
                breadcrumbs,
                model,
                hints)

            #
            # - external hook forcing a coordinator reset
            # - this will force a re-connection to zookeeper and pod registration
            # - please note this will not impact the pod lifecycle (e.g the underlying sub-process will be
            #   left running)
            #
            @web.route('/reset', methods=['POST'])
            def _reset():

                logger.debug('http in -> /reset')
                coordinator.tell({'request': 'reset'})
                return '{}', 200, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - external hook exposing information about our pod
            # - this is a subset of what's registered in zookeeper at boot-time
            # - the data is dynamic and updated from time to time by the model and executor actors
            # - from @pferro -> the pod's dependencies defined in the model are now added as well
            #
            @web.route('/info', methods=['POST'])
            def _info():

                logger.debug('http in -> /info')
                keys = \
                    [
                        'application',
                        'dependencies',
                        'ip',
                        'metrics',
                        'node',
                        'port',
                        'ports',
                        'process',
                        'public',
                        'state',
                        'status',
                        'task'
                    ]

                subset = dict(filter(lambda i: i[0] in keys, hints.iteritems()))
                return json.dumps(subset), 200, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - external hook exposing our circular log
            # - reverse and dump ochopod.log as a json array
            #
            @web.route('/log', methods=['POST'])
            def _log():

                logger.debug('http in -> /log')
                with open(ochopod.LOG, 'r+') as log:
                    lines = [line for line in log]
                    return json.dumps(lines), 200, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - RPC call to run a custom tool within the pod
            #
            @web.route('/exec', methods=['POST'])
            def _exec():

                logger.debug('http in -> /exec')

                #
                # - make sure the command (first token in the X-Shell header) maps to a tool
                # - if no match abort on a 404
                #
                line = request.headers['X-Shell']
                tokens = line.split(' ')
                cmd = tokens[0]
                if not tools or cmd not in tools:
                    return '{}', 404, {'Content-Type': 'application/json; charset=utf-8'}

                code = 1
                tool = tools[cmd]

                #
                # - make sure the parser does not sys.exit()
                #
                class _Parser(ArgumentParser):
                    def exit(self, status=0, message=None):
                        raise ValueError(message)

                #
                # - prep a temporary directory
                # - invoke define_cmdline_parsing()
                # - switch off parsing if NotImplementedError is raised
                #
                use_parser = 1
                parser = _Parser(prog=tool.tag)
                try:
                    tool.define_cmdline_parsing(parser)

                except NotImplementedError:
                    use_parser = 0

                tmp = tempfile.mkdtemp()
                try:

                    #
                    # - parse the command line
                    # - upload any attachment
                    #
                    args = parser.parse_args(tokens[1:]) if use_parser else ' '.join(tokens[1:])
                    for tag, upload in request.files.items():
                        where = path.join(tmp, tag)
                        logger.debug('uploading %s @ %s' % (tag, tmp))
                        upload.save(where)

                    #
                    # - run the tool method
                    # - pass the temporary directory as well
                    #
                    logger.info('invoking "%s"' % line)
                    code, lines = tool.body(args, tmp)

                except ValueError as failure:

                    lines = [parser.format_help() if failure.message is None else failure.message]

                except Exception as failure:

                    lines = ['unexpected failure -> %s' % failure]

                finally:

                    #
                    # - make sure to cleanup our temporary directory
                    #
                    shutil.rmtree(tmp)

                out = \
                    {
                        'code': code,
                        'stdout': lines
                    }

                return json.dumps(out), 200, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - web-hook used to receive requests from the leader or the CLI tools
            # - those requests are passed down to the executor actor
            # - any non HTTP 200 response is a failure
            # - failure to acknowledge within the specified timeout will result in a HTTP 408 (REQUEST TIMEOUT)
            # - attempting to send a control request to a dead pod will result in a HTTP 410 (GONE)
            #
            @web.route('/control/<task>', methods=['POST'])
            @web.route('/control/<task>/<timeout>', methods=['POST'])
            def _control(task, timeout='60'):

                logger.debug('http in -> /control/%s' % task)
                if task not in ['check', 'on', 'off', 'ok', 'kill', 'signal']:

                    #
                    # - fail on a HTTP 400 if the request is not supported
                    #
                    return '{}', 400, {'Content-Type': 'application/json; charset=utf-8'}

                try:

                    ts = time.time()
                    latch = ThreadingFuture()
                    executor.tell({'request': task, 'latch': latch, 'data': request.data})
                    js, code = latch.get(timeout=int(timeout))
                    ms = 1000 * (time.time() - ts)
                    logger.debug('http out -> HTTP %s (%d ms)' % (code, ms))
                    return json.dumps(js), code, {'Content-Type': 'application/json; charset=utf-8'}

                except Timeout:

                    #
                    # - we failed to match the specified timeout
                    # - gracefully fail on a HTTP 408
                    #
                    return '{}', 408, {'Content-Type': 'application/json; charset=utf-8'}

                except ActorDeadError:

                    #
                    # - the executor has been shutdown (probably after a /control/kill)
                    # - gracefully fail on a HTTP 410
                    #
                    return '{}', 410, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - internal hook required to shutdown the web-server
            # - it's not possible to do it outside of a request handler
            # - make sure this call only comes from localhost (todo)
            #
            @web.route('/terminate', methods=['POST'])
            def _terminate():

                request.environ.get('werkzeug.server.shutdown')()
                return '{}', 200, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - run werkzeug from a separate thread to avoid blocking the main one
            # - we'll have to shut it down using a dedicated HTTP POST
            #
            class _Runner(threading.Thread):

                def run(self):
                    web.run(host='0.0.0.0', port=int(hints['port']), threaded=True)

            try:

                #
                # - block on the lifecycle actor until it goes down (usually after a /control/kill request)
                #
                _Runner().start()
                spin_lock(latch)
                logger.debug('pod is dead, idling')
                while 1:

                    #
                    # - simply idle forever (since the framework would restart any container that terminates)
                    # - /log and /info HTTP requests will succeed (and show the pod as being killed)
                    # - any control request will now fail
                    #
                    time.sleep(60.0)

            finally:

                #
                # - when we exit the block first shutdown our executor (which may already be down)
                # - then shutdown the coordinator to un-register from zookeeper
                # - finally ask werkzeug to shutdown via a REST call
                #
                shutdown(executor)
                shutdown(coordinator)
                post('http://127.0.0.1:%s/terminate' % env['ochopod_port'])

        except KeyboardInterrupt:

            logger.fatal('CTRL-C pressed')

        except Exception as failure:

            logger.fatal('unexpected condition -> %s' % diagnostic(failure))
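
The /exec handler above shows the core pattern this page documents: create a scratch directory with tempfile.mkdtemp(), do the work inside a try block, and guarantee removal with shutil.rmtree() in the finally clause. A minimal, self-contained sketch of that pattern (the run_in_scratch_dir function and the attachment.bin file name are illustrative, not part of the project above):

import os
import shutil
import tempfile

def run_in_scratch_dir(payload):
    # isolated scratch directory for this one request
    tmp = tempfile.mkdtemp()
    try:
        # stage the payload inside the scratch directory and work on it
        where = os.path.join(tmp, 'attachment.bin')
        with open(where, 'wb') as f:
            f.write(payload)
        return os.path.getsize(where)
    finally:
        # always remove the scratch directory, even if the work failed
        shutil.rmtree(tmp)

print(run_in_scratch_dir(b'hello'))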

Example 27

Project: clam
Source File: clamdispatcher.py
View license
def main():
    if len(sys.argv) < 4:
        print("[CLAM Dispatcher] ERROR: Invalid syntax, use clamdispatcher.py [pythonpath] settingsmodule projectdir cmd arg1 arg2 ... got: " + " ".join(sys.argv[1:]), file=sys.stderr)
        with open('.done','w') as f:
            f.write(str(1))
        if os.path.exists('.pid'): os.unlink('.pid')
        return 1

    offset = 0
    if '/' in sys.argv[1]:
        #os.environ['PYTHONPATH'] = sys.argv[1]
        for path in sys.argv[1].split(':'):
            print("[CLAM Dispatcher] Adding to PYTHONPATH: " + path, file=sys.stderr)
            sys.path.append(path)
        offset = 1

    settingsmodule = sys.argv[1+offset]
    projectdir = sys.argv[2+offset]
    if projectdir == 'NONE': #Actions
        tmpdir = None
        projectdir = None
    elif projectdir.startswith('tmp://'): #Used for actions with a temporary dir
        tmpdir = projectdir[6:]
        projectdir = None
    else:
        if projectdir[-1] != '/':
            projectdir += '/'
        tmpdir = os.path.join(projectdir,'tmp')

    print("[CLAM Dispatcher] Started CLAM Dispatcher v" + str(VERSION) + " with " + settingsmodule + " (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ")", file=sys.stderr)

    cmd = sys.argv[3+offset]
    cmd = clam.common.data.unescapeshelloperators(cmd) #shell operators like pipes and redirects were passed in an escaped form
    if sys.version[0] == '2' and isinstance(cmd,str):
        cmd = unicode(cmd,'utf-8') #pylint: disable=undefined-variable
    for arg in sys.argv[4+offset:]:
        arg_u = clam.common.data.unescapeshelloperators(arg)
        if arg_u != arg:
            cmd += " " + arg_u #shell operator (pipe or something)
        else:
            cmd += " " + clam.common.data.shellsafe(arg,'"')


    if not cmd:
        print("[CLAM Dispatcher] FATAL ERROR: No command specified!", file=sys.stderr)
        if projectdir:
            f = open(projectdir + '.done','w')
            f.write(str(1))
            f.close()
            if os.path.exists(projectdir + '.pid'): os.unlink(projectdir + '.pid')
        return 1
    elif projectdir and not os.path.isdir(projectdir):
        print("[CLAM Dispatcher] FATAL ERROR: Project directory "+ projectdir + " does not exist", file=sys.stderr)
        f = open(projectdir + '.done','w')
        f.write(str(1))
        f.close()
        if os.path.exists(projectdir + '.pid'): os.unlink(projectdir + '.pid')
        return 1

    try:
        #exec("import " + settingsmodule + " as settings")
        settings = __import__(settingsmodule , globals(), locals(),0)
        try:
            if settings.CUSTOM_FORMATS:
                clam.common.data.CUSTOM_FORMATS = settings.CUSTOM_FORMATS
                print("[CLAM Dispatcher] Dependency injection for custom formats succeeded", file=sys.stderr)
        except AttributeError:
            pass
    except ImportError as e:
        print("[CLAM Dispatcher] FATAL ERROR: Unable to import settings module, settingsmodule is " + settingsmodule + ", error: " + str(e), file=sys.stderr)
        print("[CLAM Dispatcher]      hint: If you're using the development server, check you pass the path your service configuration file is in using the -P flag. For Apache integration, verify you add this path to your PYTHONPATH (can be done from the WSGI script)", file=sys.stderr)
        if projectdir:
            f = open(projectdir + '.done','w')
            f.write(str(1))
            f.close()
        return 1

    settingkeys = dir(settings)
    if not 'DISPATCHER_POLLINTERVAL' in settingkeys:
        settings.DISPATCHER_POLLINTERVAL = 30
    if not 'DISPATCHER_MAXRESMEM' in settingkeys:
        settings.DISPATCHER_MAXRESMEM = 0
    if not 'DISPATCHER_MAXTIME' in settingkeys:
        settings.DISPATCHER_MAXTIME = 0


    try:
        print("[CLAM Dispatcher] Running " + cmd, file=sys.stderr)
    except (UnicodeDecodeError, UnicodeError, UnicodeEncodeError):
        print("[CLAM Dispatcher] Running " + repr(cmd), file=sys.stderr) #unicode-issues on Python 2

    if sys.version[0] == '2' and isinstance(cmd,unicode): #pylint: disable=undefined-variable
        cmd = cmd.encode('utf-8')
    if projectdir:
        process = subprocess.Popen(cmd,cwd=projectdir, shell=True, stderr=sys.stderr)
    else:
        process = subprocess.Popen(cmd, shell=True, stderr=sys.stderr)
    begintime = datetime.datetime.now()
    if process:
        pid = process.pid
        print("[CLAM Dispatcher] Running with pid " + str(pid) + " (" + begintime.strftime('%Y-%m-%d %H:%M:%S') + ")", file=sys.stderr)
        sys.stderr.flush()
        if projectdir:
            with open(projectdir + '.pid','w') as f:
                f.write(str(pid))
    else:
        print("[CLAM Dispatcher] Unable to launch process", file=sys.stderr)
        sys.stderr.flush()
        if projectdir:
            with open(projectdir + '.done','w') as f:
                f.write(str(1))
        return 1

    #intervalf = lambda s: min(s/10.0, 15)
    abort = False
    idle = 0
    done = False
    lastpolltime = datetime.datetime.now()
    lastabortchecktime = datetime.datetime.now()

    while not done:
        d = total_seconds(datetime.datetime.now() - begintime)
        try:
            returnedpid, statuscode = os.waitpid(pid, os.WNOHANG)
            if returnedpid != 0:
                print("[CLAM Dispatcher] Process ended (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ", " + str(d)+"s) ", file=sys.stderr)
                done = True
        except OSError: #no such process
            print("[CLAM Dispatcher] Process lost! (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ", " + str(d)+"s)", file=sys.stderr)
            statuscode = 1
            done = True

        if done:
            break

        if total_seconds(datetime.datetime.now() - lastabortchecktime) >= min(10, d* 0.5):  #every 10 seconds, faster at beginning
            if projectdir and os.path.exists(projectdir + '.abort'):
                abort = True
            if abort:
                print("[CLAM Dispatcher] ABORTING PROCESS ON SIGNAL! (" + str(d)+"s)", file=sys.stderr)
                os.system("sleep 30 && kill -9 " + str(pid) + " &") #deathtrap in case the process doesn't listen within 30 seconds
                os.kill(pid, signal.SIGTERM)
                os.waitpid(pid, 0)
                if projectdir:
                    os.unlink(projectdir + '.abort')
                    f = open(projectdir + '.aborted','w')
                    f.close()
                done = True
                break
            lastabortchecktime = datetime.datetime.now()


        if d <= 1:
            idle += 0.05
            time.sleep(0.05)
        elif d <= 2:
            idle += 0.2
            time.sleep(0.2)
        elif d <= 10:
            idle += 0.5
            time.sleep(0.5)
        else:
            idle += 1
            time.sleep(1)

        if settings.DISPATCHER_MAXRESMEM > 0 and total_seconds(datetime.datetime.now() - lastpolltime) >= settings.DISPATCHER_POLLINTERVAL:
            resmem = mem(pid)
            if resmem > settings.DISPATCHER_MAXRESMEM * 1024:
                print("[CLAM Dispatcher] PROCESS EXCEEDS MAXIMUM RESIDENT MEMORY USAGE (" + str(resmem) + ' >= ' + str(settings.DISPATCHER_MAXRESMEM) + ')... ABORTING', file=sys.stderr)
                abort = True
                statuscode = 2
            lastpolltime = datetime.datetime.now()
        elif settings.DISPATCHER_MAXTIME > 0 and d > settings.DISPATCHER_MAXTIME:
            print("[CLAM Dispatcher] PROCESS TIMED OUT.. NO COMPLETION WITHIN " + str(d) + " SECONDS ... ABORTING", file=sys.stderr)
            abort = True
            statuscode = 3

    if projectdir:
        with open(projectdir + '.done','w') as f:
            f.write(str(statuscode))
        if os.path.exists(projectdir + '.pid'): os.unlink(projectdir + '.pid')

        #remove project index cache (has to be recomputed next time because this project now has a different size)
        if os.path.exists(os.path.join(projectdir,'..','.index')):
            os.unlink(os.path.join(projectdir,'..','.index'))


    if tmpdir and os.path.exists(tmpdir):
        print("[CLAM Dispatcher] Removing temporary files", file=sys.stderr)
        for filename in os.listdir(tmpdir):
            filepath = os.path.join(tmpdir,filename)
            try:
                if os.path.isdir(filepath):
                    shutil.rmtree(filepath)
                else:
                    os.unlink(filepath)
            except: #pylint: disable=bare-except
                print("[CLAM Dispatcher] Unable to remove " + filename, file=sys.stderr)

    d = total_seconds(datetime.datetime.now() - begintime)
    if statuscode > 127:
        print("[CLAM Dispatcher] Status code out of range (" + str(statuscode) + "), setting to 127", file=sys.stderr)
        statuscode = 127
    print("[CLAM Dispatcher] Finished (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "), exit code " + str(statuscode) + ", dispatcher wait time " + str(idle)  + "s, duration " + str(d) + "s", file=sys.stderr)

    return statuscode
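
The dispatcher's cleanup loop pairs shutil.rmtree() with os.unlink(): rmtree handles subdirectories, unlink handles plain files, and a failure on one entry is reported rather than aborting the whole sweep. A standalone sketch of that pattern, assuming the clear_directory name (not part of clam):

import os
import shutil

def clear_directory(tmpdir):
    # empty tmpdir but keep the directory itself, logging entries
    # that cannot be removed instead of raising
    for filename in os.listdir(tmpdir):
        filepath = os.path.join(tmpdir, filename)
        try:
            if os.path.isdir(filepath):
                shutil.rmtree(filepath)
            else:
                os.unlink(filepath)
        except OSError:
            print("unable to remove %s" % filename)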

Example 28

Project: tp-libvirt
Source File: lxc_life_cycle.py
View license
def run(test, params, env):
    """
    LXC container life cycle testing by virsh command
    """
    uri = params.get("connect_uri", "lxc:///")
    vm_name = params.get("main_vm")
    dom_type = params.get("lxc_domtype", "lxc")
    vcpu = int(params.get("lxc_vcpu", 1))
    max_mem = int(params.get("lxc_max_mem", 500000))
    current_mem = int(params.get("lxc_current_mem", 500000))
    os_type = params.get("lxc_ostype", "exe")
    os_arch = params.get("lxc_osarch", "x86_64")
    os_init = params.get("lxc_osinit", "/bin/sh")
    emulator_path = params.get("lxc_emulator",
                               "/usr/libexec/libvirt_lxc")
    interface_type = params.get("lxc_interface_type", "network")
    net_name = params.get("lxc_net_name", "default")
    full_os = ("yes" == params.get("lxc_full_os", "no"))
    install_root = params.get("lxc_install_root", "/")
    fs_target = params.get("lxc_fs_target", "/")
    fs_accessmode = params.get("lxc_fs_accessmode", "passthrough")
    passwd = params.get("lxc_fs_passwd", "redhat")

    def generate_container_xml():
        """
        Generate container xml
        """
        vmxml = vm_xml.VMXML(dom_type)
        vmxml.vm_name = vm_name
        vmxml.max_mem = max_mem
        vmxml.current_mem = current_mem
        vmxml.vcpu = vcpu
        # Generate os
        vm_os = vm_xml.VMOSXML()
        vm_os.type = os_type
        vm_os.arch = os_arch
        vm_os.init = os_init
        vmxml.os = vm_os
        # Generate emulator
        emulator = Emulator()
        emulator.path = emulator_path
        # Generate console
        console = Console()
        filesystem = Filesystem()
        filesystem.accessmode = fs_accessmode
        filesystem.source = {'dir': install_root}
        filesystem.target = {'dir': fs_target}
        # Add emulator and console in devices
        devices = vm_xml.VMXMLDevices()
        devices.append(emulator)
        devices.append(console)
        devices.append(filesystem)
        # Add network device
        network = Interface(type_name=interface_type)
        network.mac_address = utils_net.generate_mac_address_simple()
        network.source = {interface_type: net_name}
        devices.append(network)
        vmxml.set_devices(devices)
        return vmxml

    def check_state(expected_state):
        result = virsh.domstate(vm_name, uri=uri)
        utlv.check_exit_status(result)
        vm_state = result.stdout.strip()
        if vm_state == expected_state:
            logging.info("Get expected state: %s", vm_state)
        else:
            raise TestFail("Get unexpected state: %s", vm_state)

    virsh_args = {'uri': uri, 'debug': True}
    try:
        vmxml = generate_container_xml()
        with open(vmxml.xml, 'r') as f:
            logging.info("Container XML:\n%s", f.read())

        if full_os:
            if not os.path.exists(install_root):
                os.mkdir(install_root)
            # Install core os under installroot
            cmd = "yum --releasever=/ --installroot=%s" % install_root
            cmd += " --nogpgcheck -y groupinstall core"
            process.run(cmd, shell=True)
            # Fix root login on console
            process.run("echo 'pts/0' >> %s/etc/securetty" % install_root,
                        shell=True)
            for i in ["session    required     pam_selinux.so close",
                      "session    required     pam_selinux.so open",
                      "session    required     pam_loginuid.so"]:
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/login' %
                            (i, i, install_root), shell=True)
                # Fix root login for sshd
                process.run('sed -i s/"%s\"/"#%s"/g %s/etc/pam.d/sshd' %
                            (i, i, install_root), shell=True)

            # Config basic network
            net_file = install_root + '/etc/sysconfig/network'
            with open(net_file, 'w') as f:
                f.write('NETWORKING=yes\nHOSTNAME=%s\n' % vm_name)
            net_script = install_root + '/etc/sysconfig/network-scripts/ifcfg-eth0'
            with open(net_script, 'w') as f:
                f.write('DEVICE=eth0\nBOOTPROTO=dhcp\nONBOOT=yes\n')

            # Set root password and enable sshd
            session = aexpect.ShellSession("chroot %s" % install_root)
            session.sendline('echo %s|passwd root --stdin' % passwd)
            session.sendline('chkconfig sshd on')
            session.close()

        # Create
        result = virsh.create(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Destroy transient LXC domain successfully")
        else:
            raise TestFail("Transient LXC domain still exist after destroy")

        # Define
        result = virsh.define(vmxml.xml, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # List
        result = virsh.dom_list('--inactive', **virsh_args)
        utlv.check_exit_status(result)
        if re.findall("(%s)\s+shut off" % vm_name, result.stdout):
            logging.info("Find %s in virsh list output", vm_name)
        else:
            raise TestFail("Not find %s in virsh list output")

        # Dumpxml
        result = virsh.dumpxml(vm_name, uri=uri, debug=False)
        utlv.check_exit_status(result)

        # Edit
        edit_vcpu = '2'
        logging.info("Change vcpu of LXC container to %s", edit_vcpu)
        edit_cmd = [r":%s /[0-9]*<\/vcpu>/" + edit_vcpu + r"<\/vcpu>"]
        if not utlv.exec_virsh_edit(vm_name, edit_cmd, connect_uri=uri):
            raise TestFail("Run edit command fail")
        else:
            result = virsh.dumpxml(vm_name, **virsh_args)
            new_vcpu = re.search(r'(\d*)</vcpu>', result.stdout).group(1)
            if new_vcpu == edit_vcpu:
                logging.info("vcpu number is expected after do edit")
            else:
                raise TestFail("vcpu number is unexpected after do edit")

        # Start
        result = virsh.start(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Suspend
        result = virsh.suspend(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('paused')

        # Resume
        result = virsh.resume(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('running')

        # Reboot(not supported on RHEL6)
        result = virsh.reboot(vm_name, **virsh_args)
        supported_err = 'not supported by the connection driver: virDomainReboot'
        if supported_err in result.stderr.strip():
            logging.info("Reboot is not supported")
        else:
            utlv.check_exit_status(result)

        # Destroy
        result = virsh.destroy(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        check_state('shut off')

        # Undefine
        result = virsh.undefine(vm_name, **virsh_args)
        utlv.check_exit_status(result)
        if not virsh.domain_exists(vm_name, **virsh_args):
            logging.info("Undefine LXC domain successfully")
        else:
            raise TestFail("LXC domain still exist after undefine")

    finally:
        virsh.remove_domain(vm_name, **virsh_args)
        if full_os and os.path.exists(install_root):
            shutil.rmtree(install_root)
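
Here the teardown only calls shutil.rmtree() when the test actually provisioned a full OS under install_root, and it re-checks os.path.exists() so cleanup never fails on a directory that was never created. A small sketch of that conditional-cleanup shape (run_with_optional_root and its contents are hypothetical, not taken from tp-libvirt):

import os
import shutil
import tempfile

def run_with_optional_root(full_os):
    install_root = os.path.join(tempfile.gettempdir(), 'lxc-install-root')
    try:
        if full_os and not os.path.exists(install_root):
            os.mkdir(install_root)
            # ... install the guest OS under install_root here ...
    finally:
        # mirror the test's teardown: remove the tree only when it was
        # needed for this run and actually exists on disk
        if full_os and os.path.exists(install_root):
            shutil.rmtree(install_root)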

Example 29

Project: tp-libvirt
Source File: virsh_domstate.py
View license
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd_service = utils_libvirtd.Libvirtd()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    dump_path = os.path.join(test.tmpdir, "dump/")
    os.mkdir(dump_path)
    dump_file = ""
    try:
        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                panic_dev.addr_type = "isa"
                panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd_service.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + vm_name + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device find
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                raise exceptions.TestSkipError(
                    "No 'panic' device in the guest. Maybe your libvirt "
                    "version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                virsh.start(vm_name, ignore_status=False)
            elif vm_action == "kill":
                libvirtd_service.stop()
                utils_misc.kill_process_by_pattern(vm_name)
                libvirtd_service.restart()
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash VM, and command will not
                # return as vm crashed, so fail early for 'destroy' and
                # 'preserve' action. For 'restart', 'coredump-restart'
                # and 'coredump-destroy' actions, they all need more time
                # to dump core file or restart OS, so using the default
                # session command timeout(60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
        except process.CmdError, detail:
            raise exceptions.TestError(
                "Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd_service.stop()

        if vm_ref == "remote":
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise exceptions.TestSkipError(
                    "Test 'remote' parameters not setup")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1
        else:
            result = virsh.domstate(vm_ref, extra, ignore_status=True,
                                    debug=True)
            status = result.exit_status
            output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                raise exceptions.TestFail(
                    "Run successfully with wrong command!")
        else:
            if status or not output:
                raise exceptions.TestFail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # If not, will cost long time to destroy vm
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        raise ActionError(vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        raise ActionError(vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        raise ActionError(vm_action)
                elif vm_action == "start":
                    if not output.count("booted"):
                        raise ActionError(vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        raise ActionError(vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        raise ActionError(vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or
                        re.search("blocked", output) or
                        re.search("idle", output)):
                    raise exceptions.TestFail("Run failed with right command")
    finally:
        qemu_conf.restore()
        libvirtd.restart()
        vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
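
The finally block above guards shutil.rmtree() with os.path.exists() so the teardown cannot fail if dump_path was never created. The standard library also offers shutil.rmtree(path, ignore_errors=True), which silently skips a missing tree; a short sketch of both variants (the dump_path value here is illustrative):

import os
import shutil
import tempfile

dump_path = os.path.join(tempfile.gettempdir(), "dump")

# variant 1: check first, as the test above does
if os.path.exists(dump_path):
    shutil.rmtree(dump_path)

# variant 2: let rmtree swallow the errors itself
shutil.rmtree(dump_path, ignore_errors=True)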

Example 30

Project: tp-qemu
Source File: steps.py
View license
def barrier_2(vm, words, params, debug_dir, data_scrdump_filename,
              current_step_num):
    if len(words) < 7:
        logging.error("Bad barrier_2 command line")
        return False

    # Parse barrier command line
    _, dx, dy, x1, y1, md5sum, timeout = words[:7]
    dx, dy, x1, y1, timeout = map(int, [dx, dy, x1, y1, timeout])

    # Define some paths
    scrdump_filename = os.path.join(debug_dir, "scrdump.ppm")
    cropped_scrdump_filename = os.path.join(debug_dir, "cropped_scrdump.ppm")
    expected_scrdump_filename = os.path.join(debug_dir, "scrdump_expected.ppm")
    expected_cropped_scrdump_filename = os.path.join(debug_dir,
                                                     "cropped_scrdump_expected.ppm")
    comparison_filename = os.path.join(debug_dir, "comparison.ppm")
    history_dir = os.path.join(debug_dir, "barrier_history")

    # Collect a few parameters
    timeout_multiplier = float(params.get("timeout_multiplier") or 1)
    fail_if_stuck_for = float(params.get("fail_if_stuck_for") or 1e308)
    stuck_detection_history = int(params.get("stuck_detection_history") or 2)
    keep_screendump_history = params.get("keep_screendump_history") == "yes"
    keep_all_history = params.get("keep_all_history") == "yes"

    # Multiply timeout by the timeout multiplier
    timeout *= timeout_multiplier

    # Timeout/5 is the time it took stepmaker to complete this step.
    # Divide that number by 10 to poll 10 times, just in case
    # current machine is stronger than the "stepmaker machine".
    # Limit to 1 (min) and 10 (max) seconds between polls.
    sleep_duration = float(timeout) / 50.0
    if sleep_duration < 1.0:
        sleep_duration = 1.0
    if sleep_duration > 10.0:
        sleep_duration = 10.0

    end_time = time.time() + timeout
    end_time_stuck = time.time() + fail_if_stuck_for
    start_time = time.time()

    prev_whole_image_md5sums = []

    failure_message = None

    # Main loop
    while True:
        # Check for timeouts
        if time.time() > end_time:
            failure_message = "regular timeout"
            break
        if time.time() > end_time_stuck:
            failure_message = "guest is stuck"
            break

        # Make sure vm is alive
        if not vm.is_alive():
            failure_message = "VM is dead"
            break

        # Request screendump
        try:
            vm.monitor.screendump(scrdump_filename, debug=False)
        except qemu_monitor.MonitorError, e:
            logging.warn(e)
            continue

        # Read image file
        try:
            (w, h, data) = ppm_utils.image_read_from_ppm_file(scrdump_filename)
        except IOError, e:
            logging.warn(e)
            continue

        # Make sure image is valid
        if not ppm_utils.image_verify_ppm_file(scrdump_filename):
            logging.warn("Got invalid screendump: dimensions: %dx%d, "
                         "data size: %d", w, h, len(data))
            continue

        # Compute md5sum of whole image
        whole_image_md5sum = ppm_utils.image_md5sum(w, h, data)

        # Write screendump to history_dir (as JPG) if requested
        # and if the screendump differs from the previous one
        if (keep_screendump_history and
                whole_image_md5sum not in prev_whole_image_md5sums[:1]):
            try:
                os.makedirs(history_dir)
            except Exception:
                pass
            history_scrdump_filename = os.path.join(history_dir,
                                                    "scrdump-step_%s-%s.jpg" % (current_step_num,
                                                                                time.strftime("%Y%m%d-%H%M%S")))
            try:
                image = PIL.Image.open(scrdump_filename)
                image.save(history_scrdump_filename, format='JPEG',
                           quality=30)
            except NameError:
                pass

        # Compare md5sum of barrier region with the expected md5sum
        calced_md5sum = ppm_utils.get_region_md5sum(w, h, data, x1, y1, dx, dy,
                                                    cropped_scrdump_filename)
        if calced_md5sum == md5sum:
            # Success -- remove screendump history unless requested not to
            if keep_screendump_history and not keep_all_history:
                shutil.rmtree(history_dir)
            # Report success
            return True

        # Insert image md5sum into queue of last seen images:
        # If md5sum is already in queue...
        if whole_image_md5sum in prev_whole_image_md5sums:
            # Remove md5sum from queue
            prev_whole_image_md5sums.remove(whole_image_md5sum)
        else:
            # Otherwise extend 'stuck' timeout
            end_time_stuck = time.time() + fail_if_stuck_for
        # Insert md5sum at beginning of queue
        prev_whole_image_md5sums.insert(0, whole_image_md5sum)
        # Limit queue length to stuck_detection_history
        prev_whole_image_md5sums = \
            prev_whole_image_md5sums[:stuck_detection_history]

        # Sleep for a while
        time.sleep(sleep_duration)

    # Failure
    message = ("Barrier failed at step %s after %.2f seconds (%s)" %
               (current_step_num, time.time() - start_time, failure_message))

    # What should we do with this failure?
    if words[-1] == "optional":
        logging.info(message)
        return False
    else:
        # Collect information and put it in debug_dir
        if data_scrdump_filename and os.path.exists(data_scrdump_filename):
            # Read expected screendump image
            (ew, eh, edata) = \
                ppm_utils.image_read_from_ppm_file(data_scrdump_filename)
            # Write it in debug_dir
            ppm_utils.image_write_to_ppm_file(expected_scrdump_filename,
                                              ew, eh, edata)
            # Write the cropped version as well
            ppm_utils.get_region_md5sum(ew, eh, edata, x1, y1, dx, dy,
                                        expected_cropped_scrdump_filename)
            # Perform comparison
            (w, h, data) = ppm_utils.image_read_from_ppm_file(scrdump_filename)
            if w == ew and h == eh:
                (w, h, data) = ppm_utils.image_comparison(w, h, data, edata)
                ppm_utils.image_write_to_ppm_file(comparison_filename, w, h,
                                                  data)
        # Print error messages and fail the test
        long_message = message + "\n(see analysis at %s)" % debug_dir
        logging.error(long_message)
        raise error.TestFail(message)
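
In this example shutil.rmtree() runs on the success path: once the barrier region matches, the accumulated screendump history is discarded unless the caller asked to keep it. A condensed sketch of that decision (finish_step and its arguments are illustrative names, not part of tp-qemu):

import os
import shutil

def finish_step(history_dir, keep_screendump_history, keep_all_history):
    # on success, drop the per-step screendump history unless the
    # configuration asked to keep everything
    if keep_screendump_history and not keep_all_history:
        if os.path.isdir(history_dir):
            shutil.rmtree(history_dir)
    return True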

Example 31

Project: PyClassLessons
Source File: wheel.py
View license
    def install(self, paths, maker, **kwargs):
        """
        Install a wheel to the specified paths. If kwarg ``warner`` is
        specified, it should be a callable, which will be called with two
        tuples indicating the wheel version of this software and the wheel
        version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
        If kwarg ``lib_only`` is True, only the purelib/platlib files are
        installed, and the headers, scripts, data and dist-info metadata are
        not written.

        The return value is a :class:`InstalledDistribution` instance unless
        ``options.lib_only`` is True, in which case the return value is ``None``.
        """

        dry_run = maker.dry_run
        warner = kwargs.get('warner')
        lib_only = kwargs.get('lib_only', False)

        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')

        wrapper = codecs.getreader('utf-8')

        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            if (file_version != self.wheel_version) and warner:
                warner(self.wheel_version, file_version)

            if message['Root-Is-Purelib'] == 'true':
                libdir = paths['purelib']
            else:
                libdir = paths['platlib']

            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row

            data_pfx = posixpath.join(data_dir, '')
            info_pfx = posixpath.join(info_dir, '')
            script_pfx = posixpath.join(data_dir, 'scripts', '')

            # make a new instance rather than a copy of maker's,
            # as we mutate it
            fileop = FileOperator(dry_run=dry_run)
            fileop.record = True    # so we can rollback if needed

            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!

            outfiles = []   # for RECORD writing

            # for script copying/shebang processing
            workdir = tempfile.mkdtemp()
            # set target dir later
            # we default add_launchers to False, as the
            # Python Launcher should be used instead
            maker.source_dir = workdir
            maker.target_dir = None
            try:
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    # The signature file won't be in RECORD,
                    # and we don't currently do anything with it
                    if u_arcname.endswith('/RECORD.jws'):
                        continue
                    row = records[u_arcname]
                    if row[2] and str(zinfo.file_size) != row[2]:
                        raise DistlibException('size mismatch for '
                                               '%s' % u_arcname)
                    if row[1]:
                        kind, value = row[1].split('=', 1)
                        with zf.open(arcname) as bf:
                            data = bf.read()
                        _, digest = self.get_hash(data, kind)
                        if digest != value:
                            raise DistlibException('digest mismatch for '
                                                   '%s' % arcname)

                    if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                        logger.debug('lib_only: skipping %s', u_arcname)
                        continue
                    is_script = (u_arcname.startswith(script_pfx)
                                 and not u_arcname.endswith('.exe'))

                    if u_arcname.startswith(data_pfx):
                        _, where, rp = u_arcname.split('/', 2)
                        outfile = os.path.join(paths[where], convert_path(rp))
                    else:
                        # meant for site-packages.
                        if u_arcname in (wheel_metadata_name, record_name):
                            continue
                        outfile = os.path.join(libdir, convert_path(u_arcname))
                    if not is_script:
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, outfile)
                        outfiles.append(outfile)
                        # Double check the digest of the written file
                        if not dry_run and row[1]:
                            with open(outfile, 'rb') as bf:
                                data = bf.read()
                                _, newdigest = self.get_hash(data, kind)
                                if newdigest != digest:
                                    raise DistlibException('digest mismatch '
                                                           'on write for '
                                                           '%s' % outfile)
                        if bc and outfile.endswith('.py'):
                            try:
                                pyc = fileop.byte_compile(outfile)
                                outfiles.append(pyc)
                            except Exception:
                                # Don't give up if byte-compilation fails,
                                # but log it and perhaps warn the user
                                logger.warning('Byte-compilation failed',
                                               exc_info=True)
                    else:
                        fn = os.path.basename(convert_path(arcname))
                        workname = os.path.join(workdir, fn)
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, workname)

                        dn, fn = os.path.split(outfile)
                        maker.target_dir = dn
                        filenames = maker.make(fn)
                        fileop.set_executable_mode(filenames)
                        outfiles.extend(filenames)

                if lib_only:
                    logger.debug('lib_only: returning None')
                    dist = None
                else:
                    # Generate scripts

                    # Try to get pydist.json so we can see if there are
                    # any commands to generate. If this fails (e.g. because
                    # of a legacy wheel), log a warning but don't give up.
                    commands = None
                    file_version = self.info['Wheel-Version']
                    if file_version == '1.0':
                        # Use legacy info
                        ep = posixpath.join(info_dir, 'entry_points.txt')
                        try:
                            with zf.open(ep) as bwf:
                                epdata = read_exports(bwf)
                            commands = {}
                            for key in ('console', 'gui'):
                                k = '%s_scripts' % key
                                if k in epdata:
                                    commands['wrap_%s' % key] = d = {}
                                    for v in epdata[k].values():
                                        s = '%s:%s' % (v.prefix, v.suffix)
                                        if v.flags:
                                            s += ' %s' % v.flags
                                        d[v.name] = s
                        except Exception:
                            logger.warning('Unable to read legacy script '
                                           'metadata, so cannot generate '
                                           'scripts')
                    else:
                        try:
                            with zf.open(metadata_name) as bwf:
                                wf = wrapper(bwf)
                                commands = json.load(wf).get('commands')
                        except Exception:
                            logger.warning('Unable to read JSON metadata, so '
                                           'cannot generate scripts')
                    if commands:
                        console_scripts = commands.get('wrap_console', {})
                        gui_scripts = commands.get('wrap_gui', {})
                        if console_scripts or gui_scripts:
                            script_dir = paths.get('scripts', '')
                            if not os.path.isdir(script_dir):
                                raise ValueError('Valid script path not '
                                                 'specified')
                            maker.target_dir = script_dir
                            for k, v in console_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script)
                                fileop.set_executable_mode(filenames)

                            if gui_scripts:
                                options = {'gui': True }
                                for k, v in gui_scripts.items():
                                    script = '%s = %s' % (k, v)
                                    filenames = maker.make(script, options)
                                    fileop.set_executable_mode(filenames)

                    p = os.path.join(libdir, info_dir)
                    dist = InstalledDistribution(p)

                    # Write SHARED
                    paths = dict(paths)     # don't change passed in dict
                    del paths['purelib']
                    del paths['platlib']
                    paths['lib'] = libdir
                    p = dist.write_shared_locations(paths, dry_run)
                    if p:
                        outfiles.append(p)

                    # Write RECORD
                    dist.write_installed_files(outfiles, paths['prefix'],
                                               dry_run)
                return dist
            except Exception:  # pragma: no cover
                logger.exception('installation failed.')
                fileop.rollback()
                raise
            finally:
                shutil.rmtree(workdir)
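
distlib's installer stages scripts in a tempfile.mkdtemp() work directory and removes it with shutil.rmtree() in the finally clause, so the scratch space disappears whether the install succeeds, rolls back, or raises. On Python 3 the same mkdtemp/rmtree pairing is available as a context manager; a brief sketch (the staged script content is made up):

import os
import tempfile

# tempfile.TemporaryDirectory calls shutil.rmtree on exit, covering the
# raise/rollback cases without an explicit finally block
with tempfile.TemporaryDirectory() as workdir:
    staged = os.path.join(workdir, 'script.py')
    with open(staged, 'w') as f:
        f.write("print('hello')\n")
# workdir and everything inside it have been removed here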

Example 32

View license
def getPmPerceptualError(mesh, pm_filebuf, mipmap_tarfilebuf):
    perceptualdiff = which('perceptualdiff')
    if perceptualdiff is None:
        raise Exception("perceptualdiff exectuable not found on path")
    
    pm_chunks = []
    
    if pm_filebuf is not None:
        data = pm_filebuf.read(PM_CHUNK_SIZE)
        refinements_read = 0
        num_refinements = None
        while len(data) > 0:
            (refinements_read, num_refinements, pm_refinements, data_left) = pdae_utils.readPDAEPartial(data, refinements_read, num_refinements)
            pm_chunks.append(pm_refinements)
            data = data_left + pm_filebuf.read(PM_CHUNK_SIZE)
    
    tar = tarfile.TarFile(fileobj=mipmap_tarfilebuf)
    texsizes = []
    largest_tarinfo = (0, None)
    for tarinfo in tar:
        tarinfo.xsize = int(tarinfo.name.split('x')[0])
        if tarinfo.xsize > largest_tarinfo[0]:
            largest_tarinfo = (tarinfo.xsize, tarinfo)
        if tarinfo.xsize >= 128:
            texsizes.append(tarinfo)
    if len(texsizes) == 0:
        texsizes.append(largest_tarinfo[1])
    
    texsizes = sorted(texsizes, key=lambda t: t.xsize)
    texims = []
    first_image_data = None
    for tarinfo in texsizes:
        f = tar.extractfile(tarinfo)
        texdata = f.read()
        if first_image_data is None:
            first_image_data = texdata
        
        texpnm = PNMImage()
        texpnm.read(StringStream(texdata), 'something.jpg')
        newtex = Texture()
        newtex.load(texpnm)
        texims.append(newtex)
    
    mesh.images[0].setData(first_image_data)
    
    scene_members = getSceneMembers(mesh)
    
    # turn off panda3d printing to stdout
    nout = MultiplexStream()
    Notify.ptr().setOstreamPtr(nout, 0)
    nout.addFile(Filename(os.devnull))
    
    base = ShowBase()
    
    rotateNode = GeomNode("rotater")
    rotatePath = base.render.attachNewNode(rotateNode)
    matrix = numpy.identity(4)
    if mesh.assetInfo.upaxis == collada.asset.UP_AXIS.X_UP:
        r = collada.scene.RotateTransform(0,1,0,90)
        matrix = r.matrix
    elif mesh.assetInfo.upaxis == collada.asset.UP_AXIS.Y_UP:
        r = collada.scene.RotateTransform(1,0,0,90)
        matrix = r.matrix
    rotatePath.setMat(Mat4(*matrix.T.flatten().tolist()))
    geom, renderstate, mat4 = scene_members[0]
    node = GeomNode("primitive")
    node.addGeom(geom)
    if renderstate is not None:
        node.setGeomState(0, renderstate)
    geomPath = rotatePath.attachNewNode(node)
    geomPath.setMat(mat4)
        
    wrappedNode = ensureCameraAt(geomPath, base.camera)
    base.disableMouse()
    attachLights(base.render)
    base.render.setShaderAuto()
    base.render.setTransparency(TransparencyAttrib.MNone)
    base.render.setColorScaleOff(9999)
    
    controls.KeyboardMovement()
    controls.MouseDrag(wrappedNode)
    controls.MouseScaleZoom(wrappedNode)
    controls.ButtonUtils(wrappedNode)
    controls.MouseCamera()
    
    error_data = []
    
    try:
        tempdir = tempfile.mkdtemp(prefix='meshtool-print-pm-perceptual-error')
        
        triangleCounts = []
        
        hprs = [(0, 0, 0),
                (0, 90, 0),
                (0, 180, 0),
                (0, 270, 0),
                (90, 0, 0),
                (-90, 0, 0)]
        
        for texim in texims:
            np = base.render.find("**/rotater/collada")
            np.setTextureOff(1)
            np.setTexture(texim, 1)
            for angle, hpr in enumerate(hprs):
                wrappedNode.setHpr(*hpr)
                takeScreenshot(tempdir, base, geomPath, texim, angle)
        triangleCounts.append(getNumTriangles(geomPath))
        
        for pm_chunk in pm_chunks:
            pdae_panda.add_refinements(geomPath, pm_chunk)
            
            for texim in texims:
                np = base.render.find("**/rotater/collada")
                np.setTextureOff(1)
                np.setTexture(texim, 1)
                for angle, hpr in enumerate(hprs):
                    wrappedNode.setHpr(*hpr)
                    takeScreenshot(tempdir, base, geomPath, texim, angle)
            triangleCounts.append(getNumTriangles(geomPath))
        
        full_tris = triangleCounts[-1]
        full_tex = texims[-1]
        
        for numtris in triangleCounts:
            for texim in texims:
                pixel_diff = 0
                for angle, hpr in enumerate(hprs):
                    curFile = '%d_%d_%d_%d.png' % (numtris, texim.getXSize(), texim.getYSize(), angle)
                    curFile = os.path.join(tempdir, curFile)
                    
                    fullFile = '%d_%d_%d_%d.png' % (full_tris, full_tex.getXSize(), full_tex.getYSize(), angle)
                    fullFile = os.path.join(tempdir, fullFile)
                    
                    try:
                        output = subprocess.check_output([perceptualdiff, '-threshold', '1', fullFile, curFile])
                    except subprocess.CalledProcessError, ex:
                        output = ex.output
                    
                    output = output.strip()
                    if len(output) > 0:
                        pixel_diff = max(pixel_diff, int(output.split('\n')[1].split()[0]))
                    
                error_data.append({'triangles': numtris,
                                   'width': texim.getXSize(),
                                   'height': texim.getYSize(),
                                   'pixel_error': pixel_diff})
    
    finally:
        shutil.rmtree(tempdir, ignore_errors=True)
        
    return error_data
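
Worth highlighting here: the cleanup passes ignore_errors=True, so a failed deletion of the screenshot directory cannot mask an exception raised while rendering or diffing. A minimal sketch of that shape, with the rendering step left as a placeholder:

import shutil
import tempfile

tempdir = tempfile.mkdtemp(prefix='meshtool-print-pm-perceptual-error')
try:
    pass  # render screenshots into tempdir and run perceptualdiff on them
finally:
    # ignore_errors=True: if a file is still held open, rmtree fails
    # silently instead of raising and hiding the original error
    shutil.rmtree(tempdir, ignore_errors=True)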

Example 33

Project: numexpr
Source File: setup.py
View license
def setup_package():
    metadata = dict(
                      description='Fast numerical expression evaluator for NumPy',
                      author='David M. Cooke, Francesc Alted and others',
                      author_email='[email protected], [email protected]',
                      url='https://github.com/pydata/numexpr',
                      license='MIT',
                      packages=['numexpr'],
                      install_requires=requirements,
                      setup_requires=requirements
    )
    if (len(sys.argv) >= 2 and
        ('--help' in sys.argv[1:] or
         (sys.argv[1] in (
             '--help-commands', 'egg_info', '--version', 'clean', '--name')))):

        # For these actions, NumPy is not required.
        #
        # They are required to succeed without Numpy for example when
        # pip is used to install Numexpr when Numpy is not yet present in
        # the system.
        # (via https://github.com/abhirk/scikit-learn/blob/master/setup.py)
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup

        metadata['name']    = 'numexpr'
        metadata['version'] = version
    else:
        from numpy.distutils.core import setup
        from numpy.distutils.command.build_ext import build_ext as numpy_build_ext

        try:  # Python 3
            # Code taken form numpy/distutils/command/build_py.py
            # XXX: update LICENSES
            from distutils.command.build_py import build_py_2to3 as old_build_py
            from numpy.distutils.misc_util import is_string

            class build_py(old_build_py):

                def run(self):
                    build_src = self.get_finalized_command('build_src')
                    if build_src.py_modules_dict and self.packages is None:
                        self.packages = list(build_src.py_modules_dict.keys())
                    old_build_py.run(self)

                def find_package_modules(self, package, package_dir):
                    modules = old_build_py.find_package_modules(
                        self, package, package_dir)

                    # Find build_src generated *.py files.
                    build_src = self.get_finalized_command('build_src')
                    modules += build_src.py_modules_dict.get(package, [])

                    return modules

                def find_modules(self):
                    old_py_modules = self.py_modules[:]
                    new_py_modules = list(filter(is_string, self.py_modules))
                    self.py_modules[:] = new_py_modules
                    modules = old_build_py.find_modules(self)
                    self.py_modules[:] = old_py_modules

                    return modules

        except ImportError:  # Python 2
            from numpy.distutils.command.build_py import build_py

        DEBUG = False

        def localpath(*args):
            return op.abspath(op.join(*((op.dirname(__file__),) + args)))

        def debug(instring):
            if DEBUG:
                print(" DEBUG: " + instring)


        def configuration():
            from numpy.distutils.misc_util import Configuration, dict_append
            from numpy.distutils.system_info import system_info

            config = Configuration('numexpr')

            #try to find configuration for MKL, either from environment or site.cfg
            if op.exists('site.cfg'):
                mkl_config_data = config.get_info('mkl')
                # Some version of MKL needs to be linked with libgfortran.
                # For this, use entries of DEFAULT section in site.cfg.
                default_config = system_info()
                dict_append(mkl_config_data,
                            libraries=default_config.get_libraries(),
                            library_dirs=default_config.get_lib_dirs())
            else:
                mkl_config_data = {}

            # setup information for C extension
            if os.name == 'nt':
                pthread_win = ['numexpr/win32/pthread.c']
            else:
                pthread_win = []
            extension_config_data = {
                'sources': ['numexpr/interpreter.cpp',
                            'numexpr/module.cpp',
                            'numexpr/numexpr_object.cpp'] + pthread_win,
                'depends': ['numexpr/interp_body.cpp',
                            'numexpr/complex_functions.hpp',
                            'numexpr/interpreter.hpp',
                            'numexpr/module.hpp',
                            'numexpr/msvc_function_stubs.hpp',
                            'numexpr/numexpr_config.hpp',
                            'numexpr/numexpr_object.hpp'],
                'libraries': ['m'],
                'extra_compile_args': ['-funroll-all-loops', ],
            }
            dict_append(extension_config_data, **mkl_config_data)
            if 'library_dirs' in mkl_config_data:
                library_dirs = ':'.join(mkl_config_data['library_dirs'])
            config.add_extension('interpreter', **extension_config_data)
            config.set_options(quiet=True)

            config.make_config_py()
            config.add_subpackage('tests', 'numexpr/tests')

            #version handling
            config.get_version('numexpr/version.py')
            return config


        class cleaner(clean):

            def run(self):
                # Recursive deletion of build/ directory
                path = localpath("build")
                try:
                    shutil.rmtree(path)
                except Exception:
                    debug("Failed to remove directory %s" % path)
                else:
                    debug("Cleaned up %s" % path)

                # Now, the extension and other files
                try:
                    import imp
                except ImportError:
                    if os.name == 'posix':
                        paths = [localpath("numexpr/interpreter.so")]
                    else:
                        paths = [localpath("numexpr/interpreter.pyd")]
                else:
                    paths = []
                    for suffix, _, _ in imp.get_suffixes():
                        if suffix == '.py':
                            continue
                        paths.append(localpath("numexpr", "interpreter" + suffix))
                paths.append(localpath("numexpr/__config__.py"))
                paths.append(localpath("numexpr/__config__.pyc"))
                for path in paths:
                    try:
                        os.remove(path)
                    except Exception:
                        debug("Failed to clean up file %s" % path)
                    else:
                        debug("Cleaning up %s" % path)

                clean.run(self)

        class build_ext(numpy_build_ext):
            def build_extension(self, ext):
                # at this point we know what the C compiler is.
                if self.compiler.compiler_type == 'msvc' or self.compiler.compiler_type == 'intelemw':
                    ext.extra_compile_args = []
                    # also remove extra linker arguments msvc doesn't understand
                    ext.extra_link_args = []
                    # also remove gcc math library
                    ext.libraries.remove('m')
                numpy_build_ext.build_extension(self, ext)

        if setuptools:
            metadata['zip_safe'] = False

        metadata['cmdclass'] = {
            'build_ext': build_ext,
            'clean': cleaner,
            'build_py': build_py,
        }
        metadata['configuration'] = configuration

    setup(**metadata)
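
The cleaner command above wraps shutil.rmtree in try/except/else, so a missing build/ directory is tolerated and success is only reported when the removal actually happened. A minimal sketch of that shape, with debug() standing in for whatever logging is preferred:

import shutil

def debug(msg):
    print(" DEBUG: " + msg)

path = "build"
try:
    shutil.rmtree(path)
except Exception:
    debug("Failed to remove directory %s" % path)  # e.g. it does not exist
else:
    debug("Cleaned up %s" % path)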

Example 34

Project: pip
Source File: install.py
View license
    def run(self, options, args):
        cmdoptions.resolve_wheel_no_use_binary(options)
        cmdoptions.check_install_build_global(options)

        if options.as_egg:
            warnings.warn(
                "--egg has been deprecated and will be removed in the future. "
                "This flag is mutually exclusive with large parts of pip, and "
                "actually using it invalidates pip's ability to manage the "
                "installation process.",
                RemovedInPip10Warning,
            )

        if options.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.download_dir:
            warnings.warn(
                "pip install --download has been deprecated and will be "
                "removed in the future. Pip now has a download command that "
                "should be used instead.",
                RemovedInPip10Warning,
            )
            options.ignore_installed = True

        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)

        options.src_dir = os.path.abspath(options.src_dir)
        install_options = options.install_options or []
        if options.use_user_site:
            if options.prefix_path:
                raise CommandError(
                    "Can not combine '--user' and '--prefix' as they imply "
                    "different installation locations"
                )
            if virtualenv_no_global():
                raise InstallationError(
                    "Can not perform a '--user' install. User site-packages "
                    "are not visible in this virtualenv."
                )
            install_options.append('--user')
            install_options.append('--prefix=')

        temp_target_dir = None
        if options.target_dir:
            options.ignore_installed = True
            temp_target_dir = tempfile.mkdtemp()
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir) and not
                    os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )
            install_options.append('--home=' + temp_target_dir)

        global_options = options.global_options or []

        with self._build_session(options) as session:

            finder = self._build_package_finder(options, session)
            build_delete = (not (options.no_clean or options.build_dir))
            wheel_cache = WheelCache(options.cache_dir, options.format_control)
            if options.cache_dir and not check_path_owner(options.cache_dir):
                logger.warning(
                    "The directory '%s' or its parent directory is not owned "
                    "by the current user and caching wheels has been "
                    "disabled. check the permissions and owner of that "
                    "directory. If executing pip with sudo, you may want "
                    "sudo's -H flag.",
                    options.cache_dir,
                )
                options.cache_dir = None

            with BuildDirectory(options.build_dir,
                                delete=build_delete) as build_dir:
                requirement_set = RequirementSet(
                    build_dir=build_dir,
                    src_dir=options.src_dir,
                    download_dir=options.download_dir,
                    upgrade=options.upgrade,
                    upgrade_strategy=options.upgrade_strategy,
                    as_egg=options.as_egg,
                    ignore_installed=options.ignore_installed,
                    ignore_dependencies=options.ignore_dependencies,
                    ignore_requires_python=options.ignore_requires_python,
                    force_reinstall=options.force_reinstall,
                    use_user_site=options.use_user_site,
                    target_dir=temp_target_dir,
                    session=session,
                    pycompile=options.compile,
                    isolated=options.isolated_mode,
                    wheel_cache=wheel_cache,
                    require_hashes=options.require_hashes,
                )

                self.populate_requirement_set(
                    requirement_set, args, options, finder, session, self.name,
                    wheel_cache
                )

                if not requirement_set.has_requirements:
                    return

                try:
                    if (options.download_dir or not wheel or not
                            options.cache_dir):
                        # on -d don't do complex things like building
                        # wheels, and don't try to build wheels when wheel is
                        # not installed.
                        requirement_set.prepare_files(finder)
                    else:
                        # build wheels before install.
                        wb = WheelBuilder(
                            requirement_set,
                            finder,
                            build_options=[],
                            global_options=[],
                        )
                        # Ignore the result: a failed wheel will be
                        # installed from the sdist/vcs whatever.
                        wb.build(autobuilding=True)

                    if not options.download_dir:
                        requirement_set.install(
                            install_options,
                            global_options,
                            root=options.root_path,
                            prefix=options.prefix_path,
                        )

                        possible_lib_locations = get_lib_location_guesses(
                            user=options.use_user_site,
                            home=temp_target_dir,
                            root=options.root_path,
                            prefix=options.prefix_path,
                            isolated=options.isolated_mode,
                        )
                        reqs = sorted(
                            requirement_set.successfully_installed,
                            key=operator.attrgetter('name'))
                        items = []
                        for req in reqs:
                            item = req.name
                            try:
                                installed_version = get_installed_version(
                                    req.name, possible_lib_locations
                                )
                                if installed_version:
                                    item += '-' + installed_version
                            except Exception:
                                pass
                            items.append(item)
                        installed = ' '.join(items)
                        if installed:
                            logger.info('Successfully installed %s', installed)
                    else:
                        downloaded = ' '.join([
                            req.name
                            for req in requirement_set.successfully_downloaded
                        ])
                        if downloaded:
                            logger.info(
                                'Successfully downloaded %s', downloaded
                            )
                except PreviousBuildDirError:
                    options.no_clean = True
                    raise
                finally:
                    # Clean up
                    if not options.no_clean:
                        requirement_set.cleanup_files()

        if options.target_dir:
            ensure_dir(options.target_dir)

            # Checking both purelib and platlib directories for installed
            # packages to be moved to target directory
            lib_dir_list = []

            purelib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
            platlib_dir = distutils_scheme('', home=temp_target_dir)['platlib']

            if os.path.exists(purelib_dir):
                lib_dir_list.append(purelib_dir)
            if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
                lib_dir_list.append(platlib_dir)

            for lib_dir in lib_dir_list:
                for item in os.listdir(lib_dir):
                    target_item_dir = os.path.join(options.target_dir, item)
                    if os.path.exists(target_item_dir):
                        if not options.upgrade:
                            logger.warning(
                                'Target directory %s already exists. Specify '
                                '--upgrade to force replacement.',
                                target_item_dir
                            )
                            continue
                        if os.path.islink(target_item_dir):
                            logger.warning(
                                'Target directory %s already exists and is '
                                'a link. Pip will not automatically replace '
                                'links, please remove if replacement is '
                                'desired.',
                                target_item_dir
                            )
                            continue
                        if os.path.isdir(target_item_dir):
                            shutil.rmtree(target_item_dir)
                        else:
                            os.remove(target_item_dir)

                    shutil.move(
                        os.path.join(lib_dir, item),
                        target_item_dir
                    )
            shutil.rmtree(temp_target_dir)
        return requirement_set
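
Two rmtree calls do different jobs in this command: one clears an existing directory at the target location before shutil.move() drops the new item in, the other deletes the temporary --target staging directory once everything has been moved. A minimal sketch of the replace-then-move step (replace_item is a hypothetical helper, not a pip API):

import os
import shutil

def replace_item(src, dst):
    # Clear whatever currently occupies dst, then move src into place.
    if os.path.exists(dst):
        if os.path.isdir(dst) and not os.path.islink(dst):
            shutil.rmtree(dst)
        else:
            os.remove(dst)      # plain file or symlink
    shutil.move(src, dst)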

Example 35

Project: pip
Source File: wheel.py
View license
    def install(self, paths, maker, **kwargs):
        """
        Install a wheel to the specified paths. If kwarg ``warner`` is
        specified, it should be a callable, which will be called with two
        tuples indicating the wheel version of this software and the wheel
        version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
        If kwarg ``lib_only`` is True, only the purelib/platlib files are
        installed, and the headers, scripts, data and dist-info metadata are
        not written.

        The return value is a :class:`InstalledDistribution` instance unless
        ``options.lib_only`` is True, in which case the return value is ``None``.
        """

        dry_run = maker.dry_run
        warner = kwargs.get('warner')
        lib_only = kwargs.get('lib_only', False)

        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')

        wrapper = codecs.getreader('utf-8')

        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            if (file_version != self.wheel_version) and warner:
                warner(self.wheel_version, file_version)

            if message['Root-Is-Purelib'] == 'true':
                libdir = paths['purelib']
            else:
                libdir = paths['platlib']

            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row

            data_pfx = posixpath.join(data_dir, '')
            info_pfx = posixpath.join(info_dir, '')
            script_pfx = posixpath.join(data_dir, 'scripts', '')

            # make a new instance rather than a copy of maker's,
            # as we mutate it
            fileop = FileOperator(dry_run=dry_run)
            fileop.record = True    # so we can rollback if needed

            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!

            outfiles = []   # for RECORD writing

            # for script copying/shebang processing
            workdir = tempfile.mkdtemp()
            # set target dir later
            # we default add_launchers to False, as the
            # Python Launcher should be used instead
            maker.source_dir = workdir
            maker.target_dir = None
            try:
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    # The signature file won't be in RECORD,
                    # and we don't currently do anything with it
                    if u_arcname.endswith('/RECORD.jws'):
                        continue
                    row = records[u_arcname]
                    if row[2] and str(zinfo.file_size) != row[2]:
                        raise DistlibException('size mismatch for '
                                               '%s' % u_arcname)
                    if row[1]:
                        kind, value = row[1].split('=', 1)
                        with zf.open(arcname) as bf:
                            data = bf.read()
                        _, digest = self.get_hash(data, kind)
                        if digest != value:
                            raise DistlibException('digest mismatch for '
                                                   '%s' % arcname)

                    if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                        logger.debug('lib_only: skipping %s', u_arcname)
                        continue
                    is_script = (u_arcname.startswith(script_pfx)
                                 and not u_arcname.endswith('.exe'))

                    if u_arcname.startswith(data_pfx):
                        _, where, rp = u_arcname.split('/', 2)
                        outfile = os.path.join(paths[where], convert_path(rp))
                    else:
                        # meant for site-packages.
                        if u_arcname in (wheel_metadata_name, record_name):
                            continue
                        outfile = os.path.join(libdir, convert_path(u_arcname))
                    if not is_script:
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, outfile)
                        outfiles.append(outfile)
                        # Double check the digest of the written file
                        if not dry_run and row[1]:
                            with open(outfile, 'rb') as bf:
                                data = bf.read()
                                _, newdigest = self.get_hash(data, kind)
                                if newdigest != digest:
                                    raise DistlibException('digest mismatch '
                                                           'on write for '
                                                           '%s' % outfile)
                        if bc and outfile.endswith('.py'):
                            try:
                                pyc = fileop.byte_compile(outfile)
                                outfiles.append(pyc)
                            except Exception:
                                # Don't give up if byte-compilation fails,
                                # but log it and perhaps warn the user
                                logger.warning('Byte-compilation failed',
                                               exc_info=True)
                    else:
                        fn = os.path.basename(convert_path(arcname))
                        workname = os.path.join(workdir, fn)
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, workname)

                        dn, fn = os.path.split(outfile)
                        maker.target_dir = dn
                        filenames = maker.make(fn)
                        fileop.set_executable_mode(filenames)
                        outfiles.extend(filenames)

                if lib_only:
                    logger.debug('lib_only: returning None')
                    dist = None
                else:
                    # Generate scripts

                    # Try to get pydist.json so we can see if there are
                    # any commands to generate. If this fails (e.g. because
                    # of a legacy wheel), log a warning but don't give up.
                    commands = None
                    file_version = self.info['Wheel-Version']
                    if file_version == '1.0':
                        # Use legacy info
                        ep = posixpath.join(info_dir, 'entry_points.txt')
                        try:
                            with zf.open(ep) as bwf:
                                epdata = read_exports(bwf)
                            commands = {}
                            for key in ('console', 'gui'):
                                k = '%s_scripts' % key
                                if k in epdata:
                                    commands['wrap_%s' % key] = d = {}
                                    for v in epdata[k].values():
                                        s = '%s:%s' % (v.prefix, v.suffix)
                                        if v.flags:
                                            s += ' %s' % v.flags
                                        d[v.name] = s
                        except Exception:
                            logger.warning('Unable to read legacy script '
                                           'metadata, so cannot generate '
                                           'scripts')
                    else:
                        try:
                            with zf.open(metadata_name) as bwf:
                                wf = wrapper(bwf)
                                commands = json.load(wf).get('extensions')
                                if commands:
                                    commands = commands.get('python.commands')
                        except Exception:
                            logger.warning('Unable to read JSON metadata, so '
                                           'cannot generate scripts')
                    if commands:
                        console_scripts = commands.get('wrap_console', {})
                        gui_scripts = commands.get('wrap_gui', {})
                        if console_scripts or gui_scripts:
                            script_dir = paths.get('scripts', '')
                            if not os.path.isdir(script_dir):
                                raise ValueError('Valid script path not '
                                                 'specified')
                            maker.target_dir = script_dir
                            for k, v in console_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script)
                                fileop.set_executable_mode(filenames)

                            if gui_scripts:
                                options = {'gui': True }
                                for k, v in gui_scripts.items():
                                    script = '%s = %s' % (k, v)
                                    filenames = maker.make(script, options)
                                    fileop.set_executable_mode(filenames)

                    p = os.path.join(libdir, info_dir)
                    dist = InstalledDistribution(p)

                    # Write SHARED
                    paths = dict(paths)     # don't change passed in dict
                    del paths['purelib']
                    del paths['platlib']
                    paths['lib'] = libdir
                    p = dist.write_shared_locations(paths, dry_run)
                    if p:
                        outfiles.append(p)

                    # Write RECORD
                    dist.write_installed_files(outfiles, paths['prefix'],
                                               dry_run)
                return dist
            except Exception:  # pragma: no cover
                logger.exception('installation failed.')
                fileop.rollback()
                raise
            finally:
                shutil.rmtree(workdir)

Example 36

Project: pagure
Source File: git.py
View license
def merge_pull_request(
        session, request, username, request_folder, domerge=True):
    ''' Merge the specified pull-request.
    '''
    if request.remote:
        # Get the fork
        repopath = pagure.get_remote_repo_path(
            request.remote_git, request.branch_from)
    else:
        # Get the fork
        repopath = pagure.get_repo_path(request.project_from)

    fork_obj = PagureRepo(repopath)

    # Get the original repo
    parentpath = pagure.get_repo_path(request.project)

    # Clone the original repo into a temp folder
    newpath = tempfile.mkdtemp(prefix='pagure-pr-merge')
    new_repo = pygit2.clone_repository(parentpath, newpath)

    # Update the start and stop commits in the DB, one last time
    diff_commits = diff_pull_request(
        session, request, fork_obj, PagureRepo(parentpath),
        requestfolder=request_folder, with_diff=False)[0]

    if request.project.settings.get(
            'Enforce_signed-off_commits_in_pull-request', False):
        for commit in diff_commits:
            if 'signed-off-by' not in commit.message.lower():
                shutil.rmtree(newpath)
                raise pagure.exceptions.PagureException(
                    'This repo enforces that all commits are '
                    'signed off by their author. ')

    # Checkout the correct branch
    branch_ref = get_branch_ref(new_repo, request.branch)
    if not branch_ref:
        shutil.rmtree(newpath)
        raise pagure.exceptions.BranchNotFoundException(
            'Branch %s could not be found in the repo %s' % (
                request.branch, request.project.fullname
            ))

    new_repo.checkout(branch_ref)

    branch = get_branch_ref(fork_obj, request.branch_from)
    if not branch:
        shutil.rmtree(newpath)
        raise pagure.exceptions.BranchNotFoundException(
            'Branch %s could not be found in the repo %s' % (
                request.branch_from, request.project_from.fullname
                if request.project_from else request.remote_git
            ))

    repo_commit = fork_obj[branch.get_object().hex]

    ori_remote = new_repo.remotes[0]
    # Add the fork as remote repo
    reponame = '%s_%s' % (request.user.user, request.uid)

    remote = new_repo.create_remote(reponame, repopath)

    # Fetch the commits
    remote.fetch()

    merge = new_repo.merge(repo_commit.oid)
    if merge is None:
        mergecode = new_repo.merge_analysis(repo_commit.oid)[0]

    refname = '%s:refs/heads/%s' % (branch_ref.name, request.branch)
    if (
            (merge is not None and merge.is_uptodate)
            or
            (merge is None and
             mergecode & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE)):

        if domerge:
            pagure.lib.close_pull_request(
                session, request, username,
                requestfolder=request_folder)
            shutil.rmtree(newpath)
            try:
                session.commit()
            except SQLAlchemyError as err:  # pragma: no cover
                session.rollback()
                pagure.APP.logger.exception(err)
                raise pagure.exceptions.PagureException(
                    'Could not close this pull-request')
            raise pagure.exceptions.PagureException(
                'Nothing to do, changes were already merged')
        else:
            request.merge_status = 'NO_CHANGE'
            session.commit()
            shutil.rmtree(newpath)
            return 'NO_CHANGE'

    elif (
            (merge is not None and merge.is_fastforward)
            or
            (merge is None and
             mergecode & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD)):

        if domerge:
            head = new_repo.lookup_reference('HEAD').get_object()
            if not request.project.settings.get('always_merge', False):
                if merge is not None:
                    # This is depending on the pygit2 version
                    branch_ref.target = merge.fastforward_oid
                elif merge is None and mergecode is not None:
                    branch_ref.set_target(repo_commit.oid.hex)
                commit = repo_commit.oid.hex
            else:
                tree = new_repo.index.write_tree()
                user_obj = pagure.lib.get_user(session, username)
                author = pygit2.Signature(
                    user_obj.fullname.encode('utf-8'),
                    user_obj.default_email.encode('utf-8'))
                commit = new_repo.create_commit(
                    'refs/heads/%s' % request.branch,
                    author,
                    author,
                    'Merge #%s `%s`' % (request.id, request.title),
                    tree,
                    [head.hex, repo_commit.oid.hex])

            PagureRepo.push(ori_remote, refname)
            fork_obj.run_hook(
                head.hex, commit, 'refs/heads/%s' % request.branch,
                username)
        else:
            request.merge_status = 'FFORWARD'
            session.commit()
            shutil.rmtree(newpath)
            return 'FFORWARD'

    else:
        tree = None
        try:
            tree = new_repo.index.write_tree()
        except pygit2.GitError:
            shutil.rmtree(newpath)
            if domerge:
                raise pagure.exceptions.PagureException('Merge conflicts!')
            else:
                request.merge_status = 'CONFLICTS'
                session.commit()
                return 'CONFLICTS'

        if domerge:
            head = new_repo.lookup_reference('HEAD').get_object()
            user_obj = pagure.lib.get_user(session, username)
            author = pygit2.Signature(
                user_obj.fullname.encode('utf-8'),
                user_obj.default_email.encode('utf-8'))
            commit = new_repo.create_commit(
                'refs/heads/%s' % request.branch,
                author,
                author,
                'Merge #%s `%s`' % (request.id, request.title),
                tree,
                [head.hex, repo_commit.oid.hex])

            PagureRepo.push(ori_remote, refname)
            fork_obj.run_hook(
                head.hex, commit, 'refs/heads/%s' % request.branch,
                username)

        else:
            request.merge_status = 'MERGE'
            session.commit()
            shutil.rmtree(newpath)
            return 'MERGE'

    # Update status
    pagure.lib.close_pull_request(
        session, request, username,
        requestfolder=request_folder,
    )
    try:
        # Reset the merge_status of all opened PR to refresh their cache
        pagure.lib.reset_status_pull_request(session, request.project)
        session.commit()
    except SQLAlchemyError as err:  # pragma: no cover
        session.rollback()
        pagure.APP.logger.exception(err)
        shutil.rmtree(newpath)
        raise pagure.exceptions.PagureException(
            'Could not update this pull-request in the database')
    shutil.rmtree(newpath)

    return 'Changes merged!'
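
Because this function can exit through many raise/return paths, shutil.rmtree(newpath) has to be repeated before each one. A try/finally around the body is the usual way to collapse that into a single cleanup point; a minimal sketch of that alternative shape:

import shutil
import tempfile

newpath = tempfile.mkdtemp(prefix='pagure-pr-merge')
try:
    pass  # clone, check out branches, merge or detect conflicts ...
finally:
    # one cleanup point instead of an rmtree before every return and raise
    shutil.rmtree(newpath)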

Example 37

Project: pysb
Source File: kappa.py
View license
def run_simulation(model, time=10000, points=200, cleanup=True,
                   output_prefix=None, output_dir=None, flux_map=False,
                   perturbation=None, seed=None, verbose=False):
    """Runs the given model using KaSim and returns the parsed results.

    Parameters
    ----------
    model : pysb.core.Model
        The model to simulate/analyze using KaSim.
    time : number
        The amount of time (in arbitrary units) to run a simulation.
        Identical to the -t argument when using KaSim at the command line.
        Default value is 10000. If set to 0, no simulation will be run.
    points : integer
        The number of data points to collect for plotting.
        Identical to the -p argument when using KaSim at the command line.
        Default value is 200. Note that the number of points actually returned
        by the simulator will be points + 1 (including the 0 point).
    cleanup : boolean
        Specifies whether output files produced by KaSim should be deleted
        after execution is completed. Default value is True.
    output_prefix: str
        Prefix of the temporary directory name. Default is
        'tmpKappa_<model name>_'.
    output_dir : string
        The directory in which to create the temporary directory for
        the .ka and other output files. Defaults to the system temporary file
        directory (e.g. /tmp). If the specified directory does not exist,
        an Exception is thrown.
    flux_map: boolean
        Specifies whether or not to produce the flux map (generated over the
        full duration of the simulation). Default value is False.
    perturbation : string or None
        Optional perturbation language syntax to be appended to the Kappa file.
        See KaSim manual for more details. Default value is None (no
        perturbation).
    seed : integer
        A seed integer for KaSim random number generator. Set to None to
        allow KaSim to use a random seed (default) or supply a seed for
        deterministic behaviour (e.g. for testing)
    verbose : boolean
        Whether to pass the output of KaSim through to stdout/stderr.

    Returns
    -------
    If flux_map is False, returns the kasim simulation data as a Numpy ndarray.
    Data is accessed using the syntax::

            results[index_name]

    The index 'time' gives the time coordinates of the simulation. Data for the
    observables can be accessed by indexing the array with the names of the
    observables. Each entry in the ndarray has length points + 1, due to the
    inclusion of both the zero point and the final timepoint.

    If flux_map is True, returns an instance of SimulationResult, a namedtuple
    with two members, `timecourse` and `flux_map`. The `timecourse` field
    contains the simulation ndarray, and the `flux_map` field is an instance of
    a pygraphviz AGraph containing the flux map. The flux map can be rendered
    as a pdf using the dot layout program as follows::

        fluxmap.draw('fluxmap.pdf', prog='dot')
    """

    gen = KappaGenerator(model)

    if output_prefix is None:
        output_prefix = 'tmpKappa_%s_' % model.name

    base_directory = tempfile.mkdtemp(prefix=output_prefix, dir=output_dir)

    base_filename = os.path.join(base_directory, model.name)
    kappa_filename = base_filename + '.ka'
    fm_filename = base_filename + '_fm.dot'
    out_filename = base_filename + '.out'

    args = ['-i', kappa_filename, '-t', str(time), '-p', str(points),
            '-o', out_filename]

    if seed:
        args.extend(['-seed', str(seed)])

    # Generate the Kappa model code from the PySB model and write it to
    # the Kappa file:
    with open(kappa_filename, 'w') as kappa_file:
        kappa_file.write(gen.get_content())
        # If desired, add instructions to the kappa file to generate the
        # flux map:
        if flux_map:
            kappa_file.write('%%mod: [true] do $FLUX "%s" [true]\n' %
                             fm_filename)
        # If any perturbation language code has been passed in, add it to
        # the Kappa file:
        if perturbation:
            kappa_file.write('\n%s\n' % perturbation)

    # Run KaSim
    kasim_path = _get_kappa_path('KaSim')
    p = subprocess.Popen([kasim_path] + args,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if verbose:
        for line in iter(p.stdout.readline, b''):
            print('@@', line, end='')
    (p_out, p_err) = p.communicate()

    if p.returncode:
        raise KasimInterfaceError(p_out + '\n' + p_err)

    # The simulation data, as a numpy array
    data = _parse_kasim_outfile(out_filename)

    if flux_map:
        try:
            import pygraphviz
            flux_graph = pygraphviz.AGraph(fm_filename)
        except ImportError:
            if cleanup:
                raise RuntimeError(
                        "Couldn't import pygraphviz, which is "
                        "required to return the flux map as a "
                        "pygraphviz AGraph object. Either install "
                        "pygraphviz or set cleanup=False to retain "
                        "dot files.")
            else:
                warnings.warn(
                        "pygraphviz could not be imported so no AGraph "
                        "object returned (returning None); flux map "
                        "dot file available at %s" % fm_filename)
                flux_graph = None

    if cleanup:
        shutil.rmtree(base_directory)

    # If a flux map was generated, return both the simulation output and the
    # flux map as a pygraphviz graph
    if flux_map:
        return SimulationResult(data, flux_graph)
    # If no flux map was requested, return only the simulation data
    else:
        return data
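
Here deletion of the KaSim working directory is gated on the cleanup flag, so callers can keep the .ka, .out and flux-map files around for debugging. A minimal sketch of flag-controlled cleanup (the body of the try block is a placeholder for running the tool):

import shutil
import tempfile

def run(cleanup=True):
    base_directory = tempfile.mkdtemp(prefix='tmpKappa_')
    try:
        result = None  # write input files, run the tool, parse its output
        return result
    finally:
        if cleanup:
            shutil.rmtree(base_directory)
        # with cleanup=False the directory is left in place for inspection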

Example 38

Project: python-control
Source File: runtests.py
View license
def main(argv):
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("--verbose", "-v", action="count", default=1,
                        help="more verbosity")
    parser.add_argument("--no-build", "-n", action="store_true", default=False,
                        help="do not build the project (use system installed version)")
    parser.add_argument("--build-only", "-b", action="store_true", default=False,
                        help="just build, do not run any tests")
    parser.add_argument("--doctests", action="store_true", default=False,
                        help="Run doctests in module")
    parser.add_argument("--coverage_html", action="store_true", default=False,
                        help=("report coverage of project code. HTML output goes "
                              "under build/coverage"))
    parser.add_argument("--coverage", action="store_true", default=False,
                        help=("report coverage of project code."))
    parser.add_argument("--gcov", action="store_true", default=False,
                        help=("enable C code coverage via gcov (requires GCC). "
                              "gcov output goes to build/**/*.gc*"))
    parser.add_argument("--lcov-html", action="store_true", default=False,
                        help=("produce HTML for C code coverage information "
                              "from a previous run with --gcov. "
                              "HTML output goes to build/lcov/"))
    parser.add_argument("--mode", "-m", default="fast",
                        help="'fast', 'full', or something that could be "
                             "passed to nosetests -A [default: fast]")
    parser.add_argument("--submodule", "-s", default=None,
                        help="Submodule whose tests to run (cluster, constants, ...)")
    parser.add_argument("--pythonpath", "-p", default=None,
                        help="Paths to prepend to PYTHONPATH")
    parser.add_argument("--tests", "-t", action='append',
                        help="Specify tests to run")
    parser.add_argument("--python", action="store_true",
                        help="Start a Python shell with PYTHONPATH set")
    parser.add_argument("--ipython", "-i", action="store_true",
                        help="Start IPython shell with PYTHONPATH set")
    parser.add_argument("--shell", action="store_true",
                        help="Start Unix shell with PYTHONPATH set")
    parser.add_argument("--debug", "-g", action="store_true",
                        help="Debug build")
    parser.add_argument("--show-build-log", action="store_true",
                        help="Show build output rather than using a log file")
    parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
                        help="Arguments to pass to Nose, Python or shell")
    args = parser.parse_args(argv)

    if args.lcov_html:
        # generate C code coverage output
        lcov_generate()
        sys.exit(0)

    if args.pythonpath:
        for p in reversed(args.pythonpath.split(os.pathsep)):
            sys.path.insert(0, p)

    if args.gcov:
        gcov_reset_counters()

    if not args.no_build:
        site_dir = build_project(args)
        sys.path.insert(0, site_dir)
        os.environ['PYTHONPATH'] = site_dir

    extra_argv = args.args[:]
    if extra_argv and extra_argv[0] == '--':
        extra_argv = extra_argv[1:]

    if args.python:
        if extra_argv:
            # Don't use subprocess, since we don't want to include the
            # current path in PYTHONPATH.
            sys.argv = extra_argv
            with open(extra_argv[0], 'r') as f:
                script = f.read()
            sys.modules['__main__'] = imp.new_module('__main__')
            ns = dict(__name__='__main__',
                      __file__=extra_argv[0])
            exec_(script, ns)
            sys.exit(0)
        else:
            import code
            code.interact()
            sys.exit(0)

    if args.ipython:
        import IPython
        IPython.embed(user_ns={})
        sys.exit(0)

    if args.shell:
        shell = os.environ.get('SHELL', 'sh')
        print("Spawning a Unix shell...")
        os.execv(shell, [shell] + extra_argv)
        sys.exit(1)

    if args.coverage_html:
        dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
        fn = os.path.join(dst_dir, 'coverage_html.js')
        if os.path.isdir(dst_dir) and os.path.isfile(fn):
            shutil.rmtree(dst_dir)
        extra_argv += ['--cover-html',
                       '--cover-html-dir='+dst_dir]

    if args.coverage:
        extra_argv += ['--cover-erase', '--with-coverage',
                       '--cover-package=control']

    test_dir = os.path.join(ROOT_DIR, 'build', 'test')

    if args.build_only:
        sys.exit(0)
    elif args.submodule:
        modname = PROJECT_MODULE + '.' + args.submodule
        try:
            __import__(modname)
            test = sys.modules[modname].test
        except (ImportError, KeyError, AttributeError):
            print("Cannot run tests for %s" % modname)
            sys.exit(2)
    elif args.tests:
        def fix_test_path(x):
            # fix up test path
            p = x.split(':')
            p[0] = os.path.relpath(os.path.abspath(p[0]),
                                   test_dir)
            return ':'.join(p)

        tests = [fix_test_path(x) for x in args.tests]

        def test(*a, **kw):
            extra_argv = kw.pop('extra_argv', ())
            extra_argv = extra_argv + tests[1:]
            kw['extra_argv'] = extra_argv
            from numpy.testing import Tester
            return Tester(tests[0]).test(*a, **kw)
    else:
        __import__(PROJECT_MODULE)
        test = sys.modules[PROJECT_MODULE].test

    # Run the tests under build/test
    try:
        shutil.rmtree(test_dir)
    except OSError:
        pass
    try:
        os.makedirs(test_dir)
    except OSError:
        pass

    cwd = os.getcwd()
    try:
        os.chdir(test_dir)
        result = test(args.mode,
                      verbose=args.verbose,
                      extra_argv=extra_argv,
                      doctests=args.doctests,
                      coverage=args.coverage)
    finally:
        os.chdir(cwd)

    if result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
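
Before re-running the tests, the script removes build/test with rmtree and recreates it, swallowing OSError in both steps so a missing or already-existing directory is not fatal. A minimal sketch:

import os
import shutil

test_dir = os.path.join("build", "test")
try:
    shutil.rmtree(test_dir)     # drop output from a previous run
except OSError:
    pass                        # nothing to remove yet
try:
    os.makedirs(test_dir)
except OSError:
    pass                        # already exists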

Example 39

Project: judge
Source File: StdChal.py
View license
    @gen.coroutine
    def start(self):
        '''Start the challenge.

        Returns:
            dict: Challenge result.

        '''

        cache_hash = None
        cache_gid = None
        # Check if special judge needs to rebuild.
        if self.judge_typ in ['ioredir']:
            hashproc = process.Subprocess( \
                ['./HashDir.py', self.res_path + '/check'], \
                stdout=process.Subprocess.STREAM)
            dirhash = yield hashproc.stdout.read_until(b'\n')
            dirhash = int(dirhash.decode('utf-8').rstrip('\n'), 16)

            ret = StdChal.build_cache_find(self.res_path)
            if ret is not None and ret[0] == dirhash:
                cache_hash, cache_gid = ret
                judge_ioredir = IORedirJudge('container/standard', \
                    '/cache/%x'%cache_hash)

            else:
                cache_hash = dirhash
                _, cache_gid = StdChal.get_standard_ugid()
                build_ugid = StdChal.get_standard_ugid()
                build_relpath = '/cache/%x'%cache_hash
                build_path = 'container/standard' + build_relpath

                judge_ioredir = IORedirJudge('container/standard', \
                    build_relpath)
                if not (yield judge_ioredir.build(build_ugid, self.res_path)):
                    return [(0, 0, STATUS_ERR)] * len(self.test_list), ''
                FileUtils.setperm(build_path, \
                    Privilege.JUDGE_UID, cache_gid, umask=0o750)
                with StackContext(Privilege.fullaccess):
                    os.chmod(build_path, 0o750)

                StdChal.build_cache_update(self.res_path, cache_hash, cache_gid)
                print('StdChal %d built checker %x'%(self.chal_id, cache_hash))

            StdChal.build_cache_incref(cache_hash)

        print('StdChal %d started'%self.chal_id)

        # Create challenge environment.
        self.chal_path = 'container/standard/home/%d'%self.uniqid
        with StackContext(Privilege.fileaccess):
            os.mkdir(self.chal_path, mode=0o771)

        try:
            yield self.prefetch()
            print('StdChal %d prefetched'%self.chal_id)

            if self.comp_typ in ['g++', 'clang++']:
                ret, verdict = yield self.comp_cxx()

            elif self.comp_typ == 'makefile':
                ret, verdict = yield self.comp_make()

            elif self.comp_typ == 'python3':
                ret, verdict = yield self.comp_python()

            if ret != PyExt.DETECT_NONE:
                return [(0, 0, STATUS_CE, verdict)] * len(self.test_list)
            print('StdChal %d compiled'%self.chal_id)

            # Prepare test arguments
            if self.comp_typ == 'python3':
                exefile_path = self.chal_path \
                    + '/compile/__pycache__/test.cpython-34.pyc'
                exe_path = '/usr/bin/python3.4'
                argv = ['./a.out']
                envp = ['HOME=/', 'LANG=en_US.UTF-8']

            else:
                exefile_path = self.chal_path + '/compile/a.out'
                exe_path = './a.out'
                argv = []
                envp = []

            # Prepare judge
            test_future = []
            if self.judge_typ == 'diff':
                for test in self.test_list:
                    test_future.append(self.judge_diff(
                        exefile_path,
                        exe_path, argv, envp,
                        test['in'], test['ans'],
                        test['timelimit'], test['memlimit']))
            elif self.judge_typ == 'ioredir':
                for test in self.test_list:
                    check_uid, _ = StdChal.get_standard_ugid()
                    test_uid, test_gid = StdChal.get_restrict_ugid()
                    test_future.append(judge_ioredir.judge( \
                        exefile_path, exe_path, argv, envp, \
                        (check_uid, cache_gid), \
                        (test_uid, test_gid), \
                        '/home/%d/run_%d'%(self.uniqid, test_uid), \
                        test, self.metadata))

            # Emit tests
            test_result = yield gen.multi(test_future)
            ret_result = list()
            for result in test_result:
                test_pass, data, verdict = result
                runtime, peakmem, error = data
                status = STATUS_ERR
                if error == PyExt.DETECT_NONE:
                    if test_pass is True:
                        status = STATUS_AC
                    else:
                        status = STATUS_WA
                elif error == PyExt.DETECT_OOM:
                    status = STATUS_MLE
                elif error == PyExt.DETECT_TIMEOUT \
                    or error == PyExt.DETECT_FORCETIMEOUT:
                    status = STATUS_TLE
                elif error == PyExt.DETECT_EXITERR:
                    status = STATUS_RE
                else:
                    status = STATUS_ERR
                ret_result.append((runtime, peakmem, status, verdict))

            return ret_result

        finally:
            if cache_hash is not None:
                StdChal.build_cache_decref(cache_hash)
            with StackContext(Privilege.fileaccess):
                shutil.rmtree(self.chal_path)
            print('StdChal %d done'%self.chal_id)
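
Stripped of the judging details, Example 39 uses shutil.rmtree in a create/try/finally shape: the per-challenge directory is removed even when compilation or judging raises or returns early. A minimal sketch of that pattern only (run_in_scratch_dir, uniqid and work are placeholder names, and the privilege contexts from the example are omitted):

import os
import shutil
import tempfile

def run_in_scratch_dir(uniqid, work):
    # Create a per-run working directory, run `work` inside it, and always
    # remove it afterwards; the finally block fires on success, on an
    # exception, and on an early return.
    chal_path = os.path.join(tempfile.gettempdir(), 'chal-%d' % uniqid)
    os.mkdir(chal_path, mode=0o771)
    try:
        return work(chal_path)
    finally:
        shutil.rmtree(chal_path)

if __name__ == '__main__':
    print(run_in_scratch_dir(1, os.listdir))  # prints [] for the empty dir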

Example 40

Project: saga-python
Source File: go_remote_dir_copy.py
View license
def main():

    tmp_dir = None

    try:

        tmp_dir = tempfile.mkdtemp(prefix='saga-test-', suffix='-%s' % TEST_NAME,
                                   dir=os.path.expanduser('~/tmp'))

        print 'tmpdir: %s' % tmp_dir

        ctx = saga.Context("x509")
        ctx.user_proxy = '/Users/mark/proj/myproxy/xsede.x509'

        session = saga.Session()
        session.add_context(ctx)

        source_url = saga.Url()
        source_url.schema = 'go'
        source_url.host = SOURCE
        source_url.path = tmp_dir

        target_url = saga.Url()
        target_url.schema = 'go'
        target_url.host = TARGET
        target_url.path = os.path.join('~/saga-tests/', os.path.basename(tmp_dir))

        print "Point to local Directory through GO ..."
        d = saga.filesystem.Directory(source_url)
        print "And check ..."
        assert d.is_dir() == True
        assert d.is_file() == False
        assert d.is_link() == False
        d.close()
        print "Point to remote Directory through GO ..."
        d = saga.filesystem.Directory(target_url, flags=saga.filesystem.CREATE_PARENTS)
        print "And check ..."
        assert d.is_dir() == True
        assert d.is_file() == False
        assert d.is_link() == False
        d.close()

        print "Point to local file through GO, before creation ..."
        caught = False
        try:
            saga.filesystem.File(os.path.join(str(source_url), FILE_A_level_0))
        except saga.DoesNotExist:
            caught = True
        assert caught == True

        print "Create actual file ..."
        touch(tmp_dir, FILE_A_level_0)
        print "Try again ..."
        f = saga.filesystem.File(os.path.join(str(source_url), FILE_A_level_0))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print "Copy local file to remote, using different filename ..."
        d = saga.filesystem.Directory(target_url, flags=saga.filesystem.CREATE_PARENTS)
        d.copy(os.path.join(str(source_url), FILE_A_level_0), FILE_A_level_0+COPIED_SUFFIX)
        d.close()
        f = saga.filesystem.File(os.path.join(str(target_url), FILE_A_level_0+COPIED_SUFFIX))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print "Copy local file to remote, keeping filename in tact ..."
        d = saga.filesystem.Directory(target_url, flags=saga.filesystem.CREATE_PARENTS)
        d.copy(os.path.join(str(source_url), FILE_A_level_0), FILE_A_level_0)
        d.close()
        f = saga.filesystem.File(os.path.join(str(target_url), FILE_A_level_0))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print 'Create file in level 1 ...'
        tree = LEVEL_1
        os.mkdir(os.path.join(tmp_dir, tree))
        touch(os.path.join(tmp_dir, tree), FILE_A_level_1)
        print "Test local file ..."
        f = saga.filesystem.File(os.path.join(str(source_url), tree, FILE_A_level_1))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print "Copy local file to remote, keeping filename in tact ..."
        d = saga.filesystem.Directory(os.path.join(str(target_url), tree), flags=saga.filesystem.CREATE_PARENTS)
        d.copy(os.path.join(str(source_url), tree, FILE_A_level_1), FILE_A_level_1)
        d.close()

        print "Test file after transfer ..."
        f = saga.filesystem.File(os.path.join(str(target_url), tree, FILE_A_level_1))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print "Copy non-existent local file to remote, keeping filename in tact ..."
        d = saga.filesystem.Directory(str(target_url), flags=saga.filesystem.CREATE_PARENTS)
        try:
            d.copy(os.path.join(str(source_url), NON_EXISTING_FILE), NON_EXISTING_FILE)
        except saga.DoesNotExist:
            caught = True
        assert caught == True

        print "Test file after (non-)transfer ..."
        caught = False
        try:
            saga.filesystem.File(os.path.join(str(target_url), NON_EXISTING_FILE))
        except saga.DoesNotExist:
            caught = True
        assert caught == True

        # destination = "go://gridftp.stampede.tacc.xsede.org/~/tmp/"
        # #destination = "go://oasis-dm.sdsc.xsede.org/~/tmp/"
        # #destination = "go://ncsa#BlueWaters/~/tmp/"
        # #destination = "go://marksant#netbook/Users/mark/tmp/go/"
        # src_filename = "my_file"
        # dst_filename = "my_file_"
        # rt_filename = "my_file__"
        #
        # # open home directory on a remote machine
        # source_dir = saga.filesystem.Directory(source)
        #
        # # copy .bash_history to /tmp/ on the local machine
        # source_dir.copy(src_filename, os.path.join(destination, dst_filename))
        #
        # # list 'm*' in local /tmp/ directory
        # dest_dir = saga.filesystem.Directory(destination)
        # for entry in dest_dir.list(pattern='%s*' % src_filename[0]):
        #     print entry
        #
        # dest_file = saga.filesystem.File(os.path.join(destination, dst_filename))
        # assert dest_file.is_file() == True
        # assert dest_file.is_link() == False
        # assert dest_file.is_dir() == False
        # print 'Size: %d' % dest_file.get_size()
        #
        # dest_file.copy(source)
        #
        # dest_file.copy(os.path.join(source+'broken', rt_filename))

        print "Before return 0"
        return 0

    except saga.SagaException as ex:
        # Catch all saga exceptions
        print "An exception occurred: (%s) %s " % (ex.type, (str(ex)))
        # Trace back the exception. That can be helpful for debugging.
        print " \n*** Backtrace:\n %s" % ex.traceback

        print "before return -1"
        return -1

    finally:

        print "and finally ..."

        if CLEANUP and tmp_dir:
            shutil.rmtree(tmp_dir)
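
Example 40 guards its cleanup twice: tmp_dir starts as None so a failed mkdtemp is never "removed", and a CLEANUP flag lets the directory be kept for post-mortem inspection. A condensed sketch of just that scaffolding (the prefix and flag mirror the example; the body is a placeholder):

import os
import shutil
import tempfile

CLEANUP = True  # flip to False to keep the directory around for debugging

def main():
    tmp_dir = None
    try:
        tmp_dir = tempfile.mkdtemp(prefix='saga-test-')
        # ... exercise the code under test inside tmp_dir ...
        open(os.path.join(tmp_dir, 'touched'), 'w').close()
        return 0
    finally:
        # Only remove the directory if mkdtemp actually ran.
        if CLEANUP and tmp_dir:
            shutil.rmtree(tmp_dir)

if __name__ == '__main__':
    raise SystemExit(main())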

Example 41

Project: rebase-helper
Source File: application.py
View license
    def build_packages(self):
        """Function calls build class for building packages"""
        if self.conf.buildtool == KojiBuildTool.CMD and not koji_builder:
            logger.info('Importing module koji failed. Switching to mock builder.')
            self.conf.buildtool = MockBuildTool.CMD
        try:
            builder = Builder(self.conf.buildtool)
        except NotImplementedError as ni_e:
            raise RebaseHelperError('%s. Supported build tools are %s' %
                                    (six.text_type(ni_e), Builder.get_supported_tools()))

        for version in ['old', 'new']:
            spec_object = self.spec_file if version == 'old' else self.rebase_spec_file
            build_dict = {}
            task_id = None
            if self.conf.build_tasks is None:
                build_dict['name'] = spec_object.get_package_name()
                build_dict['version'] = spec_object.get_version()
                patches = [x.get_path() for x in spec_object.get_patches()]
                spec = spec_object.get_path()
                sources = spec_object.get_sources()
                logger.info('Building packages for %s version %s',
                            spec_object.get_package_name(),
                            spec_object.get_full_version())
            else:
                if version == 'old':
                    task_id = self.conf.build_tasks[0]
                else:
                    task_id = self.conf.build_tasks[1]
            results_dir = os.path.join(self.results_dir, version)
            build_dict['builds_nowait'] = self.conf.builds_nowait
            build_dict['build_tasks'] = self.conf.build_tasks
            build_dict['builder_options'] = self.conf.builder_options

            files = {}
            number_retries = 0
            while self.conf.build_retries != number_retries:
                try:
                    if self.conf.build_tasks is None:
                        build_dict.update(builder.build(spec, sources, patches, results_dir, **build_dict))
                    if not self.conf.builds_nowait:
                        if self.conf.buildtool == KojiBuildTool.CMD:
                            while True:
                                kh = KojiHelper()
                                build_dict['rpm'], build_dict['logs'] = kh.get_koji_tasks(
                                    build_dict['koji_task_id'],
                                    results_dir
                                )
                                if build_dict['rpm']:
                                    break
                    else:
                        if self.conf.build_tasks:
                            if self.conf.buildtool == KojiBuildTool.CMD:
                                kh = KojiHelper()
                                try:
                                    build_dict['rpm'], build_dict['logs'] = kh.get_koji_tasks(task_id, results_dir)
                                    results_store.set_build_data(version, build_dict)
                                    if not build_dict['rpm']:
                                        return False
                                except TypeError:
                                    logger.info('Koji tasks are not finished yet. Try again later')
                                    return False
                            elif self.conf.buildtool == CoprBuildTool.CMD:
                                copr_helper = CoprHelper()
                                client = copr_helper.get_client()
                                build_id = int(task_id)
                                status = copr_helper.get_build_status(client, build_id)
                                if status in ['importing', 'pending', 'starting', 'running']:
                                    logger.info('Copr build is not finished yet. Try again later')
                                    return False
                                else:
                                    build_dict['rpm'], build_dict['logs'] = copr_helper.download_build(
                                        client,
                                        build_id,
                                        results_dir
                                    )
                                    if status not in ['succeeded', 'skipped']:
                                        logger.info('Copr build {} did not complete successfully'.format(build_id))
                                        return False
                    # Build finished properly, so break out of the retry loop.
                    results_store.set_build_data(version, build_dict)
                    break

                except SourcePackageBuildError:
                    #  always fail for original version
                    if version == 'old':
                        raise RebaseHelperError('Creating old SRPM package failed.')
                    logger.error('Building source package failed.')
                    #  TODO: implement log analyzer for SRPMs and add the checks here!!!
                    raise

                except BinaryPackageBuildError:
                    #  always fail for original version
                    rpm_dir = os.path.join(results_dir, 'RPM')
                    build_dict.update(builder.get_logs())
                    results_store.set_build_data(version, build_dict)
                    build_log = 'build.log'
                    build_log_path = os.path.join(rpm_dir, build_log)
                    if version == 'old':
                        error_message = 'Building old RPM package failed. Check logs: {} '.format(
                            builder.get_logs().get('logs', 'N/A')
                        )
                        raise RebaseHelperError(error_message)
                    logger.error('Building binary packages failed.')
                    msg = 'Building package failed'
                    try:
                        files = BuildLogAnalyzer.parse_log(rpm_dir, build_log)
                    except BuildLogAnalyzerMissingError:
                        raise RebaseHelperError('Build log %s does not exist', build_log_path)
                    except BuildLogAnalyzerMakeError:
                        raise RebaseHelperError('%s during build. Check log %s', msg, build_log_path)
                    except BuildLogAnalyzerPatchError:
                        raise RebaseHelperError('%s during patching. Check log %s', msg, build_log_path)
                    except RuntimeError:
                        if self.conf.build_retries == number_retries:
                            raise RebaseHelperError('%s with unknown reason. Check log %s', msg, build_log_path)

                    if 'missing' in files:
                        missing_files = '\n'.join(files['missing'])
                        logger.info('Files not packaged in the SPEC file:\n%s', missing_files)
                    elif 'deleted' in files:
                        deleted_files = '\n'.join(files['deleted'])
                        logger.warning('Removed files packaged in SPEC file:\n%s', deleted_files)
                    else:
                        if self.conf.build_retries == number_retries:
                            raise RebaseHelperError("Build failed, but no issues were found in the build log %s",
                                                    build_log)
                    self.rebase_spec_file.modify_spec_files_section(files)

                if not self.conf.non_interactive:
                    msg = 'Do you want rebase-helper to try to build the packages one more time'
                    if not ConsoleHelper.get_message(msg):
                        raise KeyboardInterrupt
                else:
                    logger.warning('Some patches were not successfully applied')
                #  build just failed, otherwise we would break out of the while loop
                logger.debug('Number of retries is %s', self.conf.build_retries)
                if os.path.exists(os.path.join(results_dir, 'RPM')):
                    shutil.rmtree(os.path.join(results_dir, 'RPM'))
                if os.path.exists(os.path.join(results_dir, 'SRPM')):
                    shutil.rmtree(os.path.join(results_dir, 'SRPM'))
                number_retries += 1
            if self.conf.build_retries == number_retries:
                raise RebaseHelperError('Building package failed with unknown reason. Check all available log files.')

        return True
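
Between retries, Example 41 removes the RPM and SRPM result directories only when they exist, so the next attempt starts from a clean slate and a never-created directory does not make the cleanup fail. A small helper expressing just that step (clean_result_dirs is an illustrative name):

import os
import shutil

def clean_result_dirs(results_dir, names=('RPM', 'SRPM')):
    # Delete the given result subdirectories if present; missing ones are
    # simply skipped, mirroring the os.path.exists() checks in the retry loop.
    for name in names:
        path = os.path.join(results_dir, name)
        if os.path.exists(path):
            shutil.rmtree(path)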

Example 42

Project: headphones
Source File: music_encoder.py
View license
def encode(albumPath):
    use_xld = headphones.CONFIG.ENCODER == 'xld'

    # Return if xld details not found
    if use_xld:
        (xldProfile, xldFormat, xldBitrate) = getXldProfile.getXldProfile(
            headphones.CONFIG.XLDPROFILE)
        if not xldFormat:
            logger.error('Details for xld profile \'%s\' not found, files will not be re-encoded',
                         xldProfile)
            return None
    else:
        xldProfile = None

    tempDirEncode = os.path.join(albumPath, "temp")
    musicFiles = []
    musicFinalFiles = []
    musicTempFiles = []
    encoder = ""

    # Create temporary directory, but remove the old one first.
    try:
        if os.path.exists(tempDirEncode):
            shutil.rmtree(tempDirEncode)
            time.sleep(1)

        os.mkdir(tempDirEncode)
    except Exception as e:
        logger.exception("Unable to create temporary directory")
        return None

    for r, d, f in os.walk(albumPath):
        for music in f:
            if any(music.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
                if not use_xld:
                    encoderFormat = headphones.CONFIG.ENCODEROUTPUTFORMAT.encode(
                        headphones.SYS_ENCODING)
                else:
                    xldMusicFile = os.path.join(r, music)
                    xldInfoMusic = MediaFile(xldMusicFile)
                    encoderFormat = xldFormat

                if headphones.CONFIG.ENCODERLOSSLESS:
                    ext = os.path.normpath(os.path.splitext(music)[1].lstrip(".")).lower()
                    if not use_xld and ext == 'flac' or use_xld and (
                            ext != xldFormat and (xldInfoMusic.bitrate / 1000 > 400)):
                        musicFiles.append(os.path.join(r, music))
                        musicTemp = os.path.normpath(
                            os.path.splitext(music)[0] + '.' + encoderFormat)
                        musicTempFiles.append(os.path.join(tempDirEncode, musicTemp))
                    else:
                        logger.debug('%s is already encoded', music)
                else:
                    musicFiles.append(os.path.join(r, music))
                    musicTemp = os.path.normpath(os.path.splitext(music)[0] + '.' + encoderFormat)
                    musicTempFiles.append(os.path.join(tempDirEncode, musicTemp))

    if headphones.CONFIG.ENCODER_PATH:
        encoder = headphones.CONFIG.ENCODER_PATH.encode(headphones.SYS_ENCODING)
    else:
        if use_xld:
            encoder = os.path.join('/Applications', 'xld')
        elif headphones.CONFIG.ENCODER == 'lame':
            if headphones.SYS_PLATFORM == "win32":
                # NEED THE DEFAULT LAME INSTALL ON WIN!
                encoder = "C:/Program Files/lame/lame.exe"
            else:
                encoder = "lame"
        elif headphones.CONFIG.ENCODER == 'ffmpeg':
            if headphones.SYS_PLATFORM == "win32":
                encoder = "C:/Program Files/ffmpeg/bin/ffmpeg.exe"
            else:
                encoder = "ffmpeg"
        elif headphones.CONFIG.ENCODER == 'libav':
            if headphones.SYS_PLATFORM == "win32":
                encoder = "C:/Program Files/libav/bin/avconv.exe"
            else:
                encoder = "avconv"

    i = 0
    encoder_failed = False
    jobs = []

    for music in musicFiles:
        infoMusic = MediaFile(music)
        encode = False

        if use_xld:
            if xldBitrate and (infoMusic.bitrate / 1000 <= xldBitrate):
                logger.info('%s has bitrate <= %skb, will not be re-encoded',
                            music.decode(headphones.SYS_ENCODING, 'replace'), xldBitrate)
            else:
                encode = True
        elif headphones.CONFIG.ENCODER == 'lame':
            if not any(
                    music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.' + x) for x
                    in ["mp3", "wav"]):
                logger.warn('Lame cannot encode %s format for %s, use ffmpeg',
                            os.path.splitext(music)[1], music)
            else:
                if music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.mp3') and (
                        int(infoMusic.bitrate / 1000) <= headphones.CONFIG.BITRATE):
                    logger.info('%s has bitrate <= %skb, will not be re-encoded', music,
                                headphones.CONFIG.BITRATE)
                else:
                    encode = True
        else:
            if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'ogg':
                if music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.ogg'):
                    logger.warn('Cannot re-encode .ogg %s',
                                music.decode(headphones.SYS_ENCODING, 'replace'))
                else:
                    encode = True
            else:
                if music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.' + headphones.CONFIG.ENCODEROUTPUTFORMAT) and (int(infoMusic.bitrate / 1000) <= headphones.CONFIG.BITRATE):
                    logger.info('%s has bitrate <= %skb, will not be re-encoded', music, headphones.CONFIG.BITRATE)
                else:
                    encode = True
        # encode
        if encode:
            job = (encoder, music, musicTempFiles[i], albumPath, xldProfile)
            jobs.append(job)
        else:
            musicFiles[i] = None
            musicTempFiles[i] = None

        i = i + 1

    # Encode music files
    if len(jobs) > 0:
        processes = 1

        # Use multicore if enabled
        if headphones.CONFIG.ENCODER_MULTICORE:
            if headphones.CONFIG.ENCODER_MULTICORE_COUNT == 0:
                processes = multiprocessing.cpu_count()
            else:
                processes = headphones.CONFIG.ENCODER_MULTICORE_COUNT

            logger.debug("Multi-core encoding enabled, spawning %d processes",
                         processes)

        # Use multiprocessing only if it's worth the overhead and if it is
        # enabled. If not, fall back to the old-fashioned single-process way.
        if processes > 1:
            with logger.listener():
                pool = multiprocessing.Pool(processes=processes)
                results = pool.map_async(command_map, jobs)

                # No new processes will be created, so close it and wait for all
                # processes to finish
                pool.close()
                pool.join()

                # Retrieve the results
                results = results.get()
        else:
            results = map(command_map, jobs)

        # The results are either True or False, so determine if one is False
        encoder_failed = not all(results)

    musicFiles = filter(None, musicFiles)
    musicTempFiles = filter(None, musicTempFiles)

    # check all files to be encoded now exist in temp directory
    if not encoder_failed and musicTempFiles:
        for dest in musicTempFiles:
            if not os.path.exists(dest):
                encoder_failed = True
                logger.error("Encoded file '%s' does not exist in the destination temp directory",
                             dest)

    # No errors, move from temp to parent
    if not encoder_failed and musicTempFiles:
        i = 0
        for dest in musicTempFiles:
            if os.path.exists(dest):
                source = musicFiles[i]
                if headphones.CONFIG.DELETE_LOSSLESS_FILES:
                    os.remove(source)
                check_dest = os.path.join(albumPath, os.path.split(dest)[1])
                if os.path.exists(check_dest):
                    os.remove(check_dest)
                try:
                    shutil.move(dest, albumPath)
                except Exception as e:
                    logger.error('Could not move %s to %s: %s', dest, albumPath, e)
                    encoder_failed = True
                    break
            i += 1

    # remove temp directory
    shutil.rmtree(tempDirEncode)

    # Return with error if any encoding errors
    if encoder_failed:
        logger.error(
            "One or more files failed to encode. Ensure you have the latest version of %s installed.",
            headphones.CONFIG.ENCODER)
        return None

    time.sleep(1)
    for r, d, f in os.walk(albumPath):
        for music in f:
            if any(music.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
                musicFinalFiles.append(os.path.join(r, music))

    if not musicTempFiles:
        logger.info('Encoding for folder \'%s\' is not required', albumPath)

    return musicFinalFiles
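
Example 42 removes the temp directory twice: once up front, in case a previous run left it behind, and once at the end after the encoded files have been moved out. A compact sketch of that lifecycle (with_temp_subdir and work are placeholder names; unlike the original, the final removal here sits in a finally block so it also runs if work raises):

import os
import shutil
import time

def with_temp_subdir(album_path, work):
    # Recreate `<album_path>/temp`: clear leftovers from a previous run, make
    # the directory, run the work, then delete the directory again.
    temp_dir = os.path.join(album_path, 'temp')
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
        time.sleep(1)  # the encoder above also pauses briefly after removal
    os.mkdir(temp_dir)
    try:
        return work(temp_dir)
    finally:
        shutil.rmtree(temp_dir)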

Example 43

Project: scons
Source File: install_scons.py
View license
def main(argv=None):
    if argv is None:
        argv = sys.argv

    all = False
    downloads_dir = 'Downloads'
    downloads_url = 'http://downloads.sourceforge.net/scons'
    if sys.platform == 'win32':
        sudo = ''
        prefix = sys.prefix
    else:
        sudo = 'sudo'
        prefix = '/usr/local'
    python = sys.executable

    short_options = 'ad:hnp:q'
    long_options = ['all', 'help', 'no-exec', 'prefix=', 'quiet']

    helpstr = """\
Usage:  install_scons.py [-ahnq] [-d DIR] [-p PREFIX] [VERSION ...]

  -a, --all                     Install all SCons versions.
  -d DIR, --downloads=DIR       Downloads directory.
  -h, --help                    Print this help and exit
  -n, --no-exec                 No execute, just print command lines
  -p PREFIX, --prefix=PREFIX    Installation prefix.
  -q, --quiet                   Quiet, don't print command lines
"""

    try:
        try:
            opts, args = getopt.getopt(argv[1:], short_options, long_options)
        except getopt.error, msg:
            raise Usage(msg)

        for o, a in opts:
            if o in ('-a', '--all'):
                all = True
            elif o in ('-d', '--downloads'):
                downloads_dir = a
            elif o in ('-h', '--help'):
                print helpstr
                sys.exit(0)
            elif o in ('-n', '--no-exec'):
                CommandRunner.execute = CommandRunner.do_not_execute
            elif o in ('-p', '--prefix'):
                prefix = a
            elif o in ('-q', '--quiet'):
                CommandRunner.display = CommandRunner.do_not_display
    except Usage, err:
        sys.stderr.write(str(err.msg) + '\n')
        sys.stderr.write('use -h to get help\n')
        return 2

    if all:
        if args:
            msg = 'install-scons.py:  -a and version arguments both specified'
            sys.stderr.write(msg)
            sys.exit(1)

        args = all_versions

    cmd = CommandRunner()

    for version in args:
        scons = 'scons-' + version
        tar_gz = os.path.join(downloads_dir, scons + '.tar.gz')
        tar_gz_url = "%s/%s.tar.gz" % (downloads_url, scons)

        cmd.subst_dictionary(locals())

        if not os.path.exists(tar_gz):
            if not os.path.exists(downloads_dir):
                cmd.run('mkdir %(downloads_dir)s')
            cmd.run((urllib.urlretrieve, tar_gz_url, tar_gz),
                    'wget -O %(tar_gz)s %(tar_gz_url)s')

        def extract(tar_gz):
            tarfile.open(tar_gz, "r:gz").extractall()
        cmd.run((extract, tar_gz), 'tar zxf %(tar_gz)s')

        cmd.run('cd %(scons)s')

        if version in ('0.01', '0.02', '0.03', '0.04', '0.05',
                       '0.06', '0.07', '0.08', '0.09', '0.10'):

            # 0.01 through 0.10 install /usr/local/bin/scons and
            # /usr/local/lib/scons.  The "scons" script knows how to
            # look up the library in a version-specific directory, but
            # we have to move both it and the library directory into
            # the right version-specific name by hand.
            cmd.run('%(python)s setup.py build')
            cmd.run('%(sudo)s %(python)s setup.py install --prefix=%(prefix)s')
            cmd.run('%(sudo)s mv %(prefix)s/bin/scons %(prefix)s/bin/scons-%(version)s')
            cmd.run('%(sudo)s mv %(prefix)s/lib/scons %(prefix)s/lib/scons-%(version)s')

        elif version in ('0.11', '0.12', '0.13', '0.14', '0.90'):

            # 0.11 through 0.90 install /usr/local/bin/scons and
            # /usr/local/lib/scons-%(version)s.  We just need to move
            # the script to a version-specific name.
            cmd.run('%(python)s setup.py build')
            cmd.run('%(sudo)s %(python)s setup.py install --prefix=%(prefix)s')
            cmd.run('%(sudo)s mv %(prefix)s/bin/scons %(prefix)s/bin/scons-%(version)s')

        elif version in ('0.91', '0.92', '0.93',
                         '0.94', '0.94.1',
                         '0.95', '0.95.1',
                         '0.96', '0.96.1', '0.96.90'):

            # 0.91 through 0.96.90 install /usr/local/bin/scons,
            # /usr/local/bin/sconsign and /usr/local/lib/scons-%(version)s.
            # We need to move both scripts to version-specific names.
            cmd.run('%(python)s setup.py build')
            cmd.run('%(sudo)s %(python)s setup.py install --prefix=%(prefix)s')
            cmd.run('%(sudo)s mv %(prefix)s/bin/scons %(prefix)s/bin/scons-%(version)s')
            cmd.run('%(sudo)s mv %(prefix)s/bin/sconsign %(prefix)s/bin/sconsign-%(version)s')
            lib_scons = os.path.join(prefix, 'lib', 'scons')
            if os.path.isdir(lib_scons):
                cmd.run('%(sudo)s mv %(prefix)s/lib/scons %(prefix)s/lib/scons-%(version)s')

        else:

            # Versions from 0.96.91 and later support what we want
            # with a --no-scons-script option.
            cmd.run('%(python)s setup.py build')
            cmd.run('%(sudo)s %(python)s setup.py install --prefix=%(prefix)s --no-scons-script')

        cmd.run('cd ..')

        cmd.run((shutil.rmtree, scons), 'rm -rf %(scons)s')
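
The interesting shutil.rmtree usage in Example 43 is the last line: the CommandRunner is handed both a Python callable, (shutil.rmtree, scons), and the equivalent shell text, 'rm -rf %(scons)s', so a dry run can print the command without deleting anything. A minimal sketch of that pairing (run, the dry_run flag and the directory name are illustrative, not the project's actual CommandRunner API):

import shutil

def run(action, display, dry_run=False):
    # Echo the shell-equivalent command and execute the Python callable
    # unless this is a dry run.
    print(display)
    if not dry_run:
        func, *args = action
        func(*args)

if __name__ == '__main__':
    # Dry run: prints the command but deletes nothing.
    run((shutil.rmtree, 'scons-unpacked'), 'rm -rf scons-unpacked', dry_run=True)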

Example 44

Project: attention-lvcsr
Source File: svhn.py
View license
@check_exists(required_files=FORMAT_1_FILES)
def convert_svhn_format_1(directory, output_directory,
                          output_filename='svhn_format_1.hdf5'):
    """Converts the SVHN dataset (format 1) to HDF5.

    This method assumes the existence of the files
    `{train,test,extra}.tar.gz`, which are accessible through the
    official website [SVHNSITE].

    .. [SVHNSITE] http://ufldl.stanford.edu/housenumbers/

    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'svhn_format_1.hdf5'.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.

    """
    try:
        output_path = os.path.join(output_directory, output_filename)
        h5file = h5py.File(output_path, mode='w')
        TMPDIR = tempfile.mkdtemp()

        # Every image has three channels (RGB) and variable height and width.
        # It features a variable number of bounding boxes that identify the
        # location and label of digits. The bounding box location is specified
        # using the x and y coordinates of its top left corner along with its
        # width and height.
        BoundingBoxes = namedtuple(
            'BoundingBoxes', ['labels', 'heights', 'widths', 'lefts', 'tops'])
        sources = ('features',) + tuple('bbox_{}'.format(field)
                                        for field in BoundingBoxes._fields)
        source_dtypes = dict([(source, 'uint8') for source in sources[:2]] +
                             [(source, 'uint16') for source in sources[2:]])
        source_axis_labels = {
            'features': ('channel', 'height', 'width'),
            'bbox_labels': ('bounding_box', 'index'),
            'bbox_heights': ('bounding_box', 'height'),
            'bbox_widths': ('bounding_box', 'width'),
            'bbox_lefts': ('bounding_box', 'x'),
            'bbox_tops': ('bounding_box', 'y')}

        # The dataset is split into three sets: the training set, the test set
        # and an extra set of examples that are somewhat less difficult but
        # can be used as extra training data. These sets are stored separately
        # as 'train.tar.gz', 'test.tar.gz' and 'extra.tar.gz'. Each file
        # contains a directory named after the split it stores. The examples
        # are stored in that directory as PNG images. The directory also
        # contains a 'digitStruct.mat' file with all the bounding box and
        # label information.
        splits = ('train', 'test', 'extra')
        file_paths = dict(zip(splits, FORMAT_1_FILES))
        for split, path in file_paths.items():
            file_paths[split] = os.path.join(directory, path)
        digit_struct_paths = dict(
            [(split, os.path.join(TMPDIR, split, 'digitStruct.mat'))
             for split in splits])

        # We first extract the data files into a temporary directory. While
        # doing that, we also count the number of examples for each split.
        # Files are extracted individually, which allows us to display a
        # progress bar. Since the splits will be concatenated in the HDF5
        # file, we also compute the start and stop intervals of each split
        # within the concatenated array.
        def extract_tar(split):
            with tarfile.open(file_paths[split], 'r:gz') as f:
                members = f.getmembers()
                num_examples = sum(1 for m in members if '.png' in m.name)
                progress_bar_context = progress_bar(
                    name='{} file'.format(split), maxval=len(members),
                    prefix='Extracting')
                with progress_bar_context as bar:
                    for i, member in enumerate(members):
                        f.extract(member, path=TMPDIR)
                        bar.update(i)
            return num_examples

        examples_per_split = OrderedDict(
            [(split, extract_tar(split)) for split in splits])
        cumulative_num_examples = numpy.cumsum(
            [0] + list(examples_per_split.values()))
        num_examples = cumulative_num_examples[-1]
        intervals = zip(cumulative_num_examples[:-1],
                        cumulative_num_examples[1:])
        split_intervals = dict(zip(splits, intervals))

        # The start and stop indices are used to create a split dict that will
        # be parsed into the split array required by the H5PYDataset interface.
        # The split dict is organized as follows:
        #
        #     dict(split -> dict(source -> (start, stop)))
        #
        split_dict = OrderedDict([
            (split, OrderedDict([(s, split_intervals[split])
                                 for s in sources]))
            for split in splits])
        h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)

        # We then prepare the HDF5 dataset. This involves creating datasets to
        # store data sources and datasets to store auxiliary information
        # (namely the shapes for variable-length axes, and labels to indicate
        # what these variable-length axes represent).
        def make_vlen_dataset(source):
            # Create a variable-length 1D dataset
            dtype = h5py.special_dtype(vlen=numpy.dtype(source_dtypes[source]))
            dataset = h5file.create_dataset(
                source, (num_examples,), dtype=dtype)
            # Create a dataset to store variable-length shapes.
            axis_labels = source_axis_labels[source]
            dataset_shapes = h5file.create_dataset(
                '{}_shapes'.format(source), (num_examples, len(axis_labels)),
                dtype='uint16')
            # Create a dataset to store labels for variable-length axes.
            dataset_vlen_axis_labels = h5file.create_dataset(
                '{}_vlen_axis_labels'.format(source), (len(axis_labels),),
                dtype='S{}'.format(
                    numpy.max([len(label) for label in axis_labels])))
            # Fill variable-length axis labels
            dataset_vlen_axis_labels[...] = [
                label.encode('utf8') for label in axis_labels]
            # Attach auxiliary datasets as dimension scales of the
            # variable-length 1D dataset. This is in accordance with the
            # H5PYDataset interface.
            dataset.dims.create_scale(dataset_shapes, 'shapes')
            dataset.dims[0].attach_scale(dataset_shapes)
            dataset.dims.create_scale(dataset_vlen_axis_labels, 'shape_labels')
            dataset.dims[0].attach_scale(dataset_vlen_axis_labels)
            # Tag fixed-length axis with its label
            dataset.dims[0].label = 'batch'

        for source in sources:
            make_vlen_dataset(source)

        # The "fun" part begins: we extract the bounding box and label
        # information contained in 'digitStruct.mat'. This is a version 7.3
        # Matlab file, which uses HDF5 under the hood, albeit with a very
        # convoluted layout.
        def get_boxes(split):
            boxes = []
            with h5py.File(digit_struct_paths[split], 'r') as f:
                bar_name = '{} digitStruct'.format(split)
                bar_maxval = examples_per_split[split]
                with progress_bar(bar_name, bar_maxval) as bar:
                    for image_number in range(examples_per_split[split]):
                        # The 'digitStruct' group is the main group of the HDF5
                        # file. It contains two datasets: 'bbox' and 'name'.
                        # The 'name' dataset isn't of interest to us, as it
                        # stores file names and there's already a one-to-one
                        # mapping between row numbers and image names (e.g.
                        # row 0 corresponds to '1.png', row 1 corresponds to
                        # '2.png', and so on).
                        main_group = f['digitStruct']
                        # The 'bbox' dataset contains the bounding box and
                        # label information we're after. It has as many rows
                        # as there are images, and one column. Elements of the
                        # 'bbox' dataset are object references that point to
                        # (yet another) group that contains the information
                        # for the corresponding image.
                        image_reference = main_group['bbox'][image_number, 0]

                        # There are five datasets contained in that group:
                        # 'label', 'height', 'width', 'left' and 'top'. Each of
                        # those datasets has as many rows as there are bounding
                        # boxes in the corresponding image, and one column.
                        def get_dataset(name):
                            return main_group[image_reference][name][:, 0]
                        names = ('label', 'height', 'width', 'left', 'top')
                        datasets = dict(
                            [(name, get_dataset(name)) for name in names])

                        # If there is only one bounding box, the information is
                        # stored directly in the datasets. If there are
                        # multiple bounding boxes, elements of those datasets
                        # are object references pointing to 1x1 datasets that
                        # store the information (fortunately, it's the last
                        # hop we need to make).
                        def get_elements(dataset):
                            if len(dataset) > 1:
                                return [int(main_group[reference][0, 0])
                                        for reference in dataset]
                            else:
                                return [int(dataset[0])]
                        # Names are pluralized in the BoundingBox named tuple.
                        kwargs = dict(
                            [(name + 's', get_elements(dataset))
                             for name, dataset in iteritems(datasets)])
                        boxes.append(BoundingBoxes(**kwargs))
                        if bar:
                            bar.update(image_number)
            return boxes

        split_boxes = dict([(split, get_boxes(split)) for split in splits])

        # The final step is to fill the HDF5 file.
        def fill_split(split, bar=None):
            for image_number in range(examples_per_split[split]):
                image_path = os.path.join(
                    TMPDIR, split, '{}.png'.format(image_number + 1))
                image = numpy.asarray(
                    Image.open(image_path)).transpose(2, 0, 1)
                bounding_boxes = split_boxes[split][image_number]
                num_boxes = len(bounding_boxes.labels)
                index = image_number + split_intervals[split][0]

                h5file['features'][index] = image.flatten()
                h5file['features'].dims[0]['shapes'][index] = image.shape
                for field in BoundingBoxes._fields:
                    name = 'bbox_{}'.format(field)
                    h5file[name][index] = numpy.maximum(0,
                                                        getattr(bounding_boxes,
                                                                field))
                    h5file[name].dims[0]['shapes'][index] = [num_boxes, 1]

                # Replace label '10' with '0'.
                labels = h5file['bbox_labels'][index]
                labels[labels == 10] = 0
                h5file['bbox_labels'][index] = labels

                if image_number % 1000 == 0:
                    h5file.flush()
                if bar:
                    bar.update(index)

        with progress_bar('SVHN format 1', num_examples) as bar:
            for split in splits:
                fill_split(split, bar=bar)
    finally:
        if os.path.isdir(TMPDIR):
            shutil.rmtree(TMPDIR)
        h5file.flush()
        h5file.close()

    return (output_path,)
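
Example 44 creates its scratch directory with tempfile.mkdtemp() and removes it in the finally block, but only after an os.path.isdir() check so the cleanup stays safe if the directory is already gone. The skeleton, reduced to that shape (convert and work are placeholder names):

import os
import shutil
import tempfile

def convert(output_path, work):
    # Extract into a scratch directory, do the conversion, and always clean
    # up; the isdir() guard keeps the finally block from raising if the
    # directory was already removed.
    tmpdir = tempfile.mkdtemp()
    try:
        return work(tmpdir, output_path)
    finally:
        if os.path.isdir(tmpdir):
            shutil.rmtree(tmpdir)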

Example 45

Project: batch-shipyard
Source File: cascade.py
View license
    def _pull_and_save(self) -> None:
        """Thread main logic for pulling and saving docker image"""
        if _REGISTRY is None:
            raise RuntimeError(
                ('{} image specified for global resource, but there are '
                 'no registries available').format(self.resource))
        file = None
        resource_hash = compute_resource_hash(self.resource)
        image = get_docker_image_name_from_resource(self.resource)
        _record_perf('pull-start', 'img={}'.format(image))
        start = datetime.datetime.now()
        logger.info('pulling image {} from {}'.format(image, _REGISTRY))
        if _REGISTRY == 'registry.hub.docker.com':
            subprocess.check_output(
                'docker pull {}'.format(image), shell=True)
        else:
            _pub = False
            try:
                subprocess.check_output(
                    'docker pull {}/{}'.format(_REGISTRY, image),
                    shell=True)
            except subprocess.CalledProcessError:
                if _ALLOW_PUBLIC_PULL_WITH_PRIVATE:
                    logger.warning(
                        'could not pull from private registry, attempting '
                        'Docker Public Hub instead')
                    subprocess.check_output(
                        'docker pull {}'.format(image), shell=True)
                    _pub = True
                else:
                    raise
            # tag image to remove registry ip
            if not _pub:
                subprocess.check_call(
                    'docker tag {}/{} {}'.format(_REGISTRY, image, image),
                    shell=True)
            del _pub
        diff = (datetime.datetime.now() - start).total_seconds()
        logger.debug('took {} sec to pull docker image {} from {}'.format(
            diff, image, _REGISTRY))
        # register service
        _merge_service(
            self.table_client, self.resource, self.nglobalresources)
        # save docker image to seed to torrent
        if _ENABLE_P2P:
            _record_perf('pull-end', 'img={},diff={}'.format(
                image, diff))
            _record_perf('save-start', 'img={}'.format(image))
            start = datetime.datetime.now()
            if _COMPRESSION:
                # need to create reproducible compressed tarballs
                # 1. untar docker save file
                # 2. re-tar files sorted by name and set mtime/user/group
                #    to known values
                # 3. fast compress with parallel gzip ignoring certain file
                #    properties
                # 4. remove temporary directory
                tmpdir = _TORRENT_DIR / '{}-tmp'.format(resource_hash)
                tmpdir.mkdir(parents=True, exist_ok=True)
                file = _TORRENT_DIR / '{}.{}'.format(
                    resource_hash, _SAVELOAD_FILE_EXTENSION)
                logger.info('saving docker image {} to {} for seeding'.format(
                    image, file))
                subprocess.check_call(
                    ('(docker save {} | tar -xf -) '
                     '&& (tar --sort=name --mtime=\'1970-01-01\' '
                     '--owner=0 --group=0 -cf - . '
                     '| pigz --fast -n -T -c > {})').format(image, file),
                    cwd=str(tmpdir), shell=True)
                shutil.rmtree(str(tmpdir), ignore_errors=True)
                del tmpdir
                fsize = file.stat().st_size
            else:
                # tarball generated by docker save is not reproducible
                # we need to untar it and torrent the contents instead
                file = _TORRENT_DIR / '{}'.format(resource_hash)
                file.mkdir(parents=True, exist_ok=True)
                logger.info('saving docker image {} to {} for seeding'.format(
                    image, file))
                subprocess.check_call(
                    'docker save {} | tar -xf -'.format(image),
                    cwd=str(file), shell=True)
                fsize = 0
                for entry in scantree(str(file)):
                    if entry.is_file(follow_symlinks=False):
                        fsize += entry.stat().st_size
            diff = (datetime.datetime.now() - start).total_seconds()
            logger.debug('took {} sec to save docker image {} to {}'.format(
                diff, image, file))
            _record_perf('save-end', 'img={},size={},diff={}'.format(
                image, fsize, diff))
            # generate torrent file
            start = datetime.datetime.now()
            torrent_file, torrent_sha1 = generate_torrent(file, resource_hash)
            # check if blob exists and is non-zero length prior to uploading
            try:
                _bp = self.blob_client.get_blob_properties(
                    _STORAGE_CONTAINERS['blob_torrents'],
                    str(torrent_file.name))
                if _bp.properties.content_length == 0:
                    raise ValueError()
            except Exception:
                self.blob_client.create_blob_from_path(
                    _STORAGE_CONTAINERS['blob_torrents'],
                    str(torrent_file.name), str(torrent_file))
            diff = (datetime.datetime.now() - start).total_seconds()
            logger.debug(
                'took {} sec to generate and upload torrent file: {}'.format(
                    diff, torrent_file))
            start = datetime.datetime.now()
            # add to torrent dict (effectively enqueues for torrent start)
            entity = {
                'PartitionKey': _PARTITION_KEY,
                'RowKey': resource_hash,
                'Resource': self.resource,
                'TorrentFileLocator': '{},{}'.format(
                    _STORAGE_CONTAINERS['blob_torrents'],
                    str(torrent_file.name)),
                'TorrentFileSHA1': torrent_sha1,
                'TorrentIsDir': file.is_dir(),
                'TorrentContentSizeBytes': fsize,
            }
            with _PT_LOCK:
                _PENDING_TORRENTS[self.resource] = {
                    'entity': entity,
                    'torrent_file': torrent_file,
                    'started': False,
                    'seed': True,
                    'loaded': True,
                    'loading': False,
                    'registered': True,
                }
                _TORRENT_REVERSE_LOOKUP[resource_hash] = self.resource
            # wait until torrent has started
            logger.info(
                'waiting for torrent {} to start'.format(self.resource))
            while (self.resource not in _TORRENTS or
                   not _TORRENTS[self.resource]['started']):
                time.sleep(0.1)
            diff = (datetime.datetime.now() - start).total_seconds()
            logger.debug('took {} sec for {} torrent to start'.format(
                diff, self.resource))
        else:
            # get docker image size
            try:
                output = subprocess.check_output(
                    'docker images {}'.format(image), shell=True)
                size = ' '.join(output.decode('utf-8').split()[-2:])
                _record_perf('pull-end', 'img={},diff={},size={}'.format(
                    image, diff, size))
            except subprocess.CalledProcessError as ex:
                logger.exception(ex)
                _record_perf('pull-end', 'img={},diff={}'.format(image, diff))
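
Example 45 works with pathlib paths and calls shutil.rmtree(str(tmpdir), ignore_errors=True), so cleanup of the compression scratch directory can never raise and abort the image save. A reduced sketch of that step (repack, resource_hash and torrent_dir are placeholder names):

import pathlib
import shutil
import tempfile

def repack(resource_hash, torrent_dir):
    # Stage files in a hash-named scratch directory, then remove it with
    # ignore_errors=True so cleanup failures are silently ignored.
    tmpdir = torrent_dir / '{}-tmp'.format(resource_hash)
    tmpdir.mkdir(parents=True, exist_ok=True)
    try:
        (tmpdir / 'payload').write_bytes(b'...')
    finally:
        # The example passes str(tmpdir); on Python 3.6+ rmtree also accepts
        # the Path object directly.
        shutil.rmtree(str(tmpdir), ignore_errors=True)

if __name__ == '__main__':
    repack('deadbeef', pathlib.Path(tempfile.gettempdir()))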

Example 46

Project: pyfrc
Source File: cli_deploy.py
View license
    def run(self, options, robot_class, **static_options):
        
        from .. import config
        config.mode = 'upload'
        
        # run the test suite before uploading
        if not options.skip_tests:
            from .cli_test import PyFrcTest
            
            tester = PyFrcTest()
            
            retval = tester.run_test([], robot_class, options.builtin, ignore_missing_test=True)
            if retval != 0:
                print_err("ERROR: Your robot tests failed, aborting upload.")
                if not sys.stdin.isatty():
                    print_err("- Use --skip-tests if you want to upload anyways")
                    return retval
                
                print()
                if not yesno('- Upload anyways?'):
                    return retval
                
                if not yesno('- Are you sure? Your robot code may crash!'):
                    return retval
                
                print()
                print("WARNING: Uploading code against my better judgement...")
        
        # upload all files in the robot.py source directory
        robot_file = abspath(inspect.getfile(robot_class))
        robot_path = dirname(robot_file)
        robot_filename = basename(robot_file)
        cfg_filename = join(robot_path, '.deploy_cfg')
        
        if not options.nonstandard and robot_filename != 'robot.py':
            print_err("ERROR: Your robot code must be in a file called robot.py (launched from %s)!" % robot_filename)
            print_err()
            print_err("If you really want to do this, then specify the --nonstandard argument")
            return 1
        
        # This probably should be configurable... oh well
        
        deploy_dir = '/home/lvuser'
        py_deploy_dir = '%s/py' % deploy_dir
        
        # note below: deployed_cmd appears that it only can be a single line
        
        # In 2015, there were stdout/stderr issues. In 2016, they seem to
        # have been fixed, but need to use -u for it to really work properly
        
        if options.debug:
            deployed_cmd = 'env LD_LIBRARY_PATH=/usr/local/frc/rpath-lib/ /usr/local/frc/bin/netconsole-host /usr/local/bin/python3 -u %s/%s -v run' % (py_deploy_dir, robot_filename)
            deployed_cmd_fname = 'robotDebugCommand'
            extra_cmd = 'touch /tmp/frcdebug; chown lvuser:ni /tmp/frcdebug'
        else:
            deployed_cmd = 'env LD_LIBRARY_PATH=/usr/local/frc/rpath-lib/ /usr/local/frc/bin/netconsole-host /usr/local/bin/python3 -u -O %s/%s run' % (py_deploy_dir, robot_filename)
            deployed_cmd_fname = 'robotCommand'
            extra_cmd = ''

        if options.in_place:
            del_cmd = ''
        else:
            del_cmd = "[ -d %(py_deploy_dir)s ] && rm -rf %(py_deploy_dir)s"

        del_cmd %= {"py_deploy_dir": py_deploy_dir}
        
        check_version = '/usr/local/bin/python3 -c "exec(open(\\"$SITEPACKAGES/wpilib/version.py\\", \\"r\\").read(), globals()); print(\\"WPILib version on robot is \\" + __version__);exit(0) if __version__ == \\"%s\\" else exit(89)"' % wpilib.__version__
        if options.no_version_check:
            check_version = ''
        
        # This is a nasty bit of code now...
        sshcmd = inspect.cleandoc("""
            /bin/bash -ce '[ -x /usr/local/bin/python3 ] || exit 87
            SITEPACKAGES=$(/usr/local/bin/python3 -c "import site; print(site.getsitepackages()[0])")
            [ -f $SITEPACKAGES/wpilib/version.py ] || exit 88
            %(check_version)s
            %(del_cmd)s
            echo "%(cmd)s" > %(deploy_dir)s/%(cmd_fname)s
            %(extra_cmd)s'
        """)
              
        sshcmd %= {
            'del_cmd': del_cmd,
            'deploy_dir': deploy_dir,
            'cmd': deployed_cmd,
            'cmd_fname': deployed_cmd_fname,
            'extra_cmd': extra_cmd,
            'check_version': check_version
        }
        
        sshcmd = re.sub("\n+", ";", sshcmd)
        
        nc_thread = None
        
        try:
            controller = installer.ssh_from_cfg(cfg_filename,
                                                username='lvuser',
                                                password='',
                                                hostname=options.robot,
                                                allow_mitm=True,
                                                no_resolve=options.no_resolve)
            
            # Housekeeping first
            logger.debug('SSH: %s', sshcmd)
            controller.ssh(sshcmd)
            
            # Copy the files over, copy to a temporary directory first
            # -> this is inefficient, but it's easier in sftp
            tmp_dir = tempfile.mkdtemp()
            py_tmp_dir = join(tmp_dir, 'py')
                    
            try:
                self._copy_to_tmpdir(py_tmp_dir, robot_path)
                controller.sftp(py_tmp_dir, deploy_dir, mkdir=not options.in_place)
            finally:
                shutil.rmtree(tmp_dir)
            
            # start the netconsole listener now if requested, *before* we
            # actually start the robot code, so we can see all messages
            if options.nc:
                from netconsole import run
                nc_event = threading.Event()
                nc_thread = threading.Thread(target=run,
                                             kwargs={'init_event': nc_event},
                                             daemon=True)
                nc_thread.start()
                nc_event.wait(5)
                logger.info("Netconsole is listening...")
            
            if not options.in_place:
                # Restart the robot code and we're done!
                sshcmd = "/bin/bash -ce '" + \
                         '. /etc/profile.d/natinst-path.sh; ' + \
                         'chown -R lvuser:ni %s; ' + \
                         '/usr/local/frc/bin/frcKillRobot.sh -t -r' + \
                         "'"
            
                sshcmd %= (py_deploy_dir)
            
                logger.debug('SSH: %s', sshcmd)
                controller.ssh(sshcmd)
            
        except installer.SshExecError as e:
            if e.retval == 87:
                print_err("ERROR: python3 was not found on the roboRIO: have you installed robotpy?")
            elif e.retval == 88:
                print_err("ERROR: WPILib was not found on the roboRIO: have you installed robotpy?")
            elif e.retval == 89:
                print_err("ERROR: expected WPILib version %s" % wpilib.__version__)
                print_err()
                print_err("You should either:")
                print_err("- If the robot version is older, upgrade the RobotPy on your robot")
                print_err("- Otherwise, upgrade pyfrc on your computer")
                print_err()
                print_err("Alternatively, you can specify --no-version-check to skip this check")
            else:
                print_err("ERROR: %s" % e)
            return 1
        except installer.Error as e:
            print_err("ERROR: %s" % e)
            return 1
        else:
            print("\nSUCCESS: Deploy was successful!")
        
        if nc_thread is not None:
            nc_thread.join()
        
        return 0
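
The deploy step above stages the robot code in a tempfile.mkdtemp() directory and guarantees cleanup by calling shutil.rmtree in a finally block. A minimal sketch of that pattern, with a hypothetical upload callable standing in for the sftp push (not part of pyfrc):

import shutil
import tempfile
from os.path import join

def stage_and_upload(src_dir, upload):
    # Copy src_dir into a scratch area, hand it to `upload`, then clean up.
    tmp_dir = tempfile.mkdtemp()            # local scratch directory
    try:
        staged = join(tmp_dir, 'py')
        shutil.copytree(src_dir, staged)    # stage the files to transfer
        upload(staged)                      # e.g. an sftp push to the target
    finally:
        shutil.rmtree(tmp_dir)              # always remove the scratch directory

Because the rmtree sits in the finally clause, the scratch directory disappears even when the upload step raises.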

Example 47

Project: rockstor-core
Source File: initrock.py
View license
def main():
    loglevel = logging.INFO
    if (len(sys.argv) > 1 and sys.argv[1] == '-x'):
        loglevel = logging.DEBUG
    logging.basicConfig(format='%(asctime)s: %(message)s', level=loglevel)
    set_def_kernel(logging)
    try:
        delete_old_kernels(logging)
    except Exception, e:
        logging.debug('Exception while deleting old kernels. Soft error. Moving on.')
        logging.exception(e)

    cert_loc = '%s/certs/' % BASE_DIR
    if (os.path.isdir(cert_loc)):
        if (not os.path.isfile('%s/rockstor.cert' % cert_loc) or
            not os.path.isfile('%s/rockstor.key' % cert_loc)):
            shutil.rmtree(cert_loc)

    if (not os.path.isdir(cert_loc)):
        os.mkdir(cert_loc)
        dn = ("/C=US/ST=Rockstor user's state/L=Rockstor user's "
              "city/O=Rockstor user/OU=Rockstor dept/CN=rockstor.user")
        logging.info('Creating openssl cert...')
        run_command([OPENSSL, 'req', '-nodes', '-newkey', 'rsa:2048',
                     '-keyout', '%s/first.key' % cert_loc, '-out',
                     '%s/rockstor.csr' % cert_loc, '-subj', dn])
        logging.debug('openssl cert created')
        logging.info('Creating rockstor key...')
        run_command([OPENSSL, 'rsa', '-in', '%s/first.key' % cert_loc, '-out',
                     '%s/rockstor.key' % cert_loc])
        logging.debug('rockstor key created')
        logging.info('Signing cert with rockstor key...')
        run_command([OPENSSL, 'x509', '-in', '%s/rockstor.csr' % cert_loc,
                     '-out', '%s/rockstor.cert' % cert_loc, '-req', '-signkey',
                     '%s/rockstor.key' % cert_loc, '-days', '3650'])
        logging.debug('cert signed.')
        logging.info('restarting nginx...')
        run_command([SUPERCTL, 'restart', 'nginx'])

    cleanup_rclocal(logging)
    logging.info('Checking for flash and running flash optimizations if appropriate.')
    run_command([FLASH_OPTIMIZE, '-x'], throw=False)
    tz_updated = False
    try:
        logging.info('Updating the timezone from the system')
        tz_updated = update_tz(logging)
    except Exception, e:
        logging.error('Exception while updating timezone: %s' % e.__str__())
        logging.exception(e)

    try:
        logging.info('Updating sshd_config')
        bootstrap_sshd_config(logging)
    except Exception, e:
        logging.error('Exception while updating sshd_config: %s' % e.__str__())

    if (not os.path.isfile(STAMP)):
        logging.info('Please be patient. This script could take a few minutes')
        shutil.copyfile('%s/conf/django-hack' % BASE_DIR,
                        '%s/django' % BASE_BIN)
        run_command([SYSCTL, 'enable', 'postgresql'])
        logging.debug('Postgresql enabled')
        shutil.rmtree('/var/lib/pgsql/data')
        logging.info('initializing Postgresql...')
        run_command(['/usr/bin/postgresql-setup', 'initdb'])
        logging.info('Done.')
        run_command([SYSCTL, 'restart', 'postgresql'])
        run_command([SYSCTL, 'status', 'postgresql'])
        logging.debug('Postgresql restarted')
        logging.info('Creating app databases...')
        run_command(['su', '-', 'postgres', '-c', '/usr/bin/createdb smartdb'])
        logging.debug('smartdb created')
        run_command(['su', '-', 'postgres', '-c',
                     '/usr/bin/createdb storageadmin'])
        logging.debug('storageadmin created')
        logging.info('Done')
        logging.info('Initializing app databases...')
        run_command(['su', '-', 'postgres', '-c', "psql -c \"CREATE ROLE rocky WITH SUPERUSER LOGIN PASSWORD 'rocky'\""])
        logging.debug('rocky ROLE created')
        run_command(['su', '-', 'postgres', '-c', "psql storageadmin -f %s/conf/storageadmin.sql.in" % BASE_DIR])
        logging.debug('storageadmin app database loaded')
        run_command(['su', '-', 'postgres', '-c', "psql smartdb -f %s/conf/smartdb.sql.in" % BASE_DIR])
        logging.debug('smartdb app database loaded')
        run_command(['su', '-', 'postgres', '-c', "psql storageadmin -c \"select setval('south_migrationhistory_id_seq', (select max(id) from south_migrationhistory))\""])
        logging.debug('storageadmin migration history copied')
        run_command(['su', '-', 'postgres', '-c', "psql smartdb -c \"select setval('south_migrationhistory_id_seq', (select max(id) from south_migrationhistory))\""])
        logging.debug('smartdb migration history copied')
        logging.info('Done')
        run_command(['cp', '-f', '%s/conf/postgresql.conf' % BASE_DIR,
                     '/var/lib/pgsql/data/'])
        logging.debug('postgresql.conf copied')
        run_command(['cp', '-f', '%s/conf/pg_hba.conf' % BASE_DIR,
                     '/var/lib/pgsql/data/'])
        logging.debug('pg_hba.conf copied')
        run_command([SYSCTL, 'restart', 'postgresql'])
        logging.info('Postgresql restarted')
        logging.info('Running app database migrations...')
        run_command([DJANGO, 'migrate', 'oauth2_provider', '--database=default',
                     '--noinput'])
        run_command([DJANGO, 'migrate', 'storageadmin', '--database=default',
                     '--noinput'])
        logging.debug('storageadmin migrated')
        run_command([DJANGO, 'migrate', 'django_ztask', '--database=default',
                     '--noinput'])
        logging.debug('django_ztask migrated')
        run_command([DJANGO, 'migrate', 'smart_manager',
                     '--database=smart_manager', '--noinput'])
        logging.debug('smart manager migrated')
        logging.info('Done')
        logging.info('Running prepdb...')
        run_command([PREP_DB, ])
        logging.info('Done')
        run_command(['touch', STAMP])
        require_postgres(logging)
        logging.info('Done')
    else:
        logging.info('Running prepdb...')
        run_command([PREP_DB, ])


    logging.info('stopping firewalld...')
    run_command([SYSCTL, 'stop', 'firewalld'])
    run_command([SYSCTL, 'disable', 'firewalld'])
    logging.info('firewalld stopped and disabled')
    update_nginx(logging)
    try:
        #downgrading python is a stopgap until it's fixed in upstream.
        downgrade_python(logging)
    except Exception, e:
        logging.error('Exception while downgrading python: %s' % e.__str__())
        logging.exception(e)

    shutil.copyfile('/etc/issue', '/etc/issue.rockstor')
    for i in range(30):
        try:
            if (init_update_issue() is not None):
                # init_update_issue() didn't raise and did return an IP, so we
                # break out of the retry loop as we are done.
                break
            else:
                # execute except block with message so we can try again.
                raise Exception('default interface IP not yet configured')
        except Exception, e:
            # only executed if there is an actual exception with
            # init_update_issue() or if it returns None so we can try again
            # regardless as in both instances we may succeed on another try.
            logging.debug('Exception occurred while running update_issue: %s. '
                         'Trying again after 2 seconds.' % e.__str__())
            if (i > 28):
                logging.error('Waited too long and tried too many times. '
                              'Quitting.')
                raise e
            time.sleep(2)

    enable_rockstor_service(logging)
    enable_bootstrap_service(logging)
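
Here shutil.rmtree discards a directory whose expected contents are incomplete so it can be rebuilt from scratch (the certs directory, and later the stock Postgres data directory). A minimal sketch of that check-then-remove pattern, with placeholder paths and a hypothetical rebuild callable rather than Rockstor's own helpers:

import os
import shutil

def reset_if_incomplete(dirpath, required_files, rebuild):
    # Discard dirpath if any expected file is missing, then rebuild it.
    if os.path.isdir(dirpath):
        missing = [f for f in required_files
                   if not os.path.isfile(os.path.join(dirpath, f))]
        if missing:
            shutil.rmtree(dirpath)    # throw away the partial directory
    if not os.path.isdir(dirpath):
        os.mkdir(dirpath)
        rebuild(dirpath)              # e.g. regenerate the certificate files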

Example 48

Project: rootpy
Source File: plot_contour_matrix.py
View license
def plot_contour_matrix(arrays,
                        fields,
                        filename,
                        weights=None,
                        sample_names=None,
                        sample_lines=None,
                        sample_colors=None,
                        color_map=None,
                        num_bins=20,
                        num_contours=3,
                        cell_width=2,
                        cell_height=2,
                        cell_margin_x=0.05,
                        cell_margin_y=0.05,
                        dpi=100,
                        padding=0,
                        animate_field=None,
                        animate_steps=10,
                        animate_delay=20,
                        animate_loop=0):
    """
    Create a matrix of contour plots showing all possible 2D projections of a
    multivariate dataset. You may optionally animate the contours as a cut on
    one of the fields is increased. ImageMagick must be installed to produce
    animations.

    Parameters
    ----------

    arrays : list of arrays of shape [n_samples, n_fields]
        A list of 2D NumPy arrays for each sample. All arrays must have the
        same number of columns.

    fields : list of strings
        A list of the field names.

    filename : string
        The output filename. If animation is enabled
        (``animate_field is not None``) then ``filename`` must have the .gif
        extension.

    weights : list of arrays, optional (default=None)
        List of 1D NumPy arrays of sample weights corresponding to the arrays
        in ``arrays``.

    sample_names : list of strings, optional (default=None)
        A list of the sample names for the legend. If None, then no legend will
        be shown.

    sample_lines : list of strings, optional (default=None)
        A list of matplotlib line styles for each sample. If None then line
        styles will cycle through 'dashed', 'solid', 'dashdot', and 'dotted'.
        Elements of this list may also be a list of line styles which will be
        cycled through for the contour lines of the corresponding sample.

    sample_colors : list of matplotlib colors, optional (default=None)
        The color of the contours for each sample. If None, then colors will be
        selected according to regular intervals along the ``color_map``.

    color_map : a matplotlib color map, optional (default=None)
        If ``sample_colors is None`` then select colors according to regular
        intervals along this matplotlib color map. If ``color_map`` is None,
        then the spectral color map is used.

    num_bins : int, optional (default=20)
        The number of bins along both axes of the 2D histograms.

    num_contours : int, optional (default=3)
        The number of contour lines to show for each sample.

    cell_width : float, optional (default=2)
        The width, in inches, of each subplot in the matrix.

    cell_height : float, optional (default=2)
        The height, in inches, of each subplot in the matrix.

    cell_margin_x : float, optional (default=0.05)
        The horizontal margin between adjacent subplots, as a fraction
        of the subplot size.

    cell_margin_y : float, optional (default=0.05)
        The vertical margin between adjacent subplots, as a fraction
        of the subplot size.

    dpi : int, optional (default=100)
        The number of pixels per inch.

    padding : float, optional (default=0)
        The padding to guarantee around each sample's contour plot, as a
        fraction of the range of the values along each axis.

    animate_field : string, optional (default=None)
        The field to animate a cut along. By default no animation is produced.
        If ``animate_field is not None`` then ``filename`` must end in the .gif
        extension and an animated GIF is produced.

    animate_steps : int, optional (default=10)
        The number of frames in the animation, corresponding to the number of
        regularly spaced cut values to show along the range of the
        ``animate_field``.

    animate_delay : int, optional (default=20)
        The duration that each frame is shown in the animation as a multiple of
        1 / 100 of a second.

    animate_loop : int, optional (default=0)
        The number of times to loop the animation. If zero, then loop forever.

    Notes
    -----

    NumPy and matplotlib are required

    """
    import numpy as np
    from .. import root2matplotlib as r2m
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator
    from matplotlib import cm
    from matplotlib.lines import Line2D

    # we must have at least two fields (columns)
    num_fields = len(fields)
    if num_fields < 2:
        raise ValueError(
            "record arrays must have at least two fields")
    # check that all arrays have the same number of columns
    for array in arrays:
        if array.shape[1] != num_fields:
            raise ValueError(
                "number of array columns does not match number of fields")

    if sample_colors is None:
        if color_map is None:
            color_map = cm.spectral
        steps = np.linspace(0, 1, len(arrays) + 2)[1:-1]
        sample_colors = [color_map(s) for s in steps]

    # determine range of each field
    low = np.vstack([a.min(axis=0) for a in arrays]).min(axis=0)
    high = np.vstack([a.max(axis=0) for a in arrays]).max(axis=0)
    width = np.abs(high - low)
    width *= padding
    low -= width
    high += width

    def single_frame(arrays, filename, label=None):

        # create the canvas and divide into matrix
        fig, axes = plt.subplots(
            nrows=num_fields,
            ncols=num_fields,
            figsize=(cell_width * num_fields, cell_height * num_fields))
        fig.subplots_adjust(hspace=cell_margin_y, wspace=cell_margin_x)

        for ax in axes.flat:
            # only show the left and bottom axes ticks and labels
            if ax.is_last_row() and not ax.is_last_col():
                ax.xaxis.set_visible(True)
                ax.xaxis.set_ticks_position('bottom')
                ax.xaxis.set_major_locator(MaxNLocator(4, prune='both'))
                for tick in ax.xaxis.get_major_ticks():
                    tick.label.set_rotation('vertical')
            else:
                ax.xaxis.set_visible(False)

            if ax.is_first_col() and not ax.is_first_row():
                ax.yaxis.set_visible(True)
                ax.yaxis.set_ticks_position('left')
                ax.yaxis.set_major_locator(MaxNLocator(4, prune='both'))
            else:
                ax.yaxis.set_visible(False)

        # turn off axes frames in upper triangular matrix
        for ix, iy in zip(*np.triu_indices_from(axes, k=0)):
            axes[ix, iy].axis('off')

        levels = np.linspace(0, 1, num_contours + 2)[1:-1]

        # plot the data
        for iy, ix in zip(*np.tril_indices_from(axes, k=-1)):
            ymin = float(low[iy])
            ymax = float(high[iy])
            xmin = float(low[ix])
            xmax = float(high[ix])
            for isample, a in enumerate(arrays):
                hist = Hist2D(
                    num_bins, xmin, xmax,
                    num_bins, ymin, ymax)
                if weights is not None:
                    hist.fill_array(a[:, [ix, iy]], weights[isample])
                else:
                    hist.fill_array(a[:, [ix, iy]])
                # normalize so maximum is 1.0
                _max = hist.GetMaximum()
                if _max != 0:
                    hist /= _max
                r2m.contour(hist,
                    axes=axes[iy, ix],
                    levels=levels,
                    linestyles=sample_lines[isample] if sample_lines else LINES,
                    colors=sample_colors[isample])

        # label the diagonal subplots
        for i, field in enumerate(fields):
            axes[i, i].annotate(field,
                (0.1, 0.2),
                rotation=45,
                xycoords='axes fraction',
                ha='left', va='center')

        # make proxy artists for legend
        lines = []
        for color in sample_colors:
            lines.append(Line2D([0, 0], [0, 0], color=color))

        if sample_names is not None:
            # draw the legend
            leg = fig.legend(lines, sample_names, loc=(0.65, 0.8))
            leg.set_frame_on(False)

        if label is not None:
            axes[0, 0].annotate(label, (0, 1),
                ha='left', va='top',
                xycoords='axes fraction')

        fig.savefig(filename, bbox_inches='tight', dpi=dpi)
        plt.close(fig)

    if animate_field is not None:
        _, ext = os.path.splitext(filename)
        if ext != '.gif':
            raise ValueError(
                "animation is only supported for .gif files")
        field_idx = fields.index(animate_field)
        cuts = np.linspace(
            low[field_idx],
            high[field_idx],
            animate_steps + 1)[:-1]
        gif = GIF()
        temp_dir = tempfile.mkdtemp()
        for i, cut in enumerate(cuts):
            frame_filename = os.path.join(temp_dir, 'frame_{0:d}.png'.format(i))
            label = '{0} > {1:.2f}'.format(animate_field, cut)
            log.info("creating frame for {0} ...".format(label))
            new_arrays = []
            for array in arrays:
                new_arrays.append(array[array[:, field_idx] > cut])
            single_frame(new_arrays,
                filename=frame_filename,
                label=label)
            gif.add_frame(frame_filename)
        gif.write(filename, delay=animate_delay, loop=animate_loop)
        shutil.rmtree(temp_dir)
    else:
        single_frame(arrays, filename=filename)
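
The animation branch writes every frame into a tempfile.mkdtemp() directory and removes it with shutil.rmtree once the GIF has been assembled. On Python 3 the same idea can be written with tempfile.TemporaryDirectory, which performs the rmtree automatically; a minimal sketch with hypothetical render_frame and assemble_gif callables:

import os
import tempfile

def build_animation(num_frames, render_frame, assemble_gif, out_filename):
    # Render frames into a scratch directory that is removed when the block exits.
    with tempfile.TemporaryDirectory() as temp_dir:
        frames = []
        for i in range(num_frames):
            frame = os.path.join(temp_dir, 'frame_{0:d}.png'.format(i))
            render_frame(i, frame)
            frames.append(frame)
        assemble_gif(frames, out_filename)
    # temp_dir and every frame in it are gone at this point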

Example 49

Project: s3ql
Source File: benchmark.py
View license
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # /dev/urandom may be slow, so we cache the data first
    log.info('Preparing test data...')
    rnd_fh = tempfile.TemporaryFile()
    with open('/dev/urandom', 'rb', 0) as src:
        copied = 0
        while copied < 50 * 1024 * 1024:
            buf = src.read(BUFSIZE)
            rnd_fh.write(buf)
            copied += len(buf)

    log.info('Measuring throughput to cache...')
    backend_dir = tempfile.mkdtemp(prefix='s3ql-benchmark-')
    mnt_dir = tempfile.mkdtemp(prefix='s3ql-mnt')
    atexit.register(shutil.rmtree, backend_dir)
    atexit.register(shutil.rmtree, mnt_dir)

    block_sizes = [ 2**b for b in range(12, 18) ]
    for blocksize in block_sizes:
        write_time = 0
        size = 50 * 1024 * 1024
        while write_time < 3:
            log.debug('Write took %.3g seconds, retrying', write_time)
            subprocess.check_call([exec_prefix + 'mkfs.s3ql', '--plain', 'local://%s' % backend_dir,
                                   '--quiet', '--force', '--cachedir', options.cachedir])
            subprocess.check_call([exec_prefix + 'mount.s3ql', '--threads', '1', '--quiet',
                                   '--cachesize', '%d' % (2 * size / 1024), '--log',
                                   '%s/mount.log' % backend_dir, '--cachedir', options.cachedir,
                                   'local://%s' % backend_dir, mnt_dir])
            try:
                size *= 2
                with open('%s/bigfile' % mnt_dir, 'wb', 0) as dst:
                    rnd_fh.seek(0)
                    write_time = time.time()
                    copied = 0
                    while copied < size:
                        buf = rnd_fh.read(blocksize)
                        if not buf:
                            rnd_fh.seek(0)
                            continue
                        dst.write(buf)
                        copied += len(buf)

                write_time = time.time() - write_time
                os.unlink('%s/bigfile' % mnt_dir)
            finally:
                subprocess.check_call([exec_prefix + 'umount.s3ql', mnt_dir])

        fuse_speed = copied / write_time
        log.info('Cache throughput with %3d KiB blocks: %d KiB/sec',
                 blocksize / 1024, fuse_speed / 1024)

    # Upload random data to prevent effects of compression
    # on the network layer
    log.info('Measuring raw backend throughput..')
    try:
        backend = get_backend(options, raw=True)
    except DanglingStorageURLError as exc:
        raise QuietError(str(exc)) from None

    upload_time = 0
    size = 512 * 1024
    while upload_time < 10:
        size *= 2
        def do_write(dst):
            rnd_fh.seek(0)
            stamp = time.time()
            copied = 0
            while copied < size:
                buf = rnd_fh.read(BUFSIZE)
                if not buf:
                    rnd_fh.seek(0)
                    continue
                dst.write(buf)
                copied += len(buf)
            return (copied, stamp)
        (upload_size, upload_time) = backend.perform_write(do_write, 's3ql_testdata')
        upload_time = time.time() - upload_time
    backend_speed = upload_size / upload_time
    log.info('Backend throughput: %d KiB/sec', backend_speed / 1024)
    backend.delete('s3ql_testdata')

    src = options.file
    size = os.fstat(options.file.fileno()).st_size
    log.info('Test file size: %.2f MiB', (size / 1024 ** 2))

    in_speed = dict()
    out_speed = dict()
    for alg in ALGS:
        log.info('compressing with %s-6...', alg)
        backend = ComprencBackend(b'pass', (alg, 6), Backend('local://' + backend_dir, None, None))
        def do_write(dst): #pylint: disable=E0102
            src.seek(0)
            stamp = time.time()
            while True:
                buf = src.read(BUFSIZE)
                if not buf:
                    break
                dst.write(buf)
            return (dst, stamp)
        (dst_fh, stamp) = backend.perform_write(do_write, 's3ql_testdata')
        dt = time.time() - stamp
        in_speed[alg] = size / dt
        out_speed[alg] = dst_fh.get_obj_size() / dt
        log.info('%s compression speed: %d KiB/sec per thread (in)', alg, in_speed[alg] / 1024)
        log.info('%s compression speed: %d KiB/sec per thread (out)', alg, out_speed[alg] / 1024)

    print('')
    print('With %d KiB blocks, maximum performance for different compression'
          % (block_sizes[-1]/1024), 'algorithms and thread counts is:', '', sep='\n')

    threads = set([1,2,4,8])
    cores = os.sysconf('SC_NPROCESSORS_ONLN')
    if cores != -1:
        threads.add(cores)
    if options.threads:
        threads.add(options.threads)

    print('%-26s' % 'Threads:',
          ('%12d' * len(threads)) % tuple(sorted(threads)))

    for alg in ALGS:
        speeds = []
        limits = []
        for t in sorted(threads):
            if fuse_speed > t * in_speed[alg]:
                limit = 'CPU'
                speed = t * in_speed[alg]
            else:
                limit = 'S3QL/FUSE'
                speed = fuse_speed

            if speed / in_speed[alg] * out_speed[alg] > backend_speed:
                limit = 'uplink'
                speed = backend_speed * in_speed[alg] / out_speed[alg]

            limits.append(limit)
            speeds.append(speed / 1024)

        print('%-26s' % ('Max FS throughput (%s):' % alg),
              ('%7d KiB/s' * len(threads)) % tuple(speeds))
        print('%-26s' % '..limited by:',
              ('%12s' * len(threads)) % tuple(limits))

    print('')
    print('All numbers assume that the test file is representative and that',
          'there are enough processor cores to run all active threads in parallel.',
          'To compensate for network latency, you should use about twice as',
          'many upload threads as indicated by the above table.\n', sep='\n')
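
Rather than wrapping the benchmark in try/finally, this script hands its scratch directories to atexit, so shutil.rmtree runs at interpreter shutdown. A minimal sketch of that idiom with illustrative directory prefixes; note that atexit handlers do not run if the process is killed by a signal or via os._exit:

import atexit
import shutil
import tempfile

# Create scratch directories and schedule their removal for interpreter exit.
backend_dir = tempfile.mkdtemp(prefix='bench-backend-')
mnt_dir = tempfile.mkdtemp(prefix='bench-mnt-')

# atexit passes the extra argument to shutil.rmtree when the handler runs.
atexit.register(shutil.rmtree, backend_dir)
atexit.register(shutil.rmtree, mnt_dir)

# ... run the measurements using backend_dir and mnt_dir ...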

Example 50

Project: hg-git
Source File: run-tests.py
View license
def runone(options, test, count):
    '''returns a result element: (code, test, msg)'''

    def skip(msg):
        if options.verbose:
            log("\nSkipping %s: %s" % (testpath, msg))
        return 's', test, msg

    def fail(msg, ret):
        warned = ret is False
        if not options.nodiff:
            log("\n%s: %s %s" % (warned and 'Warning' or 'ERROR', test, msg))
        if (not ret and options.interactive
            and os.path.exists(testpath + ".err")):
            iolock.acquire()
            print "Accept this change? [n] ",
            answer = sys.stdin.readline().strip()
            iolock.release()
            if answer.lower() in "y yes".split():
                if test.endswith(".t"):
                    rename(testpath + ".err", testpath)
                else:
                    rename(testpath + ".err", testpath + ".out")
                return '.', test, ''
        return warned and '~' or '!', test, msg

    def success():
        return '.', test, ''

    def ignore(msg):
        return 'i', test, msg

    def describe(ret):
        if ret < 0:
            return 'killed by signal %d' % -ret
        return 'returned error code %d' % ret

    testpath = os.path.join(TESTDIR, test)
    err = os.path.join(TESTDIR, test + ".err")
    lctest = test.lower()

    if not os.path.exists(testpath):
        return skip("doesn't exist")

    if not (options.whitelisted and test in options.whitelisted):
        if options.blacklist and test in options.blacklist:
            return skip("blacklisted")

        if options.retest and not os.path.exists(test + ".err"):
            return ignore("not retesting")

        if options.keywords:
            fp = open(test)
            t = fp.read().lower() + test.lower()
            fp.close()
            for k in options.keywords.lower().split():
                if k in t:
                    break
            else:
                return ignore("doesn't match keyword")

    if not lctest.startswith("test-"):
        return skip("not a test file")
    for ext, func, out in testtypes:
        if lctest.endswith(ext):
            runner = func
            ref = os.path.join(TESTDIR, test + out)
            break
    else:
        return skip("unknown test type")

    vlog("# Test", test)

    if os.path.exists(err):
        os.remove(err)       # Remove any previous output files

    # Make a tmp subdirectory to work in
    threadtmp = os.path.join(HGTMP, "child%d" % count)
    testtmp = os.path.join(threadtmp, os.path.basename(test))
    os.mkdir(threadtmp)
    os.mkdir(testtmp)

    port = options.port + count * 3
    replacements = [
        (r':%s\b' % port, ':$HGPORT'),
        (r':%s\b' % (port + 1), ':$HGPORT1'),
        (r':%s\b' % (port + 2), ':$HGPORT2'),
        ]
    if os.name == 'nt':
        replacements.append(
            (''.join(c.isalpha() and '[%s%s]' % (c.lower(), c.upper()) or
                     c in '/\\' and r'[/\\]' or
                     c.isdigit() and c or
                     '\\' + c
                     for c in testtmp), '$TESTTMP'))
    else:
        replacements.append((re.escape(testtmp), '$TESTTMP'))

    env = createenv(options, testtmp, threadtmp, port)
    createhgrc(env['HGRCPATH'], options)

    starttime = time.time()
    try:
        ret, out = runner(testpath, testtmp, options, replacements, env)
    except KeyboardInterrupt:
        endtime = time.time()
        log('INTERRUPTED: %s (after %d seconds)' % (test, endtime - starttime))
        raise
    endtime = time.time()
    times.append((test, endtime - starttime))
    vlog("# Ret was:", ret)

    killdaemons(env['DAEMON_PIDS'])

    skipped = (ret == SKIPPED_STATUS)

    # If we're not in --debug mode and reference output file exists,
    # check test output against it.
    if options.debug:
        refout = None                   # to match "out is None"
    elif os.path.exists(ref):
        f = open(ref, "r")
        refout = f.read().splitlines(True)
        f.close()
    else:
        refout = []

    if (ret != 0 or out != refout) and not skipped and not options.debug:
        # Save errors to a file for diagnosis
        f = open(err, "wb")
        for line in out:
            f.write(line)
        f.close()

    if skipped:
        if out is None:                 # debug mode: nothing to parse
            missing = ['unknown']
            failed = None
        else:
            missing, failed = parsehghaveoutput(out)
        if not missing:
            missing = ['irrelevant']
        if failed:
            result = fail("hghave failed checking for %s" % failed[-1], ret)
            skipped = False
        else:
            result = skip(missing[-1])
    elif ret == 'timeout':
        result = fail("timed out", ret)
    elif out != refout:
        if not options.nodiff:
            iolock.acquire()
            if options.view:
                os.system("%s %s %s" % (options.view, ref, err))
            else:
                showdiff(refout, out, ref, err)
            iolock.release()
        if ret:
            result = fail("output changed and " + describe(ret), ret)
        else:
            result = fail("output changed", ret)
    elif ret:
        result = fail(describe(ret), ret)
    else:
        result = success()

    if not options.verbose:
        iolock.acquire()
        sys.stdout.write(result[0])
        sys.stdout.flush()
        iolock.release()

    if not options.keep_tmpdir:
        shutil.rmtree(threadtmp, True)
    return result
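
The final cleanup passes True as the second positional argument of shutil.rmtree, i.e. ignore_errors=True, so a failed removal cannot turn a passing test into an error. A minimal sketch showing both that form and the onerror callback, which reports failures instead of silently dropping them (log_failure is a hypothetical logger):

import shutil

def cleanup(path, log_failure=None):
    # Remove a scratch tree without letting cleanup problems fail the run.
    if log_failure is None:
        shutil.rmtree(path, True)             # ignore_errors=True: swallow everything
    else:
        def onerror(func, failed_path, exc_info):
            # Called once per failure; func is the os function that raised.
            log_failure('%s failed on %s' % (func.__name__, failed_path))
        shutil.rmtree(path, onerror=onerror)  # report failures instead of hiding them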