tempfile.mkdtemp

Here are examples of the Python API tempfile.mkdtemp, taken from open source projects.

200 Examples
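
Before the project examples, here is a minimal sketch of the core pattern most of them follow: tempfile.mkdtemp() creates a uniquely named directory that is readable, writable, and searchable only by the creating user and returns its absolute path, but removing the directory afterwards is the caller's responsibility.

import os
import shutil
import tempfile

# Create a private, uniquely named directory. mkdtemp never reuses an
# existing name, so there is no race with other processes.
work_dir = tempfile.mkdtemp(suffix=".build", prefix="demo-")
try:
    # Do some work inside the directory.
    with open(os.path.join(work_dir, "scratch.txt"), "w") as f:
        f.write("temporary data\n")
finally:
    # mkdtemp does not clean up after itself; remove the tree explicitly.
    shutil.rmtree(work_dir)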

Example 1

Project: YCM-Generator
Source File: config_gen.py
def fake_build(project_dir, c_build_log_path, cxx_build_log_path, verbose, make_cmd, build_system, cc, cxx, out_of_tree, configure_opts, make_flags, preserve_environment, qt_version):
    '''Builds the project using the fake toolchain, to collect the compiler flags.

    project_dir: the directory containing the source files
    c_build_log_path / cxx_build_log_path: the files to log C and C++ compile commands to
    verbose: show the build process output
    make_cmd: the path of the make executable
    cc: the path of the clang executable
    cxx: the path of the clang++ executable
    out_of_tree: perform an out-of-tree build (autotools only)
    configure_opts: additional flags for configure stage
    make_flags: additional flags for make
    preserve_environment: pass environment variables to build processes
    qt_version: The Qt version to use when building with qmake.
    '''

    # TODO: add Windows support
    assert(not sys.platform.startswith("win32"))
    fake_path = os.path.join(ycm_generator_dir, "fake-toolchain", "Unix")

    # environment variables and arguments for build process
    started = time.time()
    FNULL = open(os.devnull, "w")
    proc_opts = {} if verbose else {
        "stdin": FNULL,
        "stdout": FNULL,
        "stderr": FNULL
    }
    proc_opts["cwd"] = project_dir

    if(preserve_environment):
        env = os.environ
    else:
        # Preserve HOME, since Cmake needs it to find some packages and it's
        # normally there anyway. See #26.
        env = dict(map(lambda x: (x, os.environ[x]), ["HOME"]))

    env["PATH"]  = "{}:{}".format(fake_path, os.environ["PATH"])
    env["CC"] = "clang"
    env["CXX"] = "clang++"
    env["YCM_CONFIG_GEN_CC_LOG"] = c_build_log_path
    env["YCM_CONFIG_GEN_CXX_LOG"] = cxx_build_log_path

    # used during configuration stage, so that cmake, etc. can verify what the compiler supports
    env_config = env.copy()
    env_config["YCM_CONFIG_GEN_CC_PASSTHROUGH"] = cc
    env_config["YCM_CONFIG_GEN_CXX_PASSTHROUGH"] = cxx

    # use -i (ignore errors), since the makefile may include scripts which
    # depend upon the existence of various output files
    make_args = [make_cmd] + make_flags

    # Used for the qmake build system below
    pro_files = glob.glob(os.path.join(project_dir, "*.pro"))

    # sanity check - make sure the toolchain is available
    assert os.path.exists(fake_path), "Could not find toolchain at '{}'".format(fake_path)

    # helper function to display exact commands used
    def run(cmd, *args, **kwargs):
        print("$ " + " ".join(cmd))
        subprocess.call(cmd, *args, **kwargs)

    if build_system is None:
        if os.path.exists(os.path.join(project_dir, "CMakeLists.txt")):
            build_system = "cmake"
        elif os.path.exists(os.path.join(project_dir, "configure")):
            build_system = "autotools"
        elif pro_files:
            build_system = "qmake"
        elif any([os.path.exists(os.path.join(project_dir, x)) for x in ["GNUmakefile", "makefile", "Makefile"]]):
            build_system = "make"

    # execute the build system
    if build_system == "cmake":
        # cmake
        # run cmake in a temporary directory, then compile the project as usual
        build_dir = tempfile.mkdtemp()
        proc_opts["cwd"] = build_dir

        # if the project was built in-tree, we need to hide the cache file so that cmake
        # populates the build dir instead of just re-generating the existing files
        cache_path = os.path.join(project_dir, "CMakeCache.txt")

        if(os.path.exists(cache_path)):
            fd, cache_tmp = tempfile.mkstemp()
            os.close(fd)
            shutil.move(cache_path, cache_tmp)
        else:
            cache_tmp = None

        print("Running cmake in '{}'...".format(build_dir))
        sys.stdout.flush()
        run(["cmake", project_dir] + configure_opts, env=env_config, **proc_opts)

        print("\nRunning make...")
        sys.stdout.flush()
        run(make_args, env=env, **proc_opts)

        print("\nCleaning up...")
        print("")
        sys.stdout.flush()
        shutil.rmtree(build_dir)

        if(cache_tmp):
            shutil.move(cache_tmp, cache_path)

    elif build_system == "autotools":
        # autotools
        # perform build in-tree, since not all projects handle out-of-tree builds correctly

        if(out_of_tree):
            build_dir = tempfile.mkdtemp()
            proc_opts["cwd"] = build_dir
            print("Configuring autotools in '{}'...".format(build_dir))
        else:
            print("Configuring autotools...")

        run([os.path.join(project_dir, "configure")] + configure_opts, env=env_config, **proc_opts)

        print("\nRunning make...")
        run(make_args, env=env, **proc_opts)

        print("\nCleaning up...")

        if(out_of_tree):
            print("")
            shutil.rmtree(build_dir)
        else:
            run([make_cmd, "maintainer-clean"], env=env, **proc_opts)

    elif build_system == "qmake":
        # qmake
        # make sure there is only one .pro file
        if len(pro_files) != 1:
            print("ERROR: Found {} .pro files (expected one): {}.".format(
                len(pro_files), ', '.join(pro_files)))
            sys.exit(1)

        # run qmake in a temporary directory, then compile the project as usual
        build_dir = tempfile.mkdtemp()
        proc_opts["cwd"] = build_dir
        env_config["QT_SELECT"] = qt_version

        # QMAKESPEC is platform dependent - valid mkspecs are in
        # /usr/share/qt4/mkspecs, /usr/lib64/qt5/mkspecs
        env_config["QMAKESPEC"] = {
            ("Linux",  True):   "unsupported/linux-clang",
            ("Linux",  False):  "linux-clang",
            ("Darwin", True):   "unsupported/macx-clang",
            ("Darwin", False):  "macx-clang",
            ("FreeBSD", False): "unsupported/freebsd-clang",
        }[(os.uname()[0], qt_version == "4")]

        print("Running qmake in '{}' with Qt {}...".format(build_dir, qt_version))
        run(["qmake"] + configure_opts + [pro_files[0]], env=env_config,
            **proc_opts)

        print("\nRunning make...")
        run(make_args, env=env, **proc_opts)

        print("\nCleaning up...")
        print("")
        shutil.rmtree(build_dir)

    elif build_system == "make":
        # make
        # needs to be handled last, since other build systems can generate Makefiles
        print("Preparing build directory...")
        run([make_cmd, "clean"], env=env, **proc_opts)

        print("\nRunning make...")
        run(make_args, env=env, **proc_opts)

    elif(os.path.exists(os.path.join(project_dir, "Make/options"))):
        print("Found OpenFOAM Make/options")

        # OpenFOAM build system
        make_args = ["wmake"]

        # Since icpc could not find the directory in which g++ resides,
        # set environment variables to gcc so that fake_build operates normally.

        env['WM_COMPILER']='Gcc'
        env['WM_CC']='gcc'
        env['WM_CXX']='g++'

        print("\nRunning wmake...")
        run(make_args, env=env, **proc_opts)

    else:
        print("ERROR: Unknown build system")
        sys.exit(2)

    print("Build completed in {} sec".format(round(time.time() - started, 2)))
    print("")

Example 2

Project: cstar_perf
Source File: client.py
    def perform_job(self, job):
        """Perform a job the server gave us, stream output and artifacts to the given websocket."""
        job = copy.deepcopy(job['test_definition'])
        # Cleanup the job structure according to what stress_compare needs:
        for operation in job['operations']:
            operation['type'] = operation['operation']
            del operation['operation']

        job_dir = os.path.join(os.path.expanduser('~'),'.cstar_perf','jobs',job['test_id'])
        mkpath(job_dir)
        stats_path = os.path.join(job_dir,'stats.{test_id}.json'.format(test_id=job['test_id']))
        summary_path = os.path.join(job_dir,'stats_summary.{test_id}.json'.format(test_id=job['test_id']))
        stress_log_path = os.path.join(job_dir,'stress_compare.{test_id}.log'.format(test_id=job['test_id']))

        stress_json = json.dumps(dict(revisions=job['revisions'],
                                      operations=job['operations'],
                                      title=job['title'],
                                      leave_data=job.get('leave_data', False),
                                      log=stats_path))

        # Create a temporary location to store the stress_compare json file:
        stress_json_path = os.path.join(job_dir, 'test.{test_id}.json'.format(test_id=job['test_id']))
        with open(stress_json_path, 'w') as f:
            f.write(stress_json)

        # Inform the server we will be streaming the console output to them:
        command = Command.new(self.__ws_client.socket(), action='stream', test_id=job['test_id'],
                              kind='console', name="stress_compare.{test_id}.log".format(test_id=job['test_id']),
                              eof=EOF_MARKER, keepalive=KEEPALIVE_MARKER)
        response = self.__ws_client.send(command, assertions={'message':'ready'})

        # Start a status checking thread.
        # If a user cancels the job after it's marked in_progress, we
        # need to periodically check for that state change and kill
        # our test:
        cancel_checker = JobCancellationTracker(urlparse.urlparse(self.ws_endpoint).netloc, job['test_id'])
        cancel_checker.start()

        # stats file observer
        # looks for changes to update server with status progress message
        observer = Observer()
        observer.schedule(UpdateServerProgressMessageHandler(job, urlparse.urlparse(self.ws_endpoint).netloc),
                          os.path.join(os.path.expanduser("~"), '.cstar_perf', 'jobs'),
                          recursive=True)
        observer.start()

        # Run stress_compare in a separate process, collecting the
        # output as an artifact:
        try:
            # Run stress_compare with pexpect. subprocess.Popen didn't
            # work due to some kind of tty issue when invoking
            # nodetool.
            stress_proc = pexpect.spawn('cstar_perf_stress {stress_json_path}'.format(stress_json_path=stress_json_path), timeout=None)
            with open(stress_log_path, 'w') as stress_log:
                while True:
                    try:
                        with timeout(25):
                            line = stress_proc.readline()
                            if line == '':
                                break
                            stress_log.write(line)
                            sys.stdout.write(line)
                            self.__ws_client.send(base64.b64encode(line))
                    except TimeoutError:
                        self.__ws_client.send(base64.b64encode(KEEPALIVE_MARKER))
        finally:
            cancel_checker.stop()
            observer.stop()
            self.__ws_client.send(base64.b64encode(EOF_MARKER))

        response = self.__ws_client.receive(response, assertions={'message': 'stream_received', 'done': True})

        # Find the log tarball for each revision by introspecting the stats json:
        system_logs = []
        flamegraph_logs = []
        yourkit_logs = []
        log_dir = CSTAR_PERF_LOGS_DIR
        flamegraph_dir = os.path.join(os.path.expanduser("~"), '.cstar_perf', 'flamegraph')
        yourkit_dir = os.path.join(os.path.expanduser("~"), '.cstar_perf', 'yourkit')
        #Create a stats summary file without voluminous interval data
        if os.path.isfile(stats_path):
            with open(stats_path) as stats:
                stats = json.loads(stats.read())
                for rev in stats['revisions']:
                    last_log_rev_id = rev.get('last_log')
                    if last_log_rev_id:
                        system_logs.append(os.path.join(log_dir, "{name}.tar.gz".format(name=last_log_rev_id)))
                        fg_path = os.path.join(flamegraph_dir, "{name}.tar.gz".format(name=last_log_rev_id))
                        yourkit_path = os.path.join(yourkit_dir, "{name}.tar.gz".format(name=last_log_rev_id))
                        if os.path.exists(fg_path):
                            flamegraph_logs.append(fg_path)
                        if os.path.exists(yourkit_path):
                            yourkit_logs.append(yourkit_path)
                with open(summary_path, 'w') as summary:
                    hadStats = False
                    for op in stats['stats']:
                        if op['type'] == 'stress':
                            try:
                                del op['intervals']
                                hadStats = True
                            except KeyError:
                                pass
                        try:
                            del op['output']
                        except KeyError:
                            pass
                    if hadStats:
                        json.dump(obj=stats, fp=summary, sort_keys=True, indent=4, separators=(',', ': '))

        # Make a new tarball containing all the revision logs:
        tmptardir = tempfile.mkdtemp()
        try:
            startup_log_tarball = self._maybe_get_startup_log_tarball(job['test_id'], log_dir)
            if startup_log_tarball:
                system_logs.append(startup_log_tarball)
            job_log_dir = os.path.join(tmptardir, 'cassandra_logs.{test_id}'.format(test_id=job['test_id']))
            os.mkdir(job_log_dir)
            for x, syslog in enumerate(system_logs, 1):
                with tarfile.open(syslog) as tar:
                    tar.extractall(job_log_dir)
                    os.rename(os.path.join(job_log_dir, tar.getnames()[0]), os.path.join(job_log_dir, 'revision_{x:02d}'.format(x=x)))
            system_logs_path = os.path.join(job_dir, 'cassandra_logs.{test_id}.tar.gz'.format(test_id=job['test_id']))
            with tarfile.open(system_logs_path, 'w:gz') as tar:
                with cd(tmptardir):
                    tar.add('cassandra_logs.{test_id}'.format(test_id=job['test_id']))
            assert os.path.exists(system_logs_path)
        finally:
            shutil.rmtree(tmptardir)

        # Make a new tarball containing all the flamegraph and data
        if flamegraph_logs:
            tmptardir = tempfile.mkdtemp()
            try:
                flamegraph_tmp_dir = os.path.join(tmptardir, 'flamegraph_logs.{test_id}'.format(test_id=job['test_id']))
                os.mkdir(flamegraph_tmp_dir)
                for x, flamegraph in enumerate(flamegraph_logs, 1):
                    with tarfile.open(flamegraph) as tar:
                        tar.extractall(flamegraph_tmp_dir)
                        tmp_dir = os.path.join(flamegraph_tmp_dir, tar.getnames()[0])

                        # Copy all flamegraph as artifacts
                        for node_dir in os.listdir(tmp_dir):
                            glob_match = os.path.join(os.path.join(tmp_dir, node_dir), '*.svg')
                            graphs = glob.glob(glob_match)
                            for graph in graphs:
                                graph_name = os.path.basename(graph).replace(
                                    'flamegraph_', 'flamegraph_{}_{}_'.format(job['test_id'], node_dir))
                                graph_dst_filename = os.path.join(job_dir, graph_name)
                                shutil.copyfile(graph, graph_dst_filename)

                        os.rename(tmp_dir, os.path.join(flamegraph_tmp_dir, 'revision_{x:02d}'.format(x=x)))

                flamegraph_job_path = os.path.join(job_dir, 'flamegraph_logs.{test_id}.tar.gz'.format(test_id=job['test_id']))
                with tarfile.open(flamegraph_job_path, 'w:gz') as tar:
                    with cd(tmptardir):
                        tar.add('flamegraph_logs.{test_id}'.format(test_id=job['test_id']))
                assert os.path.exists(flamegraph_job_path)
            finally:
                shutil.rmtree(tmptardir)

        # Make a new tarball containing all the flamegraph and data
        if yourkit_logs:
            tmptardir = tempfile.mkdtemp()
            try:
                yourkit_tmp_dir = os.path.join(tmptardir, 'yourkit.{test_id}'.format(test_id=job['test_id']))
                os.mkdir(yourkit_tmp_dir)
                for x, yourkit in enumerate(yourkit_logs, 1):
                    with tarfile.open(yourkit) as tar:
                        tar.extractall(yourkit_tmp_dir)
                        tmp_dir = os.path.join(yourkit_tmp_dir, tar.getnames()[0])
                        os.rename(tmp_dir, os.path.join(yourkit_tmp_dir, 'revision_{x:02d}'.format(x=x)))

                yourkit_job_path = os.path.join(job_dir, 'yourkit.{test_id}.tar.gz'.format(test_id=job['test_id']))
                with tarfile.open(yourkit_job_path, 'w:gz') as tar:
                    with cd(tmptardir):
                        tar.add('yourkit.{test_id}'.format(test_id=job['test_id']))
                assert os.path.exists(yourkit_job_path)
            finally:
                shutil.rmtree(tmptardir)

        ## Stream artifacts
        ## Write final job status to 0.job_status file
        final_status = 'local_complete'
        try:
            # Stream artifacts:
            self.stream_artifacts(job['test_id'])
            if self.__ws_client.in_sync():
                final_status = 'server_complete'

            # Spot check stats to ensure it has the data it should
            # contain. Raises JobFailure if something's amiss.
            try:
                self.__spot_check_stats(job, stats_path)
            except JobFailure, e:
                if final_status == 'server_complete':
                    final_status = 'server_fail'
                else:
                    final_status = 'local_fail'
                raise
        finally:
            with open(os.path.join(job_dir, '0.job_status'), 'w') as f:
                f.write(final_status)

Example 3

Project: stalker
Source File: test_budget.py
    def setUp(self):
        """run once
        """
        defaults.timing_resolution = datetime.timedelta(hours=1)

        # create a new session
        db.setup({
            'sqlalchemy.url': 'sqlite://',
            'sqlalchemy.echo': False
        })
        db.init()

        self.status_wfd = Status.query.filter_by(code="WFD").first()
        self.status_rts = Status.query.filter_by(code="RTS").first()
        self.status_wip = Status.query.filter_by(code="WIP").first()
        self.status_prev = Status.query.filter_by(code="PREV").first()
        self.status_hrev = Status.query.filter_by(code="HREV").first()
        self.status_drev = Status.query.filter_by(code="DREV").first()
        self.status_oh = Status.query.filter_by(code="OH").first()
        self.status_stop = Status.query.filter_by(code="STOP").first()
        self.status_cmpl = Status.query.filter_by(code="CMPL").first()

        self.status_new = Status.query.filter_by(code='NEW').first()
        self.status_app = Status.query.filter_by(code='APP').first()

        self.budget_status_list = StatusList(
            name='Budget Statuses',
            target_entity_type='Budget',
            statuses=[self.status_new, self.status_prev, self.status_app]
        )
        db.DBSession.add(self.budget_status_list)

        self.task_status_list = StatusList.query\
            .filter_by(target_entity_type='Task').first()

        self.test_project_status_list = StatusList(
            name="Project Statuses",
            statuses=[self.status_wip,
                      self.status_prev,
                      self.status_cmpl],
            target_entity_type=Project,
        )

        self.test_movie_project_type = Type(
            name="Movie Project",
            code='movie',
            target_entity_type=Project,
        )

        self.test_repository_type = Type(
            name="Test Repository Type",
            code='test',
            target_entity_type=Repository,
        )

        self.test_repository = Repository(
            name="Test Repository",
            type=self.test_repository_type,
            linux_path=tempfile.mkdtemp(),
            windows_path=tempfile.mkdtemp(),
            osx_path=tempfile.mkdtemp()
        )

        self.test_user1 = User(
            name="User1",
            login="user1",
            email="[email protected]",
            password="1234"
        )

        self.test_user2 = User(
            name="User2",
            login="user2",
            email="[email protected]",
            password="1234"
        )

        self.test_user3 = User(
            name="User3",
            login="user3",
            email="[email protected]",
            password="1234"
        )

        self.test_user4 = User(
            name="User4",
            login="user4",
            email="[email protected]",
            password="1234"
        )

        self.test_user5 = User(
            name="User5",
            login="user5",
            email="[email protected]",
            password="1234"
        )

        self.test_project = Project(
            name="Test Project1",
            code='tp1',
            type=self.test_movie_project_type,
            status_list=self.test_project_status_list,
            repository=self.test_repository
        )

        self.kwargs = {
            'project': self.test_project,
            'name': 'Test Budget 1'
        }

        self.test_budget = Budget(**self.kwargs)

        self.test_good = Good(
            name='Some Good',
            cost=100,
            msrp=120,
            unit='$'
        )

Example 4

Project: geonode
Source File: tests.py
    def test_get_files(self):

        # Check that a well-formed Shapefile has its components all picked up
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.shp", "foo.shx", "foo.prj", "foo.dbf"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            gotten_files = get_files(os.path.join(d, "foo.shp"))
            gotten_files = dict((k, v[len(d) + 1:])
                                for k, v in gotten_files.iteritems())
            self.assertEquals(gotten_files, dict(shp="foo.shp", shx="foo.shx",
                                                 prj="foo.prj", dbf="foo.dbf"))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that a Shapefile missing required components raises an
        # exception
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.shp", "foo.shx", "foo.prj"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            self.assertRaises(
                GeoNodeException,
                lambda: get_files(
                    os.path.join(
                        d,
                        "foo.shp")))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that including an SLD with a valid shapefile results in the SLD
        # getting picked up
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.shp", "foo.shx", "foo.prj", "foo.dbf", "foo.sld"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            gotten_files = get_files(os.path.join(d, "foo.shp"))
            gotten_files = dict((k, v[len(d) + 1:])
                                for k, v in gotten_files.iteritems())
            self.assertEquals(
                gotten_files,
                dict(
                    shp="foo.shp",
                    shx="foo.shx",
                    prj="foo.prj",
                    dbf="foo.dbf",
                    sld="foo.sld"))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that capitalized extensions are ok
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.SHP", "foo.SHX", "foo.PRJ", "foo.DBF"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            gotten_files = get_files(os.path.join(d, "foo.SHP"))
            gotten_files = dict((k, v[len(d) + 1:])
                                for k, v in gotten_files.iteritems())
            self.assertEquals(gotten_files, dict(shp="foo.SHP", shx="foo.SHX",
                                                 prj="foo.PRJ", dbf="foo.DBF"))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that mixed capital and lowercase extensions are ok
        d = None
        try:
            d = tempfile.mkdtemp()
            for f in ("foo.SHP", "foo.shx", "foo.pRJ", "foo.DBF"):
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            gotten_files = get_files(os.path.join(d, "foo.SHP"))
            gotten_files = dict((k, v[len(d) + 1:])
                                for k, v in gotten_files.iteritems())
            self.assertEquals(gotten_files, dict(shp="foo.SHP", shx="foo.shx",
                                                 prj="foo.pRJ", dbf="foo.DBF"))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that including both capital and lowercase extensions raises an
        # exception
        d = None
        try:
            d = tempfile.mkdtemp()
            files = (
                "foo.SHP",
                "foo.SHX",
                "foo.PRJ",
                "foo.DBF",
                "foo.shp",
                "foo.shx",
                "foo.prj",
                "foo.dbf")
            for f in files:
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            # Only run the tests if this is a case sensitive OS
            if len(os.listdir(d)) == len(files):
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.SHP")))
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.shp")))

        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that including both capital and lowercase PRJ raises an
        # exception (this is special-cased in the implementation)
        d = None
        try:
            d = tempfile.mkdtemp()
            files = ("foo.SHP", "foo.SHX", "foo.PRJ", "foo.DBF", "foo.prj")
            for f in files:
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            # Only run the tests if this is a case sensitive OS
            if len(os.listdir(d)) == len(files):
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.SHP")))
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.shp")))
        finally:
            if d is not None:
                shutil.rmtree(d)

        # Check that including both capital and lowercase SLD raises an
        # exception (this is special-cased in the implementation)
        d = None
        try:
            d = tempfile.mkdtemp()
            files = (
                "foo.SHP",
                "foo.SHX",
                "foo.PRJ",
                "foo.DBF",
                "foo.SLD",
                "foo.sld")
            for f in files:
                path = os.path.join(d, f)
                # open and immediately close to create empty file
                open(path, 'w').close()

            # Only run the tests if this is a case sensitive OS
            if len(os.listdir(d)) == len(files):
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.SHP")))
                self.assertRaises(
                    GeoNodeException,
                    lambda: get_files(
                        os.path.join(
                            d,
                            "foo.shp")))
        finally:
            if d is not None:
                shutil.rmtree(d)
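
Example 4 repeats the same try/finally plus shutil.rmtree dance for every temporary directory it creates. The geonode tests above are Python 2 code, so they spell the pattern out by hand; on Python 3.2+ the standard library wraps exactly this pattern in tempfile.TemporaryDirectory. A minimal sketch:

import os
import tempfile

# The context manager calls mkdtemp() on entry and shutil.rmtree() on exit,
# even if the body raises.
with tempfile.TemporaryDirectory() as d:
    for name in ("foo.shp", "foo.shx", "foo.prj", "foo.dbf"):
        # open and immediately close to create an empty file
        open(os.path.join(d, name), "w").close()
    # ... inspect the files under d here ...
# By this point d has been removed.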

Example 5

Project: gsutil
Source File: update.py
View license
  def RunCommand(self):
    """Command entry point for the update command."""

    if gslib.IS_PACKAGE_INSTALL:
      raise CommandException(
          'The update command is only available for gsutil installed from a '
          'tarball. If you installed gsutil via another method, use the same '
          'method to update it.')

    if os.environ.get('CLOUDSDK_WRAPPER') == '1':
      raise CommandException(
          'The update command is disabled for Cloud SDK installs. Please run '
          '"gcloud components update" to update it. Note: the Cloud SDK '
          'incorporates updates to the underlying tools approximately every 2 '
          'weeks, so if you are attempting to update to a recently created '
          'release / pre-release of gsutil it may not yet be available via '
          'the Cloud SDK.')

    https_validate_certificates = CERTIFICATE_VALIDATION_ENABLED
    if not https_validate_certificates:
      raise CommandException(
          'Your boto configuration has https_validate_certificates = False.\n'
          'The update command cannot be run this way, for security reasons.')

    DisallowUpdateIfDataInGsutilDir()

    force_update = False
    no_prompt = False
    if self.sub_opts:
      for o, unused_a in self.sub_opts:
        if o == '-f':
          force_update = True
        if o == '-n':
          no_prompt = True

    dirs_to_remove = []
    tmp_dir = tempfile.mkdtemp()
    dirs_to_remove.append(tmp_dir)
    os.chdir(tmp_dir)

    if not no_prompt:
      self.logger.info('Checking for software update...')
    if self.args:
      update_from_url_str = self.args[0]
      if not update_from_url_str.endswith('.tar.gz'):
        raise CommandException(
            'The update command only works with tar.gz files.')
      for i, result in enumerate(self.WildcardIterator(update_from_url_str)):
        if i > 0:
          raise CommandException(
              'Invalid update URL. Must name a single .tar.gz file.')
        storage_url = result.storage_url
        if storage_url.IsFileUrl() and not storage_url.IsDirectory():
          if not force_update:
            raise CommandException(
                ('"update" command does not support "file://" URLs without the '
                 '-f option.'))
        elif not (storage_url.IsCloudUrl() and storage_url.IsObject()):
          raise CommandException(
              'Invalid update object URL. Must name a single .tar.gz file.')
    else:
      update_from_url_str = GSUTIL_PUB_TARBALL

    # Try to retrieve version info from tarball metadata; failing that, download
    # the tarball and extract the VERSION file. The version lookup will fail
    # when running the update system test, because it retrieves the tarball from
    # a temp file rather than a cloud URL (files lack the version metadata).
    tarball_version = LookUpGsutilVersion(self.gsutil_api, update_from_url_str)
    if tarball_version:
      tf = None
    else:
      tf = self._FetchAndOpenGsutilTarball(update_from_url_str)
      tf.extractall()
      with open(os.path.join('gsutil', 'VERSION'), 'r') as ver_file:
        tarball_version = ver_file.read().strip()

    if not force_update and gslib.VERSION == tarball_version:
      self._CleanUpUpdateCommand(tf, dirs_to_remove)
      if self.args:
        raise CommandException('You already have %s installed.' %
                               update_from_url_str, informational=True)
      else:
        raise CommandException('You already have the latest gsutil release '
                               'installed.', informational=True)

    if not no_prompt:
      CheckAndMaybePromptForAnalyticsEnabling()
      if (2, 6) == sys.version_info[:2]:
        print('\n'.join(textwrap.wrap(
            'WARNING: You are using Python 2.6, which gsutil will stop '
            'supporting on September 1, 2016. If you run gsutil update to a '
            'version released after that date, you will need to upgrade your '
            'system\'s Python installation to a supported Python version '
            '(at the time of this writing, version 2.7), or else gsutil will '
            'fail.\n')))
      (_, major) = CompareVersions(tarball_version, gslib.VERSION)
      if major:
        print('\n'.join(textwrap.wrap(
            'This command will update to the "%s" version of gsutil at %s. '
            'NOTE: This is a major new version, so it is strongly recommended '
            'that you review the release note details at %s before updating to '
            'this version, especially if you use gsutil in scripts.'
            % (tarball_version, gslib.GSUTIL_DIR, RELEASE_NOTES_URL))))
      else:
        print('This command will update to the "%s" version of\ngsutil at %s'
              % (tarball_version, gslib.GSUTIL_DIR))
    self._ExplainIfSudoNeeded(tf, dirs_to_remove)

    if no_prompt:
      answer = 'y'
    else:
      answer = raw_input('Proceed? [y/N] ')
    if not answer or answer.lower()[0] != 'y':
      self._CleanUpUpdateCommand(tf, dirs_to_remove)
      raise CommandException('Not running update.', informational=True)

    if not tf:
      tf = self._FetchAndOpenGsutilTarball(update_from_url_str)

    # Ignore keyboard interrupts during the update to reduce the chance someone
    # hitting ^C leaves gsutil in a broken state.
    RegisterSignalHandler(signal.SIGINT, signal.SIG_IGN)

    # gslib.GSUTIL_DIR lists the path where the code should end up (like
    # /usr/local/gsutil), which is one level down from the relative path in the
    # tarball (since the latter creates files in ./gsutil). So, we need to
    # extract at the parent directory level.
    gsutil_bin_parent_dir = os.path.normpath(
        os.path.join(gslib.GSUTIL_DIR, '..'))

    # Extract tarball to a temporary directory in a sibling to GSUTIL_DIR.
    old_dir = tempfile.mkdtemp(dir=gsutil_bin_parent_dir)
    new_dir = tempfile.mkdtemp(dir=gsutil_bin_parent_dir)
    dirs_to_remove.append(old_dir)
    dirs_to_remove.append(new_dir)
    self._EnsureDirsSafeForUpdate(dirs_to_remove)
    try:
      tf.extractall(path=new_dir)
    except Exception, e:
      self._CleanUpUpdateCommand(tf, dirs_to_remove)
      raise CommandException('Update failed: %s.' % e)

    # For enterprise mode (shared/central) installation, users with
    # different user/group than the installation user/group must be
    # able to run gsutil so we need to do some permissions adjustments
    # here. Since enterprise mode is not supported for Windows
    # users, we can skip this step when running on Windows, which
    # avoids the problem that Windows has no find or xargs command.
    if not IS_WINDOWS:
      # Make all files and dirs in updated area owner-RW and world-R, and make
      # all directories owner-RWX and world-RX.
      for dirname, subdirs, filenames in os.walk(new_dir):
        for filename in filenames:
          fd = os.open(os.path.join(dirname, filename), os.O_RDONLY)
          os.fchmod(fd, stat.S_IWRITE | stat.S_IRUSR |
                    stat.S_IRGRP | stat.S_IROTH)
          os.close(fd)
        for subdir in subdirs:
          fd = os.open(os.path.join(dirname, subdir), os.O_RDONLY)
          os.fchmod(fd, stat.S_IRWXU | stat.S_IXGRP | stat.S_IXOTH |
                    stat.S_IRGRP | stat.S_IROTH)
          os.close(fd)

      # Make main gsutil script owner-RWX and world-RX.
      fd = os.open(os.path.join(new_dir, 'gsutil', 'gsutil'), os.O_RDONLY)
      os.fchmod(fd, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                stat.S_IROTH | stat.S_IXOTH)
      os.close(fd)

    # Move old installation aside and new into place.
    os.rename(gslib.GSUTIL_DIR, os.path.join(old_dir, 'old'))
    os.rename(os.path.join(new_dir, 'gsutil'), gslib.GSUTIL_DIR)
    self._CleanUpUpdateCommand(tf, dirs_to_remove)
    RegisterSignalHandler(signal.SIGINT, signal.SIG_DFL)
    self.logger.info('Update complete.')
    return 0
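
Example 5 passes dir=gsutil_bin_parent_dir so that the old and new staging directories are created next to the existing install. Keeping them on the same filesystem as GSUTIL_DIR matters because os.rename generally cannot move a tree across filesystem boundaries, and the final swap relies on rename being a cheap same-filesystem move. A minimal sketch of that staging pattern, using a toy directory layout in place of gslib.GSUTIL_DIR:

import os
import shutil
import tempfile

# A toy stand-in for an existing installation directory.
root = tempfile.mkdtemp()
install_dir = os.path.join(root, "gsutil")
os.mkdir(install_dir)

# Stage the old and new trees as siblings of the install directory so the
# renames below stay on one filesystem.
parent_dir = os.path.dirname(install_dir)
old_dir = tempfile.mkdtemp(dir=parent_dir)
new_dir = tempfile.mkdtemp(dir=parent_dir)
os.mkdir(os.path.join(new_dir, "gsutil"))   # pretend tarball extraction

# Swap: move the old install aside, then drop the new tree into place.
os.rename(install_dir, os.path.join(old_dir, "old"))
os.rename(os.path.join(new_dir, "gsutil"), install_dir)

shutil.rmtree(root)   # clean up the toy layout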

Example 6

Project: rail
Source File: mapping_accuracy.py
def go(true_bed_stream, sam_stream=sys.stdin, generous=False,
        base_threshold=0.5, clip_threshold=1.0, dump_incorrect=False,
        temp_dir=None, ignore_spliced_reads=False):
    """ Finds relevant and retrieved instance counts.

        true_bed_stream: file handle for BED output of Flux simulation
        sam_stream: where to read in aligner's mappings
        generous: True iff aligner cuts off /1 or /2 of a given read
        base_threshold: proportion of a read's bases that must align
            correctly for a read to be considered a correct mapping
        clip_threshold: proportion of a read's bases that must be clipped
            for a read to be considered unmapped
        dump_incorrect: write incorrect (read) alignments to stderr
        ignore_spliced_reads: ignores all spliced reads
    """
    from tempdel import remove_temporary_directories
    import tempfile
    import atexit
    if temp_dir is None:
        temp_dir_path = tempfile.mkdtemp()
    else:
        try:
            temp_dir_path = tempfile.mkdtemp(dir=temp_dir)
        except:
            temp_dir_path = tempfile.mkdtemp()
    #print >>sys.stderr, temp_dir_path
    atexit.register(remove_temporary_directories, [temp_dir_path])
    # Store everything in one file, then sort it on read name
    combined_file = os.path.join(temp_dir_path, 'combined.temp')
    with open(combined_file, 'w') as temp_stream:
        if ignore_spliced_reads:
            if generous:
                for line in true_bed_stream:
                    tokens = line.strip().split('\t')
                    if ',' in tokens[-1]: continue # skip intron line
                    print >>temp_stream, '\t'.join([tokens[3][:-2], '0']
                                                    + tokens[:3] + tokens[4:])
            else:
                for line in true_bed_stream:
                    tokens = line.strip().split('\t')
                    if ',' in tokens[-1]: continue # skip intron line
                    print >>temp_stream, '\t'.join(
                                    [tokens[3], '0'] + tokens[:3] + tokens[4:]
                                )
            for line in sam_stream:
                if line[0] == '@' or not line.strip(): continue
                tokens = line.strip().split('\t')
                if 'N' in tokens[5]: continue # skip intron line
                print >>temp_stream, '\t'.join([tokens[0], '1'] + tokens[1:])
        else:
            if generous:
                for line in true_bed_stream:
                    tokens = line.strip().split('\t')
                    print >>temp_stream, '\t'.join([tokens[3][:-2], '0']
                                                    + tokens[:3] + tokens[4:])
            else:
                for line in true_bed_stream:
                    tokens = line.strip().split('\t')
                    print >>temp_stream, '\t'.join(
                                    [tokens[3], '0'] + tokens[:3] + tokens[4:]
                                )
            for line in sam_stream:
                if line[0] == '@' or not line.strip(): continue
                tokens = line.strip().split('\t')
                print >>temp_stream, '\t'.join([tokens[0], '1'] + tokens[1:])
    import subprocess
    sorted_combined_file = os.path.join(temp_dir_path, 'combined.sorted.temp')
    subprocess.check_call(' '.join(['sort -T %s -k1,1 -k2,2n'
                                        % temp_dir_path, combined_file, 
                                        '>', sorted_combined_file]),
                            bufsize=-1, shell=True)
    basewise_relevant, read_relevant = 0, 0
    # Initialize counters for computing accuracy metrics
    basewise_retrieved, basewise_intersection = 0, 0
    read_retrieved, read_intersection = 0, 0
    with open(sorted_combined_file) as sorted_combined_stream:
        for (name,), xpartition in xstream(sorted_combined_stream, 1):
            '''Dict mapping read names to alignments
            (chrom, 1-based start, 1-based end)'''
            true_maps = []
            saved = []
            for tokens in xpartition:
                saved.append(tokens)
                if tokens[0] == '0':
                    if len(tokens) < 12:
                        continue
                    chrom = tokens[1]
                    chrom_start = int(tokens[2])
                    chrom_end = int(tokens[3])
                    block_sizes = tokens[10].split(',')
                    block_starts = tokens[11].split(',')
                    # Handle trailing commas
                    try:
                        int(block_sizes[-1])
                    except ValueError:
                        block_sizes = block_sizes[:-1]
                    try:
                        int(block_starts[-1])
                    except ValueError:
                        block_starts = block_starts[:-1]
                    block_count = len(block_sizes)
                    assert block_count == len(block_starts)
                    exons = [(chrom,
                                chrom_start + int(block_starts[i]),
                                chrom_start + int(block_starts[i])
                                + int(block_sizes[i]))
                                for i in xrange(block_count)]
                    true_maps.append(exons)
                    basewise_relevant += sum([int(block_size) for block_size
                                                in block_sizes])
                    read_relevant += 1
                elif tokens[0] == '1':
                    flag = int(tokens[1])
                    if flag & 256 or flag & 4:
                        '''Secondary alignment or unmapped and thus not
                        retrieved; ignore'''
                        continue
                    cigar, pos, seq = tokens[5], int(tokens[3]), tokens[9]
                    (dummy_md, mapped,
                        unmapped, clip_count, read_length) \
                        = dummy_md_and_mapped_offsets(
                                            cigar,
                                            clip_threshold=clip_threshold
                                        )
                    if unmapped:
                        # Too much clipping
                        continue
                    basewise_retrieved += read_length - clip_count
                    read_retrieved += 1
                    if not true_maps:
                        assert ignore_spliced_reads
                        continue
                    # Try both /1 and /2; choose the best basewise result
                    intersected_base_count = 0
                    for true_map in true_maps:
                        if tokens[2] != true_map[0][0]:
                            '''chr is wrong, but this is still counted as a
                            retrieval above'''
                            continue
                        base_counter, base_truths = 0, set()
                        '''Each tuple in base_truths is
                        (index of base in read, mapped location)'''
                        for block in true_map:
                            base_truths.update([(base_counter + i, j + 1)
                                                    for i, j in enumerate(
                                                        xrange(
                                                            block[1], block[2]
                                                        ))])
                            base_counter += block[2] - block[1]
                        base_predictions = set()
                        if unmapped:
                            # Too much clipping
                            continue
                        _, _, _, exons, _ = indels_junctions_exons_mismatches(
                                                        cigar,
                                                        dummy_md, pos, seq,
                                                        drop_deletions=True
                                                    )
                        mapped_index = 0
                        for exon in exons:
                            base_predictions.update(
                                        [(mapped[mapped_index + i], j)
                                                  for i, j in enumerate(
                                                    xrange(
                                                        exon[0], exon[1]
                                                    ))])
                            mapped_index += exon[1] - exon[0]
                        intersected_base_count = max(intersected_base_count,
                                len(
                                    base_predictions.intersection(base_truths)
                                ))
                    basewise_intersection += intersected_base_count
                    if intersected_base_count >= read_length * base_threshold:
                        read_intersection += 1
                    elif dump_incorrect:
                        # Incorrect alignment; write to stderr
                        print >>sys.stderr, '\t'.join(
                                ['.'.join(line) for line in saved]
                            )
                else:
                    raise RuntimeError(
                                'Invalid intermediate line.'
                            )
    return (basewise_retrieved, basewise_relevant, basewise_intersection,
            read_retrieved, read_relevant, read_intersection)
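
Instead of try/finally, Example 6 registers cleanup with atexit, so the temporary directory is removed when the interpreter exits no matter where the function returns. A minimal sketch of that pattern, with a small local helper standing in for tempdel.remove_temporary_directories:

import atexit
import shutil
import tempfile


def remove_dirs(paths):
    # Best-effort cleanup at interpreter exit.
    for path in paths:
        shutil.rmtree(path, ignore_errors=True)


temp_dir_path = tempfile.mkdtemp()
atexit.register(remove_dirs, [temp_dir_path])

# ... write intermediate files under temp_dir_path ...
# No explicit cleanup needed here: the handler runs on normal interpreter exit.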

Example 7

Project: nuxeo-drive
Source File: common.py
    def setUp(self):
        # Check the Nuxeo server test environment
        self.nuxeo_url = os.environ.get('NXDRIVE_TEST_NUXEO_URL')
        self.admin_user = os.environ.get('NXDRIVE_TEST_USER')
        self.password = os.environ.get('NXDRIVE_TEST_PASSWORD')
        self.build_workspace = os.environ.get('WORKSPACE')

        # Take default parameter if none has been set
        if self.nuxeo_url is None:
            self.nuxeo_url = "http://localhost:8080/nuxeo"
        if self.admin_user is None:
            self.admin_user = "Administrator"
        if self.password is None:
            self.password = "Administrator"
        self.tmpdir = None
        if self.build_workspace is not None:
            self.tmpdir = os.path.join(self.build_workspace, "tmp")
            if not os.path.isdir(self.tmpdir):
                os.makedirs(self.tmpdir)

        if None in (self.nuxeo_url, self.admin_user, self.password):
            raise unittest.SkipTest(
                "No integration server configuration found in environment.")

        self.full_nuxeo_url = self.nuxeo_url
        if '#' in self.nuxeo_url:
            self.nuxeo_url = self.nuxeo_url.split('#')[0]
        # Check the local filesystem test environment
        self.local_test_folder_1 = tempfile.mkdtemp(u'drive-1', dir=self.tmpdir)
        self.local_test_folder_2 = tempfile.mkdtemp(u'drive-2', dir=self.tmpdir)

        self.local_nxdrive_folder_1 = os.path.join(
            self.local_test_folder_1, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_1)
        self.local_nxdrive_folder_2 = os.path.join(
            self.local_test_folder_2, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_2)

        self.nxdrive_conf_folder_1 = os.path.join(
            self.local_test_folder_1, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_1)

        self.nxdrive_conf_folder_2 = os.path.join(
            self.local_test_folder_2, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_2)

        self.version = nxdrive.__version__

        # Long timeout for the root client that is responsible for the test
        # environment setup: this client issues the first query on the Nuxeo
        # server and might need to wait, without failing, for Nuxeo to finish
        # initializing the repository on the first request after startup
        root_remote_client = RemoteDocumentClient(
            self.nuxeo_url, self.admin_user,
            u'nxdrive-test-administrator-device', self.version,
            password=self.password, base_folder=u'/', timeout=60)

        # Call the Nuxeo operation to setup the integration test environment
        credentials = root_remote_client.execute(
            "NuxeoDrive.SetupIntegrationTests",
            userNames="user_1, user_2", permission='ReadWrite')

        credentials = [c.strip().split(u":") for c in credentials.split(u",")]
        self.user_1, self.password_1 = credentials[0]
        self.user_2, self.password_2 = credentials[1]

        ws_info = root_remote_client.fetch(TEST_WORKSPACE_PATH)
        self.workspace = ws_info[u'uid']
        self.workspace_title = ws_info[u'title']

        # Document client to be used to create remote test documents
        # and folders
        self.upload_tmp_dir = tempfile.mkdtemp(u'-nxdrive-uploads', dir=self.tmpdir)
        remote_document_client_1 = RemoteDocumentClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, base_folder=self.workspace,
            upload_tmp_dir=self.upload_tmp_dir)

        remote_document_client_2 = RemoteDocumentClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, base_folder=self.workspace,
            upload_tmp_dir=self.upload_tmp_dir)

        self.remote_restapi_client_1 = RestAPIClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1
        )
        self.remote_restapi_client_2 = RestAPIClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2
        )

        # File system client to be used to create remote test documents
        # and folders
        remote_file_system_client_1 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, upload_tmp_dir=self.upload_tmp_dir)

        remote_file_system_client_2 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, upload_tmp_dir=self.upload_tmp_dir)

        self.root_remote_client = root_remote_client
        self.remote_document_client_1 = remote_document_client_1
        self.remote_document_client_2 = remote_document_client_2
        self.remote_file_system_client_1 = remote_file_system_client_1
        self.remote_file_system_client_2 = remote_file_system_client_2

        self.local_client_1 = LocalClient(os.path.join(self.local_nxdrive_folder_1, self.workspace_title))
        self.local_client_2 = LocalClient(os.path.join(self.local_nxdrive_folder_2, self.workspace_title))
        ndrive_path = os.path.dirname(nxdrive.__file__)
        self.ndrive_exec = os.path.join(ndrive_path, '..', 'scripts', 'ndrive.py')
        cmdline_options = '--log-level-console=%s' % DEFAULT_CONSOLE_LOG_LEVEL
        cmdline_options += ' --log-level-file=TRACE'
        cmdline_options += ' --nxdrive-home="%s"'
        if os.environ.get('PYDEV_DEBUG') == 'True':
            cmdline_options += ' --debug-pydev'
        self.ndrive_1_options = cmdline_options % self.nxdrive_conf_folder_1
        self.ndrive_2_options = cmdline_options % self.nxdrive_conf_folder_2

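Both test folders above are created with tempfile.mkdtemp(u'drive-1', dir=self.tmpdir): the first positional argument is the suffix appended after the random component, and dir=None falls back to the platform default temp location. A minimal, self-contained sketch of the same pattern with explicit cleanup (names are illustrative, not taken from nuxeo-drive):

import shutil
import tempfile
import unittest


class TempDirTestCase(unittest.TestCase):

    def setUp(self):
        # The first positional argument of mkdtemp() is the *suffix*;
        # dir=None means the platform default temp location is used.
        self.local_test_folder = tempfile.mkdtemp('drive-1', dir=None)
        # Remove the directory even if the test fails.
        self.addCleanup(shutil.rmtree, self.local_test_folder, True)

    def test_suffix(self):
        self.assertTrue(self.local_test_folder.endswith('drive-1'))
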
Example 9

Project: nuxeo-drive
Source File: common_unit_test.py
View license
    def setUpApp(self, server_profile=None):
        # Check the Nuxeo server test environment
        self.nuxeo_url = os.environ.get('NXDRIVE_TEST_NUXEO_URL')
        self.admin_user = os.environ.get('NXDRIVE_TEST_USER')
        self.password = os.environ.get('NXDRIVE_TEST_PASSWORD')
        self.build_workspace = os.environ.get('WORKSPACE')
        self.result = None
        self.tearedDown = False

        # Take default parameter if none has been set
        if self.nuxeo_url is None:
            self.nuxeo_url = "http://localhost:8080/nuxeo"
        if self.admin_user is None:
            self.admin_user = "Administrator"
        if self.password is None:
            self.password = "Administrator"
        self.tmpdir = None
        if self.build_workspace is not None:
            self.tmpdir = os.path.join(self.build_workspace, "tmp")
            if not os.path.isdir(self.tmpdir):
                os.makedirs(self.tmpdir)
        self.upload_tmp_dir = tempfile.mkdtemp(u'-nxdrive-uploads', dir=self.tmpdir)

        if None in (self.nuxeo_url, self.admin_user, self.password):
            raise unittest.SkipTest(
                "No integration server configuration found in environment.")

        # Check the local filesystem test environment
        self.local_test_folder_1 = tempfile.mkdtemp(u'drive-1', dir=self.tmpdir)
        self.local_test_folder_2 = tempfile.mkdtemp(u'drive-2', dir=self.tmpdir)

        self.local_nxdrive_folder_1 = os.path.join(
            self.local_test_folder_1, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_1)
        self.local_nxdrive_folder_2 = os.path.join(
            self.local_test_folder_2, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_2)

        self.nxdrive_conf_folder_1 = os.path.join(
            self.local_test_folder_1, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_1)
        self.nxdrive_conf_folder_2 = os.path.join(
            self.local_test_folder_2, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_2)

        from mock import Mock
        options = Mock()
        options.debug = False
        options.delay = TEST_DEFAULT_DELAY
        options.force_locale = None
        options.proxy_server = None
        options.log_level_file = None
        options.update_site_url = None
        options.beta_update_site_url = None
        options.autolock_interval = 30
        options.nxdrive_home = self.nxdrive_conf_folder_1
        self.manager_1 = Manager(options)
        self.connected = False
        import nxdrive
        nxdrive_path = os.path.dirname(nxdrive.__file__)
        i18n_path = os.path.join(nxdrive_path, 'tests', 'resources', "i18n.js")
        Translator(self.manager_1, i18n_path)
        options.nxdrive_home = self.nxdrive_conf_folder_2
        Manager._singleton = None
        self.manager_2 = Manager(options)
        self.version = __version__
        url = self.nuxeo_url
        log.debug("Will use %s as url", url)
        if '#' in url:
            # Remove the engine type for the rest of the test
            self.nuxeo_url = url.split('#')[0]
        self.setUpServer(server_profile)

        self.engine_1 = self.manager_1.bind_server(self.local_nxdrive_folder_1, url, self.user_1,
                                                   self.password_1, start_engine=False)
        self.engine_2 = self.manager_2.bind_server(self.local_nxdrive_folder_2, url, self.user_2,
                                                   self.password_2, start_engine=False)
        self.engine_1.syncCompleted.connect(self.app.sync_completed)
        self.engine_1.get_remote_watcher().remoteScanFinished.connect(self.app.remote_scan_completed)
        self.engine_1.get_remote_watcher().changesFound.connect(self.app.remote_changes_found)
        self.engine_1.get_remote_watcher().noChangesFound.connect(self.app.no_remote_changes_found)
        self.engine_2.syncCompleted.connect(self.app.sync_completed)
        self.engine_2.get_remote_watcher().remoteScanFinished.connect(self.app.remote_scan_completed)
        self.engine_2.get_remote_watcher().changesFound.connect(self.app.remote_changes_found)
        self.engine_2.get_remote_watcher().noChangesFound.connect(self.app.no_remote_changes_found)
        self.queue_manager_1 = self.engine_1.get_queue_manager()
        self.queue_manager_2 = self.engine_2.get_queue_manager()

        self.sync_root_folder_1 = os.path.join(self.local_nxdrive_folder_1, self.workspace_title_1)
        self.sync_root_folder_2 = os.path.join(self.local_nxdrive_folder_2, self.workspace_title_2)

        self.local_root_client_1 = self.engine_1.get_local_client()
        self.local_root_client_2 = self.engine_2.get_local_client()

        self.local_client_1 = self.get_local_client(os.path.join(self.local_nxdrive_folder_1, self.workspace_title))
        self.local_client_2 = self.get_local_client(os.path.join(self.local_nxdrive_folder_2, self.workspace_title))

        # Document client to be used to create remote test documents
        # and folders
        remote_document_client_1 = RemoteDocumentClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, base_folder=self.workspace_1,
            upload_tmp_dir=self.upload_tmp_dir)

        remote_document_client_2 = RemoteDocumentClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, base_folder=self.workspace_2,
            upload_tmp_dir=self.upload_tmp_dir)
        # File system client to be used to create remote test documents
        # and folders
        remote_file_system_client_1 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, upload_tmp_dir=self.upload_tmp_dir)

        remote_file_system_client_2 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, upload_tmp_dir=self.upload_tmp_dir)

        self.remote_restapi_client_1 = RestAPIClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1
        )
        self.remote_restapi_client_2 = RestAPIClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2
        )
        self.remote_restapi_client_admin = RestAPIClient(
            self.nuxeo_url, self.admin_user, u'nxdrive-test-device-2',
            self.version,
            password=self.password
        )

        # Register root
        remote_document_client_1.register_as_root(self.workspace_1)
        remote_document_client_2.register_as_root(self.workspace_2)

        self.remote_document_client_1 = remote_document_client_1
        self.remote_document_client_2 = remote_document_client_2
        self.remote_file_system_client_1 = remote_file_system_client_1
        self.remote_file_system_client_2 = remote_file_system_client_2

        self._wait_sync = {self.engine_1.get_uid(): True, self.engine_2.get_uid(): True}
        self._wait_remote_scan = {self.engine_1.get_uid(): True, self.engine_2.get_uid(): True}
        self._remote_changes_count = {self.engine_1.get_uid(): 0, self.engine_2.get_uid(): 0}
        self._no_remote_changes = {self.engine_1.get_uid(): False, self.engine_2.get_uid(): False}

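setUpApp above sticks to mkdtemp() plus manual bookkeeping, which is what Python 2 offers. For reference, Python 3 adds tempfile.TemporaryDirectory, a context-managed wrapper around mkdtemp() that removes the tree automatically; a minimal sketch (the suffix is illustrative):

import tempfile

# Python 3 only: the directory and its contents are deleted on exit
# from the with-block, even if an exception is raised inside it.
with tempfile.TemporaryDirectory(suffix='-nxdrive-uploads') as upload_tmp_dir:
    print('uploads staged under', upload_tmp_dir)
# The directory no longer exists here.
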
Example 12

Project: ovirt-node
Source File: install.py
View license
    def ovirt_boot_setup(self, reboot="N"):
        self.generate_paths()
        logger.info("Installing the image.")
        # copy grub.efi to safe location
        if _functions.is_efi_boot():
            if "OVIRT_ISCSI_INSTALL" in OVIRT_VARS:
                _functions.system("umount /boot")
            if os.path.isfile("/boot/efi/%s/grubx64.efi" % self.efi_path):
                shutil.copy("/boot/efi/%s/grubx64.efi" % self.efi_path, "/tmp")
            else:
                shutil.copy("/boot/efi/%s/grub.efi" % self.efi_path, "/tmp")
            _functions.mount_boot()
        if "OVIRT_ROOT_INSTALL" in OVIRT_VARS:
            if OVIRT_VARS["OVIRT_ROOT_INSTALL"] == "n":
                logger.info("Root Installation Not Required, Finished.")
                return True
        self.oldtitle = None
        grub_config_file = None
        if _functions.findfs("Boot") and _functions.is_upgrade():
            grub_config_file = "/boot/grub/grub.conf"
            if not _functions.connect_iscsi_root():
                return False
        _functions.mount_liveos()
        if os.path.ismount("/liveos"):
            if os.path.exists("/liveos/vmlinuz0") \
                              and os.path.exists("/liveos/initrd0.img"):
                grub_config_file = self.grub_config_file
        elif not _functions.is_firstboot():
            # find existing iscsi install
            if _functions.findfs("Boot"):
                grub_config_file = "/boot/grub/grub.conf"
            elif os.path.ismount("/dev/.initramfs/live"):
                if not _functions.grub2_available():
                    grub_config_file = "/dev/.initramfs/live/grub/grub.conf"
                else:
                    grub_config_file = "/dev/.initramfs/live/grub2/grub.cfg"
            elif os.path.ismount("/run/initramfs/live"):
                grub_config_file = "/run/initramfs/live/grub/grub.conf"
            if _functions.is_upgrade() and not _functions.is_iscsi_install():
                _functions.mount_liveos()
                grub_config_file = "/liveos/grub/grub.conf"
        if _functions.is_iscsi_install() or _functions.findfs("Boot") \
            and not _functions.is_efi_boot():
            grub_config_file = "/boot/grub/grub.conf"
        if _functions.is_efi_boot():
            logger.debug(str(os.listdir("/liveos")))
            _functions.system("umount /liveos")
            _functions.mount_efi(target="/liveos")
            if self.efi_name == "fedora":
                grub_config_file = "/liveos/EFI/fedora/grub.cfg"
            else:
                grub_config_file = "/liveos/%s/grub.conf" % self.efi_path
        grub_config_file_exists = grub_config_file is not None \
            and os.path.exists(grub_config_file)
        logger.debug("Grub config file is: %s" % grub_config_file)
        logger.debug("Grub config file exists: %s" % grub_config_file_exists)
        if grub_config_file is not None and os.path.exists(grub_config_file):
            f = open(grub_config_file)
            oldgrub = f.read()
            f.close()
            if _functions.grub2_available():
                m = re.search("^menuentry (.*)$", oldgrub, re.MULTILINE)
            else:
                m = re.search("^title (.*)$", oldgrub, re.MULTILINE)
            if m is not None:
                self.oldtitle = m.group(1)
                # strip off extra title characters
                if _functions.grub2_available():
                    self.oldtitle = self.oldtitle.replace('"', '').strip(" {")
        _functions.system("umount /liveos/efi")
        _functions.system("umount /liveos")
        if _functions.is_iscsi_install() or _functions.findfs("Boot"):
            self.boot_candidate = None
            boot_candidate_names = ["BootBackup", "BootUpdate", "BootNew"]
            for trial in range(1, 3):
                time.sleep(1)
                for candidate_name in boot_candidate_names:
                    logger.debug(os.listdir("/dev/disk/by-label"))
                    if _functions.findfs(candidate_name):
                        self.boot_candidate = candidate_name
                        break
                logger.debug("Trial %s to find candidate (%s)" % \
                             (trial, candidate_name))
                if self.boot_candidate:
                    logger.debug("Found candidate: %s" % self.boot_candidate)
                    break

            if not self.boot_candidate:
                logger.error("Unable to find boot partition")
                label_debug = ''
                for label in os.listdir("/dev/disk/by-label"):
                    label_debug += "%s\n" % label
                label_debug += _functions.subprocess_closefds("blkid", \
                                          shell=True, stdout=subprocess.PIPE,
                                          stderr=subprocess.STDOUT).stdout.read()
                logger.debug(label_debug)
                return False
            else:
                boot_candidate_dev = _functions.findfs(self.boot_candidate)
            # prepare Root partition update
            if self.boot_candidate != "BootNew":
                e2label_cmd = "e2label \"%s\" BootNew" % boot_candidate_dev
                logger.debug(e2label_cmd)
                if not _functions.system(e2label_cmd):
                    logger.error("Failed to label new Boot partition")
                    return False
            _functions.system("umount /boot")
            _functions.system("mount %s /boot &>/dev/null" \
                              % boot_candidate_dev)

        candidate = None
        candidate_dev = None
        candidate_names = ["RootBackup", "RootUpdate", "RootNew"]
        for trial in range(1, 3):
            time.sleep(1)
            for candidate_name in candidate_names:
                candidate_dev = _functions.findfs(candidate_name)
                logger.debug("Finding %s: '%s'" % (candidate_name, candidate_dev))
                if candidate_dev:
                    candidate = candidate_name
                    logger.debug("Found: %s" % candidate)
                    break
            logger.debug("Trial %s to find candidate (%s)" % (trial,
                                                              candidate_name))
            if candidate:
                logger.debug("Found candidate: '%s'" % candidate)
                break

        if not candidate:
            logger.error("Unable to find root partition")
            label_debug = ''
            for label in os.listdir("/dev/disk/by-label"):
                label_debug += "%s\n" % label
            label_debug += _functions.subprocess_closefds("blkid", shell=True,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT).stdout.read()
            logger.debug(label_debug)
            return False

        try:
            self.disk = candidate_dev
            logger.info("Candidate device: %s" % candidate_dev)
            logger.info("Candidate disk: %s" % self.disk)
            # grub2 starts at part 1
            self.partN = int(self.disk[-1:])
            if not _functions.grub2_available():
                self.partN = self.partN - 1
        except:
            logger.debug("Failed to get partition", exc_info=True)
            return False

        if self.disk is None or self.partN < 0:
            logger.error("Failed to determine Root partition number")
            return False
        # prepare Root partition update
        if candidate != "RootNew":
            e2label_cmd = "e2label \"%s\" RootNew" % candidate_dev
            logger.debug(e2label_cmd)
            if not _functions.system(e2label_cmd):
                logger.error("Failed to label new Root partition")
                return False
        mount_cmd = "mount \"%s\" /liveos" % candidate_dev
        if not _functions.system(mount_cmd):
            logger.error("Failed to mount %s on /liveos" % candidate_dev)
            _functions.system("lsof")
            _functions.system("dmsetup info -c")
            _functions.system("cat /proc/mounts")
            _functions.system("multipath -ll")
            _functions.system("lsblk")
            _functions.system("ls -l /dev/mapper")
        _functions.system("rm -rf /liveos/LiveOS")
        _functions.system("mkdir -p /liveos/LiveOS")
        _functions.mount_live()

        if os.path.isdir(self.grub_dir):
            shutil.rmtree(self.grub_dir)
        if not os.path.exists(self.grub_dir):
            os.makedirs(self.grub_dir)
            if _functions.is_efi_boot():
                logger.info("efi detected, installing efi configuration")
                _functions.system("mkdir /liveos/efi")
                _functions.mount_efi()
                _functions.system("mkdir -p /liveos/efi/%s" % self.efi_path)
                if _functions.is_iscsi_install() or _functions.is_efi_boot():
                    if os.path.isfile("/tmp/grubx64.efi"):
                        shutil.copy("/tmp/grubx64.efi",
                                    "/liveos/efi/%s/grubx64.efi" %
                                    self.efi_path)
                    else:
                        shutil.copy("/tmp/grub.efi",
                                    "/liveos/efi/%s/grub.efi" % self.efi_path)
                elif os.path.isfile("/boot/efi/%s/grubx64.efi" %
                        self.efi_path):
                    shutil.copy("/boot/efi/%s/grubx64.efi" % self.efi_path,
                          "/liveos/efi/%s/grubx64.efi" % self.efi_path)
                else:
                    shutil.copy("/boot/efi/%s/grub.efi" % self.efi_path,
                          "/liveos/efi/%s/grub.efi" % self.efi_path)
                if _functions.is_iscsi_install() or _functions.findfs("BootNew"):
                    self.disk = _functions.findfs("BootNew")
                if not "/dev/mapper/" in self.disk:
                    efi_disk = self.disk[:-1]
                else:
                    efi_disk = re.sub(r'p?[1,2,3]$', "", self.disk)
                # generate grub legacy config for efi partition
                # remove existing efi entries
                _functions.remove_efi_entry(_functions.PRODUCT_SHORT)
                if self.efi_name == "fedora":
                    _functions.add_efi_entry(_functions.PRODUCT_SHORT,
                                             ("\\EFI\\%s\\grubx64.efi" %
                                              self.efi_name),
                                             efi_disk)
                else:
                    if os.path.isfile("/liveos/efi/%s/grubx64.efi" %
                            self.efi_path):
                        _functions.add_efi_entry(_functions.PRODUCT_SHORT,
                                                 ("\\EFI\\%s\\grubx64.efi" %
                                                  self.efi_name),
                                                 efi_disk)
                    else:
                        _functions.add_efi_entry(_functions.PRODUCT_SHORT,
                                                 ("\\EFI\\%s\\grub.efi" %
                                                  self.efi_name),
                                                 efi_disk)
        self.kernel_image_copy()

        # reorder tty0 to allow both serial and phys console after installation
        if _functions.is_iscsi_install() or _functions.findfs("BootNew"):
            self.root_param = "root=live:LABEL=Root"
            if "OVIRT_NETWORK_LAYOUT" in OVIRT_VARS and \
                OVIRT_VARS["OVIRT_NETWORK_LAYOUT"] == "bridged":
                network_conf = "ip=br%s:dhcp bridge=br%s:%s" % \
                                (OVIRT_VARS["OVIRT_BOOTIF"],
                                 OVIRT_VARS["OVIRT_BOOTIF"],
                                 OVIRT_VARS["OVIRT_BOOTIF"])
            else:
                network_conf = "ip=%s:dhcp" % OVIRT_VARS["OVIRT_BOOTIF"]
            self.bootparams = "netroot=iscsi:%s::%s::%s %s " % (
                OVIRT_VARS["OVIRT_ISCSI_TARGET_HOST"],
                OVIRT_VARS["OVIRT_ISCSI_TARGET_PORT"],
                OVIRT_VARS["OVIRT_ISCSI_TARGET_NAME"],
                network_conf)
            if "OVIRT_ISCSI_NAME" in OVIRT_VARS:
                self.bootparams+= "iscsi_initiator=%s " % \
                    OVIRT_VARS["OVIRT_ISCSI_NAME"]
        else:
            self.root_param = "root=live:LABEL=Root"
            self.bootparams = "ro rootfstype=auto rootflags=ro "
        self.bootparams += OVIRT_VARS["OVIRT_BOOTPARAMS"].replace(
                                                            "console=tty0", ""
                                                            ).replace(
                                                            "rd_NO_MULTIPATH",
                                                            "")

        if " " in self.disk:
            # workaround for grub setup failing with spaces in dev.name:
            # use first active sd* device
            self.disk = re.sub("p[1,2,3]$", "", self.disk)
            grub_disk_cmd = ("multipath -l " +
                             "\"" + self.disk + "\" " +
                             "| egrep -o '[0-9]+:.*' " +
                             "| awk '/ active / {print $2}' " +
                             "| head -n1")
            logger.debug(grub_disk_cmd)
            grub_disk = _functions.subprocess_closefds(grub_disk_cmd,
                                            shell=True,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.STDOUT)
            grub_disk_output, grub_disk_err = grub_disk.communicate()
            self.disk = grub_disk_output.strip()
            if "cciss" in self.disk:
                self.disk = self.disk.replace("!", "/")
            # flush to sync DM and blockdev, workaround from rhbz#623846#c14
            sysfs = open("/proc/sys/vm/drop_caches", "w")
            sysfs.write("3")
            sysfs.close()
        if not self.disk.startswith("/dev/"):
            self.disk = "/dev/" + self.disk
        try:
            if stat.S_ISBLK(os.stat(self.disk).st_mode):
                try:
                    if stat.S_ISBLK(os.stat(self.disk[:-1]).st_mode):
                        # e.g. /dev/sda2
                        self.disk = self.disk[:-1]
                except OSError:
                    pass
                try:
                    if stat.S_ISBLK(os.stat(self.disk[:-2]).st_mode):
                        # e.g. /dev/mapper/WWIDp2
                        self.disk = self.disk[:-2]
                except OSError:
                    pass
        except OSError:
            logger.error("Unable to determine disk for grub installation " +
                         traceback.format_exc())
            return False

        self.grub_dict = {
            "product": _functions.PRODUCT_SHORT,
            "version": _functions.PRODUCT_VERSION,
            "release": _functions.PRODUCT_RELEASE,
            "partN": self.partN,
            "root_param": self.root_param,
            "bootparams": self.bootparams,
            "disk": self.disk,
            "grub_dir": self.grub_dir,
            "grub_prefix": self.grub_prefix,
            "efi_hd": self.efi_hd,
            "linux": "linux",
            "initrd": "initrd",
        }
        if not _functions.is_firstboot():
            if os.path.ismount("/live"):
                with open("%s/version" % self.live_path) as version:
                    for line in version.readlines():
                        if "VERSION" in line:
                            key, value = line.split("=")
                            self.grub_dict["version"] = value.strip()
                        if "RELEASE" in line:
                            key, value = line.split("=")
                            self.grub_dict["release"] = value.strip()

        if _functions.grub2_available():
            if not self.grub2_install():
                logger.error("Grub2 Installation Failed ")
                return False
            else:
                 logger.info("Grub2 EFI Installation Completed ")
        else:
            if not self.grub_install():
                logger.error("Grub Installation Failed ")
                return False
            else:
                logger.info("Grub Installation Completed")

        if _functions.is_iscsi_install() or _functions.findfs("BootNew"):
            # copy default for when Root/HostVG is inaccessible(iscsi upgrade)
            shutil.copy(_functions.OVIRT_DEFAULTS, "/boot")
            # mark new Boot ready to go, reboot() in ovirt-function switches it
            # to active
            e2label_cmd = "e2label \"%s\" BootUpdate" % boot_candidate_dev

            if not _functions.system(e2label_cmd):
                logger.error("Unable to relabel " + boot_candidate_dev +
                             " to RootUpdate ")
                return False
        else:
            _functions.system("umount /liveos/efi")
        _functions.system("umount /liveos")
        # mark new Root ready to go, reboot() in ovirt-function switches it
        # to active
        e2label_cmd = "e2label \"%s\" RootUpdate" % candidate_dev
        if not _functions.system(e2label_cmd):
            logger.error("Unable to relabel " + candidate_dev +
                         " to RootUpdate ")
            return False
        _functions.system("udevadm settle --timeout=10")

        #
        # Rebuild the initramfs
        # A few hacks are needed to prep the chroot
        # The general issue is that we need to run dracut in the context of the new iso
        # and that we need to put the initrd in the right place of the new iso.
        # These two things make the logic a bit more complicated.
        #
        mnts = []
        try:
            if not _functions.system("blkid -L RootUpdate"):
                raise RuntimeError("RootUpdate not found")

            # Let's mount the update fs, and use that kernel version and modules
            # We need this work to help dracut
            isomnt = tempfile.mkdtemp("RootUpdate")
            squashmnt = tempfile.mkdtemp("RootUpdate-LiveOS")
            updfs = tempfile.mkdtemp("RootUpdate-LiveOS-Img")
            mnts += [isomnt, squashmnt, updfs]

            # Unpack the iso
            def _call(args):
                logger.debug("Calling: %s" % args)
                try:
                    out = subprocess.check_output(args)
                    logger.debug("Out: %s" % out)
                except Exception as e:
                    logger.debug("Failed with: %s %s" % (e, e.output))
                    raise

            _call(["mount", "LABEL=RootUpdate", isomnt])
            _call(["mount", "%s/LiveOS/squashfs.img" % isomnt, squashmnt])
            _call(["mount", "%s/LiveOS/ext3fs.img" % squashmnt, updfs])

            # Now mount the update modules into place, and find the
            # correct kver
            def rbind(path, updfs=updfs):
                dst = updfs + "/" + path
                logger.debug("Binding %r to %r" % (path, dst))
                _call(["mount", "--make-rshared", "--rbind", "/" + path, dst])
                return dst

            for path in ["etc", "dev", "proc", "sys", "tmp", "run", "var/tmp"]:
                mnts += [rbind(path)]

            upd_kver = str(_functions.passthrough("ls -1 %s/lib/modules" % updfs)).strip()

            if len(upd_kver.splitlines()) != 1:
                # It would be very unusual to see more than one kver directory
                # in /lib/modules, but it might happen when using edit-node.
                # Use check_higher_kernel() to pick the highest version available.
                upd_kver = self.check_higher_kernel(updfs)
                if upd_kver is None:
                    raise RuntimeError("Unable to find the kernel version")

            # Update initramfs to pickup multipath wwids
            # Let /boot point to the filesystem on the update candidate partition
            builder = _system.Initramfs(dracut_chroot=updfs, boot_source=isomnt)
            builder.rebuild(kver=upd_kver)

        except Exception as e:
            logger.debug("Failed to build initramfs: %s" % e, exc_info=True)
            output = getattr(e, "output", "")
            if output:
                logger.debug("Output: %s" % output)
            raise


        finally:
            # Clean up all eventual mounts
            pass
            # Disabled for now because awkward things happen; we leave it to
            # systemd to unmount on reboot
            # for mnt in reversed(mnts):
            #     d = _functions.passthrough("umount -fl %s" % mnt, logger.debug)
            #     logger.debug("Returned: %s" % d)

        _functions.disable_firstboot()
        if _functions.finish_install():
            if _functions.is_firstboot():
                _iscsi.iscsi_auto()
            logger.info("Installation of %s Completed" % \
                                                      _functions.PRODUCT_SHORT)
            if reboot is not None and reboot == "Y":
                _system.async_reboot()
            return True
        else:
            return False

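Note that tempfile.mkdtemp("RootUpdate") above passes "RootUpdate" as the suffix, not the prefix, so the mount points end up with names like /tmp/tmpXXXXXXRootUpdate. A minimal sketch of the difference (the resulting paths shown in the comments are illustrative):

import tempfile

# mkdtemp(suffix='', prefix='tmp', dir=None): the first positional
# argument is the suffix, appended after the random component.
isomnt = tempfile.mkdtemp("RootUpdate")             # e.g. /tmp/tmpab12cdRootUpdate
squashmnt = tempfile.mkdtemp(prefix="RootUpdate-")  # e.g. /tmp/RootUpdate-ab12cd
print(isomnt)
print(squashmnt)
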
Example 13

Project: vumi
Source File: tikz.py
View license
def render_tikz(self, rel_fname, tikz, libs):
    local_fname = path.join(*rel_fname.split('/'))
    relfn = posixpath.join(self.builder.imgpath, rel_fname)
    srcfn = path.join(self.builder.srcdir, local_fname)
    outfn = path.join(self.builder.outdir, '_images', local_fname)
    ensuredir(path.dirname(outfn))

    # check for cached image
    hashkey = tikz.encode('utf-8')
    hashvalue = sha(hashkey).hexdigest()
    hashfn = path.join(self.builder.srcdir, local_fname + ".hash")
    if path.isfile(hashfn):
        oldhashvalue = open(hashfn).read()
        if oldhashvalue == hashvalue:
            shutil.copy(srcfn, outfn)
            return relfn

    if hasattr(self.builder, '_tikz_warned'):
        return None

    curdir = getcwd()

    latex = DOC_HEAD % libs
    latex += self.builder.config.tikz_latex_preamble
    tikzz = tikz % {'wd': curdir}
    latex += DOC_BODY % tikzz
    if isinstance(latex, unicode):
        latex = latex.encode('utf-8')

    if not hasattr(self.builder, '_tikz_tempdir'):
        tempdir = self.builder._tikz_tempdir = tempfile.mkdtemp()
    else:
        tempdir = self.builder._tikz_tempdir

    chdir(tempdir)

    tf = open('tikz.tex', 'w')
    tf.write(latex)
    tf.close()

    try:
        try:
            p = Popen(['pdflatex', '--interaction=nonstopmode', 'tikz.tex'],
                      stdout=PIPE, stderr=PIPE)
        except OSError, err:
            if err.errno != ENOENT:   # No such file or directory
                raise
            self.builder.warn('LaTeX command cannot be run')
            self.builder._tikz_warned = True
            return None
    finally:
        chdir(curdir)

    stdout, stderr = p.communicate()
    if p.returncode != 0:
        raise TikzExtError('latex exited with error:\n[stderr]\n%s\n'
                           '[stdout]\n%s' % (stderr, stdout))

    chdir(tempdir)

    try:
        p = Popen(['pdftoppm', '-r', '120', 'tikz.pdf', 'tikz'],
                  stdout=PIPE, stderr=PIPE)
    except OSError, err:
        if err.errno != ENOENT:   # No such file or directory
            raise
        self.builder.warn('pdftoppm command cannot be run')
        self.builder.warn(err)
        self.builder._tikz_warned = True
        chdir(curdir)
        return None
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        self.builder._tikz_warned = True
        raise TikzExtError('pdftoppm exited with error:\n[stderr]\n%s\n'
                           '[stdout]\n%s' % (stderr, stdout))

    try:
        p1 = Popen(['pnmcrop', 'tikz-1.ppm'], stdout=PIPE, stderr=PIPE)
    except OSError, err:
        if err.errno != ENOENT:   # No such file or directory
            raise
        self.builder.warn('pnmcrop command cannot be run:')
        self.builder.warn(err)
        self.builder._tikz_warned = True
        chdir(curdir)
        return None

    if self.builder.config.tikz_transparent:
        pnm_args = ['pnmtopng', '-transparent', 'white']
    else:
        pnm_args = ['pnmtopng']

    try:
        p2 = Popen(pnm_args, stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
    except OSError, err:
        if err.errno != ENOENT:   # No such file or directory
            raise
        self.builder.warn('pnmtopng command cannot be run:')
        self.builder.warn(err)
        self.builder._tikz_warned = True
        chdir(curdir)
        return None

    pngdata, stderr2 = p2.communicate()
    dummy, stderr1 = p1.communicate()
    if p1.returncode != 0:
        self.builder._tikz_warned = True
        raise TikzExtError('pnmcrop exited with error:\n[stderr]\n%s'
                           % (stderr1))
    if p2.returncode != 0:
        self.builder._tikz_warned = True
        raise TikzExtError('pnmtopng exited with error:\n[stderr]\n%s'
                           % (stderr2))
    f = open(outfn, 'wb')
    f.write(pngdata)
    f.close()
    chdir(curdir)

    f = open(srcfn, 'wb')
    f.write(pngdata)
    f.close()

    f = open(hashfn, 'wb')
    f.write(hashvalue)
    f.close()

    return relfn

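render_tikz caches a single scratch directory on the builder (self.builder._tikz_tempdir) but never removes it, and it relies on chdir() into that directory before invoking the external tools. A rough sketch of the same idea with process-exit cleanup and without changing the working directory (the helper below is hypothetical, not part of the extension):

import atexit
import shutil
import subprocess
import tempfile

_tikz_tempdir = None

def get_tikz_tempdir():
    # Create the scratch directory lazily, once per process, and make
    # sure it is removed when the interpreter exits.
    global _tikz_tempdir
    if _tikz_tempdir is None:
        _tikz_tempdir = tempfile.mkdtemp(prefix='tikz-')
        atexit.register(shutil.rmtree, _tikz_tempdir, True)
    return _tikz_tempdir

# Passing cwd= to the subprocess avoids the chdir()/chdir-back dance:
# subprocess.call(['pdflatex', '--interaction=nonstopmode', 'tikz.tex'],
#                 cwd=get_tikz_tempdir())
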
Example 14

Project: ochopod
Source File: marathon.py
View license
    def boot(self, lifecycle, model=Reactive, tools=None, local=False):

        #
        # - quick check to make sure we get the right implementations
        #
        assert issubclass(model, Model), 'model must derive from ochopod.api.Model'
        assert issubclass(lifecycle, LifeCycle), 'lifecycle must derive from ochopod.api.LifeCycle'

        #
        # - instantiate our flask endpoint
        # - default to a json handler for all HTTP errors (including an unexpected 500)
        #
        def _handler(error):
            http = error.code if isinstance(error, HTTPException) else 500
            return '{}', http, {'Content-Type': 'application/json; charset=utf-8'}

        web = Flask(__name__)
        for code in default_exceptions.iterkeys():
            web.error_handler_spec[None][code] = _handler

        #
        # - default presets in case we run outside of marathon (local vm testing)
        # - any environment variable prefixed with "ochopod." is of interest to us (e.g. this is what the user puts
        #   in the marathon application configuration)
        # - the other settings come from marathon (namely the port bindings & application/task identifiers)
        # - the MESOS_TASK_ID is important to keep around to enable task deletion via the marathon REST API
        #
        env = \
            {
                'ochopod_application':  '',
                'ochopod_cluster':      'default',
                'ochopod_debug':        'true',
                'ochopod_local':        'false',
                'ochopod_namespace':    'marathon',
                'ochopod_port':         '8080',
                'ochopod_start':        'true',
                'ochopod_task':         '',
                'ochopod_zk':           '',
                'PORT_8080':            '8080'
            }

        env.update(os.environ)
        ochopod.enable_cli_log(debug=env['ochopod_debug'] == 'true')
        try:

            #
            # - grab our environment variables (which are set by the marathon executor)
            # - extract the mesos PORT_* bindings and construct a small remapping dict
            #
            ports = {}
            logger.debug('environment ->\n%s' % '\n'.join(['\t%s -> %s' % (k, v) for k, v in env.items()]))
            for key, val in env.items():
                if key.startswith('PORT_'):
                    ports[key[5:]] = int(val)

            #
            # - keep any "ochopod_" environment variable & trim its prefix
            # - default all our settings, especially the mandatory ones
            # - the ip and zookeeper are defaulted to localhost to enable easy testing
            #
            hints = {k[8:]: v for k, v in env.items() if k.startswith('ochopod_')}
            if local or hints['local'] == 'true':

                #
                # - we are running in local mode (e.g. on a dev workstation)
                # - default everything to localhost
                #
                logger.info('running in local mode (make sure you run a standalone zookeeper)')
                hints.update(
                    {
                        'fwk':          'marathon (debug)',
                        'ip':           '127.0.0.1',
                        'node':         'local',
                        'ports':        ports,
                        'public':       '127.0.0.1',
                        'zk':           '127.0.0.1:2181'
                    })
            else:

                #
                # - extend our hints
                # - add the application + task
                #
                hints.update(
                    {
                        'application':  env['MARATHON_APP_ID'][1:],
                        'fwk':          'marathon',
                        'ip':           '',
                        'node':         '',
                        'ports':        ports,
                        'public':       '',
                        'task':         env['MESOS_TASK_ID'],
                        'zk':           ''
                    })

                #
                # - use whatever subclass is implementing us to infer 'ip', 'node' and 'public'
                #
                hints.update(self.get_node_details())

                #
                # - look up the zookeeper connection string from an environment variable or on disk
                # - we have to look into different places depending on how mesos was installed
                #
                def _1():

                    #
                    # - most recent DCOS release
                    # - $MESOS_MASTER is located in /opt/mesosphere/etc/mesos-slave-common
                    # - the snippet in there is prefixed by MESOS_MASTER=zk://<ip:port>/mesos
                    #
                    logger.debug('checking /opt/mesosphere/etc/mesos-slave-common...')
                    _, lines = shell("grep MESOS_MASTER /opt/mesosphere/etc/mesos-slave-common")
                    return lines[0][13:]

                def _2():

                    #
                    # - same as above except for slightly older DCOS releases
                    # - $MESOS_MASTER is located in /opt/mesosphere/etc/mesos-slave
                    #
                    logger.debug('checking /opt/mesosphere/etc/mesos-slave...')
                    _, lines = shell("grep MESOS_MASTER /opt/mesosphere/etc/mesos-slave")
                    return lines[0][13:]

                def _3():

                    #
                    # - a regular package install will write the slave settings under /etc/mesos/zk (the snippet in
                    #   there looks like zk://10.0.0.56:2181/mesos)
                    #
                    logger.debug('checking /etc/mesos/zk...')
                    _, lines = shell("cat /etc/mesos/zk")
                    return lines[0]

                def _4():

                    #
                    # - look for ZK from environment variables
                    # - user can pass down ZK using $ochopod_zk
                    # - this last-resort situation is used mostly for debugging
                    #
                    logger.debug('checking $ochopod_zk environment variable...')
                    return env['ochopod_zk']

                #
                # - depending on how the slave has been installed we might have to look in various places
                #   to find out what our zookeeper connection string is
                # - use urlparse to keep the host:port part of the URL (possibly including a login+password)
                #
                for method in [_1, _2, _3, _4]:
                    try:
                        hints['zk'] = urlparse(method()).netloc
                        break

                    except Exception:
                        pass

            #
            # - the cluster must be fully qualified with a namespace (which is defaulted anyway)
            #
            assert hints['zk'], 'unable to determine where zookeeper is located (unsupported/bogus mesos setup ?)'
            assert hints['cluster'] and hints['namespace'], 'no cluster and/or namespace defined (user error ?)'

            #
            # - load the tools
            #
            if tools:
                tools = {tool.tag: tool for tool in [clz() for clz in tools if issubclass(clz, Tool)] if tool.tag}
                logger.info('supporting tools %s' % ', '.join(tools.keys()))

            #
            # - start the life-cycle actor which will pass our hints (as a json object) to its underlying sub-process
            # - start our coordinator which will connect to zookeeper and attempt to lead the cluster
            # - upon grabbing the lock the model actor will start and implement the configuration process
            # - the hints are a convenient bag for any data that may change at runtime and needs to be returned (via
            #   the HTTP POST /info request)
            # - what's being registered in zookeeper is immutable though and decorated with additional details by
            #   the coordinator (especially the pod index which is derived from zookeeper)
            #
            latch = ThreadingFuture()
            logger.info('starting %s.%s (marathon) @ %s' % (hints['namespace'], hints['cluster'], hints['node']))
            breadcrumbs = deepcopy(hints)
            hints['metrics'] = {}
            hints['dependencies'] = model.depends_on
            env.update({'ochopod': json.dumps(hints)})
            executor = lifecycle.start(env, latch, hints)
            coordinator = Coordinator.start(
                hints['zk'].split(','),
                hints['namespace'],
                hints['cluster'],
                int(hints['port']),
                breadcrumbs,
                model,
                hints)

            #
            # - external hook forcing a coordinator reset
            # - this will force a re-connection to zookeeper and pod registration
            # - please note this will not impact the pod lifecycle (e.g the underlying sub-process will be
            #   left running)
            #
            @web.route('/reset', methods=['POST'])
            def _reset():

                logger.debug('http in -> /reset')
                coordinator.tell({'request': 'reset'})
                return '{}', 200, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - external hook exposing information about our pod
            # - this is a subset of what's registered in zookeeper at boot-time
            # - the data is dynamic and updated from time to time by the model and executor actors
            # - from @pferro -> the pod's dependencies defined in the model are now added as well
            #
            @web.route('/info', methods=['POST'])
            def _info():

                logger.debug('http in -> /info')
                keys = \
                    [
                        'application',
                        'dependencies',
                        'ip',
                        'metrics',
                        'node',
                        'port',
                        'ports',
                        'process',
                        'public',
                        'state',
                        'status',
                        'task'
                    ]

                subset = dict(filter(lambda i: i[0] in keys, hints.iteritems()))
                return json.dumps(subset), 200, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - external hook exposing our circular log
            # - reverse and dump ochopod.log as a json array
            #
            @web.route('/log', methods=['POST'])
            def _log():

                logger.debug('http in -> /log')
                with open(ochopod.LOG, 'r+') as log:
                    lines = [line for line in log]
                    return json.dumps(lines), 200, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - RPC call to run a custom tool within the pod
            #
            @web.route('/exec', methods=['POST'])
            def _exec():

                logger.debug('http in -> /exec')

                #
                # - make sure the command (first token in the X-Shell header) maps to a tool
                # - if no match abort on a 404
                #
                line = request.headers['X-Shell']
                tokens = line.split(' ')
                cmd = tokens[0]
                if not tools or cmd not in tools:
                    return '{}', 404, {'Content-Type': 'application/json; charset=utf-8'}

                code = 1
                tool = tools[cmd]

                #
                # - make sure the parser does not sys.exit()
                #
                class _Parser(ArgumentParser):
                    def exit(self, status=0, message=None):
                        raise ValueError(message)

                #
                # - prep a temporary directory
                # - invoke define_cmdline_parsing()
                # - switch off parsing if NotImplementedError is raised
                #
                use_parser = 1
                parser = _Parser(prog=tool.tag)
                try:
                    tool.define_cmdline_parsing(parser)

                except NotImplementedError:
                    use_parser = 0

                tmp = tempfile.mkdtemp()
                try:

                    #
                    # - parse the command line
                    # - upload any attachment
                    #
                    args = parser.parse_args(tokens[1:]) if use_parser else ' '.join(tokens[1:])
                    for tag, upload in request.files.items():
                        where = path.join(tmp, tag)
                        logger.debug('uploading %s @ %s' % (tag, tmp))
                        upload.save(where)

                    #
                    # - run the tool method
                    # - pass the temporary directory as well
                    #
                    logger.info('invoking "%s"' % line)
                    code, lines = tool.body(args, tmp)

                except ValueError as failure:

                    lines = [parser.format_help() if failure.message is None else failure.message]

                except Exception as failure:

                    lines = ['unexpected failure -> %s' % failure]

                finally:

                    #
                    # - make sure to cleanup our temporary directory
                    #
                    shutil.rmtree(tmp)

                out = \
                    {
                        'code': code,
                        'stdout': lines
                    }

                return json.dumps(out), 200, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - web-hook used to receive requests from the leader or the CLI tools
            # - those requests are passed down to the executor actor
            # - any non HTTP 200 response is a failure
            # - failure to acknowledge within the specified timeout will result in a HTTP 408 (REQUEST TIMEOUT)
            # - attempting to send a control request to a dead pod will result in a HTTP 410 (GONE)
            #
            @web.route('/control/<task>', methods=['POST'])
            @web.route('/control/<task>/<timeout>', methods=['POST'])
            def _control(task, timeout='60'):

                logger.debug('http in -> /control/%s' % task)
                if task not in ['check', 'on', 'off', 'ok', 'kill', 'signal']:

                    #
                    # - fail on a HTTP 400 if the request is not supported
                    #
                    return '{}', 400, {'Content-Type': 'application/json; charset=utf-8'}

                try:

                    ts = time.time()
                    latch = ThreadingFuture()
                    executor.tell({'request': task, 'latch': latch, 'data': request.data})
                    js, code = latch.get(timeout=int(timeout))
                    ms = 1000 * (time.time() - ts)
                    logger.debug('http out -> HTTP %s (%d ms)' % (code, ms))
                    return json.dumps(js), code, {'Content-Type': 'application/json; charset=utf-8'}

                except Timeout:

                    #
                    # - we failed to match the specified timeout
                    # - gracefully fail on a HTTP 408
                    #
                    return '{}', 408, {'Content-Type': 'application/json; charset=utf-8'}

                except ActorDeadError:

                    #
                    # - the executor has been shutdown (probably after a /control/kill)
                    # - gracefully fail on a HTTP 410
                    #
                    return '{}', 410, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - internal hook required to shutdown the web-server
            # - it's not possible to do it outside of a request handler
            # - make sure this call only comes from localhost (todo)
            #
            @web.route('/terminate', methods=['POST'])
            def _terminate():

                request.environ.get('werkzeug.server.shutdown')()
                return '{}', 200, {'Content-Type': 'application/json; charset=utf-8'}

            #
            # - run werkzeug from a separate thread to avoid blocking the main one
            # - we'll have to shut it down using a dedicated HTTP POST
            #
            class _Runner(threading.Thread):

                def run(self):
                    web.run(host='0.0.0.0', port=int(hints['port']), threaded=True)

            try:

                #
                # - block on the lifecycle actor until it goes down (usually after a /control/kill request)
                #
                _Runner().start()
                spin_lock(latch)
                logger.debug('pod is dead, idling')
                while 1:

                    #
                    # - simply idle forever (since the framework would restart any container that terminates)
                    # - /log and /info HTTP requests will succeed (and show the pod as being killed)
                    # - any control request will now fail
                    #
                    time.sleep(60.0)

            finally:

                #
                # - when we exit the block first shutdown our executor (which is probably already down)
                # - then shutdown the coordinator to un-register from zookeeper
                # - finally ask werkzeug to shutdown via a REST call
                #
                shutdown(executor)
                shutdown(coordinator)
                post('http://127.0.0.1:%s/terminate' % env['ochopod_port'])

        except KeyboardInterrupt:

            logger.fatal('CTRL-C pressed')

        except Exception as failure:

            logger.fatal('unexpected condition -> %s' % diagnostic(failure))
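
The /exec handler above is a compact illustration of the per-request mkdtemp() pattern: create a scratch directory, save any uploaded attachments into it, hand it to the tool, and remove it in a finally block whatever the outcome. Below is a minimal, self-contained sketch of that pattern, assuming a plain callable in place of the Tool class; run_with_uploads and its callback signature are illustrative only and not part of ochopod's API.

import os
import shutil
import tempfile

def run_with_uploads(uploads, tool):
    """uploads: dict of {filename: bytes}; tool: callable taking the scratch dir path."""
    tmp = tempfile.mkdtemp()
    try:
        for name, payload in uploads.items():
            with open(os.path.join(tmp, name), 'wb') as handle:
                handle.write(payload)
        return tool(tmp)
    finally:
        # the directory is scratch space only, so it is removed no matter what the tool did
        shutil.rmtree(tmp, ignore_errors=True)

if __name__ == '__main__':
    code, listing = run_with_uploads({'settings.yml': b'foo: bar'},
                                     lambda tmp: (0, sorted(os.listdir(tmp))))
    print(code, listing)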

Example 15

Project: ochothon
Source File: cli.py
View license
def cli(args):

    tmp = tempfile.mkdtemp()
    try:

        class Shell(cmd.Cmd):

            def __init__(self, ip, token=None):
                cmd.Cmd.__init__(self)
                self.prompt = '%s > ' % ip
                self.ruler = '-'
                self.token = token

            def precmd(self, line):
                return 'shell %s' % line if line not in ['exit'] else line

            def emptyline(self):
                pass

            def do_exit(self, _):
                raise KeyboardInterrupt

            def do_shell(self, line):
                if line:
                    tokens = line.split(' ')

                    #
                    # - update from @stphung -> reformat the input line to handle indirect paths transparently
                    # - for instance ../foo.bar will become foo.bar with the actual file included in the multi-part post
                    #
                    files = {}
                    substituted = tokens[:1]
                    for token in tokens[1:]:
                        expanded = expanduser(token)
                        full = abspath(expanded)
                        tag = basename(full)
                        if isfile(expanded):

                            #
                            # - if the token maps to a local file upload it
                            # - this is for instance what happens when you do 'deploy foo.yml'
                            #
                            files[tag] = abspath(full)
                            substituted += [tag]

                        elif isdir(expanded):

                            #
                            # - if the token maps to a local directory TGZ & upload it
                            # - this is typically used to upload settings & script for our CD pipeline
                            # - the TGZ is stored in our temp. directory
                            #
                            path = join(tmp, '%s.tgz' % tag)
                            shell('tar zcf %s *' % path, cwd=full)
                            files['%s.tgz' % tag] = path
                            substituted += ['%s.tgz' % tag]

                        else:
                            substituted += [token]

                    #
                    # - compute the SHA1 signature if we have a token
                    # - prep the CURL statement and run it
                    # - we should always get a HTTP 200 back with some UTF-8 json payload
                    # - parse & print
                    #
                    line = ' '.join(substituted)
                    unrolled = ['-F %s=@%s' % (k, v) for k, v in files.items()]
                    digest = 'sha1=' + hmac.new(self.token, line, hashlib.sha1).hexdigest() if self.token else ''
                    snippet = 'curl -X POST -H "X-Shell:%s" -H "X-Signature:%s" %s %s:9000/shell' % (line, digest, ' '.join(unrolled), ip)
                    code, out = shell(snippet, cwd=tmp)
                    js = json.loads(out.decode('utf-8'))
                    print(js['out'] if code == 0 else 'i/o failure (is the proxy down ?)')

        #
        # - partition ip and args by looking for OCHOPOD_PROXY first
        # - if OCHOPOD_PROXY is not used, treat the first argument as the ip
        #
        ip = None
        if 'OCHOPOD_PROXY' in os.environ:
            ip = os.environ['OCHOPOD_PROXY']
        elif len(args):
            ip = args[0]
            args = args[1:] if len(args) > 1 else []

        #
        # - fail if left undefined
        #
        assert ip is not None, 'either set $OCHOPOD_PROXY or pass the proxy IP as an argument'

        #
        # - set the secret token if specified via the $OCHOPOD_TOKEN variable
        # - if not defined or set to an empty string the SHA1 signature will not be performed
        #
        token = os.environ['OCHOPOD_TOKEN'] if 'OCHOPOD_TOKEN' in os.environ else None

        #
        # - determine whether to run in interactive or non-interactive mode
        #
        if len(args):
            command = " ".join(args)
            Shell(ip, token).do_shell(command)
        else:
            print('welcome to the ocho CLI ! (CTRL-C or exit to get out)')
            if token is None:
                print('warning, $OCHOPOD_TOKEN is undefined')
            Shell(ip, token).cmdloop()

    except KeyboardInterrupt:
        exit(0)

    except Exception as failure:
        print('internal failure <- %s' % str(failure))
        exit(1)

    finally:
        shutil.rmtree(tmp)
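
The CLI above keeps a single temporary directory for the whole session, stages .tgz archives of any local directories in it before uploading them, and removes it once in the finally clause. Here is a minimal sketch of that staging step, using the standard tarfile module instead of shelling out to tar; stage_directory is an illustrative name, not part of ochothon.

import os
import shutil
import tarfile
import tempfile

def stage_directory(source_dir, tmp):
    """Pack source_dir into <tmp>/<name>.tgz and return the archive path."""
    tag = os.path.basename(os.path.abspath(source_dir))
    archive = os.path.join(tmp, '%s.tgz' % tag)
    with tarfile.open(archive, 'w:gz') as tgz:
        # roughly what 'tar zcf <archive> *' run inside source_dir produces
        tgz.add(source_dir, arcname=tag)
    return archive

if __name__ == '__main__':
    tmp = tempfile.mkdtemp()
    try:
        print(stage_directory('.', tmp))
    finally:
        shutil.rmtree(tmp)      # one cleanup at the very end, as in cli() above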

Example 16

Project: atomic-reactor
Source File: test_tag_and_push.py
View license
@pytest.mark.parametrize("use_secret", [
    True,
    False,
])
@pytest.mark.parametrize(("image_name", "logs", "should_raise", "has_config"), [
    (TEST_IMAGE, PUSH_LOGS_1_X, False, False),
    (TEST_IMAGE, PUSH_LOGS_1_9, False, False),
    (TEST_IMAGE, PUSH_LOGS_1_10, False, True),
    (TEST_IMAGE, PUSH_LOGS_1_10_NOT_IN_STATUS, False, False),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_X, True, False),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_9, True, False),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_10, True, True),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_10_NOT_IN_STATUS, True, True),
    (TEST_IMAGE, PUSH_ERROR_LOGS, True, False),
])
def test_tag_and_push_plugin(
        tmpdir, monkeypatch, image_name, logs, should_raise, has_config, use_secret):

    if MOCK:
        mock_docker()
        flexmock(docker.Client, push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "user", "email": "[email protected]", "password": "mypassword"}}
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:2c782e3a93d34d89ea4cf54052768be117caed54803263dd1f3798ce42aac14e'
    media_type = 'application/vnd.docker.distribution.manifest.v2+json'

    response_config_json = {
        'config': {
            'digest': CONFIG_DIGEST,
            'mediaType': 'application/octet-stream',
            'size': 4132
        },
        'layers': [
            {
                'digest': 'sha256:16dc1f96e3a1bb628be2e00518fec2bb97bd5933859de592a00e2eb7774b6ecf',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 71907148
            },
            {
                'digest': 'sha256:cebc0565e1f096016765f55fde87a6f60fdb1208c0b5017e35a856ff578f5ccb',
                'mediaType': 'application/vnd.docker.image.rootfs.diff.tar.gzip',
                'size': 3945724
            }
        ],
        'mediaType': media_type,
        'schemaVersion': 2
    }

    response_json = {
        'config': {
            'Size': 12509448,
            'architecture': 'amd64',
            'author': 'Red Hat, Inc.',
            'config': {
                'Cmd': ['/bin/rsyslog.sh'],
                'Entrypoint': None,
                'Image': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88',
                'Labels': {
                    'Architecture': 'x86_64',
                    'Authoritative_Registry': 'registry.access.redhat.com',
                    'BZComponent': 'rsyslog-docker',
                    'Name': 'rhel7/rsyslog',
                    'Release': '28.vrutkovs.31',
                    'Vendor': 'Red Hat, Inc.',
                    'Version': '7.2',
                },
            },
            'created': '2016-10-07T10:20:05.38595Z',
            'docker_version': '1.9.1',
            'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
            'os': 'linux',
            'parent': '47eed7a8ea3f131e9281ae09fcbfb0041872fd8b74a048f1c739302c8148505d'
        },
        'container_config': {
            'foo': 'bar',
            'spam': 'maps'
        },
        'id': '1ca220fbc2aed7c141b236c8729fe59db5771b32bf2da74e4a663407f32ec2a2',
        'parent_id': 'c3fb36aafd5692d2a45115d32bb120edb6edf6c0c3c783ed6592a8dab969fb88'
    }

    if not has_config:
        response_json = None

    config_latest_url = "https://{}/v2/{}/manifests/latest".format(LOCALHOST_REGISTRY, TEST_IMAGE,)
    config_url = "https://{}/v2/{}/manifests/{}".format(LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_V2)
    blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    config_response_config_v1 = requests.Response()
    (flexmock(config_response_config_v1,
              raise_for_status=lambda: None,
              json=response_config_json,
              headers={
                'Content-Type': 'application/vnd.docker.distribution.manifest.v1+json',
                'Docker-Content-Digest': DIGEST_V1
              }
    ))

    config_response_config_v2 = requests.Response()
    (flexmock(config_response_config_v2,
              raise_for_status=lambda: None,
              json=response_config_json,
              headers={
                'Content-Type': 'application/vnd.docker.distribution.manifest.v2+json',
                'Docker-Content-Digest': DIGEST_V2
              }
    ))

    blob_config = requests.Response()
    (flexmock(blob_config, raise_for_status=lambda: None, json=response_json))

    def custom_get(url, headers, **kwargs):
        if url == config_latest_url:
            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v1+json':
                return config_response_config_v1

            if headers['Accept'] == 'application/vnd.docker.distribution.manifest.v2+json':
                return config_response_config_v2

        if url == config_url:
            return config_response_config_v2

        if url == blob_url:
            return blob_config

    (flexmock(requests)
        .should_receive('get')
        .replace_with(custom_get)
    )

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    if should_raise:
        with pytest.raises(Exception):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            expected_digest = ManifestDigest(v1=DIGEST_V1, v2=DIGEST_V2)
            assert workflow.push_conf.docker_registries[0].digests[image_name].v1 == expected_digest.v1
            assert workflow.push_conf.docker_registries[0].digests[image_name].v2 == expected_digest.v2

            if has_config:
                assert isinstance(workflow.push_conf.docker_registries[0].config, dict)
            else:
                assert workflow.push_conf.docker_registries[0].config is None
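
In the test above, mkdtemp() provides a throwaway directory holding a .dockercfg file, and the directory path itself is what gets handed to the plugin as the registry secret. A minimal sketch of that fixture follows; the registry address and credentials are placeholders, not values from atomic-reactor.

import json
import os
import shutil
from tempfile import mkdtemp

def make_dockercfg_dir(registry, username, password):
    """Create a throwaway directory containing a .dockercfg for the given registry."""
    secret_dir = mkdtemp()
    contents = {registry: {'username': username, 'password': password}}
    with open(os.path.join(secret_dir, '.dockercfg'), 'w') as dockercfg:
        json.dump(contents, dockercfg)
    return secret_dir

if __name__ == '__main__':
    path = make_dockercfg_dir('172.17.0.1:5000', 'user', 'mypassword')  # placeholder registry
    print(os.listdir(path))   # ['.dockercfg']
    shutil.rmtree(path)       # a real test would usually rely on a tmpdir fixture instead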

Example 17

Project: rpm-ostree-toolbox
Source File: taskbase.py
View license
    def __init__(self, args, cmd, profile=None):
        self.workdir = None
        self.tree_file = None
        self.rpmostree_cache_dir = None
        self.pkgdatadir = None
        self.os_name = None
        self.ostree_remote = None
        self.os_pretty_name = None
        self.tree_name = None
        self.tree_file = None
        self.arch = None
        self.release = None
        self.ref = None
        self.yum_baseurl = None
        self.lorax_additional_repos = None
        self.is_final = None
        self.lorax_inherit_repos = None
        self.lorax_exclude_packages = None
        self.lorax_include_packages = None
        self.lorax_rootfs_size = None
        self.local_overrides = None
        self.http_proxy = None
        self.selinux = None
        self.configdir = None
        self.docker_os_name = None

        self._repo = None
        self.args = args

        configfile = args.config
        assert profile is not None
        defaults = { 'workdir': None,
                     'pkgdatadir':  os.environ['OSTBUILD_DATADIR'],
                     'yum_baseurl': None,
                     'local_overrides': None,
                     'selinux': True
                   }

        if not os.path.isfile(configfile):
            fail_msg("No config file: " + configfile)
        settings = iniparse.ConfigParser()
        try: 
            settings.read(configfile)
        except ConfigParser.ParsingError as e:
            fail_msg("Error parsing your config file {0}: {1}".format(configfile, e.message))            

        self.outputdir = os.getcwd()

        if os.path.isdir(self.outputdir + "/.git"):
            fail_msg("Found .git in the current directory; you most likely don't want to build in source directory")

        for attr in self.ATTRS:
            val = self.getConfigValue(attr, settings, profile, defValue=defaults.get(attr))
            setattr(self, attr, val)

        # Checking ostreerepo
        self.ostree_port = None
        self.ostree_repo_is_remote = False
        self.httpd_path = ""
        self.httpd_host = ""
        if args.ostreerepo is not None:
            self.ostree_repo = args.ostreerepo
            # The ostree_repo is given in URL format
            if 'http' in self.ostree_repo:
                self.ostree_repo_is_remote = True
                urlp = urlparse.urlparse(self.ostree_repo)
                # FIXME
                # When ostree creates the summary file by default, re-enable this.
                # try:
                #     summaryfile = urllib2.urlopen(urlparse.urljoin(self.ostree_repo, "summary")).read()

                # except urllib2.HTTPError, e:
                #     fail_msg("Unable to open the ostree sumarry file with the URL {0} due to {1}".format(self.ostree_repo, str(e)))

                # except urllib2.URLError, e:
                #     fail_msg("Unable to open the ostree summary file with the URL {0} due to {1}".format(self.ostree_repo, str(e)))
                self.httpd_port = str(urlp.port if urlp.port is not None else 80)
                self.httpd_path = urlp.path
                self.httpd_host = urlp.hostname

                # FIXME
                # When ostree creates the summary file by default, re-enable this.
                # if not self.checkRefExists(getattr(self,'ref'), summaryfile):
                #     fail_msg("The ref {0} cannot be found in in the URL {1}".format(getattr(self,'ref'), self.ostree_repo))
        if not self.ostree_repo:
            self.ostree_repo = os.environ.get('OSTREE_REPO')
        if not self.ostree_repo:
            self.ostree_repo = self.outputdir + '/repo'
        if not self.ostree_remote:
            self.ostree_remote = self.os_name
        release = self.release
        # Check for configdir in attrs, else fallback to dir holding config
        if self.configdir is None:
            self.configdir = os.path.dirname(os.path.realpath(configfile))

        if self.tree_file is None:
            fail_msg("No tree file was provided")
        else:
            self.tree_file = os.path.join(self.configdir, self.tree_file)

        # Look for virtnetwork

        if 'virtnetwork' in args:
            self.virtnetwork = args.virtnetwork
        else:
            self.virtnetwork = None

        self.os_nr = "{0}-{1}".format(self.os_name, self.release)

        # Set name from args, else fallback to default
        if 'name' in args and args.name is not None:
            self.name = args.name
        else:
            self.name = self.os_nr

        if cmd == "installer":
            if not self.yum_baseurl and args.yum_baseurl == None:
                fail_msg("No yum_baseurl was provided in your config.ini or with installer -b.")

        if self.http_proxy:
            os.environ['http_proxy'] = self.http_proxy

        self.workdir_is_tmp = False
        if self.workdir is None:
            self.workdir = tempfile.mkdtemp('.tmp', 'atomic-treecompose')
            self.workdir_is_tmp = True
        self.buildjson()

        return
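
Note the positional arguments in the example above: tempfile.mkdtemp('.tmp', 'atomic-treecompose') sets the suffix and prefix, so the directory is named along the lines of /tmp/atomic-treecomposeXXXXXX.tmp. The sketch below shows the same "use the configured workdir, or fall back to a tracked temporary one" pattern; the class and attribute names are illustrative, not rpm-ostree-toolbox's.

import shutil
import tempfile

class Task(object):
    def __init__(self, workdir=None):
        self.workdir_is_tmp = False
        self.workdir = workdir
        if self.workdir is None:
            self.workdir = tempfile.mkdtemp(suffix='.tmp', prefix='atomic-treecompose')
            self.workdir_is_tmp = True

    def cleanup(self):
        # only remove the directory if we created it ourselves
        if self.workdir_is_tmp:
            shutil.rmtree(self.workdir)

if __name__ == '__main__':
    task = Task()
    print(task.workdir)
    task.cleanup()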

Example 18

Project: stoq-plugins-public
Source File: decompress.py
View license
    def extract(self, payload, **kwargs):
        """
        Decompress a payload

        :param bytes payload: Content to be decompressed
        :param str filename: Filename of compressed archive
        :param list archive_passwords: List of passwords to attempt against the archive

        :returns: Metadata and content extracted
        :rtype: list of tuples

        """

        # Make sure the payload is not larger than what is permitted
        if len(payload) > int(self.maximum_size):
            self.log.warn("Compressed file too large: {}".format(kwargs))
            return None

        if 'filename' in kwargs:
            filename = kwargs['filename']
        else:
            filename = self.stoq.get_uuid

        if 'archive_passwords' in kwargs:
            archive_passwords = kwargs['archive_passwords']
            if not isinstance(archive_passwords, (list, tuple)):
                archive_passwords = archive_passwords.split(",")
        else:
            archive_passwords = self.password_list

        results = None

        # Determine the mimetype of the payload so we can identify the
        # correct archiver
        mimetype = get_magic(payload)
        self.log.debug("Mimetype: {}".format(mimetype))
        if mimetype in archive_magic:
            archive_type = archive_magic[mimetype]
            if archive_type in archive_cmds:
                archiver = archive_cmds[archive_type]
            else:
                self.log.warn("Unknown archive type: {}".format(archive_type))
                return None
        else:
            self.log.warn("Unknown MIME type: {}".format(mimetype))
            return None

        # Build our temporary directory and file structure
        tmp_archive_dir = tempfile.mkdtemp(dir=self.stoq.temp_dir)
        extract_dir = tmp_archive_dir
        archive_file = os.path.join(tmp_archive_dir, filename)

        with open(archive_file, "wb") as f:
            f.write(payload)

        for password in archive_passwords:
            # Check to see what kind of archive we have and build the
            # command as appropriate
            cmd = archiver.replace('%INFILE%', shlex.quote(archive_file))
            cmd = cmd.replace('%OUTDIR%', shlex.quote(extract_dir))
            cmd = cmd.replace('%PASSWORD%', shlex.quote(password))
            cmd = cmd.split(" ")

            # Start the process
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
            try:
                # Monitor the command and wait for it to complete within a set
                # timeout
                outs, errs = p.communicate(timeout=45)
            except TimeoutExpired:
                p.kill()
                self.log.error("Timed out decompressing {}".format(archive_file))

            # If the archiver exited cleanly the archive extracted successfully,
            # so break out of the password loop and continue on
            if p.returncode == 0:
                break

        # Looks like we are ready, let's step through each file
        for root, dirs, files in os.walk(extract_dir):
            for f in files:
                # We are going to skip this file if the filename is the same as
                # our original file
                if f != filename:
                    base_path = os.path.join(extract_dir, root)
                    path = os.path.join(base_path, f)
                    extracted_filename = os.path.basename(path)

                    try:
                        # Open the file so we can return the content
                        with open(path, "rb") as extracted_file:
                            # Generate relevant metadata
                            meta = {}
                            content = extracted_file.read()
                            meta['filename'] = extracted_filename
                            meta['size'] = len(content)

                            # Since we defined results as None above, we need to
                            # ensure it is a list now that we have results
                            if not results:
                                results = []

                            # Construct our set for return
                            results.append((meta, content))

                            self.log.info("Extracted file {} ({} bytes) from "
                                          "{}".format(meta['filename'],
                                                      meta['size'],
                                                      filename))
                    except Exception as err:
                        self.log.warn("Unable to access extracted content: {}".format(err))

        # Cleanup the extracted content
        if os.path.isdir(tmp_archive_dir):
            shutil.rmtree(tmp_archive_dir)

        return results
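
The extractor above passes dir=self.stoq.temp_dir so the scratch directory is created under the plugin's configured temp root rather than the system default. A minimal sketch of that layout, with temp_root and extract_to_scratch as stand-in names:

import os
import shutil
import tempfile

def extract_to_scratch(payload, filename, temp_root):
    """Write the payload under a fresh scratch directory rooted at temp_root."""
    tmp_archive_dir = tempfile.mkdtemp(dir=temp_root)
    archive_file = os.path.join(tmp_archive_dir, filename)
    with open(archive_file, 'wb') as f:
        f.write(payload)
    # ... an external archiver would be run against archive_file here ...
    return tmp_archive_dir

if __name__ == '__main__':
    root = tempfile.mkdtemp()                        # stand-in for the configured temp dir
    scratch = extract_to_scratch(b'PK\x03\x04', 'sample.zip', root)
    print(os.listdir(scratch))                       # ['sample.zip']
    shutil.rmtree(root)                              # removes the nested scratch dir as well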

Example 19

Project: autotest
Source File: server_job.py
View license
    def run(self, cleanup=False, install_before=False, install_after=False,
            collect_crashdumps=True, namespace={}, control=None,
            control_file_dir=None, only_collect_crashinfo=False):
        # for a normal job, make sure the uncollected logs file exists
        # for a crashinfo-only run it should already exist, bail out otherwise
        created_uncollected_logs = False
        if self.resultdir and not os.path.exists(self._uncollected_log_file):
            if only_collect_crashinfo:
                # if this is a crashinfo-only run, and there were no existing
                # uncollected logs, just bail out early
                logging.info("No existing uncollected logs, "
                             "skipping crashinfo collection")
                return
            else:
                log_file = open(self._uncollected_log_file, "w")
                pickle.dump([], log_file)
                log_file.close()
                created_uncollected_logs = True

        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines
        if control is None:
            if self.control is None:
                control = ''
            else:
                control = self._load_control_file(self.control)
        if control_file_dir is None:
            control_file_dir = self.resultdir

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        namespace['ssh_user'] = self._ssh_user
        namespace['ssh_port'] = self._ssh_port
        namespace['ssh_pass'] = self._ssh_pass
        test_start_time = int(time.time())

        if self.resultdir:
            os.chdir(self.resultdir)
            # touch status.log so that the parser knows a job is running here
            open(self.get_status_log_path(), 'a').close()
            self.enable_external_logging()

        collect_crashinfo = True
        temp_control_file_dir = None
        try:
            try:
                if install_before and machines:
                    self._execute_code(INSTALL_CONTROL_FILE, namespace)

                if only_collect_crashinfo:
                    return

                # determine the dir to write the control files to
                cfd_specified = (control_file_dir and control_file_dir is not
                                 self._USE_TEMP_DIR)
                if cfd_specified:
                    temp_control_file_dir = None
                else:
                    temp_control_file_dir = tempfile.mkdtemp(
                        suffix='temp_control_file_dir')
                    control_file_dir = temp_control_file_dir
                server_control_file = os.path.join(control_file_dir,
                                                   self._control_filename)
                client_control_file = os.path.join(control_file_dir,
                                                   CLIENT_CONTROL_FILENAME)
                if self._client:
                    namespace['control'] = control
                    utils.open_write_close(client_control_file, control)
                    shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                    server_control_file)
                else:
                    utils.open_write_close(server_control_file, control)
                logging.info("Processing control file")
                self._execute_code(server_control_file, namespace)
                logging.info("Finished processing control file")

                # no error occurred, so we don't need to collect crashinfo
                collect_crashinfo = False
            except Exception, e:
                try:
                    logging.exception(
                        'Exception escaped control file, job aborting:')
                    self.record('INFO', None, None, str(e),
                                {'job_abort_reason': str(e)})
                except:
                    pass  # don't let logging exceptions here interfere
                raise
        finally:
            if temp_control_file_dir:
                # Clean up temp directory used for copies of the control files
                try:
                    shutil.rmtree(temp_control_file_dir)
                except Exception, e:
                    logging.warn('Could not remove temp directory %s: %s',
                                 temp_control_file_dir, e)

            if machines and (collect_crashdumps or collect_crashinfo):
                namespace['test_start_time'] = test_start_time
                if collect_crashinfo:
                    # includes crashdumps
                    self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
                else:
                    self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
            if self._uncollected_log_file and created_uncollected_logs:
                os.remove(self._uncollected_log_file)
            self.disable_external_logging()
            if cleanup and machines:
                self._execute_code(CLEANUP_CONTROL_FILE, namespace)
            if install_after and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)
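
The job runner above only creates a temporary control-file directory when the caller does not supply one, and removes it best-effort in the finally block, logging rather than raising if the removal fails. A condensed sketch of that pattern; the control filename here is a placeholder for self._control_filename.

import logging
import os
import shutil
import tempfile

def write_control_file(control_text, control_file_dir=None):
    temp_control_file_dir = None
    try:
        if control_file_dir is None:
            temp_control_file_dir = tempfile.mkdtemp(suffix='temp_control_file_dir')
            control_file_dir = temp_control_file_dir
        control_path = os.path.join(control_file_dir, 'control.srv')   # placeholder name
        with open(control_path, 'w') as control_file:
            control_file.write(control_text)
        print('processed %s' % control_path)
    finally:
        if temp_control_file_dir:
            try:
                shutil.rmtree(temp_control_file_dir)
            except OSError as err:
                logging.warning('Could not remove temp directory %s: %s',
                                temp_control_file_dir, err)

if __name__ == '__main__':
    write_control_file("job.run_test('sleeptest')")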

Example 20

Project: tp-qemu
Source File: cdrom.py
View license
@error.context_aware
def run(test, params, env):
    """
    KVM cdrom test:

    1) Boot up a VM, with one iso image (optional).
    2) Check if VM identifies correctly the iso file.
    3) Verifies that device is unlocked <300s after boot (optional, if
       cdrom_test_autounlock is set).
    4) Eject cdrom using monitor.
    5) Change cdrom image with another iso several times.
    6) Test tray reporting function (optional, if cdrom_test_tray_status is set)
    7) Try to format cdrom and check the return string.
    8) Mount cdrom device.
    9) Copy file from cdrom and compare files.
    10) Umount and mount cdrom in guest several times.
    11) Check if the cdrom lock works well when iso file is not inserted.
    12) Reboot vm after vm resume from s3/s4.
        Note: This case requires a qemu cli without setting file property
        for -drive option, and will be separated to a different cfg item.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.

    :param cfg: workaround_eject_time - Some versions of qemu are unable to
                                        eject CDROM directly after insert
    :param cfg: cdrom_test_autounlock - Test whether guest OS unlocks cdrom
                                        after boot (<300s after VM is booted)
    :param cfg: cdrom_test_tray_status - Test tray reporting (eject and insert
                                         CD couple of times in guest).
    :param cfg: cdrom_test_locked -     Test whether cdrom tray lock function
                                        work well in guest.
    :param cfg: cdrom_test_eject -      Test whether cdrom works well after
                                        several times of eject action.
    :param cfg: cdrom_test_file_operation - Test file operation for cdrom,
                                            such as mount/umount, reading files
                                            on cdrom.

    @warning: Check dmesg for block device failures
    """
    # Some versions of qemu are unable to eject CDROM directly after insert
    workaround_eject_time = float(params.get('workaround_eject_time', 0))

    login_timeout = int(params.get("login_timeout", 360))
    cdrom_prepare_timeout = int(params.get("cdrom_preapre_timeout", 360))

    def generate_serial_num():
        length = int(params.get("length", "10"))
        id_leng = random.randint(6, length)
        ignore_str = ",!\"#$%&\'()*+./:;<=>[email protected][\\]^`{|}~"
        return utils_misc.generate_random_string(id_leng, ignore_str)

    def list_guest_cdroms(session):
        """
        Get cdrom lists from guest os;

        :param session: ShellSession object;
        :param params: test params dict;
        :return: list of cdroms;
        :rtype: list
        """
        list_cdrom_cmd = "wmic cdrom get Drive"
        filter_cdrom_re = "\w:"
        if params["os_type"] != "windows":
            list_cdrom_cmd = "ls /dev/cdrom*"
            filter_cdrom_re = r"/dev/cdrom-\w+|/dev/cdrom\d*"
        output = session.cmd_output(list_cdrom_cmd)
        cdroms = re.findall(filter_cdrom_re, output)
        cdroms.sort()
        return cdroms

    def get_cdrom_mount_point(session, drive_letter, params):
        """
        Get default cdrom mount point;
        """
        mount_point = "/mnt"
        if params["os_type"] == "windows":
            cmd = "wmic volume where DriveLetter='%s' " % drive_letter
            cmd += "get DeviceID | more +1"
            mount_point = session.cmd_output(cmd).strip()
        return mount_point

    @error.context_aware
    def create_iso_image(params, name, prepare=True, file_size=None):
        """
        Creates 'new' iso image with one file on it

        :param params: parameters for test
        :param name: name of new iso image file
        :param prepare: if True then it prepares cd images.
        :param file_size: Size of iso image in MB

        :return: path to new iso image file.
        """
        error.context("Creating test iso image '%s'" % name, logging.info)
        cdrom_cd = params["target_cdrom"]
        cdrom_cd = params[cdrom_cd]
        if not os.path.isabs(cdrom_cd):
            cdrom_cd = utils_misc.get_path(data_dir.get_data_dir(), cdrom_cd)
        iso_image_dir = os.path.dirname(cdrom_cd)
        if file_size is None:
            file_size = 10
        g_mount_point = tempfile.mkdtemp("gluster")
        image_params = params.object_params(name)
        if image_params.get("enable_gluster") == "yes":
            if params.get("gluster_server"):
                gluster_server = params.get("gluster_server")
            else:
                gluster_server = "localhost"
            volume_name = params["gluster_volume_name"]
            g_mount_link = "%s:/%s" % (gluster_server, volume_name)
            mount_cmd = "mount -t glusterfs %s %s" % (g_mount_link, g_mount_point)
            utils.system(mount_cmd, timeout=60)
            file_name = os.path.join(g_mount_point, "%s.iso" % name)
        else:
            file_name = utils_misc.get_path(iso_image_dir, "%s.iso" % name)
        if prepare:
            cmd = "dd if=/dev/urandom of=%s bs=1M count=%d"
            utils.run(cmd % (name, file_size))
            utils.run("mkisofs -o %s %s" % (file_name, name))
            utils.run("rm -rf %s" % (name))
        if image_params.get("enable_gluster") == "yes":
            gluster_uri = gluster.create_gluster_uri(image_params)
            file_name = "%s%s.iso" % (gluster_uri, name)
            try:
                umount_cmd = "umount %s" % g_mount_point
                utils.system(umount_cmd, timeout=60)
                os.rmdir(g_mount_point)
            except Exception, err:
                msg = "Fail to clean up %s" % g_mount_point
                msg += "Error message %s" % err
                logging.warn(msg)
        return file_name

    def cleanup_cdrom(path):
        """ Removes created iso image """
        if path:
            error.context("Cleaning up temp iso image '%s'" % path,
                          logging.info)
            if "gluster" in path:
                g_mount_point = tempfile.mkdtemp("gluster")
                g_server, v_name, f_name = path.split("/")[-3:]
                if ":" in g_server:
                    g_server = g_server.split(":")[0]
                g_mount_link = "%s:/%s" % (g_server, v_name)
                mount_cmd = "mount -t glusterfs %s %s" % (g_mount_link,
                                                          g_mount_point)
                utils.system(mount_cmd, timeout=60)
                path = os.path.join(g_mount_point, f_name)
            try:
                logging.debug("Remove the file with os.remove().")
                os.remove("%s" % path)
            except OSError, err:
                logging.warn("Fail to delete %s" % path)
            if "gluster" in path:
                try:
                    umount_cmd = "umount %s" % g_mount_point
                    utils.system(umount_cmd, timeout=60)
                    os.rmdir(g_mount_point)
                except Exception, err:
                    msg = "Fail to clean up %s" % g_mount_point
                    msg += "Error message %s" % err
                    logging.warn(msg)

    def get_cdrom_file(vm, qemu_cdrom_device):
        """
        :param vm: VM object
        :param qemu_cdrom_device: qemu monitor device
        :return: file associated with $qemu_cdrom_device device
        """
        blocks = vm.monitor.info("block")
        cdfile = None
        if isinstance(blocks, str):
            tmp_re_str = r'%s: .*file=(\S*) ' % qemu_cdrom_device
            file_list = re.findall(tmp_re_str, blocks)
            if file_list:
                cdfile = file_list[0]
            else:
                # try to deal with new qemu
                tmp_re_str = r'%s: (\S*) \(.*\)' % qemu_cdrom_device
                file_list = re.findall(tmp_re_str, blocks)
                if file_list:
                    cdfile = file_list[0]
        else:
            for block in blocks:
                if block['device'] == qemu_cdrom_device:
                    try:
                        cdfile = block['inserted']['file']
                        break
                    except KeyError:
                        continue
        return cdfile

    def _get_tray_stat_via_monitor(vm, qemu_cdrom_device):
        """
        Get the cdrom tray status via qemu monitor
        """
        is_open, checked = (None, False)

        blocks = vm.monitor.info("block")
        if isinstance(blocks, str):
            for block in blocks.splitlines():
                if qemu_cdrom_device in block:
                    if "tray-open=1" in block:
                        is_open, checked = (True, True)
                    elif "tray-open=0" in block:
                        is_open, checked = (False, True)
            # fallback to new qemu
            tmp_block = ""
            for block_new in blocks.splitlines():
                if tmp_block and "Removable device" in block_new:
                    if "tray open" in block_new:
                        is_open, checked = (True, True)
                    elif "tray closed" in block_new:
                        is_open, checked = (False, True)
                if qemu_cdrom_device in block_new:
                    tmp_block = block_new
                else:
                    tmp_block = ""
        else:
            for block in blocks:
                if block['device'] == qemu_cdrom_device:
                    key = filter(lambda x: re.match(r"tray.*open", x),
                                 block.keys())
                    # compatible rhel6 and rhel7 diff qmp output
                    if not key:
                        break
                    is_open, checked = (block[key[0]], True)
        return (is_open, checked)

    def is_tray_opened(vm, qemu_cdrom_device, mode='monitor',
                       dev_name="/dev/sr0"):
        """
        Checks whether the tray is opened

        :param vm: VM object
        :param qemu_cdrom_device: cdrom image file name.
        :param mode: tray status checking mode, now support:
                     "monitor": get tray status from monitor.
                     "session": get tray status from guest os.
                     "mixed": get tray status first, if failed, try to
                              get the status in guest os again.
        :param dev_name: cdrom device name in guest os.

        :return: True if cdrom tray is open, otherwise False.
                 None if failed to get the tray status.
        """
        is_open, checked = (None, False)

        if mode in ['monitor', 'mixed']:
            is_open, checked = _get_tray_stat_via_monitor(
                vm, qemu_cdrom_device)

        if (mode in ['session', 'mixed']) and not checked:
            session = vm.wait_for_login(timeout=login_timeout)
            tray_cmd = params["tray_check_cmd"] % dev_name
            o = session.cmd_output(tray_cmd)
            if "cdrom is open" in o:
                is_open, checked = (True, True)
            else:
                is_open, checked = (False, True)
        if checked:
            return is_open
        return None

    @error.context_aware
    def check_cdrom_lock(vm, cdrom):
        """
        Checks whether the cdrom is locked

        :param vm: VM object
        :param cdrom: cdrom object

        :return: Cdrom state if locked return True
        """
        error.context("Check cdrom state of locing.")
        blocks = vm.monitor.info("block")
        if isinstance(blocks, str):
            for block in blocks.splitlines():
                if cdrom in block:
                    if "locked=1" in block:
                        return True
                    elif "locked=0" in block:
                        return False
            # deal with new qemu
            lock_str_new = "locked"
            no_lock_str = "not locked"
            tmp_block = ""
            for block_new in blocks.splitlines():
                if tmp_block and "Removable device" in block_new:
                    if no_lock_str in block_new:
                        return False
                    elif lock_str_new in block_new:
                        return True
                if cdrom in block_new:
                    tmp_block = block_new
                else:
                    tmp_block = ""
        else:
            for block in blocks:
                if block['device'] == cdrom and 'locked' in block.keys():
                    return block['locked']
        return None

    @error.context_aware
    def get_device(vm, dev_file_path):
        """
        Get vm device class from device path.

        :param vm: VM object.
        :param dev_file_path: Device file path.
        :return: device object
        """
        error.context("Get cdrom device object")
        device = vm.get_block({'file': dev_file_path})
        if not device:
            device = vm.get_block({'backing_file': dev_file_path})
            if not device:
                raise error.TestFail("Could not find a valid cdrom device")
        return device

    def get_match_cdrom(vm, session, serial_num):
        """
        Find the cdrom in the guest that corresponds to the CML (qemu
        command line) according to the serial number.

        :param session: VM session.
        :param serial num: serial number of the cdrom.
        :return match_cdrom: the cdrom in the guest that corresponds to
                             the CML according to the serial number.
        """
        error.context("Get matching cdrom in guest", logging.info)
        show_serial_num = "ls -l /dev/disk/by-id"
        serial_num_output = session.cmd_output(show_serial_num)
        if serial_num_output:
            serial_cdrom = ""
            for line in serial_num_output.splitlines():
                if utils_misc.find_substring(str(line), str(serial_num)):
                    serial_cdrom = line.split(" ")[-1].split("/")[-1]
                    break
            if not serial_cdrom:
                qtree_info = vm.monitor.info("qtree")
                raise error.TestFail("Could not find the device whose "
                                     "serial number %s is same in Qemu"
                                     " CML.\n Qtree info: %s" %
                                     (serial_num, qtree_info))

        show_cdrom_cmd = "ls -l /dev/cdrom*"
        dev_cdrom_output = session.cmd_output(show_cdrom_cmd)
        if dev_cdrom_output:
            for line in dev_cdrom_output.splitlines():
                if utils_misc.find_substring(str(line), str(serial_cdrom)):
                    match_cdrom = line.split(" ")[-3]
                    return match_cdrom
            raise error.TestFail("Could not find the corresponding cdrom"
                                 "in guest which is same in Qemu CML.")

    def get_testing_cdrom_device(vm, session, cdrom_dev_list, serial_num=None):
        """
        Get the testing cdrom used for eject
        :param session: VM session
        :param cdrom_dev_list: cdrom_dev_list
        """
        try:
            if params["os_type"] == "windows":
                winutil_drive = utils_misc.get_winutils_vol(session)
                winutil_drive = "%s:" % winutil_drive
                cdrom_dev_list.remove(winutil_drive)
                testing_cdrom_device = cdrom_dev_list[-1]
            else:
                testing_cdrom_device = get_match_cdrom(vm, session, serial_num)
        except IndexError:
            raise error.TestFail("Could not find the testing cdrom device")

        return testing_cdrom_device

    def disk_copy(vm, src_path, dst_path, copy_timeout=None, dsize=None):
        """
        Start disk load. Cyclic copy from src_path to dst_path.

        :param vm: VM where to find a disk.
        :param src_path: Source of data
        :param dst_path: Path to destination
        :param copy_timeout: Timeout for copy
        :param dsize: Size of the data block which is periodically copied.
        """
        if copy_timeout is None:
            copy_timeout = 120
        session = vm.wait_for_login(timeout=login_timeout)
        copy_file_cmd = (
            "nohup cp %s %s 2> /dev/null &" % (src_path, dst_path))
        get_pid_cmd = "echo $!"
        if params["os_type"] == "windows":
            copy_file_cmd = "start cmd /c copy /y %s %s" % (src_path, dst_path)
            get_pid_cmd = "wmic process where name='cmd.exe' get ProcessID"
        session.cmd(copy_file_cmd, timeout=copy_timeout)
        pid = re.findall(r"\d+", session.cmd_output(get_pid_cmd))[-1]
        return pid

    def get_empty_cdrom_device(vm):
        """
        Get the cdrom device when no media is inserted.
        """
        device = None
        blocks = vm.monitor.info("block")
        if isinstance(blocks, str):
            for block in blocks.strip().split('\n'):
                if 'not inserted' in block:
                    device = block.split(':')[0]
        else:
            for block in blocks:
                if 'inserted' not in block.keys():
                    device = block['device']
        return device

    def eject_test_via_monitor(vm, qemu_cdrom_device, guest_cdrom_device,
                               iso_image_orig, iso_image_new, max_times):
        """
        Test cdrom eject function via qemu monitor.
        """
        error.context("Eject the iso image in monitor %s times" % max_times,
                      logging.info)
        session = vm.wait_for_login(timeout=login_timeout)
        iso_image = iso_image_orig
        for i in range(1, max_times):
            session.cmd(params["eject_cdrom_cmd"] % guest_cdrom_device)
            vm.eject_cdrom(qemu_cdrom_device)
            time.sleep(2)
            if get_cdrom_file(vm, qemu_cdrom_device) is not None:
                raise error.TestFail("Device %s was not ejected"
                                     " (round %s)" % (iso_image, i))

            # Alternate between the new and the original iso image
            iso_image = iso_image_new
            if i % 2 == 0:
                iso_image = iso_image_orig
            vm.change_media(qemu_cdrom_device, iso_image)
            if get_cdrom_file(vm, qemu_cdrom_device) != iso_image:
                raise error.TestFail("Could not change iso image %s"
                                     " (round %s)" % (iso_image, i))
            time.sleep(workaround_eject_time)

    def check_tray_status_test(vm, qemu_cdrom_device, guest_cdrom_device,
                               max_times, iso_image_new):
        """
        Test cdrom tray status reporting function.
        """
        error.context("Change cdrom media via monitor", logging.info)
        iso_image_orig = get_cdrom_file(vm, qemu_cdrom_device)
        if not iso_image_orig:
            raise error.TestError("no media in cdrom")
        vm.change_media(qemu_cdrom_device, iso_image_new)
        is_opened = is_tray_opened(vm, qemu_cdrom_device)
        if is_opened:
            raise error.TestFail("cdrom tray not opened after change media")
        try:
            error.context("Copy test script to guest")
            tray_check_src = params.get("tray_check_src")
            if tray_check_src:
                tray_check_src = os.path.join(data_dir.get_deps_dir(), "cdrom",
                                              tray_check_src)
                vm.copy_files_to(tray_check_src, params["tmp_dir"])

            if is_tray_opened(vm, qemu_cdrom_device) is None:
                logging.warn("Tray status reporting is not supported by qemu!")
                logging.warn("cdrom_test_tray_status test is skipped...")
                return

            error.context("Eject the cdrom in guest %s times" % max_times,
                          logging.info)
            session = vm.wait_for_login(timeout=login_timeout)
            for i in range(1, max_times):
                session.cmd(params["eject_cdrom_cmd"] % guest_cdrom_device)
                if not is_tray_opened(vm, qemu_cdrom_device):
                    raise error.TestFail("Monitor reports tray closed"
                                         " when ejecting (round %s)" % i)
                if params["os_type"] != "windows":
                    cmd = "dd if=%s of=/dev/null count=1" % guest_cdrom_device
                else:
                    # Windows guests do not close the tray automatically when
                    # the cdrom is read, so close it explicitly;
                    cmd = params["close_cdrom_cmd"] % guest_cdrom_device
                session.cmd(cmd)
                if is_tray_opened(vm, qemu_cdrom_device):
                    raise error.TestFail("Monitor reports tray opened when close"
                                         " cdrom in guest (round %s)" % i)
                time.sleep(workaround_eject_time)
        finally:
            vm.change_media(qemu_cdrom_device, iso_image_orig)

    def check_tray_locked_test(vm, qemu_cdrom_device, guest_cdrom_device):
        """
        Test cdrom tray locked function.
        """
        error.context("Check cdrom tray status after cdrom is locked",
                      logging.info)
        session = vm.wait_for_login(timeout=login_timeout)
        tmp_is_trap_open = is_tray_opened(vm, qemu_cdrom_device, mode='mixed',
                                          dev_name=guest_cdrom_device)
        if tmp_is_trap_open is None:
            logging.warn("Tray status reporting is not supported by qemu!")
            logging.warn("cdrom_test_locked test is skipped...")
            return

        eject_failed = False
        eject_failed_msg = "Tray should be closed even in locked status"
        session.cmd(params["eject_cdrom_cmd"] % guest_cdrom_device)
        tmp_is_trap_open = is_tray_opened(vm, qemu_cdrom_device, mode='mixed',
                                          dev_name=guest_cdrom_device)
        if not tmp_is_trap_open:
            raise error.TestFail("Tray should not in closed status")
        session.cmd(params["lock_cdrom_cmd"] % guest_cdrom_device)
        try:
            session.cmd(params["close_cdrom_cmd"] % guest_cdrom_device)
        except aexpect.ShellCmdError, e:
            eject_failed = True
            eject_failed_msg += ", close command failed: %s" % str(e)

        tmp_is_trap_open = is_tray_opened(vm, qemu_cdrom_device, mode='mixed',
                                          dev_name=guest_cdrom_device)
        if (eject_failed or tmp_is_trap_open):
            raise error.TestFail(eject_failed_msg)
        session.cmd(params["unlock_cdrom_cmd"] % guest_cdrom_device)
        session.cmd(params["close_cdrom_cmd"] % guest_cdrom_device)

    def file_operation_test(session, guest_cdrom_device, max_times):
        """
        Cdrom file operation test.
        """
        filename = "new"
        mount_point = get_cdrom_mount_point(session,
                                            guest_cdrom_device, params)
        mount_cmd = params["mount_cdrom_cmd"] % (guest_cdrom_device,
                                                 mount_point)
        umount_cmd = params["umount_cdrom_cmd"] % guest_cdrom_device
        src_file = params["src_file"] % (mount_point, filename)
        dst_file = params["dst_file"] % filename
        copy_file_cmd = params["copy_file_cmd"] % (mount_point, filename)
        remove_file_cmd = params["remove_file_cmd"] % filename
        show_mount_cmd = params["show_mount_cmd"]
        md5sum_cmd = params["md5sum_cmd"]

        if params["os_type"] != "windows":
            error.context("Mounting the cdrom under %s" % mount_point,
                          logging.info)
            session.cmd(mount_cmd, timeout=30)
        error.context("File copying test", logging.info)
        session.cmd(copy_file_cmd)
        f1_hash = session.cmd(md5sum_cmd % dst_file).split()[0].strip()
        f2_hash = session.cmd(md5sum_cmd % src_file).split()[0].strip()
        if f1_hash != f2_hash:
            raise error.TestFail("On disk and on cdrom files are different, "
                                 "md5 mismatch")
        session.cmd(remove_file_cmd)
        error.context("Mount/Unmount cdrom for %s times" % max_times,
                      logging.info)
        for _ in range(1, max_times):
            try:
                session.cmd(umount_cmd)
                session.cmd(mount_cmd)
            except aexpect.ShellError, detail:
                logging.error("Mount/Unmount fail, detail: '%s'", detail)
                logging.debug(session.cmd(show_mount_cmd))
                raise
        if params["os_type"] != "windows":
            session.cmd("umount %s" % guest_cdrom_device)

    # Test main body start.
    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            exc_info = None
            if args is None:
                args = []
            try:
                try:
                    ret = self.test(*args, **kargs)
                except Exception:
                    exc_info = sys.exc_info()
            finally:
                if hasattr(self, "clean"):
                    try:
                        self.clean()
                    except Exception:
                        if exc_info is None:
                            raise
                    if exc_info:
                        raise exc_info[0], exc_info[1], exc_info[2]
            return ret

    class test_singlehost(MiniSubtest):

        def test(self):
            self.iso_image_orig = create_iso_image(params, "orig")
            self.iso_image_new = create_iso_image(params, "new")
            self.cdrom_dir = os.path.dirname(self.iso_image_new)
            if params.get("not_insert_at_start") == "yes":
                target_cdrom = params["target_cdrom"]
                params[target_cdrom] = ""
            params["start_vm"] = "yes"
            serial_num = generate_serial_num()
            cdrom = params.get("cdroms", "").split()[-1]
            params["drive_serial_%s" % cdrom] = serial_num
            env_process.preprocess_vm(test, params, env, params["main_vm"])
            vm = env.get_vm(params["main_vm"])

            self.session = vm.wait_for_login(timeout=login_timeout)
            pre_cmd = params.get("pre_cmd")
            if pre_cmd:
                self.session.cmd(pre_cmd, timeout=120)
                self.session = vm.reboot()
            iso_image = self.iso_image_orig
            error.context("Query cdrom devices in guest")
            cdrom_dev_list = list_guest_cdroms(self.session)
            logging.debug("cdrom_dev_list: '%s'", cdrom_dev_list)

            if params.get('not_insert_at_start') == "yes":
                error.context("Locked without media present", logging.info)
                # XXX: The device obtained from the monitor might not match
                # the guest device if there are multiple cdrom devices.
                qemu_cdrom_device = get_empty_cdrom_device(vm)
                guest_cdrom_device = get_testing_cdrom_device(vm,
                                                              self.session,
                                                              cdrom_dev_list,
                                                              serial_num)
                if vm.check_block_locked(qemu_cdrom_device):
                    raise error.TestFail("Device should not be locked just"
                                         " after booting up")
                cmd = params["lock_cdrom_cmd"] % guest_cdrom_device
                self.session.cmd(cmd)
                if not vm.check_block_locked(qemu_cdrom_device):
                    raise error.TestFail("Device is not locked as expect.")
                return

            error.context("Detecting the existence of a cdrom (guest OS side)",
                          logging.info)
            cdrom_dev_list = list_guest_cdroms(self.session)
            guest_cdrom_device = get_testing_cdrom_device(vm,
                                                          self.session,
                                                          cdrom_dev_list,
                                                          serial_num)
            error.context("Detecting the existence of a cdrom (qemu side)",
                          logging.info)
            qemu_cdrom_device = get_device(vm, iso_image)
            if params["os_type"] != "windows":
                self.session.get_command_output("umount %s" % guest_cdrom_device)
            if params.get('cdrom_test_autounlock') == 'yes':
                error.context("Trying to unlock the cdrom", logging.info)
                if not utils_misc.wait_for(lambda: not
                                           vm.check_block_locked(qemu_cdrom_device),
                                           300):
                    raise error.TestFail("Device %s could not be"
                                         " unlocked" % (qemu_cdrom_device))

            max_test_times = int(params.get("cdrom_max_test_times", 100))
            if params.get("cdrom_test_eject") == "yes":
                eject_test_via_monitor(vm, qemu_cdrom_device,
                                       guest_cdrom_device, self.iso_image_orig,
                                       self.iso_image_new, max_test_times)

            if params.get('cdrom_test_tray_status') == 'yes':
                check_tray_status_test(vm, qemu_cdrom_device,
                                       guest_cdrom_device, max_test_times,
                                       self.iso_image_new)

            if params.get('cdrom_test_locked') == 'yes':
                check_tray_locked_test(vm, qemu_cdrom_device,
                                       guest_cdrom_device)

            error.context("Check whether the cdrom is read-only", logging.info)
            cmd = params["readonly_test_cmd"] % guest_cdrom_device
            try:
                self.session.cmd(cmd)
                raise error.TestFail("Attempt to format cdrom %s succeeded" %
                                     (guest_cdrom_device))
            except aexpect.ShellError:
                pass

            sub_test = params.get("sub_test")
            if sub_test:
                error.context("Run sub test '%s' before doing file"
                              " operation" % sub_test, logging.info)
                utils_test.run_virt_sub_test(test, params, env, sub_test)

            if params.get("cdrom_test_file_operation") == "yes":
                file_operation_test(self.session, guest_cdrom_device,
                                    max_test_times)

            error.context("Cleanup")
            # Return the self.iso_image_orig
            cdfile = get_cdrom_file(vm, qemu_cdrom_device)
            if cdfile != self.iso_image_orig:
                time.sleep(workaround_eject_time)
                self.session.cmd(params["eject_cdrom_cmd"] %
                                 guest_cdrom_device)
                vm.eject_cdrom(qemu_cdrom_device)
                if get_cdrom_file(vm, qemu_cdrom_device) is not None:
                    raise error.TestFail("Device %s was not ejected"
                                         " in clearup stage" % qemu_cdrom_device)

                vm.change_media(qemu_cdrom_device, self.iso_image_orig)
                if get_cdrom_file(vm, qemu_cdrom_device) != self.iso_image_orig:
                    raise error.TestFail("It wasn't possible to change"
                                         " cdrom %s" % iso_image)
            post_cmd = params.get("post_cmd")
            if post_cmd:
                self.session.cmd(post_cmd)
            if params.get("guest_suspend_type"):
                self.session = vm.reboot()

        def clean(self):
            self.session.close()
            cleanup_cdrom(self.iso_image_orig)
            cleanup_cdrom(self.iso_image_new)

    class Multihost(MiniSubtest):

        def test(self):
            error.context("Preparing migration env and cdroms.", logging.info)
            mig_protocol = params.get("mig_protocol", "tcp")
            self.mig_type = migration.MultihostMigration
            if mig_protocol == "fd":
                self.mig_type = migration.MultihostMigrationFd
            if mig_protocol == "exec":
                self.mig_type = migration.MultihostMigrationExec
            if "rdma" in mig_protocol:
                self.mig_type = migration.MultihostMigrationRdma

            self.vms = params.get("vms").split(" ")
            self.srchost = params.get("hosts")[0]
            self.dsthost = params.get("hosts")[1]
            self.is_src = params.get("hostid") == self.srchost
            self.mig = self.mig_type(test, params, env, False, )
            self.cdrom_size = int(params.get("cdrom_size", 10))
            cdrom = params.objects("cdroms")[-1]
            self.serial_num = params.get("drive_serial_%s" % cdrom)

            if self.is_src:
                self.cdrom_orig = create_iso_image(params, "orig",
                                                   file_size=self.cdrom_size)
                self.cdrom_dir = os.path.dirname(self.cdrom_orig)
                vm = env.get_vm(self.vms[0])
                vm.destroy()
                params["start_vm"] = "yes"
                env_process.process(test, params, env,
                                    env_process.preprocess_image,
                                    env_process.preprocess_vm)
                vm = env.get_vm(self.vms[0])
                vm.wait_for_login(timeout=login_timeout)
            else:
                self.cdrom_orig = create_iso_image(params, "orig", False)
                self.cdrom_dir = os.path.dirname(self.cdrom_orig)

        def clean(self):
            self.mig.cleanup()
            if self.is_src:
                cleanup_cdrom(self.cdrom_orig)

    class test_multihost_locking(Multihost):

        def test(self):
            super(test_multihost_locking, self).test()

            error.context("Lock cdrom in VM.", logging.info)
            # Starts in source
            if self.is_src:
                vm = env.get_vm(params["main_vm"])
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                guest_cdrom_device = get_testing_cdrom_device(vm,
                                                              session,
                                                              cdrom_dev_list,
                                                              self.serial_num)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                device = get_device(vm, self.cdrom_orig)

                session.cmd(params["lock_cdrom_cmd"] % guest_cdrom_device)
                locked = check_cdrom_lock(vm, device)
                if locked:
                    logging.debug("Cdrom device is successfully locked in VM.")
                else:
                    raise error.TestFail("Cdrom device should be locked"
                                         " in VM.")

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'cdrom_dev', cdrom_prepare_timeout)

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            # Starts in dest
            if not self.is_src:
                vm = env.get_vm(params["main_vm"])
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                device = get_device(vm, self.cdrom_orig)

                locked = check_cdrom_lock(vm, device)
                if locked:
                    logging.debug("Cdrom device stayed locked after "
                                  "migration in VM.")
                else:
                    raise error.TestFail("Cdrom device should stayed locked"
                                         " after migration in VM.")

                error.context("Unlock cdrom from VM.", logging.info)
                cdrom_dev_list = list_guest_cdroms(session)
                guest_cdrom_device = get_testing_cdrom_device(vm,
                                                              session,
                                                              cdrom_dev_list,
                                                              self.serial_num)
                session.cmd(params["unlock_cdrom_cmd"] % guest_cdrom_device)
                locked = check_cdrom_lock(vm, device)
                if not locked:
                    logging.debug("Cdrom device is successfully unlocked"
                                  " from VM.")
                else:
                    raise error.TestFail("Cdrom device should be unlocked"
                                         " in VM.")

            self.mig.migrate_wait([self.vms[0]], self.dsthost, self.srchost)

            if self.is_src:
                vm = env.get_vm(params["main_vm"])
                locked = check_cdrom_lock(vm, device)
                if not locked:
                    logging.debug("Cdrom device stayed unlocked after "
                                  "migration in VM.")
                else:
                    raise error.TestFail("Cdrom device should stayed unlocked"
                                         " after migration in VM.")

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'Finish_cdrom_test', login_timeout)

        def clean(self):
            super(test_multihost_locking, self).clean()

    class test_multihost_ejecting(Multihost):

        def test(self):
            super(test_multihost_ejecting, self).test()

            self.cdrom_new = create_iso_image(params, "new")

            if not self.is_src:
                self.cdrom_new = create_iso_image(params, "new", False)
                self.cdrom_dir = os.path.dirname(self.cdrom_new)
                params["cdrom_cd1"] = params.get("cdrom_cd1_host2")

            if self.is_src:
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                device = get_device(vm, self.cdrom_orig)
                cdrom = get_testing_cdrom_device(vm,
                                                 session,
                                                 cdrom_dev_list,
                                                 self.serial_num)

                error.context("Eject cdrom.", logging.info)
                session.cmd(params["eject_cdrom_cmd"] % cdrom)
                vm.eject_cdrom(device)
                time.sleep(2)
                if get_cdrom_file(vm, device) is not None:
                    raise error.TestFail("Device %s was not ejected" % (cdrom))

                cdrom = self.cdrom_new

                error.context("Change cdrom.", logging.info)
                vm.change_media(device, cdrom)
                if get_cdrom_file(vm, device) != cdrom:
                    raise error.TestFail("It wasn't possible to change "
                                         "cdrom %s" % (cdrom))
                time.sleep(workaround_eject_time)

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'cdrom_dev', cdrom_prepare_timeout)

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:
                vm = env.get_vm(self.vms[0])
                vm.reboot()

        def clean(self):
            if self.is_src:
                cleanup_cdrom(self.cdrom_new)
            super(test_multihost_ejecting, self).clean()

    class test_multihost_copy(Multihost):

        def test(self):
            super(test_multihost_copy, self).test()
            copy_timeout = int(params.get("copy_timeout", 480))
            checksum_timeout = int(params.get("checksum_timeout", 180))

            pid = None
            sync_id = {'src': self.srchost,
                       'dst': self.dsthost,
                       "type": "file_trasfer"}
            filename = "orig"
            remove_file_cmd = params["remove_file_cmd"] % filename
            dst_file = params["dst_file"] % filename

            if self.is_src:  # Starts in source
                vm = env.get_vm(self.vms[0])
                vm.monitor.migrate_set_speed("1G")
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                cdrom = get_testing_cdrom_device(vm,
                                                 session,
                                                 cdrom_dev_list,
                                                 self.serial_num)
                mount_point = get_cdrom_mount_point(session, cdrom, params)
                mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point)
                src_file = params["src_file"] % (mount_point, filename)
                copy_file_cmd = params[
                    "copy_file_cmd"] % (mount_point, filename)
                if params["os_type"] != "windows":
                    error.context("Mount and copy data", logging.info)
                    session.cmd(mount_cmd, timeout=30)

                error.context("File copying test", logging.info)
                session.cmd(remove_file_cmd)
                session.cmd(copy_file_cmd)

                pid = disk_copy(vm, src_file, dst_file, copy_timeout)

            sync = SyncData(self.mig.master_id(), self.mig.hostid,
                            self.mig.hosts, sync_id, self.mig.sync_server)

            pid = sync.sync(pid, timeout=cdrom_prepare_timeout)[self.srchost]

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:  # Starts in destination
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                error.context("Wait for copy finishing.", logging.info)
                cdrom_dev_list = list_guest_cdroms(session)
                cdrom = get_testing_cdrom_device(vm,
                                                 session,
                                                 cdrom_dev_list,
                                                 self.serial_num)
                mount_point = get_cdrom_mount_point(session, cdrom, params)
                mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point)
                src_file = params["src_file"] % (mount_point, filename)
                md5sum_cmd = params["md5sum_cmd"]

                def is_copy_done():
                    if params["os_type"] == "windows":
                        cmd = "tasklist /FI \"PID eq %s\"" % pid
                    else:
                        cmd = "ps -p %s" % pid
                    return session.cmd_status(cmd) != 0

                if not utils_misc.wait_for(is_copy_done, timeout=copy_timeout):
                    raise error.TestFail("Wait for file copy finish timeout")

                error.context("Compare file on disk and on cdrom", logging.info)
                f1_hash = session.cmd(md5sum_cmd % dst_file,
                                      timeout=checksum_timeout).split()[0]
                f2_hash = session.cmd(md5sum_cmd % src_file,
                                      timeout=checksum_timeout).split()[0]
                if f1_hash.strip() != f2_hash.strip():
                    raise error.TestFail("On disk and on cdrom files are"
                                         " different, md5 mismatch")
                session.cmd(remove_file_cmd)

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'Finish_cdrom_test', login_timeout)

        def clean(self):
            super(test_multihost_copy, self).clean()

    test_type = params.get("test_type", "test_singlehost")
    if (test_type in locals()):
        tests_group = locals()[test_type]
        tests_group()
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " migration_with_dst_problem test" % test_type)

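Note: the create_iso_image() and cleanup_cdrom() helpers used throughout the example above are defined earlier in the same source file and are not shown here. As a rough sketch of the general pattern (not the project's actual helpers), a throwaway image payload for this kind of cdrom test could be staged in a scratch directory obtained from tempfile.mkdtemp() and removed afterwards; the function names and the dd invocation below are illustrative assumptions.

import os
import shutil
import subprocess
import tempfile

def make_scratch_iso(size_mb=10):
    # Stage the throwaway image inside a private mkdtemp() directory;
    # the caller removes the whole directory when the test is done.
    work_dir = tempfile.mkdtemp(prefix="cdrom-test-")
    iso_path = os.path.join(work_dir, "scratch.iso")
    # A raw zero-filled file is enough for attach/eject/lock checks.
    subprocess.check_call(["dd", "if=/dev/zero", "of=%s" % iso_path,
                           "bs=1M", "count=%d" % size_mb])
    return work_dir, iso_path

def cleanup_scratch_iso(work_dir):
    # Removing the mkdtemp() directory also removes the image it holds.
    shutil.rmtree(work_dir, ignore_errors=True)
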
Example 21

Project: tp-qemu
Source File: qemu_img.py
View license
@error.context_aware
def run(test, params, env):
    """
    'qemu-img' functions test:
    1) Judge what subcommand is going to be tested
    2) Run subcommand test

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    qemu_img_binary = utils_misc.get_qemu_img_binary(params)
    cmd = qemu_img_binary
    if not os.path.exists(cmd):
        raise error.TestError("Binary of 'qemu-img' not found")
    image_format = params["image_format"]
    image_size = params.get("image_size", "10G")
    enable_gluster = params.get("enable_gluster", "no") == "yes"
    image_name = storage.get_image_filename(params, data_dir.get_data_dir())

    def remove(path):
        try:
            os.remove(path)
        except OSError:
            pass

    def _get_image_filename(img_name, enable_gluster=False, img_fmt=None):
        """
        Generate an image path.

        :param img_name: name of the image.
        :param enable_gluster: whether to build a gluster URI for the image.
        :param img_fmt: format (extension) appended to the image name.
        """
        if enable_gluster:
            gluster_uri = gluster.create_gluster_uri(params)
            image_filename = "%s%s" % (gluster_uri, img_name)
            if img_fmt:
                image_filename += ".%s" % img_fmt
        else:
            if img_fmt:
                img_name = "%s.%s" % (img_name, img_fmt)
            image_filename = utils_misc.get_path(data_dir.get_data_dir(),
                                                 img_name)
        return image_filename

    def _check(cmd, img):
        """
        Simple 'qemu-img check' function implementation.

        :param cmd: qemu-img base command.
        :param img: image to be checked
        """
        cmd += " check %s" % img
        error.context("Checking image '%s' by command '%s'" % (img, cmd),
                      logging.info)
        try:
            output = utils.system_output(cmd, verbose=False)
        except error.CmdError, err:
            if "does not support checks" in str(err):
                return (True, "")
            else:
                return (False, str(err))
        return (True, output)

    def check_test(cmd):
        """
        Subcommand 'qemu-img check' test.

        This test uses 'dd' to create a file of the specified size and checks
        it. It then converts the file to each supported image format and
        checks it again.

        :param cmd: qemu-img base command.
        """
        test_image = _get_image_filename(params["image_name_dd"],
                                         enable_gluster)
        create_image_cmd = params["create_image_cmd"]
        create_image_cmd = create_image_cmd % test_image
        msg = " Create image %s by command %s" % (test_image, create_image_cmd)
        error.context(msg, logging.info)
        utils.system(create_image_cmd, verbose=False)
        status, output = _check(cmd, test_image)
        if not status:
            raise error.TestFail("Check image '%s' failed with error: %s" %
                                 (test_image, output))
        for fmt in params["supported_image_formats"].split():
            output_image = test_image + ".%s" % fmt
            _convert(cmd, fmt, test_image, output_image)
            status, output = _check(cmd, output_image)
            if not status:
                raise error.TestFail("Check image '%s' got error: %s" %
                                     (output_image, output))
            remove(output_image)
        remove(test_image)

    def _create(cmd, img_name, fmt, img_size=None, base_img=None,
                base_img_fmt=None, encrypted="no",
                preallocated="off", cluster_size=None):
        """
        Simple wrapper of 'qemu-img create'

        :param cmd: qemu-img base command.
        :param img_name: name of the image file
        :param fmt: image format
        :param img_size: image size
        :param base_img: base image when creating a snapshot image
        :param base_img_fmt: base image format when creating a snapshot image
        :param encrypted: indicates whether the created image is encrypted
        :param preallocated: preallocation mode used when creating the image;
                             allowed values: off, metadata. Default is "off"
        :param cluster_size: the cluster size for the image
        """
        cmd += " create"

        if encrypted == "yes":
            cmd += " -e"
        if base_img:
            cmd += " -b %s" % base_img
            if base_img_fmt:
                cmd += " -F %s" % base_img_fmt

        cmd += " -f %s" % fmt

        options = []
        if preallocated != "off":
            options.append("preallocation=%s" % preallocated)
        if cluster_size is not None:
            options.append("cluster_size=%s" % cluster_size)
        if options:
            cmd += " -o %s" % ",".join(options)

        cmd += " %s" % img_name
        if img_size:
            cmd += " %s" % img_size

        msg = "Creating image %s by command %s" % (img_name, cmd)
        error.context(msg, logging.info)
        utils.system(cmd, verbose=False)
        status, out = _check(qemu_img_binary, img_name)
        if not status:
            raise error.TestFail("Check image '%s' got error: %s" %
                                 (img_name, out))

    def create_test(cmd):
        """
        Subcommand 'qemu-img create' test.

        :param cmd: qemu-img base command.
        """
        image_large = params["image_name_large"]
        device = params.get("device")
        if not device:
            img = _get_image_filename(image_large, enable_gluster,
                                      image_format)
        else:
            img = device
        _create(cmd, img_name=img, fmt=image_format,
                img_size=params["image_size_large"],
                preallocated=params.get("preallocated", "off"),
                cluster_size=params.get("image_cluster_size"))
        remove(img)

    def send_signal(timeout=360):
        """
        send signal "SIGUSR1" to qemu-img without the option -p
        to report progress
        """
        logging.info("Send signal to qemu-img")
        end_time = time.time() + timeout
        while time.time() < end_time:
            time.sleep(1)
            status = utils.system("kill -SIGUSR1 `pidof qemu-img`",
                                  ignore_status=True)
            if status == 0:
                return None
        logging.info("Fail to get pid of qemu-img")

    def check_command_output(CmdResult):
        """
        Check standard error or standard output of command
        :param CmdResult: a CmdResult object
        """
        logging.info("Check result of command")
        check_output = params.get("check_output", "exit_status")
        if not hasattr(CmdResult, check_output):
            raise error.TestError("Unknown check output '%s'" % check_output)
        output = getattr(CmdResult, check_output)
        if check_output == "exit_status" and output == 0:
            return None
        if check_output == "exit_status" and output != 0:
            err_msg = "Get nonzero exit status(%d) '%s'"
            raise error.TestFail(err_msg % (output, CmdResult.command))
        pattern = params.get("command_result_pattern")
        if not re.findall(pattern, output):
            err_msg = "Fail to get expected result!"
            err_msg += "Output: %s, expected pattern: %s" % (output, pattern)
            raise error.TestFail(err_msg)

    def _convert(cmd, output_fmt, img_name, output_filename,
                 fmt=None, compressed="no", encrypted="no"):
        """
        Simple wrapper of 'qemu-img convert' function.

        :param cmd: qemu-img base command.
        :param output_fmt: format of the converted output image
        :param img_name: name of the image to be converted
        :param output_filename: name of the converted output image
        :param fmt: format of the source image
        :param compressed: whether the output image is compressed
        :param encrypted: whether the output image is encrypted
        """
        cmd += " convert"
        if compressed == "yes":
            cmd += " -c"
        if encrypted == "yes":
            cmd += " -e"
        show_progress = params.get("show_progress", "")
        if show_progress == "on":
            cmd += " -p"
        if fmt:
            cmd += " -f %s" % fmt
        cmd += " -O %s" % output_fmt
        options = params.get("qemu_img_options")
        if options:
            options = options.split()
            cmd += " -o "
            for option in options:
                value = params.get(option)
                cmd += "%s=%s," % (option, value)
            cmd = cmd.rstrip(",")
        cmd += " %s %s" % (img_name, output_filename)
        msg = "Converting '%s' from format '%s'" % (img_name, fmt)
        msg += " to '%s'" % output_fmt
        error.context(msg, logging.info)
        if show_progress == "off":
            bg = utils.InterruptedThread(send_signal)
            bg.start()
        check_command_output(utils.run(cmd, ignore_status=True))

    def convert_test(cmd):
        """
        Subcommand 'qemu-img convert' test.

        :param cmd: qemu-img base command.
        """
        dest_img_fmt = params["dest_image_format"]
        output_filename = "%s.converted_%s.%s" % (image_name,
                                                  dest_img_fmt, dest_img_fmt)

        _convert(cmd, dest_img_fmt, image_name, output_filename,
                 image_format, params["compressed"], params["encrypted"])
        orig_img_name = params.get("image_name")
        img_name = "%s.%s.converted_%s" % (orig_img_name,
                                           image_format, dest_img_fmt)
        _boot(img_name, dest_img_fmt)

        if dest_img_fmt == "qcow2":
            status, output = _check(cmd, output_filename)
            if status:
                remove(output_filename)
            else:
                raise error.TestFail("Check image '%s' failed with error: %s" %
                                     (output_filename, output))
        else:
            remove(output_filename)

    def _info(cmd, img, sub_info=None, fmt=None):
        """
        Simple wrapper of 'qemu-img info'.

        :param cmd: qemu-img base command.
        :param img: image file
        :param sub_info: sub info, say 'backing file'
        :param fmt: image format
        """
        cmd += " info"
        if fmt:
            cmd += " -f %s" % fmt
        cmd += " %s" % img

        try:
            output = utils.system_output(cmd)
        except error.CmdError, err:
            logging.error("Get info of image '%s' failed: %s", img, str(err))
            return None

        if not sub_info:
            return output

        sub_info += ": (.*)"
        matches = re.findall(sub_info, output)
        if "virtual size" in sub_info:
            p = re.compile(r'\.0*(G|K)$')
            return p.sub(r'\1', matches[0].split()[0])
        if matches:
            return matches[0]
        return None

    def info_test(cmd):
        """
        Subcommand 'qemu-img info' test.

        :param cmd: qemu-img base command.
        """
        img_info = _info(cmd, image_name)
        logging.info("Info of image '%s':\n%s", image_name, img_info)
        if image_format not in img_info:
            raise error.TestFail("Got unexpected format of image '%s'"
                                 " in info test" % image_name)
        if image_size not in img_info:
            raise error.TestFail("Got unexpected size of image '%s'"
                                 " in info test" % image_name)

    def snapshot_test(cmd):
        """
        Subcommand 'qemu-img snapshot' test.

        :param cmd: qemu-img base command.
        """
        cmd += " snapshot"
        for i in range(2):
            crtcmd = cmd
            sn_name = "snapshot%d" % i
            crtcmd += " -c %s %s" % (sn_name, image_name)
            msg = "Created snapshot '%s' in '%s' by command %s" % (sn_name,
                                                                   image_name,
                                                                   crtcmd)
            error.context(msg, logging.info)
            status, output = commands.getstatusoutput(crtcmd)
            if status != 0:
                raise error.TestFail("Create snapshot failed via command: %s;"
                                     "Output is: %s" % (crtcmd, output))
        listcmd = cmd
        listcmd += " -l %s" % image_name
        status, out = commands.getstatusoutput(listcmd)
        if not ("snapshot0" in out and "snapshot1" in out and status == 0):
            raise error.TestFail("Snapshot created failed or missed;"
                                 "snapshot list is: \n%s" % out)
        for i in range(2):
            sn_name = "snapshot%d" % i
            delcmd = cmd
            delcmd += " -d %s %s" % (sn_name, image_name)
            msg = "Delete snapshot '%s' by command %s" % (sn_name, delcmd)
            error.context(msg, logging.info)
            status, output = commands.getstatusoutput(delcmd)
            if status != 0:
                raise error.TestFail("Delete snapshot '%s' failed: %s" %
                                     (sn_name, output))

    def commit_test(cmd):
        """
        Subcommand 'qemu-img commit' test.
        1) Create an overlay file of the qemu harddisk specified by image_name.
        2) Start a VM using the overlay file as its harddisk.
        3) Touch a file "commit_testfile" in the overlay file, and shutdown the
           VM.
        4) Commit the change to the backing harddisk by executing
           "qemu-img commit" command.
        5) Start the VM using the backing harddisk.
        6) Check if the file "commit_testfile" exists.

        :param cmd: qemu-img base command.
        """

        logging.info("Commit testing started!")
        image_name = storage.get_image_filename(params,
                                                data_dir.get_data_dir())
        pre_name = '.'.join(image_name.split('.')[:-1])
        image_format = params.get("image_format", "qcow2")
        overlay_file_name = "%s_overlay.%s" % (pre_name, image_format)
        file_create_cmd = params.get("file_create_cmd",
                                     "touch /commit_testfile")
        file_info_cmd = params.get("file_info_cmd",
                                   "ls / | grep commit_testfile")
        file_exist_chk_cmd = params.get("file_exist_chk_cmd",
                                        "[ -e /commit_testfile ] && echo $?")
        file_del_cmd = params.get("file_del_cmd",
                                  "rm -f /commit_testfile")
        try:
            # Remove the existing overlay file
            if os.path.isfile(overlay_file_name):
                remove(overlay_file_name)

            # Create the new overlay file
            create_cmd = "%s create -b %s -f %s %s" % (cmd, image_name,
                                                       image_format,
                                                       overlay_file_name)
            msg = "Create overlay file by command: %s" % create_cmd
            error.context(msg, logging.info)
            try:
                utils.system(create_cmd, verbose=False)
            except error.CmdError:
                raise error.TestFail("Could not create a overlay file!")
            logging.info("overlay file (%s) created!" % overlay_file_name)

            # Set the qemu harddisk to the overlay file
            logging.info(
                "Original image_name is: %s", params.get('image_name'))
            params['image_name'] = '.'.join(overlay_file_name.split('.')[:-1])
            logging.info("Param image_name changed to: %s",
                         params.get('image_name'))

            msg = "Start a new VM, using overlay file as its harddisk"
            error.context(msg, logging.info)
            vm_name = params['main_vm']
            env_process.preprocess_vm(test, params, env, vm_name)
            vm = env.get_vm(vm_name)
            vm.verify_alive()
            timeout = int(params.get("login_timeout", 360))
            session = vm.wait_for_login(timeout=timeout)

            # Do some changes to the overlay_file harddisk
            try:
                output = session.cmd(file_create_cmd)
                logging.info("Output of %s: %s", file_create_cmd, output)
                output = session.cmd(file_info_cmd)
                logging.info("Output of %s: %s", file_info_cmd, output)
            except Exception, err:
                raise error.TestFail("Could not create commit_testfile in the "
                                     "overlay file %s" % err)
            vm.destroy()

            # Execute the commit command
            cmitcmd = "%s commit -f %s %s" % (cmd, image_format,
                                              overlay_file_name)
            error.context("Committing image by command %s" % cmitcmd,
                          logging.info)
            try:
                utils.system(cmitcmd, verbose=False)
            except error.CmdError:
                raise error.TestFail("Could not commit the overlay file")
            logging.info("overlay file (%s) committed!" % overlay_file_name)

            msg = "Start a new VM, using image_name as its harddisk"
            error.context(msg, logging.info)
            params['image_name'] = pre_name
            vm_name = params['main_vm']
            env_process.preprocess_vm(test, params, env, vm_name)
            vm = env.get_vm(vm_name)
            vm.verify_alive()
            timeout = int(params.get("login_timeout", 360))
            session = vm.wait_for_login(timeout=timeout)
            try:
                output = session.cmd(file_exist_chk_cmd)
                logging.info("Output of %s: %s", file_exist_chk_cmd, output)
                session.cmd(file_del_cmd)
            except Exception:
                raise error.TestFail("Could not find commit_testfile after a "
                                     "commit")
            vm.destroy()

        finally:
            # Remove the overlay file
            if os.path.isfile(overlay_file_name):
                remove(overlay_file_name)

    def _rebase(cmd, img_name, base_img, backing_fmt, mode="unsafe"):
        """
        Simple wrapper of 'qemu-img rebase'.

        :param cmd: qemu-img base command.
        :param img_name: image name to be rebased
        :param base_img: indicates the base image
        :param backing_fmt: the format of base image
        :param mode: rebase mode: safe mode, unsafe mode
        """
        cmd += " rebase"
        if mode == "unsafe":
            cmd += " -u"
        cmd += " -b %s -F %s %s" % (base_img, backing_fmt, img_name)
        msg = "Trying to rebase '%s' to '%s' by command %s" % (img_name,
                                                               base_img, cmd)
        error.context(msg, logging.info)
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            raise error.TestError("Failed to rebase '%s' to '%s': %s" %
                                  (img_name, base_img, output))

    def rebase_test(cmd):
        """
        Subcommand 'qemu-img rebase' test

        Change the backing file of a snapshot image in "unsafe mode":
        Assume the previous backing file has gone missing and we just have to
        point the snapshot at a new one. After changing the backing file of a
        snapshot image in unsafe mode, the snapshot should still work.

        :param cmd: qemu-img base command.
        """
        if 'rebase' not in utils.system_output(cmd + ' --help',
                                               ignore_status=True):
            raise error.TestNAError("Current kvm user space version does not"
                                    " support 'rebase' subcommand")
        sn_fmt = params.get("snapshot_format", "qcow2")
        sn1 = params["image_name_snapshot1"]
        sn1 = _get_image_filename(sn1, enable_gluster, sn_fmt)
        base_img = storage.get_image_filename(params, data_dir.get_data_dir())
        _create(cmd, sn1, sn_fmt, base_img=base_img, base_img_fmt=image_format)

        # Create snapshot2 based on snapshot1
        sn2 = params["image_name_snapshot2"]
        sn2 = _get_image_filename(sn2, enable_gluster, sn_fmt)
        _create(cmd, sn2, sn_fmt, base_img=sn1, base_img_fmt=sn_fmt)

        rebase_mode = params.get("rebase_mode")
        if rebase_mode == "unsafe":
            remove(sn1)

        _rebase(cmd, sn2, base_img, image_format, mode=rebase_mode)
        # Boot snapshot image after rebase
        img_format = sn2.split('.')[-1]
        img_name = ".".join(sn2.split('.')[:-1])
        _boot(img_name, img_format)

        # Check sn2's format and backing_file
        actual_base_img = _info(cmd, sn2, "backing file")
        base_img_name = os.path.basename(base_img)
        if base_img_name not in actual_base_img:
            raise error.TestFail("After rebase the backing_file of 'sn2' is "
                                 "'%s' which is not expected as '%s'"
                                 % (actual_base_img, base_img_name))
        status, output = _check(cmd, sn2)
        if not status:
            raise error.TestFail("Check image '%s' failed after rebase;"
                                 "got error: %s" % (sn2, output))
        remove(sn2)
        remove(sn1)

    def _amend(cmd, img_name, img_fmt, options):
        """
        Simple wrapper of 'qemu-img amend'.

        :param cmd: qemu-img base command
        :param img_name: image name that should be amended
        :param img_fmt: image format
        :param options: a comma separated list of format specific options
        """

        msg = "Amend '%s' with options '%s'" % (img_name, options)
        cmd += " amend"
        if img_fmt:
            cmd += " -f %s" % img_fmt
        cache = params.get("cache_mode", '')
        if cache:
            cmd += " -t %s" % cache
        if options:
            cmd += " -o "
            for option in options:
                cmd += "%s=%s," % (option, params.get(option))
            cmd = cmd.rstrip(',')
        cmd += " %s" % img_name
        error.context(msg, logging.info)
        check_command_output(utils.run(cmd, ignore_status=True))

    def amend_test(cmd):
        """
        Subcommand 'qemu-img amend' test
        Amend the image format specific options for the image file

        :param cmd: qemu-img base command.
        """
        img_name = params.get("image_name_stg")
        img_fmt = params.get("image_format_stg", "qcow2")
        options = params.get("qemu_img_options", "").split()
        check_output = params.get("check_output", "exit_status")
        img = _get_image_filename(img_name, img_fmt=img_fmt)
        _amend(cmd, img, img_fmt, options)
        if check_output == "exit_status":
            for option in options:
                expect = params.get(option)
                if option == "size":
                    option = "virtual size"
                actual = _info(cmd, img, option)
                if actual is not None and actual != expect:
                    msg = "Get wrong %s from image %s!" % (option, img_name)
                    msg += "Expect: %s, actual: %s" % (expect, actual)
                    raise error.TestFail(msg)
        status, output = _check(cmd, img)
        if not status:
            raise error.TestFail("Check image '%s' failed after rebase;"
                                 "got error: %s" % (img, output))

    def _boot(img_name, img_fmt):
        """
        Boot test:
        1) Login guest
        2) Run dd in a Linux guest
        3) Shutdown guest

        :param img_name: image name
        :param img_fmt: image format
        """
        params['image_name'] = img_name
        params['image_format'] = img_fmt
        image_name = "%s.%s" % (img_name, img_fmt)
        msg = "Try to boot vm with image %s" % image_name
        error.context(msg, logging.info)
        vm_name = params.get("main_vm")
        dd_timeout = int(params.get("dd_timeout", 60))
        params['vms'] = vm_name
        env_process.preprocess_vm(test, params, env, vm_name)
        vm = env.get_vm(params.get("main_vm"))
        vm.verify_alive()
        login_timeout = int(params.get("login_timeout", 360))
        session = vm.wait_for_login(timeout=login_timeout)

        # Run dd in linux guest
        if params.get("os_type") == 'linux':
            cmd = "dd if=/dev/zero of=/mnt/test bs=1000 count=1000"
            status = session.get_command_status(cmd, timeout=dd_timeout)
            if status != 0:
                raise error.TestError("dd failed")

        error.context("Shutdown guest", logging.info)
        try:
            vm.graceful_shutdown(timeout=login_timeout)
        except Exception:
            image_filename = _get_image_filename(img_name,
                                                 enable_gluster,
                                                 img_fmt)
            backup_img_chain(image_filename)
            raise
        finally:
            vm.destroy(gracefully=True)
            utils.system("sync")

    def backup_img_chain(image_file):
        """
        Back up every image in an image chain.
        """
        mount_point = tempfile.mkdtemp(dir=test.resultsdir)
        qemu_img = utils_misc.get_qemu_img_binary(params)
        if enable_gluster:
            g_uri = gluster.create_gluster_uri(params)
            gluster.glusterfs_mount(g_uri, mount_point)
            image_name = os.path.basename(image_file)
            image_file = os.path.join(mount_point, image_name)
        logging.warn("backup %s to %s" % (image_file, test.resultsdir))
        shutil.copy(image_file, test.resultsdir)
        backing_file = _info(qemu_img, image_file, "backing file", None)
        if backing_file:
            backup_img_chain(backing_file)
        elif enable_gluster:
            utils_misc.umount(g_uri, mount_point,
                              "glusterfs", False,
                              "fuse.glusterfs")
            shutil.rmtree(mount_point)
        return None

    # Here starts test
    subcommand = params["subcommand"]
    error.context("Running %s_test(cmd)" % subcommand, logging.info)
    eval("%s_test(cmd)" % subcommand)

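Note: in backup_img_chain() above, tempfile.mkdtemp(dir=test.resultsdir) places the scratch mount point inside the test's results directory, and the directory is only removed on the gluster path. A minimal sketch of the same dir= pattern with unconditional cleanup might look like the following; results_dir and the action callable are illustrative assumptions, not part of the example's API.

import shutil
import tempfile

def with_scratch_dir(results_dir, action):
    # Create the scratch directory inside results_dir, mirroring
    # tempfile.mkdtemp(dir=test.resultsdir) in backup_img_chain() above.
    scratch = tempfile.mkdtemp(dir=results_dir)
    try:
        # 'action' stands in for mounting the volume and copying the
        # image chain; it receives the scratch path.
        return action(scratch)
    finally:
        # Always remove the directory, even if the copy step fails.
        shutil.rmtree(scratch, ignore_errors=True)
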
Example 22

Project: PyClassLessons
Source File: download.py
View license
def unpack_http_url(link, location, download_cache, download_dir=None,
                    session=None):
    if session is None:
        session = PipSession()

    temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
    temp_location = None
    target_url = link.url.split('#', 1)[0]
    already_cached = False
    cache_file = None
    cache_content_type_file = None
    download_hash = None

    # If a download cache is specified, is the file cached there?
    if download_cache:
        cache_file = os.path.join(download_cache,
                                   urllib.quote(target_url, ''))
        cache_content_type_file = cache_file + '.content-type'
        already_cached = (
            os.path.exists(cache_file) and
            os.path.exists(cache_content_type_file)
            )
        if not os.path.isdir(download_cache):
            create_download_cache_folder(download_cache)

    # If a download dir is specified, is the file already downloaded there?
    already_downloaded = None
    if download_dir:
        already_downloaded = os.path.join(download_dir, link.filename)
        if not os.path.exists(already_downloaded):
            already_downloaded = None

    # If already downloaded, does its hash match?
    if already_downloaded:
        temp_location = already_downloaded
        content_type = mimetypes.guess_type(already_downloaded)[0]
        logger.notify('File was already downloaded %s' % already_downloaded)
        if link.hash:
            download_hash = _get_hash_from_file(temp_location, link)
            try:
                _check_hash(download_hash, link)
            except HashMismatch:
                logger.warn(
                    'Previously-downloaded file %s has bad hash, '
                    're-downloading.' % temp_location
                    )
                temp_location = None
                os.unlink(already_downloaded)
                already_downloaded = None

    # If not a valid download, let's confirm the cached file is valid
    if already_cached and not temp_location:
        with open(cache_content_type_file) as fp:
            content_type = fp.read().strip()
        temp_location = cache_file
        logger.notify('Using download cache from %s' % cache_file)
        if link.hash and link.hash_name:
            download_hash = _get_hash_from_file(cache_file, link)
            try:
                _check_hash(download_hash, link)
            except HashMismatch:
                logger.warn(
                    'Cached file %s has bad hash, '
                    're-downloading.' % temp_location
                    )
                temp_location = None
                os.unlink(cache_file)
                os.unlink(cache_content_type_file)
                already_cached = False

    # We don't have either a cached or a downloaded copy
    # let's download to a tmp dir
    if not temp_location:
        try:
            resp = session.get(target_url, stream=True)
            resp.raise_for_status()
        except requests.HTTPError as exc:
            logger.fatal("HTTP error %s while getting %s" %
                         (exc.response.status_code, link))
            raise

        content_type = resp.headers.get('content-type', '')
        filename = link.filename  # fallback
        # Have a look at the Content-Disposition header for a better guess
        content_disposition = resp.headers.get('content-disposition')
        if content_disposition:
            type, params = cgi.parse_header(content_disposition)
            # We use ``or`` here because we don't want to use an "empty" value
            # from the filename param.
            filename = params.get('filename') or filename
        ext = splitext(filename)[1]
        if not ext:
            ext = mimetypes.guess_extension(content_type)
            if ext:
                filename += ext
        if not ext and link.url != resp.url:
            ext = os.path.splitext(resp.url)[1]
            if ext:
                filename += ext
        temp_location = os.path.join(temp_dir, filename)
        download_hash = _download_url(resp, link, temp_location)
        if link.hash and link.hash_name:
            _check_hash(download_hash, link)

    # a download dir is specified; let's copy the archive there
    if download_dir and not already_downloaded:
        _copy_file(temp_location, download_dir, content_type, link)

    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(temp_location, location, content_type, link)

    # if using a download cache, cache it, if needed
    if cache_file and not already_cached:
        cache_download(cache_file, temp_location, content_type)

    if not (already_cached or already_downloaded):
        os.unlink(temp_location)

    os.rmdir(temp_dir)
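
Note the positional arguments in tempfile.mkdtemp('-unpack', 'pip-') above: mkdtemp takes the suffix first and the prefix second, so the directory is named something like pip-XXXXXX-unpack. A quick sketch of that call and the matching cleanup (names are illustrative):

import os
import tempfile

# mkdtemp(suffix, prefix, dir): the suffix comes first, the prefix second.
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
print(os.path.basename(temp_dir))    # e.g. pip-3f9k2a-unpack

# mkdtemp never removes the directory itself; os.rmdir only succeeds once it
# is empty, which is why the code above unlinks temp_location first.
os.rmdir(temp_dir)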

Example 23

Project: PyClassLessons
Source File: wheel.py
View license
    def install(self, paths, maker, **kwargs):
        """
        Install a wheel to the specified paths. If kwarg ``warner`` is
        specified, it should be a callable, which will be called with two
        tuples indicating the wheel version of this software and the wheel
        version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
        If kwarg ``lib_only`` is True, only the purelib/platlib files are
        installed, and the headers, scripts, data and dist-info metadata are
        not written.

        The return value is a :class:`InstalledDistribution` instance unless
        ``options.lib_only`` is True, in which case the return value is ``None``.
        """

        dry_run = maker.dry_run
        warner = kwargs.get('warner')
        lib_only = kwargs.get('lib_only', False)

        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')

        wrapper = codecs.getreader('utf-8')

        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            if (file_version != self.wheel_version) and warner:
                warner(self.wheel_version, file_version)

            if message['Root-Is-Purelib'] == 'true':
                libdir = paths['purelib']
            else:
                libdir = paths['platlib']

            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row

            data_pfx = posixpath.join(data_dir, '')
            info_pfx = posixpath.join(info_dir, '')
            script_pfx = posixpath.join(data_dir, 'scripts', '')

            # make a new instance rather than a copy of maker's,
            # as we mutate it
            fileop = FileOperator(dry_run=dry_run)
            fileop.record = True    # so we can rollback if needed

            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!

            outfiles = []   # for RECORD writing

            # for script copying/shebang processing
            workdir = tempfile.mkdtemp()
            # set target dir later
            # we default add_launchers to False, as the
            # Python Launcher should be used instead
            maker.source_dir = workdir
            maker.target_dir = None
            try:
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    # The signature file won't be in RECORD,
                    # and we don't currently do anything with it
                    if u_arcname.endswith('/RECORD.jws'):
                        continue
                    row = records[u_arcname]
                    if row[2] and str(zinfo.file_size) != row[2]:
                        raise DistlibException('size mismatch for '
                                               '%s' % u_arcname)
                    if row[1]:
                        kind, value = row[1].split('=', 1)
                        with zf.open(arcname) as bf:
                            data = bf.read()
                        _, digest = self.get_hash(data, kind)
                        if digest != value:
                            raise DistlibException('digest mismatch for '
                                                   '%s' % arcname)

                    if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                        logger.debug('lib_only: skipping %s', u_arcname)
                        continue
                    is_script = (u_arcname.startswith(script_pfx)
                                 and not u_arcname.endswith('.exe'))

                    if u_arcname.startswith(data_pfx):
                        _, where, rp = u_arcname.split('/', 2)
                        outfile = os.path.join(paths[where], convert_path(rp))
                    else:
                        # meant for site-packages.
                        if u_arcname in (wheel_metadata_name, record_name):
                            continue
                        outfile = os.path.join(libdir, convert_path(u_arcname))
                    if not is_script:
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, outfile)
                        outfiles.append(outfile)
                        # Double check the digest of the written file
                        if not dry_run and row[1]:
                            with open(outfile, 'rb') as bf:
                                data = bf.read()
                                _, newdigest = self.get_hash(data, kind)
                                if newdigest != digest:
                                    raise DistlibException('digest mismatch '
                                                           'on write for '
                                                           '%s' % outfile)
                        if bc and outfile.endswith('.py'):
                            try:
                                pyc = fileop.byte_compile(outfile)
                                outfiles.append(pyc)
                            except Exception:
                                # Don't give up if byte-compilation fails,
                                # but log it and perhaps warn the user
                                logger.warning('Byte-compilation failed',
                                               exc_info=True)
                    else:
                        fn = os.path.basename(convert_path(arcname))
                        workname = os.path.join(workdir, fn)
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, workname)

                        dn, fn = os.path.split(outfile)
                        maker.target_dir = dn
                        filenames = maker.make(fn)
                        fileop.set_executable_mode(filenames)
                        outfiles.extend(filenames)

                if lib_only:
                    logger.debug('lib_only: returning None')
                    dist = None
                else:
                    # Generate scripts

                    # Try to get pydist.json so we can see if there are
                    # any commands to generate. If this fails (e.g. because
                    # of a legacy wheel), log a warning but don't give up.
                    commands = None
                    file_version = self.info['Wheel-Version']
                    if file_version == '1.0':
                        # Use legacy info
                        ep = posixpath.join(info_dir, 'entry_points.txt')
                        try:
                            with zf.open(ep) as bwf:
                                epdata = read_exports(bwf)
                            commands = {}
                            for key in ('console', 'gui'):
                                k = '%s_scripts' % key
                                if k in epdata:
                                    commands['wrap_%s' % key] = d = {}
                                    for v in epdata[k].values():
                                        s = '%s:%s' % (v.prefix, v.suffix)
                                        if v.flags:
                                            s += ' %s' % v.flags
                                        d[v.name] = s
                        except Exception:
                            logger.warning('Unable to read legacy script '
                                           'metadata, so cannot generate '
                                           'scripts')
                    else:
                        try:
                            with zf.open(metadata_name) as bwf:
                                wf = wrapper(bwf)
                                commands = json.load(wf).get('commands')
                        except Exception:
                            logger.warning('Unable to read JSON metadata, so '
                                           'cannot generate scripts')
                    if commands:
                        console_scripts = commands.get('wrap_console', {})
                        gui_scripts = commands.get('wrap_gui', {})
                        if console_scripts or gui_scripts:
                            script_dir = paths.get('scripts', '')
                            if not os.path.isdir(script_dir):
                                raise ValueError('Valid script path not '
                                                 'specified')
                            maker.target_dir = script_dir
                            for k, v in console_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script)
                                fileop.set_executable_mode(filenames)

                            if gui_scripts:
                                options = {'gui': True }
                                for k, v in gui_scripts.items():
                                    script = '%s = %s' % (k, v)
                                    filenames = maker.make(script, options)
                                    fileop.set_executable_mode(filenames)

                    p = os.path.join(libdir, info_dir)
                    dist = InstalledDistribution(p)

                    # Write SHARED
                    paths = dict(paths)     # don't change passed in dict
                    del paths['purelib']
                    del paths['platlib']
                    paths['lib'] = libdir
                    p = dist.write_shared_locations(paths, dry_run)
                    if p:
                        outfiles.append(p)

                    # Write RECORD
                    dist.write_installed_files(outfiles, paths['prefix'],
                                               dry_run)
                return dist
            except Exception:  # pragma: no cover
                logger.exception('installation failed.')
                fileop.rollback()
                raise
            finally:
                shutil.rmtree(workdir)
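
The script-processing work area above is created with a bare tempfile.mkdtemp() and removed in the finally clause, so it disappears whether the install succeeds, rolls back, or raises. A stripped-down sketch of that shape, with stage_scripts as a made-up placeholder for the real work:

import shutil
import tempfile

def stage_scripts(workdir):
    # placeholder for copying script files into the work area
    pass

workdir = tempfile.mkdtemp()
try:
    stage_scripts(workdir)
finally:
    # executed on success and on any exception raised above
    shutil.rmtree(workdir)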

Example 24

View license
def getPmPerceptualError(mesh, pm_filebuf, mipmap_tarfilebuf):
    perceptualdiff = which('perceptualdiff')
    if perceptualdiff is None:
        raise Exception("perceptualdiff executable not found on path")
    
    pm_chunks = []
    
    if pm_filebuf is not None:
        data = pm_filebuf.read(PM_CHUNK_SIZE)
        refinements_read = 0
        num_refinements = None
        while len(data) > 0:
            (refinements_read, num_refinements, pm_refinements, data_left) = pdae_utils.readPDAEPartial(data, refinements_read, num_refinements)
            pm_chunks.append(pm_refinements)
            data = data_left + pm_filebuf.read(PM_CHUNK_SIZE)
    
    tar = tarfile.TarFile(fileobj=mipmap_tarfilebuf)
    texsizes = []
    largest_tarinfo = (0, None)
    for tarinfo in tar:
        tarinfo.xsize = int(tarinfo.name.split('x')[0])
        if tarinfo.xsize > largest_tarinfo[0]:
            largest_tarinfo = (tarinfo.xsize, tarinfo)
        if tarinfo.xsize >= 128:
            texsizes.append(tarinfo)
    if len(texsizes) == 0:
        texsizes.append(largest_tarinfo[1])
    
    texsizes = sorted(texsizes, key=lambda t: t.xsize)
    texims = []
    first_image_data = None
    for tarinfo in texsizes:
        f = tar.extractfile(tarinfo)
        texdata = f.read()
        if first_image_data is None:
            first_image_data = texdata
        
        texpnm = PNMImage()
        texpnm.read(StringStream(texdata), 'something.jpg')
        newtex = Texture()
        newtex.load(texpnm)
        texims.append(newtex)
    
    mesh.images[0].setData(first_image_data)
    
    scene_members = getSceneMembers(mesh)
    
    # turn off panda3d printing to stdout
    nout = MultiplexStream()
    Notify.ptr().setOstreamPtr(nout, 0)
    nout.addFile(Filename(os.devnull))
    
    base = ShowBase()
    
    rotateNode = GeomNode("rotater")
    rotatePath = base.render.attachNewNode(rotateNode)
    matrix = numpy.identity(4)
    if mesh.assetInfo.upaxis == collada.asset.UP_AXIS.X_UP:
        r = collada.scene.RotateTransform(0,1,0,90)
        matrix = r.matrix
    elif mesh.assetInfo.upaxis == collada.asset.UP_AXIS.Y_UP:
        r = collada.scene.RotateTransform(1,0,0,90)
        matrix = r.matrix
    rotatePath.setMat(Mat4(*matrix.T.flatten().tolist()))
    geom, renderstate, mat4 = scene_members[0]
    node = GeomNode("primitive")
    node.addGeom(geom)
    if renderstate is not None:
        node.setGeomState(0, renderstate)
    geomPath = rotatePath.attachNewNode(node)
    geomPath.setMat(mat4)
        
    wrappedNode = ensureCameraAt(geomPath, base.camera)
    base.disableMouse()
    attachLights(base.render)
    base.render.setShaderAuto()
    base.render.setTransparency(TransparencyAttrib.MNone)
    base.render.setColorScaleOff(9999)
    
    controls.KeyboardMovement()
    controls.MouseDrag(wrappedNode)
    controls.MouseScaleZoom(wrappedNode)
    controls.ButtonUtils(wrappedNode)
    controls.MouseCamera()
    
    error_data = []
    
    try:
        tempdir = tempfile.mkdtemp(prefix='meshtool-print-pm-perceptual-error')
        
        triangleCounts = []
        
        hprs = [(0, 0, 0),
                (0, 90, 0),
                (0, 180, 0),
                (0, 270, 0),
                (90, 0, 0),
                (-90, 0, 0)]
        
        for texim in texims:
            np = base.render.find("**/rotater/collada")
            np.setTextureOff(1)
            np.setTexture(texim, 1)
            for angle, hpr in enumerate(hprs):
                wrappedNode.setHpr(*hpr)
                takeScreenshot(tempdir, base, geomPath, texim, angle)
        triangleCounts.append(getNumTriangles(geomPath))
        
        for pm_chunk in pm_chunks:
            pdae_panda.add_refinements(geomPath, pm_chunk)
            
            for texim in texims:
                np = base.render.find("**/rotater/collada")
                np.setTextureOff(1)
                np.setTexture(texim, 1)
                for angle, hpr in enumerate(hprs):
                    wrappedNode.setHpr(*hpr)
                    takeScreenshot(tempdir, base, geomPath, texim, angle)
            triangleCounts.append(getNumTriangles(geomPath))
        
        full_tris = triangleCounts[-1]
        full_tex = texims[-1]
        
        for numtris in triangleCounts:
            for texim in texims:
                pixel_diff = 0
                for angle, hpr in enumerate(hprs):
                    curFile = '%d_%d_%d_%d.png' % (numtris, texim.getXSize(), texim.getYSize(), angle)
                    curFile = os.path.join(tempdir, curFile)
                    
                    fullFile = '%d_%d_%d_%d.png' % (full_tris, full_tex.getXSize(), full_tex.getYSize(), angle)
                    fullFile = os.path.join(tempdir, fullFile)
                    
                    try:
                        output = subprocess.check_output([perceptualdiff, '-threshold', '1', fullFile, curFile])
                    except subprocess.CalledProcessError as ex:
                        output = ex.output
                    
                    output = output.strip()
                    if len(output) > 0:
                        pixel_diff = max(pixel_diff, int(output.split('\n')[1].split()[0]))
                    
                error_data.append({'triangles': numtris,
                                   'width': texim.getXSize(),
                                   'height': texim.getYSize(),
                                   'pixel_error': pixel_diff})
    
    finally:
        shutil.rmtree(tempdir, ignore_errors=True)
        
    return error_data
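
Here the screenshot directory comes from tempfile.mkdtemp(prefix='meshtool-print-pm-perceptual-error'), and cleanup passes ignore_errors=True to shutil.rmtree so a failed removal cannot mask an error raised while rendering. A small sketch of the same idea; the prefix and file name below are invented:

import os
import shutil
import tempfile

tempdir = tempfile.mkdtemp(prefix='meshtool-demo-')
try:
    # write intermediate screenshots into the scratch directory
    with open(os.path.join(tempdir, 'frame_0.png'), 'wb') as out:
        out.write(b'placeholder image data')
finally:
    # best-effort cleanup; any OSError from rmtree is swallowed
    shutil.rmtree(tempdir, ignore_errors=True)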

Example 25

Project: pyomo
Source File: instance_factory.py
View license
def _extract_pathspec(
        pathspec,
        default_basename,
        archives=None):
    """Obtain a file location from a pathspec.

    Extracts a file location from the provided input
    path specification by normalizing the name or by
    opening an archive reader.

    Args:
        pathspec (str): The path specification. This can
            be a standard path to a file or represent a
            file contained within an archive. In the
            case of an archived file, the input string
            consists of two parts separated by a comma,
            where the first part represents the path to
            the archive and the second part represents
            the relative path to a file or directory
            within that archive.
        default_basename (str): The default filename to
            search for when the pathspec represents a
            directory (or a directory within an
            archive). This name must have an extension,
            which is used by this function to interpret
            whether the pathspec ends in a filename or a
            directory name. If this argument is None, the
            function will attempt to extract a directory
            name instead of a file.
        archives (list): A list of currently open
            archive readers to check before opening a
            new archive. If a new archive is opened, it will
            be appended to this list.

    Returns:
        A tuple consisting of the normalized absolute
        path to the file followed by the current list of
        open archives that can be passed into this function
        the next time it is called.
    """

    logger.debug("expanding pathspec %s to %s"
                 % (pathspec, os.path.expanduser(pathspec)))
    pathspec = os.path.expanduser(pathspec)

    if archives is None:
        archives = []

    filename = None
    normalized_location = None
    archive = None
    archive_subdir = None
    unarchived_dir = None
    basename = None

    if not os.path.exists(pathspec):
        logger.debug("pathspec does not exist, normalizing name")
        (normalized_location, _, archive_subdir) = \
            ArchiveReader.normalize_name(pathspec).rpartition(',')
        if default_basename is not None:
            extension = os.path.splitext(default_basename)[1].strip()
            assert extension != ''
            if archive_subdir.endswith(extension):
                logger.debug("recognized extension type '%s' appears "
                             "after comma, treating as file" % (extension))
                basename = os.path.basename(archive_subdir)
                archive_subdir = os.path.dirname(archive_subdir).strip()
        if archive_subdir == '':
            archive_subdir = None
    else:
        logger.debug("pathspec exists, normalizing name")
        normalized_location = \
            ArchiveReader.normalize_name(pathspec)

    logger.debug("normalized pathspec: (%s, %s, %s)"
                 % (normalized_location, archive_subdir, basename))
    if ArchiveReader.isArchivedFile(normalized_location):
        logger.debug("pathspec defines a recognized archive type")
        for prev_archive_inputs, prev_archive, prev_unarchived_dir \
              in archives:
            if (normalized_location == \
                prev_archive_inputs[0]) and \
                ((prev_archive_inputs[1] is None) or \
                 ((archive_subdir is not None) and \
                  (archive_subdir.startswith(prev_archive_inputs[1]+'/')))):
                logger.debug("pathspec matches previous archive")
                unarchived_dir = prev_unarchived_dir
                if archive_subdir is not None:
                    if prev_archive_inputs[1] is not None:
                        unarchived_dir = posixpath.join(
                            unarchived_dir,
                            os.path.relpath(archive_subdir,
                                            start=prev_archive_inputs[1]))
                    else:
                        unarchived_dir = posixpath.join(unarchived_dir,
                                                        archive_subdir)
                logger.debug("unarchived directory: %s" % (unarchived_dir))
                break
        else: # if no break occurs in previous for-loop
            archive = ArchiveReaderFactory(
                normalized_location,
                subdir=archive_subdir)
            unarchived_dir = archive.normalize_name(
                tempfile.mkdtemp(prefix='pysp_unarchived'))
            archives.append(((normalized_location, archive_subdir),
                             archive,
                             unarchived_dir))
            logger.debug("New archive opened. Temporary archive "
                         "extraction directory: %s" % (unarchived_dir))
            archive.extractall(path=unarchived_dir)
        if basename is not None:
            filename = posixpath.join(unarchived_dir, basename)
        elif default_basename is not None:
            filename = posixpath.join(unarchived_dir, default_basename)
        else:
            filename = unarchived_dir
        logger.debug("extracted filename: %s" % (filename))
    else:
        logger.debug("pathspec defines a standard path")
        if archive_subdir is not None:
            unarchived_dir = posixpath.join(normalized_location,
                                            archive_subdir)
        else:
            unarchived_dir = normalized_location

        if not os.path.isfile(unarchived_dir):
            if basename is not None:
                filename = posixpath.join(unarchived_dir, basename)
            elif default_basename is not None:
                filename = posixpath.join(unarchived_dir, default_basename)
            else:
                filename = unarchived_dir
        else:
            filename = unarchived_dir

    return filename, archives
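
In the archive branch above, tempfile.mkdtemp(prefix='pysp_unarchived') provides the extraction target, and the resulting path is remembered in the archives list so the same archive is only unpacked once. A minimal sketch of extracting into a fresh temporary directory, using the standard zipfile module in place of pyomo's ArchiveReaderFactory; the archive path is hypothetical:

import tempfile
import zipfile

archive_path = 'model_data.zip'     # hypothetical archive on disk

unarchived_dir = tempfile.mkdtemp(prefix='pysp_unarchived')
with zipfile.ZipFile(archive_path) as zf:
    zf.extractall(path=unarchived_dir)
# files can now be referenced relative to unarchived_dir; the caller is
# responsible for removing the directory when it is no longer needed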

Example 26

Project: pip
Source File: install.py
View license
    def run(self, options, args):
        cmdoptions.resolve_wheel_no_use_binary(options)
        cmdoptions.check_install_build_global(options)

        if options.as_egg:
            warnings.warn(
                "--egg has been deprecated and will be removed in the future. "
                "This flag is mutually exclusive with large parts of pip, and "
                "actually using it invalidates pip's ability to manage the "
                "installation process.",
                RemovedInPip10Warning,
            )

        if options.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.download_dir:
            warnings.warn(
                "pip install --download has been deprecated and will be "
                "removed in the future. Pip now has a download command that "
                "should be used instead.",
                RemovedInPip10Warning,
            )
            options.ignore_installed = True

        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)

        options.src_dir = os.path.abspath(options.src_dir)
        install_options = options.install_options or []
        if options.use_user_site:
            if options.prefix_path:
                raise CommandError(
                    "Can not combine '--user' and '--prefix' as they imply "
                    "different installation locations"
                )
            if virtualenv_no_global():
                raise InstallationError(
                    "Can not perform a '--user' install. User site-packages "
                    "are not visible in this virtualenv."
                )
            install_options.append('--user')
            install_options.append('--prefix=')

        temp_target_dir = None
        if options.target_dir:
            options.ignore_installed = True
            temp_target_dir = tempfile.mkdtemp()
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir) and not
                    os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )
            install_options.append('--home=' + temp_target_dir)

        global_options = options.global_options or []

        with self._build_session(options) as session:

            finder = self._build_package_finder(options, session)
            build_delete = (not (options.no_clean or options.build_dir))
            wheel_cache = WheelCache(options.cache_dir, options.format_control)
            if options.cache_dir and not check_path_owner(options.cache_dir):
                logger.warning(
                    "The directory '%s' or its parent directory is not owned "
                    "by the current user and caching wheels has been "
                    "disabled. check the permissions and owner of that "
                    "directory. If executing pip with sudo, you may want "
                    "sudo's -H flag.",
                    options.cache_dir,
                )
                options.cache_dir = None

            with BuildDirectory(options.build_dir,
                                delete=build_delete) as build_dir:
                requirement_set = RequirementSet(
                    build_dir=build_dir,
                    src_dir=options.src_dir,
                    download_dir=options.download_dir,
                    upgrade=options.upgrade,
                    upgrade_strategy=options.upgrade_strategy,
                    as_egg=options.as_egg,
                    ignore_installed=options.ignore_installed,
                    ignore_dependencies=options.ignore_dependencies,
                    ignore_requires_python=options.ignore_requires_python,
                    force_reinstall=options.force_reinstall,
                    use_user_site=options.use_user_site,
                    target_dir=temp_target_dir,
                    session=session,
                    pycompile=options.compile,
                    isolated=options.isolated_mode,
                    wheel_cache=wheel_cache,
                    require_hashes=options.require_hashes,
                )

                self.populate_requirement_set(
                    requirement_set, args, options, finder, session, self.name,
                    wheel_cache
                )

                if not requirement_set.has_requirements:
                    return

                try:
                    if (options.download_dir or not wheel or not
                            options.cache_dir):
                        # on -d don't do complex things like building
                        # wheels, and don't try to build wheels when wheel is
                        # not installed.
                        requirement_set.prepare_files(finder)
                    else:
                        # build wheels before install.
                        wb = WheelBuilder(
                            requirement_set,
                            finder,
                            build_options=[],
                            global_options=[],
                        )
                        # Ignore the result: a failed wheel will be
                        # installed from the sdist/vcs whatever.
                        wb.build(autobuilding=True)

                    if not options.download_dir:
                        requirement_set.install(
                            install_options,
                            global_options,
                            root=options.root_path,
                            prefix=options.prefix_path,
                        )

                        possible_lib_locations = get_lib_location_guesses(
                            user=options.use_user_site,
                            home=temp_target_dir,
                            root=options.root_path,
                            prefix=options.prefix_path,
                            isolated=options.isolated_mode,
                        )
                        reqs = sorted(
                            requirement_set.successfully_installed,
                            key=operator.attrgetter('name'))
                        items = []
                        for req in reqs:
                            item = req.name
                            try:
                                installed_version = get_installed_version(
                                    req.name, possible_lib_locations
                                )
                                if installed_version:
                                    item += '-' + installed_version
                            except Exception:
                                pass
                            items.append(item)
                        installed = ' '.join(items)
                        if installed:
                            logger.info('Successfully installed %s', installed)
                    else:
                        downloaded = ' '.join([
                            req.name
                            for req in requirement_set.successfully_downloaded
                        ])
                        if downloaded:
                            logger.info(
                                'Successfully downloaded %s', downloaded
                            )
                except PreviousBuildDirError:
                    options.no_clean = True
                    raise
                finally:
                    # Clean up
                    if not options.no_clean:
                        requirement_set.cleanup_files()

        if options.target_dir:
            ensure_dir(options.target_dir)

            # Checking both purelib and platlib directories for installed
            # packages to be moved to target directory
            lib_dir_list = []

            purelib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
            platlib_dir = distutils_scheme('', home=temp_target_dir)['platlib']

            if os.path.exists(purelib_dir):
                lib_dir_list.append(purelib_dir)
            if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
                lib_dir_list.append(platlib_dir)

            for lib_dir in lib_dir_list:
                for item in os.listdir(lib_dir):
                    target_item_dir = os.path.join(options.target_dir, item)
                    if os.path.exists(target_item_dir):
                        if not options.upgrade:
                            logger.warning(
                                'Target directory %s already exists. Specify '
                                '--upgrade to force replacement.',
                                target_item_dir
                            )
                            continue
                        if os.path.islink(target_item_dir):
                            logger.warning(
                                'Target directory %s already exists and is '
                                'a link. Pip will not automatically replace '
                                'links, please remove if replacement is '
                                'desired.',
                                target_item_dir
                            )
                            continue
                        if os.path.isdir(target_item_dir):
                            shutil.rmtree(target_item_dir)
                        else:
                            os.remove(target_item_dir)

                    shutil.move(
                        os.path.join(lib_dir, item),
                        target_item_dir
                    )
            shutil.rmtree(temp_target_dir)
        return requirement_set
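
For --target installs, the command above first installs into a throwaway tempfile.mkdtemp() directory (passed via --home), then moves the resulting library files into the real target and deletes the staging area. A condensed sketch of that stage-then-move step, with both directories created here purely for illustration:

import os
import shutil
import tempfile

target_dir = tempfile.mkdtemp(prefix='target-')     # stand-in for options.target_dir
temp_target_dir = tempfile.mkdtemp()                # staging area

# pretend the install step dropped a module into the staging lib dir
lib_dir = os.path.join(temp_target_dir, 'lib', 'python')
os.makedirs(lib_dir)
open(os.path.join(lib_dir, 'demo.py'), 'w').close()

# move everything from the staging lib dir into the final target
for item in os.listdir(lib_dir):
    shutil.move(os.path.join(lib_dir, item),
                os.path.join(target_dir, item))
shutil.rmtree(temp_target_dir)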

Example 27

Project: pip
Source File: wheel.py
View license
    def install(self, paths, maker, **kwargs):
        """
        Install a wheel to the specified paths. If kwarg ``warner`` is
        specified, it should be a callable, which will be called with two
        tuples indicating the wheel version of this software and the wheel
        version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
        If kwarg ``lib_only`` is True, only the purelib/platlib files are
        installed, and the headers, scripts, data and dist-info metadata are
        not written.

        The return value is a :class:`InstalledDistribution` instance unless
        ``options.lib_only`` is True, in which case the return value is ``None``.
        """

        dry_run = maker.dry_run
        warner = kwargs.get('warner')
        lib_only = kwargs.get('lib_only', False)

        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
        record_name = posixpath.join(info_dir, 'RECORD')

        wrapper = codecs.getreader('utf-8')

        with ZipFile(pathname, 'r') as zf:
            with zf.open(wheel_metadata_name) as bwf:
                wf = wrapper(bwf)
                message = message_from_file(wf)
            wv = message['Wheel-Version'].split('.', 1)
            file_version = tuple([int(i) for i in wv])
            if (file_version != self.wheel_version) and warner:
                warner(self.wheel_version, file_version)

            if message['Root-Is-Purelib'] == 'true':
                libdir = paths['purelib']
            else:
                libdir = paths['platlib']

            records = {}
            with zf.open(record_name) as bf:
                with CSVReader(stream=bf) as reader:
                    for row in reader:
                        p = row[0]
                        records[p] = row

            data_pfx = posixpath.join(data_dir, '')
            info_pfx = posixpath.join(info_dir, '')
            script_pfx = posixpath.join(data_dir, 'scripts', '')

            # make a new instance rather than a copy of maker's,
            # as we mutate it
            fileop = FileOperator(dry_run=dry_run)
            fileop.record = True    # so we can rollback if needed

            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!

            outfiles = []   # for RECORD writing

            # for script copying/shebang processing
            workdir = tempfile.mkdtemp()
            # set target dir later
            # we default add_launchers to False, as the
            # Python Launcher should be used instead
            maker.source_dir = workdir
            maker.target_dir = None
            try:
                for zinfo in zf.infolist():
                    arcname = zinfo.filename
                    if isinstance(arcname, text_type):
                        u_arcname = arcname
                    else:
                        u_arcname = arcname.decode('utf-8')
                    # The signature file won't be in RECORD,
                    # and we don't currently do anything with it
                    if u_arcname.endswith('/RECORD.jws'):
                        continue
                    row = records[u_arcname]
                    if row[2] and str(zinfo.file_size) != row[2]:
                        raise DistlibException('size mismatch for '
                                               '%s' % u_arcname)
                    if row[1]:
                        kind, value = row[1].split('=', 1)
                        with zf.open(arcname) as bf:
                            data = bf.read()
                        _, digest = self.get_hash(data, kind)
                        if digest != value:
                            raise DistlibException('digest mismatch for '
                                                   '%s' % arcname)

                    if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                        logger.debug('lib_only: skipping %s', u_arcname)
                        continue
                    is_script = (u_arcname.startswith(script_pfx)
                                 and not u_arcname.endswith('.exe'))

                    if u_arcname.startswith(data_pfx):
                        _, where, rp = u_arcname.split('/', 2)
                        outfile = os.path.join(paths[where], convert_path(rp))
                    else:
                        # meant for site-packages.
                        if u_arcname in (wheel_metadata_name, record_name):
                            continue
                        outfile = os.path.join(libdir, convert_path(u_arcname))
                    if not is_script:
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, outfile)
                        outfiles.append(outfile)
                        # Double check the digest of the written file
                        if not dry_run and row[1]:
                            with open(outfile, 'rb') as bf:
                                data = bf.read()
                                _, newdigest = self.get_hash(data, kind)
                                if newdigest != digest:
                                    raise DistlibException('digest mismatch '
                                                           'on write for '
                                                           '%s' % outfile)
                        if bc and outfile.endswith('.py'):
                            try:
                                pyc = fileop.byte_compile(outfile)
                                outfiles.append(pyc)
                            except Exception:
                                # Don't give up if byte-compilation fails,
                                # but log it and perhaps warn the user
                                logger.warning('Byte-compilation failed',
                                               exc_info=True)
                    else:
                        fn = os.path.basename(convert_path(arcname))
                        workname = os.path.join(workdir, fn)
                        with zf.open(arcname) as bf:
                            fileop.copy_stream(bf, workname)

                        dn, fn = os.path.split(outfile)
                        maker.target_dir = dn
                        filenames = maker.make(fn)
                        fileop.set_executable_mode(filenames)
                        outfiles.extend(filenames)

                if lib_only:
                    logger.debug('lib_only: returning None')
                    dist = None
                else:
                    # Generate scripts

                    # Try to get pydist.json so we can see if there are
                    # any commands to generate. If this fails (e.g. because
                    # of a legacy wheel), log a warning but don't give up.
                    commands = None
                    file_version = self.info['Wheel-Version']
                    if file_version == '1.0':
                        # Use legacy info
                        ep = posixpath.join(info_dir, 'entry_points.txt')
                        try:
                            with zf.open(ep) as bwf:
                                epdata = read_exports(bwf)
                            commands = {}
                            for key in ('console', 'gui'):
                                k = '%s_scripts' % key
                                if k in epdata:
                                    commands['wrap_%s' % key] = d = {}
                                    for v in epdata[k].values():
                                        s = '%s:%s' % (v.prefix, v.suffix)
                                        if v.flags:
                                            s += ' %s' % v.flags
                                        d[v.name] = s
                        except Exception:
                            logger.warning('Unable to read legacy script '
                                           'metadata, so cannot generate '
                                           'scripts')
                    else:
                        try:
                            with zf.open(metadata_name) as bwf:
                                wf = wrapper(bwf)
                                commands = json.load(wf).get('extensions')
                                if commands:
                                    commands = commands.get('python.commands')
                        except Exception:
                            logger.warning('Unable to read JSON metadata, so '
                                           'cannot generate scripts')
                    if commands:
                        console_scripts = commands.get('wrap_console', {})
                        gui_scripts = commands.get('wrap_gui', {})
                        if console_scripts or gui_scripts:
                            script_dir = paths.get('scripts', '')
                            if not os.path.isdir(script_dir):
                                raise ValueError('Valid script path not '
                                                 'specified')
                            maker.target_dir = script_dir
                            for k, v in console_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script)
                                fileop.set_executable_mode(filenames)

                            if gui_scripts:
                                options = {'gui': True }
                                for k, v in gui_scripts.items():
                                    script = '%s = %s' % (k, v)
                                    filenames = maker.make(script, options)
                                    fileop.set_executable_mode(filenames)

                    p = os.path.join(libdir, info_dir)
                    dist = InstalledDistribution(p)

                    # Write SHARED
                    paths = dict(paths)     # don't change passed in dict
                    del paths['purelib']
                    del paths['platlib']
                    paths['lib'] = libdir
                    p = dist.write_shared_locations(paths, dry_run)
                    if p:
                        outfiles.append(p)

                    # Write RECORD
                    dist.write_installed_files(outfiles, paths['prefix'],
                                               dry_run)
                return dist
            except Exception:  # pragma: no cover
                logger.exception('installation failed.')
                fileop.rollback()
                raise
            finally:
                shutil.rmtree(workdir)
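
This is the same distlib install routine as Example 23, vendored inside pip, with the identical mkdtemp-plus-finally cleanup around the script work area. On Python 3 the equivalent lifetime can also be expressed with tempfile.TemporaryDirectory, which removes the directory when the with block exits; a minimal sketch:

import os
import tempfile

with tempfile.TemporaryDirectory() as workdir:
    # stage script files here before moving them to their final location
    open(os.path.join(workdir, 'console_script'), 'w').close()
# workdir and its contents are removed automatically at this point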

Example 28

Project: pagure
Source File: git.py
View license
def merge_pull_request(
        session, request, username, request_folder, domerge=True):
    ''' Merge the specified pull-request.
    '''
    if request.remote:
        # Get the fork
        repopath = pagure.get_remote_repo_path(
            request.remote_git, request.branch_from)
    else:
        # Get the fork
        repopath = pagure.get_repo_path(request.project_from)

    fork_obj = PagureRepo(repopath)

    # Get the original repo
    parentpath = pagure.get_repo_path(request.project)

    # Clone the original repo into a temp folder
    newpath = tempfile.mkdtemp(prefix='pagure-pr-merge')
    new_repo = pygit2.clone_repository(parentpath, newpath)

    # Update the start and stop commits in the DB, one last time
    diff_commits = diff_pull_request(
        session, request, fork_obj, PagureRepo(parentpath),
        requestfolder=request_folder, with_diff=False)[0]

    if request.project.settings.get(
            'Enforce_signed-off_commits_in_pull-request', False):
        for commit in diff_commits:
            if 'signed-off-by' not in commit.message.lower():
                shutil.rmtree(newpath)
                raise pagure.exceptions.PagureException(
                    'This repo enforces that all commits are '
                    'signed off by their author. ')

    # Checkout the correct branch
    branch_ref = get_branch_ref(new_repo, request.branch)
    if not branch_ref:
        shutil.rmtree(newpath)
        raise pagure.exceptions.BranchNotFoundException(
            'Branch %s could not be found in the repo %s' % (
                request.branch, request.project.fullname
            ))

    new_repo.checkout(branch_ref)

    branch = get_branch_ref(fork_obj, request.branch_from)
    if not branch:
        shutil.rmtree(newpath)
        raise pagure.exceptions.BranchNotFoundException(
            'Branch %s could not be found in the repo %s' % (
                request.branch_from, request.project_from.fullname
                if request.project_from else request.remote_git
            ))

    repo_commit = fork_obj[branch.get_object().hex]

    ori_remote = new_repo.remotes[0]
    # Add the fork as remote repo
    reponame = '%s_%s' % (request.user.user, request.uid)

    remote = new_repo.create_remote(reponame, repopath)

    # Fetch the commits
    remote.fetch()

    merge = new_repo.merge(repo_commit.oid)
    if merge is None:
        mergecode = new_repo.merge_analysis(repo_commit.oid)[0]

    refname = '%s:refs/heads/%s' % (branch_ref.name, request.branch)
    if (
            (merge is not None and merge.is_uptodate)
            or
            (merge is None and
             mergecode & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE)):

        if domerge:
            pagure.lib.close_pull_request(
                session, request, username,
                requestfolder=request_folder)
            shutil.rmtree(newpath)
            try:
                session.commit()
            except SQLAlchemyError as err:  # pragma: no cover
                session.rollback()
                pagure.APP.logger.exception(err)
                raise pagure.exceptions.PagureException(
                    'Could not close this pull-request')
            raise pagure.exceptions.PagureException(
                'Nothing to do, changes were already merged')
        else:
            request.merge_status = 'NO_CHANGE'
            session.commit()
            shutil.rmtree(newpath)
            return 'NO_CHANGE'

    elif (
            (merge is not None and merge.is_fastforward)
            or
            (merge is None and
             mergecode & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD)):

        if domerge:
            head = new_repo.lookup_reference('HEAD').get_object()
            if not request.project.settings.get('always_merge', False):
                if merge is not None:
                    # This depends on the pygit2 version
                    branch_ref.target = merge.fastforward_oid
                elif merge is None and mergecode is not None:
                    branch_ref.set_target(repo_commit.oid.hex)
                commit = repo_commit.oid.hex
            else:
                tree = new_repo.index.write_tree()
                user_obj = pagure.lib.get_user(session, username)
                author = pygit2.Signature(
                    user_obj.fullname.encode('utf-8'),
                    user_obj.default_email.encode('utf-8'))
                commit = new_repo.create_commit(
                    'refs/heads/%s' % request.branch,
                    author,
                    author,
                    'Merge #%s `%s`' % (request.id, request.title),
                    tree,
                    [head.hex, repo_commit.oid.hex])

            PagureRepo.push(ori_remote, refname)
            fork_obj.run_hook(
                head.hex, commit, 'refs/heads/%s' % request.branch,
                username)
        else:
            request.merge_status = 'FFORWARD'
            session.commit()
            shutil.rmtree(newpath)
            return 'FFORWARD'

    else:
        tree = None
        try:
            tree = new_repo.index.write_tree()
        except pygit2.GitError:
            shutil.rmtree(newpath)
            if domerge:
                raise pagure.exceptions.PagureException('Merge conflicts!')
            else:
                request.merge_status = 'CONFLICTS'
                session.commit()
                return 'CONFLICTS'

        if domerge:
            head = new_repo.lookup_reference('HEAD').get_object()
            user_obj = pagure.lib.get_user(session, username)
            author = pygit2.Signature(
                user_obj.fullname.encode('utf-8'),
                user_obj.default_email.encode('utf-8'))
            commit = new_repo.create_commit(
                'refs/heads/%s' % request.branch,
                author,
                author,
                'Merge #%s `%s`' % (request.id, request.title),
                tree,
                [head.hex, repo_commit.oid.hex])

            PagureRepo.push(ori_remote, refname)
            fork_obj.run_hook(
                head.hex, commit, 'refs/heads/%s' % request.branch,
                username)

        else:
            request.merge_status = 'MERGE'
            session.commit()
            shutil.rmtree(newpath)
            return 'MERGE'

    # Update status
    pagure.lib.close_pull_request(
        session, request, username,
        requestfolder=request_folder,
    )
    try:
        # Reset the merge_status of all opened PRs to refresh their cache
        pagure.lib.reset_status_pull_request(session, request.project)
        session.commit()
    except SQLAlchemyError as err:  # pragma: no cover
        session.rollback()
        pagure.APP.logger.exception(err)
        shutil.rmtree(newpath)
        raise pagure.exceptions.PagureException(
            'Could not update this pull-request in the database')
    shutil.rmtree(newpath)

    return 'Changes merged!'
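
merge_pull_request clones the target repository into a tempfile.mkdtemp(prefix='pagure-pr-merge') directory and has to call shutil.rmtree on every return and error path above, which is easy to miss; wrapping the work in try/finally gives a single cleanup point instead. A reduced sketch under that assumption, with the clone and merge replaced by a stub:

import shutil
import tempfile

def merge_in_scratch_clone(parent_path):
    newpath = tempfile.mkdtemp(prefix='pagure-pr-merge')
    try:
        # clone parent_path into newpath and perform the merge here
        return 'MERGE'
    finally:
        # one cleanup point instead of an rmtree before every return/raise
        shutil.rmtree(newpath)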

Example 29

View license
    @patch('pagure.lib.notify.send_email')
    def test_dumping_reloading_ticket(self, send_email):
        """ Test dumping a ticket into a JSON blob. """
        send_email.return_value = True

        tests.create_projects(self.session)

        # Create repo
        self.gitrepo = os.path.join(self.path, 'tickets', 'test.git')
        repopath = os.path.join(self.path, 'tickets')
        os.makedirs(self.gitrepo)
        repo_obj = pygit2.init_repository(self.gitrepo, bare=True)

        repo = pagure.lib.get_project(self.session, 'test')
        # Create an issue to play with
        msg = pagure.lib.new_issue(
            session=self.session,
            repo=repo,
            title='Test issue',
            content='We should work on this',
            user='pingou',
            ticketfolder=repopath
        )
        self.assertEqual(msg.title, 'Test issue')

        # Need two more issues to test the dependency chain
        msg = pagure.lib.new_issue(
            session=self.session,
            repo=repo,
            title='Test issue #2',
            content='Another bug',
            user='pingou',
            ticketfolder=repopath
        )
        self.assertEqual(msg.title, 'Test issue #2')
        msg = pagure.lib.new_issue(
            session=self.session,
            repo=repo,
            title='Test issue #3',
            content='That would be nice feature no?',
            user='foo',
            ticketfolder=repopath
        )
        self.assertEqual(msg.title, 'Test issue #3')

        issue = pagure.lib.search_issues(self.session, repo, issueid=1)
        issue2 = pagure.lib.search_issues(self.session, repo, issueid=2)
        issue3 = pagure.lib.search_issues(self.session, repo, issueid=3)

        # Add a couple of comment on the ticket
        msg = pagure.lib.add_issue_comment(
            session=self.session,
            issue=issue,
            comment='Hey look a comment!',
            user='foo',
            ticketfolder=repopath,
        )
        self.session.commit()
        self.assertEqual(msg, 'Comment added')
        msg = pagure.lib.add_issue_comment(
            session=self.session,
            issue=issue,
            comment='crazy right?',
            user='pingou',
            ticketfolder=repopath,
        )
        self.session.commit()
        self.assertEqual(msg, 'Comment added')
        # Assign the ticket to someone
        msg = pagure.lib.add_issue_assignee(
            session=self.session,
            issue=issue,
            assignee='pingou',
            user='pingou',
            ticketfolder=repopath,
        )
        self.session.commit()
        self.assertEqual(msg, 'Issue assigned')
        # Add a couple of tags on the ticket
        msg = pagure.lib.add_tag_obj(
            session=self.session,
            obj=issue,
            tags=[' feature ', 'future '],
            user='pingou',
            ticketfolder=repopath,
        )
        self.session.commit()
        self.assertEqual(msg, 'Tag added: feature, future')
        # Add dependencies
        msg = pagure.lib.add_issue_dependency(
            session=self.session,
            issue=issue,
            issue_blocked=issue2,
            user='pingou',
            ticketfolder=repopath,
        )
        self.session.commit()
        self.assertEqual(msg, 'Dependency added')
        msg = pagure.lib.add_issue_dependency(
            session=self.session,
            issue=issue3,
            issue_blocked=issue,
            user='foo',
            ticketfolder=repopath,
        )
        self.session.commit()
        self.assertEqual(msg, 'Dependency added')

        # Dump the JSON
        pagure.lib.git.update_git(issue, repo, repopath)
        repo = pygit2.Repository(self.gitrepo)
        cnt = len([commit
            for commit in repo.walk(
                repo.head.target, pygit2.GIT_SORT_TOPOLOGICAL)])
        self.assertEqual(cnt, 10)

        last_commit = repo.revparse_single('HEAD')
        patch = pagure.lib.git.commit_to_patch(repo, last_commit)
        for line in patch.split('\n'):
            if line.startswith('--- a/'):
                fileid = line.split('--- a/')[1]
                break

        newpath = tempfile.mkdtemp(prefix='pagure-dump-load')
        clone_repo = pygit2.clone_repository(self.gitrepo, newpath)

        self.assertEqual(len(os.listdir(newpath)), 4)

        ticket_json = os.path.join(self.path, 'test_ticket.json')
        self.assertFalse(os.path.exists(ticket_json))
        shutil.copyfile(os.path.join(newpath, fileid), ticket_json)
        self.assertTrue(os.path.exists(ticket_json))
        jsondata = None
        with open(ticket_json) as stream:
            jsondata = json.load(stream)
        self.assertNotEqual(jsondata, None)

        shutil.rmtree(newpath)

        # Test reloading the JSON
        self.tearDown()
        self.setUp()
        tests.create_projects(self.session)

        pagure.lib.git.update_ticket_from_git(
            self.session,
            reponame='test',
            namespace=None,
            username=None,
            issue_uid='foobar',
            json_data=jsondata,
        )

        # Post loading
        repo = pagure.lib.get_project(self.session, 'test')
        self.assertEqual(len(repo.issues), 1)
        issue = pagure.lib.search_issues(self.session, repo, issueid=1)

        # Check after re-loading
        self.assertEqual(len(issue.comments), 2)
        self.assertEqual(len(issue.tags), 2)
        self.assertEqual(issue.tags_text, ['future', 'feature'])
        self.assertEqual(issue.assignee.username, 'pingou')
        self.assertEqual(issue.children, [])
        self.assertEqual(issue.parents, [])
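
The dump/reload test above uses tempfile.mkdtemp() only as a scratch target for pygit2.clone_repository(): it copies a single JSON blob out of the clone and then removes the directory. A hedged sketch of that pattern in isolation; read_ticket_json, gitrepo and fileid are placeholder names, not part of pagure:

import json
import os
import shutil
import tempfile

import pygit2  # assumed available, as in the test above

def read_ticket_json(gitrepo, fileid):
    """Clone a ticket repo into a scratch directory, load one JSON blob,
    and always remove the clone afterwards."""
    newpath = tempfile.mkdtemp(prefix='pagure-dump-load')
    try:
        pygit2.clone_repository(gitrepo, newpath)
        with open(os.path.join(newpath, fileid)) as stream:
            return json.load(stream)
    finally:
        shutil.rmtree(newpath)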

Example 30

Project: pysb
Source File: kappa.py
View license
def run_simulation(model, time=10000, points=200, cleanup=True,
                   output_prefix=None, output_dir=None, flux_map=False,
                   perturbation=None, seed=None, verbose=False):
    """Runs the given model using KaSim and returns the parsed results.

    Parameters
    ----------
    model : pysb.core.Model
        The model to simulate/analyze using KaSim.
    time : number
        The amount of time (in arbitrary units) to run a simulation.
        Identical to the -t argument when using KaSim at the command line.
        Default value is 10000. If set to 0, no simulation will be run.
    points : integer
        The number of data points to collect for plotting.
        Identical to the -p argument when using KaSim at the command line.
        Default value is 200. Note that the number of points actually returned
        by the simulator will be points + 1 (including the 0 point).
    cleanup : boolean
        Specifies whether output files produced by KaSim should be deleted
        after execution is completed. Default value is True.
    output_prefix: str
        Prefix of the temporary directory name. Default is
        'tmpKappa_<model name>_'.
    output_dir : string
        The directory in which to create the temporary directory for
        the .ka and other output files. Defaults to the system temporary file
        directory (e.g. /tmp). If the specified directory does not exist,
        an Exception is thrown.
    flux_map: boolean
        Specifies whether or not to produce the flux map (generated over the
        full duration of the simulation). Default value is False.
    perturbation : string or None
        Optional perturbation language syntax to be appended to the Kappa file.
        See KaSim manual for more details. Default value is None (no
        perturbation).
    seed : integer
        A seed integer for KaSim random number generator. Set to None to
        allow KaSim to use a random seed (default) or supply a seed for
        deterministic behaviour (e.g. for testing)
    verbose : boolean
        Whether to pass the output of KaSim through to stdout/stderr.

    Returns
    -------
    If flux_map is False, returns the KaSim simulation data as a NumPy ndarray.
    Data is accessed using the syntax::

            results[index_name]

    The index 'time' gives the time coordinates of the simulation. Data for the
    observables can be accessed by indexing the array with the names of the
    observables. Each entry in the ndarray has length points + 1, due to the
    inclusion of both the zero point and the final timepoint.

    If flux_map is True, returns an instance of SimulationResult, a namedtuple
    with two members, `timecourse` and `flux_map`. The `timecourse` field
    contains the simulation ndarray, and the `flux_map` field is an instance of
    a pygraphviz AGraph containing the flux map. The flux map can be rendered
    as a pdf using the dot layout program as follows::

        fluxmap.draw('fluxmap.pdf', prog='dot')
    """

    gen = KappaGenerator(model)

    if output_prefix is None:
        output_prefix = 'tmpKappa_%s_' % model.name

    base_directory = tempfile.mkdtemp(prefix=output_prefix, dir=output_dir)

    base_filename = os.path.join(base_directory, model.name)
    kappa_filename = base_filename + '.ka'
    fm_filename = base_filename + '_fm.dot'
    out_filename = base_filename + '.out'

    args = ['-i', kappa_filename, '-t', str(time), '-p', str(points),
            '-o', out_filename]

    if seed:
        args.extend(['-seed', str(seed)])

    # Generate the Kappa model code from the PySB model and write it to
    # the Kappa file:
    with open(kappa_filename, 'w') as kappa_file:
        kappa_file.write(gen.get_content())
        # If desired, add instructions to the kappa file to generate the
        # flux map:
        if flux_map:
            kappa_file.write('%%mod: [true] do $FLUX "%s" [true]\n' %
                             fm_filename)
        # If any perturbation language code has been passed in, add it to
        # the Kappa file:
        if perturbation:
            kappa_file.write('\n%s\n' % perturbation)

    # Run KaSim
    kasim_path = _get_kappa_path('KaSim')
    p = subprocess.Popen([kasim_path] + args,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if verbose:
        for line in iter(p.stdout.readline, b''):
            print('@@', line, end='')
    (p_out, p_err) = p.communicate()

    if p.returncode:
        raise KasimInterfaceError(p_out + '\n' + p_err)

    # The simulation data, as a numpy array
    data = _parse_kasim_outfile(out_filename)

    if flux_map:
        try:
            import pygraphviz
            flux_graph = pygraphviz.AGraph(fm_filename)
        except ImportError:
            if cleanup:
                raise RuntimeError(
                        "Couldn't import pygraphviz, which is "
                        "required to return the flux map as a "
                        "pygraphviz AGraph object. Either install "
                        "pygraphviz or set cleanup=False to retain "
                        "dot files.")
            else:
                warnings.warn(
                        "pygraphviz could not be imported so no AGraph "
                        "object returned (returning None); flux map "
                        "dot file available at %s" % fm_filename)
                flux_graph = None

    if cleanup:
        shutil.rmtree(base_directory)

    # If a flux map was generated, return both the simulation output and the
    # flux map as a pygraphviz graph
    if flux_map:
        return SimulationResult(data, flux_graph)
    # If no flux map was requested, return only the simulation data
    else:
        return data
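
run_simulation() keeps all of its scratch files in a directory obtained from tempfile.mkdtemp(prefix=..., dir=output_dir) and only deletes it when cleanup=True, so failed runs can still be inspected. A minimal sketch of that knob, assuming placeholder callables write_inputs and run_tool (not pysb API):

import shutil
import tempfile

def run_in_scratch_dir(write_inputs, run_tool, cleanup=True, output_dir=None):
    """Create a scratch directory, run an external tool in it, and remove it
    only when cleanup is requested."""
    base_directory = tempfile.mkdtemp(prefix='tmpKappa_demo_', dir=output_dir)
    try:
        write_inputs(base_directory)     # e.g. write the .ka input file
        return run_tool(base_directory)  # e.g. invoke the tool and parse output
    finally:
        if cleanup:
            shutil.rmtree(base_directory)  # otherwise keep it for debugging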

Example 31

Project: pysb
Source File: kappa.py
View license
def run_static_analysis(model, influence_map=False, contact_map=False,
                        cleanup=True, output_prefix=None, output_dir=None,
                        verbose=False):
    """Run static analysis (KaSa) on to get the contact and influence maps.

    If neither influence_map nor contact_map are set to True, then a ValueError
    is raised.

    Parameters
    ----------
    model : pysb.core.Model
        The model to simulate/analyze using KaSa.
    influence_map : boolean
        Whether to compute the influence map.
    contact_map : boolean
        Whether to compute the contact map.
    cleanup : boolean
        Specifies whether output files produced by KaSa should be deleted
        after execution is completed. Default value is True.
    output_prefix: str
        Prefix of the temporary directory name. Default is
        'tmpKappa_<model name>_'.
    output_dir : string
        The directory in which to create the temporary directory for
        the .ka and other output files. Defaults to the system temporary file
        directory (e.g. /tmp). If the specified directory does not exist,
        an Exception is thrown.
    verbose : boolean
        Whether to pass the output of KaSa through to stdout/stderr.

    Returns
    -------
    StaticAnalysisResult, a namedtuple with two fields, `contact_map` and
    `influence_map`, each containing the respective result as an instance
    of a pygraphviz AGraph. If either the contact_map or influence_map
    argument to the function is False, the corresponding entry in the
    StaticAnalysisResult returned by the function will be None.
    """

    # Make sure the user has asked for an output!
    if not influence_map and not contact_map:
        raise ValueError('Either contact_map or influence_map (or both) must '
                         'be set to True in order to perform static analysis.')

    gen = KappaGenerator(model, _warn_no_ic=False)

    if output_prefix is None:
        output_prefix = 'tmpKappa_%s_' % model.name

    base_directory = tempfile.mkdtemp(prefix=output_prefix, dir=output_dir)

    base_filename = os.path.join(base_directory, str(model.name))
    kappa_filename = base_filename + '.ka'
    im_filename = base_filename + '_im.dot'
    cm_filename = base_filename + '_cm.dot'

    # NOTE: in the args passed to KaSa, the directory for the .dot files is
    # specified by the --output_directory option, and the output_contact_map
    # and output_influence_map should only be the base filenames (without
    # a directory prefix).
    # Contact map args:
    if contact_map:
        cm_args = ['--compute-contact-map', '--output-contact-map',
                   os.path.basename(cm_filename)]
    else:
        cm_args = ['--no-compute-contact-map']
    # Influence map args:
    if influence_map:
        im_args = ['--compute-influence-map', '--output-influence-map',
                   os.path.basename(im_filename)]
    else:
        im_args = ['--no-compute-influence-map']
    # Full arg list
    args = [kappa_filename, '--output-directory', base_directory] \
            + cm_args + im_args

    # Generate the Kappa model code from the PySB model and write it to
    # the Kappa file:
    with open(kappa_filename, 'w') as kappa_file:
        kappa_file.write(gen.get_content())

    # Run KaSa using the given args
    kasa_path = _get_kappa_path('KaSa')
    p = subprocess.Popen([kasa_path] + args,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if verbose:
        for line in iter(p.stdout.readline, b''):
            print('@@', line, end='')
    (p_out, p_err) = p.communicate()

    if p.returncode:
        raise KasaInterfaceError(p_out + '\n' + p_err)

    # Try to create the graphviz objects from the .dot files created
    try:
        import pygraphviz
        # Convert the contact map to a Graph
        cmap = pygraphviz.AGraph(cm_filename) if contact_map else None
        imap = pygraphviz.AGraph(im_filename) if influence_map else None
    except ImportError:
        if cleanup:
            raise RuntimeError(
                    "Couldn't import pygraphviz, which is "
                    "required to return the influence and contact maps "
                    " as pygraphviz AGraph objects. Either install "
                    "pygraphviz or set cleanup=False to retain "
                    "dot files.")
        else:
            warnings.warn(
                    "pygraphviz could not be imported so no AGraph "
                    "objects returned (returning None); "
                    "contact/influence maps available at %s" %
                    base_directory)
            cmap = None
            imap = None

    # Clean up the temp directory if desired
    if cleanup:
        shutil.rmtree(base_directory)

    return StaticAnalysisResult(cmap, imap)
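
Both Kappa helpers pass output_dir straight through to tempfile.mkdtemp(dir=...), which raises if that directory does not exist (the behaviour the docstrings describe as "an Exception is thrown"). A small sketch, assuming Python 3, of creating the parent first; scratch_dir_under is an illustrative helper, not part of pysb:

import os
import tempfile

def scratch_dir_under(output_dir=None, prefix='tmpKappa_'):
    """Return a fresh temp directory under output_dir, creating the parent
    if needed (mkdtemp itself refuses to create missing parents)."""
    if output_dir is not None:
        os.makedirs(output_dir, exist_ok=True)  # exist_ok requires Python 3
    return tempfile.mkdtemp(prefix=prefix, dir=output_dir)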

Example 32

View license
    def run(self):
        """Starts the process when Process.start() is called.
        """

        global JOB_CHECK_INTERVAL

        # make sure to catch sys.exit (which raises SystemExit)
        try :
            # Get directory where this module lives
            mod_dir = os.path.dirname(os.path.realpath(__file__))

            # Try to connect to the database
            try:
                db = self._session.get_db()
                pilot_col = db["%s.p" % self._session.uid]
                logger.debug("Connected to MongoDB. Serving requests for PilotManager %s." % self.pilot_manager_id)

            except Exception as e :
                logger.exception ("Connection error: %s" % e)
                return

            last_job_check = time.time()

            while not self._terminate.is_set():

                # Periodically, we pull up all ComputePilots that are pending 
                # execution or were last seen executing and check if the corresponding  
                # SAGA job is still pending in the queue. If that is not the case, 
                # we assume that the job has failed for some reason and update
                # the state of the ComputePilot accordingly.
                if  last_job_check + JOB_CHECK_INTERVAL < time.time() :
                    last_job_check = time.time()
                    self.check_pilot_states (pilot_col)

                if self._disabled.is_set():
                    # don't process any new pilot start requests.  
                    # NOTE: this is not clean, in principle there could be other
                    #       launchers alive which want to still start those 
                    #       pending pilots.  In practice we only ever use one
                    #       pmgr though, and it's during its shutdown that we get
                    #       here...
                    ts = time.time()
                    compute_pilot = pilot_col.find_and_modify(
                        query={"pilotmanager": self.pilot_manager_id,
                               "state" : PENDING_LAUNCH},
                        update={"$set" : {"state": CANCELED},
                                "$push": {"statehistory": {"state": CANCELED, "timestamp": ts}}}
                    )

                    # run state checks more frequently.
                    JOB_CHECK_INTERVAL = 3
                    time.sleep(1)
                    continue


                # See if we can find a ComputePilot that is waiting to be launched.
                # If we find one, we use SAGA to create a job service, a job
                # description and a job that is then sent to the local or remote
                # queueing system. If this succeeds, we set the ComputePilot's
                # state to pending, otherwise to failed.
                compute_pilot = None

                ts = time.time()
                compute_pilot = pilot_col.find_and_modify(
                    query={"pilotmanager": self.pilot_manager_id,
                           "state" : PENDING_LAUNCH},
                    update={"$set" : {"state": LAUNCHING},
                            "$push": {"statehistory": {"state": LAUNCHING, "timestamp": ts}}}
                )

                if  not compute_pilot :
                    time.sleep(IDLE_TIMER)

                else:
                    try:
                        # ------------------------------------------------------
                        #
                        # LAUNCH THE PILOT AGENT VIA SAGA
                        #
                        logentries = []
                        pilot_id   = str(compute_pilot["_id"])

                        logger.info("Launching ComputePilot %s" % pilot_id)

                        # ------------------------------------------------------
                        # Database connection parameters
                        session_id    = self._session.uid
                        database_url  = self._session.dburl

                        # ------------------------------------------------------
                        # pilot description and resource configuration
                        number_cores    = compute_pilot['description']['cores']
                        runtime         = compute_pilot['description']['runtime']
                        queue           = compute_pilot['description']['queue']
                        project         = compute_pilot['description']['project']
                        cleanup         = compute_pilot['description']['cleanup']
                        resource_key    = compute_pilot['description']['resource']
                        schema          = compute_pilot['description']['access_schema']
                        memory          = compute_pilot['description']['memory']
                        candidate_hosts = compute_pilot['description']['candidate_hosts']
                        pilot_sandbox   = compute_pilot['sandbox']
                        global_sandbox  = compute_pilot['global_sandbox']

                        # we expand and exchange keys in the resource config,
                        # depending on the selected schema so better use a deep
                        # copy..
                        resource_cfg = self._session.get_resource_config(resource_key, schema)

                        # import pprint
                        # pprint.pprint (resource_cfg)

                        # ------------------------------------------------------
                        # get parameters from cfg, set defaults where needed
                        agent_launch_method     = resource_cfg.get ('agent_launch_method')
                        agent_dburl             = resource_cfg.get ('agent_mongodb_endpoint', database_url)
                        agent_spawner           = resource_cfg.get ('agent_spawner',       DEFAULT_AGENT_SPAWNER)
                        agent_type              = resource_cfg.get ('agent_type',          DEFAULT_AGENT_TYPE)
                        rc_agent_config         = resource_cfg.get ('agent_config',        DEFAULT_AGENT_CONFIG)
                        agent_scheduler         = resource_cfg.get ('agent_scheduler')
                        tunnel_bind_device      = resource_cfg.get ('tunnel_bind_device')
                        default_queue           = resource_cfg.get ('default_queue')
                        forward_tunnel_endpoint = resource_cfg.get ('forward_tunnel_endpoint')
                        js_endpoint             = resource_cfg.get ('job_manager_endpoint')
                        lrms                    = resource_cfg.get ('lrms')
                        mpi_launch_method       = resource_cfg.get ('mpi_launch_method')
                        pre_bootstrap_1         = resource_cfg.get ('pre_bootstrap_1')
                        pre_bootstrap_2         = resource_cfg.get ('pre_bootstrap_2')
                        python_interpreter      = resource_cfg.get ('python_interpreter')
                        spmd_variation          = resource_cfg.get ('spmd_variation')
                        task_launch_method      = resource_cfg.get ('task_launch_method')
                        rp_version              = resource_cfg.get ('rp_version',          DEFAULT_RP_VERSION)
                        virtenv_mode            = resource_cfg.get ('virtenv_mode',        DEFAULT_VIRTENV_MODE)
                        virtenv                 = resource_cfg.get ('virtenv',             DEFAULT_VIRTENV)
                        stage_cacerts           = resource_cfg.get ('stage_cacerts',       'False')
                        cores_per_node          = resource_cfg.get ('cores_per_node')
                        shared_filesystem       = resource_cfg.get ('shared_filesystem', True)
                        health_check            = resource_cfg.get ('health_check', True)
                        python_dist             = resource_cfg.get ('python_dist')
                        cu_pre_exec             = resource_cfg.get ('cu_pre_exec')
                        cu_post_exec            = resource_cfg.get ('cu_post_exec')
                        export_to_cu            = resource_cfg.get ('export_to_cu')
                        

                        # Agent configuration that is not part of the public API.
                        # The agent config can either be a config dict, or
                        # a string pointing to a configuration name.  If neither
                        # is given, check if 'RADICAL_PILOT_AGENT_CONFIG' is
                        # set.  The last fallback is 'agent_default'
                        agent_config = compute_pilot['description'].get('_config')
                        if not agent_config:
                            agent_config = os.environ.get('RADICAL_PILOT_AGENT_CONFIG')
                        if not agent_config:
                            agent_config = rc_agent_config

                        if isinstance(agent_config, dict):
                            # nothing to do
                            agent_cfg_dict = agent_config
                            pass

                        elif isinstance(agent_config, basestring):
                            try:
                                if os.path.exists(agent_config):
                                    # try to open as file name
                                    logger.info("Read agent config file: %s" % agent_config)
                                    agent_cfg_dict = ru.read_json(agent_config)
                                else:
                                    # otherwise interpret as a config name
                                    module_path = os.path.dirname(os.path.abspath(__file__))
                                    config_path = "%s/../configs/" % module_path
                                    agent_cfg_file = os.path.join(config_path, "agent_%s.json" % agent_config)
                                    logger.info("Read agent config file: %s" % agent_cfg_file)
                                    agent_cfg_dict = ru.read_json(agent_cfg_file)
                                # no matter how we read the config file, we
                                # allow for user level overload
                                cfg_base = os.path.basename(agent_cfg_file)
                                user_cfg = '%s/.radical/pilot/config/%s' \
                                              % (os.environ['HOME'], cfg_base)
                                if os.path.exists(user_cfg):
                                    logger.info("merging user config: %s" % user_cfg)
                                    user_cfg_dict = ru.read_json(user_cfg)
                                    ru.dict_merge (agent_cfg_dict, user_cfg_dict, policy='overwrite')
                            except Exception as e:
                                logger.exception("Error reading agent config file: %s" % e)
                                raise

                        else:
                            # we can't handle this type
                            raise TypeError('agent config must be string (filename) or dict')

                        # TODO: use booleans all the way?
                        if stage_cacerts.lower() == 'true':
                            stage_cacerts = True
                        else:
                            stage_cacerts = False

                        # expand variables in virtenv string
                        virtenv = virtenv % {'pilot_sandbox' : saga.Url(pilot_sandbox).path,
                                             'global_sandbox': saga.Url(global_sandbox).path }

                        # Check for deprecated global_virtenv
                        global_virtenv = resource_cfg.get('global_virtenv')
                        if global_virtenv:
                            logger.warn ("'global_virtenv' keyword is deprecated -- use 'virtenv' and 'virtenv_mode'")
                            virtenv = global_virtenv
                            virtenv_mode = 'use'

                        # Create a host:port string for use by the bootstrap_1.
                        db_url = saga.Url(agent_dburl)
                        if db_url.port:
                            db_hostport = "%s:%d" % (db_url.host, db_url.port)
                        else:
                            db_hostport = "%s:%d" % (db_url.host, 27017) # mongodb default

                        # Open the remote sandbox
                        # TODO: make conditional on shared_fs?
                        sandbox_tgt = saga.filesystem.Directory(pilot_sandbox,
                                                                session=self._session,
                                                                flags=saga.filesystem.CREATE_PARENTS)

                        LOCAL_SCHEME = 'file'

                        # ------------------------------------------------------
                        # Copy the bootstrap shell script.
                        # This also creates the sandbox.
                        BOOTSTRAPPER_SCRIPT = "bootstrap_1.sh"
                        bootstrapper_path   = os.path.abspath("%s/../bootstrapper/%s" \
                                % (mod_dir, BOOTSTRAPPER_SCRIPT))

                        msg = "Using bootstrapper %s" % bootstrapper_path
                        logentries.append(Logentry(msg, logger=logger.info))

                        bs_script_url = saga.Url("%s://localhost%s" % (LOCAL_SCHEME, bootstrapper_path))

                        msg = "Copying bootstrapper '%s' to agent sandbox (%s)." \
                                % (bs_script_url, sandbox_tgt)
                        logentries.append(Logentry (msg, logger=logger.debug))

                        if shared_filesystem:
                            sandbox_tgt.copy(bs_script_url, BOOTSTRAPPER_SCRIPT)

                        # ------------------------------------------------------
                        # the version of the agent is derived from
                        # rp_version, which has the following format
                        # and interpretation:
                        #
                        # case rp_version:
                        #   @<token>:
                        #   @tag/@branch/@commit: # no sdist staging
                        #       git clone $github_base radical.pilot.src
                        #       (cd radical.pilot.src && git checkout token)
                        #       pip install -t $VIRTENV/rp_install/ radical.pilot.src
                        #       rm -rf radical.pilot.src
                        #       export PYTHONPATH=$VIRTENV/rp_install:$PYTHONPATH
                        #
                        #   release: # no sdist staging
                        #       pip install -t $VIRTENV/rp_install radical.pilot
                        #       export PYTHONPATH=$VIRTENV/rp_install:$PYTHONPATH
                        #
                        #   local: # needs sdist staging
                        #       tar zxf $sdist.tgz
                        #       pip install -t $VIRTENV/rp_install $sdist/
                        #       export PYTHONPATH=$VIRTENV/rp_install:$PYTHONPATH
                        #
                        #   debug: # needs sdist staging
                        #       tar zxf $sdist.tgz
                        #       pip install -t $SANDBOX/rp_install $sdist/
                        #       export PYTHONPATH=$SANDBOX/rp_install:$PYTHONPATH
                        #
                        #   installed: # no sdist staging
                        #       true
                        # esac
                        #
                        # virtenv_mode
                        #   private : error  if ve exists, otherwise create, then use
                        #   update  : update if ve exists, otherwise create, then use
                        #   create  : use    if ve exists, otherwise create, then use
                        #   use     : use    if ve exists, otherwise error,  then exit
                        #   recreate: delete if ve exists, otherwise create, then use
                        #      
                        # examples   :
                        #   [email protected]
                        #   [email protected]
                        #   [email protected]
                        #   [email protected]
                        #   [email protected]
                        #   [email protected]/tmp/my_agent.py
                        #
                        # Note that some combinations may be invalid,
                        # specifically in the context of virtenv_mode.  If, for
                        # example, virtenv_mode is 'use', then the 'virtenv:tag'
                        # will not make sense, as the virtenv is not updated.
                        # In those cases, the virtenv_mode is honored, and
                        # a warning is printed.
                        #
                        # Also, the 'stage' mode can only be combined with the
                        # 'local' source, or with a path to the agent (relative
                        # to mod_dir, or absolute).
                        #
                        # A rp_version which does not adhere to the
                        # above syntax is ignored, and the fallback [email protected]
                        # is used.

                        if  not rp_version.startswith('@') and \
                            rp_version not in ['installed', 'local', 'debug']:
                            raise ValueError("invalid rp_version '%s'" % rp_version)

                        stage_sdist=True
                        if rp_version in ['installed', 'release']:
                            stage_sdist = False

                        if rp_version.startswith('@'):
                            stage_sdist = False
                            rp_version  = rp_version[1:]  # strip '@'


                        # ------------------------------------------------------
                        # Copy the rp sdist if needed.  We actually also stage
                        # the sdists for radical.utils and radical.saga, so that
                        # we have the complete stack to install...
                        if stage_sdist:

                            for sdist_path in [ru.sdist_path, saga.sdist_path, rp_sdist_path]:

                                sdist_url = saga.Url("%s://localhost%s" % (LOCAL_SCHEME, sdist_path))
                                msg = "Copying sdist '%s' to sandbox (%s)." % (sdist_url, pilot_sandbox)
                                logentries.append(Logentry (msg, logger=logger.debug))
                                if shared_filesystem:
                                    sandbox_tgt.copy(sdist_url, os.path.basename(str(sdist_url)))


                        # ------------------------------------------------------
                        # Some machines cannot run pip due to outdated CA certs.
                        # For those, we also stage an updated certificate bundle
                        if stage_cacerts:
                            cc_path = os.path.abspath("%s/../bootstrapper/%s" \
                                    % (mod_dir, 'cacert.pem.gz'))

                            cc_url= saga.Url("%s://localhost/%s" % (LOCAL_SCHEME, cc_path))
                            msg = "Copying CA certificate bundle '%s' to sandbox (%s)." % (cc_url, pilot_sandbox)
                            logentries.append(Logentry (msg, logger=logger.debug))
                            if shared_filesystem:
                                sandbox_tgt.copy(cc_url, os.path.basename(str(cc_url)))


                        # ------------------------------------------------------
                        # sanity checks
                        if not python_dist        : raise RuntimeError("missing python distribution")
                        if not agent_spawner      : raise RuntimeError("missing agent spawner")
                        if not agent_scheduler    : raise RuntimeError("missing agent scheduler")
                        if not lrms               : raise RuntimeError("missing LRMS")
                        if not agent_launch_method: raise RuntimeError("missing agent launch method")
                        if not task_launch_method : raise RuntimeError("missing task launch method")

                        # massage some values
                        if not queue :
                            queue = default_queue

                        if  cleanup and isinstance (cleanup, bool) :
                            cleanup = 'luve'    #  l : log files
                                                #  u : unit work dirs
                                                #  v : virtualenv
                                                #  e : everything (== pilot sandbox)
                                                #
                            # we never cleanup virtenvs which are not private
                            if virtenv_mode != 'private':
                                cleanup = cleanup.replace ('v', '')

                        sdists = ':'.join([ru.sdist_name, saga.sdist_name, rp_sdist_name])

                        # if cores_per_node is set (!= None), then we need to
                        # allocate full nodes, and thus round up
                        if cores_per_node:
                            cores_per_node = int(cores_per_node)
                            number_cores = int(cores_per_node
                                    * math.ceil(float(number_cores)/cores_per_node))

                        # set mandatory args
                        bootstrap_args  = ""
                        bootstrap_args += " -d '%s'" % sdists
                        bootstrap_args += " -m '%s'" % virtenv_mode
                        bootstrap_args += " -p '%s'" % pilot_id
                        bootstrap_args += " -r '%s'" % rp_version
                        bootstrap_args += " -s '%s'" % session_id
                        bootstrap_args += " -v '%s'" % virtenv
                        bootstrap_args += " -b '%s'" % python_dist

                        # set optional args
                        if agent_type:              bootstrap_args += " -a '%s'" % agent_type
                        if lrms == "CCM":           bootstrap_args += " -c"
                        if pre_bootstrap_1:         bootstrap_args += " -e '%s'" % "' -e '".join (pre_bootstrap_1)
                        if pre_bootstrap_2:         bootstrap_args += " -w '%s'" % "' -w '".join (pre_bootstrap_2)
                        if forward_tunnel_endpoint: bootstrap_args += " -f '%s'" % forward_tunnel_endpoint
                        if forward_tunnel_endpoint: bootstrap_args += " -h '%s'" % db_hostport
                        if python_interpreter:      bootstrap_args += " -i '%s'" % python_interpreter
                        if tunnel_bind_device:      bootstrap_args += " -t '%s'" % tunnel_bind_device
                        if cleanup:                 bootstrap_args += " -x '%s'" % cleanup

                        # set some agent configuration
                        agent_cfg_dict['cores']              = number_cores
                        agent_cfg_dict['resource_cfg']       = resource_cfg
                        agent_cfg_dict['debug']              = os.environ.get('RADICAL_PILOT_AGENT_VERBOSE',
                                                                              logger.getEffectiveLevel())
                        agent_cfg_dict['mongodb_url']        = str(agent_dburl)
                        agent_cfg_dict['lrms']               = lrms
                        agent_cfg_dict['spawner']            = agent_spawner
                        agent_cfg_dict['scheduler']          = agent_scheduler
                        agent_cfg_dict['runtime']            = runtime
                        agent_cfg_dict['pilot_id']           = pilot_id
                        agent_cfg_dict['session_id']         = session_id
                        agent_cfg_dict['agent_launch_method']= agent_launch_method
                        agent_cfg_dict['task_launch_method'] = task_launch_method
                        agent_cfg_dict['export_to_cu']       = export_to_cu
                        agent_cfg_dict['cu_pre_exec']        = cu_pre_exec
                        agent_cfg_dict['cu_post_exec']       = cu_post_exec
                        if mpi_launch_method:
                            agent_cfg_dict['mpi_launch_method']  = mpi_launch_method
                        if cores_per_node:
                            agent_cfg_dict['cores_per_node'] = cores_per_node

                        # ------------------------------------------------------
                        # Write agent config dict to a json file in pilot sandbox.

                        cfg_tmp_dir = tempfile.mkdtemp(prefix='rp_agent_cfg_dir')
                        agent_cfg_name = 'agent_0.cfg'
                        cfg_tmp_file = os.path.join(cfg_tmp_dir, agent_cfg_name)
                        cfg_tmp_handle = os.open(cfg_tmp_file, os.O_WRONLY|os.O_CREAT)

                        # Convert dict to json file
                        msg = "Writing agent configuration to file '%s'." % cfg_tmp_file
                        logentries.append(Logentry (msg, logger=logger.debug))
                        ru.write_json(agent_cfg_dict, cfg_tmp_file)

                        cf_url = saga.Url("%s://localhost%s" % (LOCAL_SCHEME, cfg_tmp_file))
                        msg = "Copying agent configuration file '%s' to sandbox (%s)." % (cf_url, pilot_sandbox)
                        logentries.append(Logentry (msg, logger=logger.debug))
                        if shared_filesystem:
                            sandbox_tgt.copy(cf_url, agent_cfg_name)

                        # Close agent config file
                        os.close(cfg_tmp_handle)

                        # ------------------------------------------------------
                        # Done with all transfers to pilot sandbox, close handle
                        sandbox_tgt.close()

                        # ------------------------------------------------------
                        # now that the scripts are in place and configured, 
                        # we can launch the agent
                        js_url = saga.Url(js_endpoint)
                        logger.debug ("saga.job.Service ('%s')" % js_url)
                        if  js_url in self._shared_worker_data['job_services'] :
                            js = self._shared_worker_data['job_services'][js_url]
                        else :
                            js = saga.job.Service(js_url, session=self._session)
                            self._shared_worker_data['job_services'][js_url] = js


                        # ------------------------------------------------------
                        # Create SAGA Job description and submit the pilot job

                        jd = saga.job.Description()

                        jd.executable            = "/bin/bash"
                        jd.arguments             = ["-l %s" % BOOTSTRAPPER_SCRIPT, bootstrap_args]
                        jd.working_directory     = saga.Url(pilot_sandbox).path
                        jd.project               = project
                        jd.output                = "bootstrap_1.out"
                        jd.error                 = "bootstrap_1.err"
                        jd.total_cpu_count       = number_cores
                        jd.processes_per_host    = cores_per_node
                        jd.wall_time_limit       = runtime
                        jd.total_physical_memory = memory
                        jd.queue                 = queue
                        jd.candidate_hosts       = candidate_hosts
                        jd.environment           = dict()

                        # TODO: not all files might be required, this also needs to be made conditional
                        if not shared_filesystem:
                            jd.file_transfer = [
                                #'%s > %s' % (bootstrapper_path, os.path.basename(bootstrapper_path)),
                                '%s > %s' % (bootstrapper_path, os.path.join(jd.working_directory, 'input', os.path.basename(bootstrapper_path))),
                                '%s > %s' % (cfg_tmp_file, os.path.join(jd.working_directory, 'input', agent_cfg_name)),
                                #'%s < %s' % ('agent.log', os.path.join(jd.working_directory, 'agent.log')),
                                #'%s < %s' % (os.path.join(jd.working_directory, 'agent.log'), 'agent.log'),
                                #'%s < %s' % ('agent.log', 'agent.log'),
                                #'%s < %s' % (os.path.join(jd.working_directory, 'STDOUT'), 'unit.000000/STDOUT'),
                                #'%s < %s' % (os.path.join(jd.working_directory, 'unit.000000/STDERR'), 'STDERR')
                                #'%s < %s' % ('unit.000000/STDERR', 'unit.000000/STDERR')

                                # TODO: This needs to go into a per pilot directory on the submit node
                                '%s < %s' % ('pilot.0000.log.tgz', 'pilot.0000.log.tgz')
                            ]

                            if stage_sdist:
                                jd.file_transfer.extend([
                                    #'%s > %s' % (rp_sdist_path, os.path.basename(rp_sdist_path)),
                                    '%s > %s' % (rp_sdist_path, os.path.join(jd.working_directory, 'input', os.path.basename(rp_sdist_path))),
                                    #'%s > %s' % (saga.sdist_path, os.path.basename(saga.sdist_path)),
                                    '%s > %s' % (saga.sdist_path, os.path.join(jd.working_directory, 'input', os.path.basename(saga.sdist_path))),
                                    #'%s > %s' % (ru.sdist_path, os.path.basename(ru.sdist_path)),
                                    '%s > %s' % (ru.sdist_path, os.path.join(jd.working_directory, 'input', os.path.basename(ru.sdist_path)))
                                ])

                            if stage_cacerts:
                                jd.file_transfer.append('%s > %s' % (cc_path, os.path.join(jd.working_directory, 'input', os.path.basename(cc_path))))

                            if 'RADICAL_PILOT_PROFILE' in os.environ :
                                # TODO: This needs to go into a per pilot directory on the submit node
                                jd.file_transfer.append('%s < %s' % ('pilot.0000.prof.tgz', 'pilot.0000.prof.tgz'))

                        # Set the SPMD variation only if required
                        if spmd_variation:
                            jd.spmd_variation = spmd_variation

                        if 'RADICAL_PILOT_PROFILE' in os.environ :
                            jd.environment['RADICAL_PILOT_PROFILE'] = 'TRUE'

                        logger.debug("Bootstrap command line: %s %s" % (jd.executable, jd.arguments))

                        msg = "Submitting SAGA job with description: %s" % str(jd.as_dict())
                        logentries.append(Logentry (msg, logger=logger.debug))

                        try:
                            pilotjob = js.create_job(jd)
                        except saga.BadParameter as e:
                            raise ValueError('Pilot submission to %s failed: %s' % (resource_key, e))
                        pilotjob.run()

                        # Clean up agent config file and dir after submission
                        os.unlink(cfg_tmp_file)
                        os.rmdir(cfg_tmp_dir)

                        # do a quick error check
                        if pilotjob.state == saga.FAILED:
                            raise RuntimeError ("SAGA Job state is FAILED.")

                        saga_job_id = pilotjob.id
                        self._shared_worker_data['job_ids'][pilot_id] = [saga_job_id, js_url]

                        msg = "SAGA job submitted with job id %s" % str(saga_job_id)
                        logentries.append(Logentry (msg, logger=logger.debug))

                        #
                        # ------------------------------------------------------

                        log_dicts = list()
                        for le in logentries :
                            log_dicts.append (le.as_dict())

                        # Update the Pilot's state to 'PENDING_ACTIVE' if SAGA job submission was successful.
                        ts = time.time()
                        ret = pilot_col.update(
                            {"_id"  : pilot_id,
                             "state": LAUNCHING},
                            {"$set" : {"state": PENDING_ACTIVE,
                                       "saga_job_id": saga_job_id,
                                       "health_check_enabled": health_check,
                                       "agent_config": agent_cfg_dict},
                             "$push": {"statehistory": {"state": PENDING_ACTIVE, "timestamp": ts}},
                             "$pushAll": {"log": log_dicts}
                            }
                        )

                        if  ret['n'] == 0 :
                            # could not update, probably because the agent is
                            # running already.  Just update state history and
                            # jobid then
                            # FIXME: make sure of the agent state!
                            ret = pilot_col.update(
                                {"_id"  : pilot_id},
                                {"$set" : {"saga_job_id": saga_job_id,
                                           "health_check_enabled": health_check},
                                 "$push": {"statehistory": {"state": PENDING_ACTIVE, "timestamp": ts}},
                                 "$pushAll": {"log": log_dicts}}
                            )


                    except Exception as e:
                        # Update the Pilot's state 'FAILED'.
                        out, err, log = self._get_pilot_logs (pilot_col, pilot_id)
                        ts = time.time()

                        # FIXME: we seem to be unable to bson/json handle saga
                        # log messages containing a '#'.  This shows up here.
                        # Until we find a clean workaround, make log shorter and
                        # rely on saga logging to reveal the problem.
                        msg = "Pilot launching failed! (%s)" % e
                        logentries.append (Logentry (msg))

                        log_dicts    = list()
                        log_messages = list()
                        for le in logentries :
                            log_dicts.append (le.as_dict())
                            log_messages.append (str(le.message))

                        pilot_col.update(
                            {"_id"  : pilot_id,
                             "state": {"$ne" : FAILED}},
                            {"$set" : {
                                "state"   : FAILED,
                                "stdout"  : out,
                                "stderr"  : err,
                                "logfile" : log},
                             "$push": {"statehistory": {"state"    : FAILED,
                                                        "timestamp": ts}},
                             "$pushAll": {"log": log_dicts}}
                        )
                        logger.exception ('\n'.join (log_messages))

        except SystemExit as e :
            logger.exception("pilot launcher thread caught system exit -- forcing application shutdown")
            import thread
            thread.interrupt_main ()
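
Buried in the launcher above is a small, reusable pattern: the agent configuration is written into its own tempfile.mkdtemp() directory, copied into the pilot sandbox, and the directory is then removed with os.unlink() plus os.rmdir(). A hedged sketch of the same idea; stage_config and copy_to_sandbox are placeholders for the SAGA-based transfer, and shutil.rmtree stands in for the explicit unlink/rmdir pair:

import json
import os
import shutil
import tempfile

def stage_config(cfg_dict, copy_to_sandbox):
    """Write a config dict into a private temp directory, hand the file to a
    transfer callable, then remove the directory again."""
    cfg_tmp_dir = tempfile.mkdtemp(prefix='rp_agent_cfg_dir')
    cfg_tmp_file = os.path.join(cfg_tmp_dir, 'agent_0.cfg')
    try:
        with open(cfg_tmp_file, 'w') as handle:
            json.dump(cfg_dict, handle, indent=2)
        copy_to_sandbox(cfg_tmp_file)
    finally:
        shutil.rmtree(cfg_tmp_dir)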

Example 33

Project: saga-python
Source File: go_remote_dir_copy.py
View license
def main():

    tmp_dir = None

    try:

        tmp_dir = tempfile.mkdtemp(prefix='saga-test-', suffix='-%s' % TEST_NAME,
                                   dir=os.path.expanduser('~/tmp'))

        print 'tmpdir: %s' % tmp_dir

        ctx = saga.Context("x509")
        ctx.user_proxy = '/Users/mark/proj/myproxy/xsede.x509'

        session = saga.Session()
        session.add_context(ctx)

        source_url = saga.Url()
        source_url.schema = 'go'
        source_url.host = SOURCE
        source_url.path = tmp_dir

        target_url = saga.Url()
        target_url.schema = 'go'
        target_url.host = TARGET
        target_url.path = os.path.join('~/saga-tests/', os.path.basename(tmp_dir))

        print "Point to local Directory through GO ..."
        d = saga.filesystem.Directory(source_url)
        print "And check ..."
        assert d.is_dir() == True
        assert d.is_file() == False
        assert d.is_link() == False
        d.close()
        print "Point to remote Directory through GO ..."
        d = saga.filesystem.Directory(target_url, flags=saga.filesystem.CREATE_PARENTS)
        print "And check ..."
        assert d.is_dir() == True
        assert d.is_file() == False
        assert d.is_link() == False
        d.close()

        print "Point to local file through GO, before creation ..."
        caught = False
        try:
            saga.filesystem.File(os.path.join(str(source_url), FILE_A_level_0))
        except saga.DoesNotExist:
            caught = True
        assert caught == True

        print "Create actual file ..."
        touch(tmp_dir, FILE_A_level_0)
        print "Try again ..."
        f = saga.filesystem.File(os.path.join(str(source_url), FILE_A_level_0))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print "Copy local file to remote, using different filename ..."
        d = saga.filesystem.Directory(target_url, flags=saga.filesystem.CREATE_PARENTS)
        d.copy(os.path.join(str(source_url), FILE_A_level_0), FILE_A_level_0+COPIED_SUFFIX)
        d.close()
        f = saga.filesystem.File(os.path.join(str(target_url), FILE_A_level_0+COPIED_SUFFIX))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print "Copy local file to remote, keeping filename in tact ..."
        d = saga.filesystem.Directory(target_url, flags=saga.filesystem.CREATE_PARENTS)
        d.copy(os.path.join(str(source_url), FILE_A_level_0), FILE_A_level_0)
        d.close()
        f = saga.filesystem.File(os.path.join(str(target_url), FILE_A_level_0))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print 'Create file in level 1 ...'
        tree = LEVEL_1
        os.mkdir(os.path.join(tmp_dir, tree))
        touch(os.path.join(tmp_dir, tree), FILE_A_level_1)
        print "Test local file ..."
        f = saga.filesystem.File(os.path.join(str(source_url), tree, FILE_A_level_1))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print "Copy local file to remote, keeping filename in tact ..."
        d = saga.filesystem.Directory(os.path.join(str(target_url), tree), flags=saga.filesystem.CREATE_PARENTS)
        d.copy(os.path.join(str(source_url), tree, FILE_A_level_1), FILE_A_level_1)
        d.close()

        print "Test file after transfer ..."
        f = saga.filesystem.File(os.path.join(str(target_url), tree, FILE_A_level_1))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print "Copy non-existent local file to remote, keeping filename in tact ..."
        d = saga.filesystem.Directory(str(target_url), flags=saga.filesystem.CREATE_PARENTS)
        caught = False
        try:
            d.copy(os.path.join(str(source_url), NON_EXISTING_FILE), NON_EXISTING_FILE)
        except saga.DoesNotExist:
            caught = True
        assert caught == True

        print "Test file after (non-)transfer ..."
        caught = False
        try:
            saga.filesystem.File(os.path.join(str(target_url), NON_EXISTING_FILE))
        except saga.DoesNotExist:
            caught = True
        assert caught == True

        # destination = "go://gridftp.stampede.tacc.xsede.org/~/tmp/"
        # #destination = "go://oasis-dm.sdsc.xsede.org/~/tmp/"
        # #destination = "go://ncsa#BlueWaters/~/tmp/"
        # #destination = "go://marksant#netbook/Users/mark/tmp/go/"
        # src_filename = "my_file"
        # dst_filename = "my_file_"
        # rt_filename = "my_file__"
        #
        # # open home directory on a remote machine
        # source_dir = saga.filesystem.Directory(source)
        #
        # # copy .bash_history to /tmp/ on the local machine
        # source_dir.copy(src_filename, os.path.join(destination, dst_filename))
        #
        # # list 'm*' in local /tmp/ directory
        # dest_dir = saga.filesystem.Directory(destination)
        # for entry in dest_dir.list(pattern='%s*' % src_filename[0]):
        #     print entry
        #
        # dest_file = saga.filesystem.File(os.path.join(destination, dst_filename))
        # assert dest_file.is_file() == True
        # assert dest_file.is_link() == False
        # assert dest_file.is_dir() == False
        # print 'Size: %d' % dest_file.get_size()
        #
        # dest_file.copy(source)
        #
        # dest_file.copy(os.path.join(source+'broken', rt_filename))

        print "Before return 0"
        return 0

    except saga.SagaException as ex:
        # Catch all saga exceptions
        print "An exception occurred: (%s) %s " % (ex.type, (str(ex)))
        # Trace back the exception. That can be helpful for debugging.
        print " \n*** Backtrace:\n %s" % ex.traceback

        print "before return -1"
        return -1

    finally:

        print "and finally ..."

        if CLEANUP and tmp_dir:
            shutil.rmtree(tmp_dir)

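The test above follows the pattern that recurs throughout these listings: create a scratch directory with mkdtemp before the work, then remove the whole tree in the finally block, because mkdtemp never deletes anything on its own. A minimal standalone sketch of that shape (the touch helper is redefined locally and file_a is just an illustrative name):

import os
import shutil
import tempfile

def touch(path, name):
    # create an empty file inside the scratch directory
    open(os.path.join(path, name), 'w').close()

tmp_dir = tempfile.mkdtemp()
try:
    touch(tmp_dir, 'file_a')
    assert os.path.isfile(os.path.join(tmp_dir, 'file_a'))
finally:
    # mkdtemp performs no cleanup of its own
    shutil.rmtree(tmp_dir)
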
Example 34

View license
def main():
    """Run the main app.

    This application will create all Python wheel files from within an
    environment.  The purpose is to create pre-compiled python wheels from
    the RPC playbooks.
    """

    # Parse input arguments
    user_args = _user_args()

    # Load the logging
    _logging = logger.LogSetup(debug_logging=user_args['debug'])
    if user_args['quiet'] is True or user_args['debug'] is False:
        stream = False
    else:
        stream = True

    _logging.default_logger(
        name='rpc_wheel_builder',
        enable_stream=stream
    )

    global LOG
    LOG = logger.getLogger(name='rpc_wheel_builder')

    # Create the output path
    output_path = _get_abs_path(path=user_args['output'])
    LOG.info('Getting output path')
    _mkdirs(path=output_path)

    # Create the build path
    LOG.info('Getting build path')
    indicator_kwargs = {
        'debug': user_args['debug'],
        'quiet': user_args['quiet'],
        'note': 'Gather dependencies... '
    }
    with IndicatorThread(**indicator_kwargs):
        if user_args['build_dir'] is not None:
            build_path = _get_abs_path(path=user_args['build_dir'])
            _mkdirs(path=build_path)
        else:
            build_path = tempfile.mkdtemp(prefix='rpc_wheels_build_')
            pre_input = user_args['pre_input']
            if pre_input:
                pre_input_path = _get_abs_path(path=user_args['pre_input'])
                with open(pre_input_path, 'rb') as f:
                    global PYTHON_PACKAGES
                    PYTHON_PACKAGES = json.loads(f.read())
            else:
                # Get the input path
                LOG.info('Getting input path')
                new_setup(
                    user_args=user_args,
                    input_path=_get_abs_path(path=user_args['input'])
                )

    indicator_kwargs['note'] = 'Building wheels... '
    with IndicatorThread(**indicator_kwargs):
        # Create all of the python package wheels
        make_wheels(
            wheel_dir=output_path,
            build_dir=build_path
        )

    indicator_kwargs['note'] = 'Generating build log... '
    with IndicatorThread(**indicator_kwargs):
        # Get a timestamp and create a report file
        utctime = datetime.datetime.utcnow()
        utctime = utctime.strftime("%Y%m%d_%H%M%S")
        backup_name = '%s-build-report-%s.json' % (
            user_args['release'],
            utctime
        )
        output_report_file = os.path.join(
            output_path,
            'json-reports',
            backup_name
        )

        # Make the directory if needed
        _mkdirs(path=os.path.dirname(output_report_file))

        # Generate a timestamped report file
        LOG.info('Generating packaging report [ %s ]', output_report_file)
        with open(output_report_file, 'wb') as f:
            f.write(
                json.dumps(
                    PYTHON_PACKAGES,
                    indent=2,
                    sort_keys=True
                )
            )

    # If link_dir is defined create a link to all built wheels.
    links_path = user_args.get('link_dir')
    if links_path:
        indicator_kwargs['note'] = 'Creating file links... '
        with IndicatorThread(**indicator_kwargs):
            links_path = _get_abs_path(path=links_path)
            LOG.info('Creating Links at [ %s ]', links_path)
            _mkdirs(path=links_path)

            # Change working directory.
            os.chdir(links_path)

            # Create all the links
            for inode in PYTHON_PACKAGES['built_files']:
                try:
                    dest_link = os.path.join(links_path, inode)

                    # Remove the destination inode if it exists
                    if os.path.exists(dest_link):
                        os.remove(dest_link)

                    # Create the link using the relative path
                    os.symlink(os.path.relpath(
                        os.path.join(output_path, inode)), dest_link
                    )
                except OSError as exp:
                    LOG.warn(
                        'Error Creating Link: [ %s ] Error: [ %s ]',
                        inode,
                        exp
                    )
                else:
                    LOG.debug('Link Created: [ %s ]', dest_link)

    # if git_repos was defined save all of the sources to the defined location
    git_repos_path = user_args.get('git_repos')
    if git_repos_path:
        indicator_kwargs['note'] = 'Storing updated git sources...'
        with IndicatorThread(**indicator_kwargs):
            LOG.info('Updating git sources [ %s ]', links_path)
            _store_git_repos(_get_abs_path(path=git_repos_path))

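Example 34 only falls back to mkdtemp when no build_dir argument was supplied, and it passes a prefix so the scratch directory is easy to recognize under the system temp location. A small sketch of the prefix behaviour, independent of the script above:

import os
import tempfile

# prefix only affects the generated name; without dir= the directory is
# still created under tempfile.gettempdir()
build_path = tempfile.mkdtemp(prefix='rpc_wheels_build_')
print(build_path)   # e.g. /tmp/rpc_wheels_build_k2j3h4 on Linux
os.rmdir(build_path)
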
Example 35

Project: imagefactory
Source File: Docker.py
View license
    def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
        self.log.debug("builder_should_create_target_image called for Docker plugin - doing all our work here then stopping the process")
        tdlobj = oz.TDL.TDL(xmlstring=template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])
        # At this point our input base_image is available as builder.base_image.data
        # We simply mount it up in libguestfs and tar out the results as builder.target_image.data
        wrap_metadata = parameter_cast_to_bool(parameters.get('create_docker_metadata', True))
        compress_type = parameters.get('compress', None)
        if compress_type:
            if compress_type in self.compress_commands.keys():
                compress_command = self.compress_commands[compress_type]
            else:
                raise Exception("Passed unknown compression type (%s) for Docker plugin" % (compress_type))
        else:
            compress_command = None
        guestfs_handle = launch_inspect_and_mount(builder.base_image.data, readonly = True)
        storagedir = os.path.dirname(builder.target_image.data)

        # guestfs lets us mount locally via the API, which is cool, but requires that
        # we call a blocking function to activate the mount, which requires a thread
        # We also need a temp dir to mount it to - do our best to clean up when things
        # go wrong
        tempdir = None
        fuse_thread = None
        try:
            tempdir = tempfile.mkdtemp(dir=storagedir)
            self.log.debug("Mounting input image locally at (%s)" % (tempdir))
            guestfs_handle.mount_local(tempdir)
            def _run_guestmount(g):
                g.mount_local_run()
            self.log.debug("Launching mount_local_run thread")
            fuse_thread = threading.Thread(group=None, target=_run_guestmount, args=(guestfs_handle,))
            fuse_thread.start()
            self.log.debug("Creating tar of entire image")
            # NOTE - we used to capture xattrs here but have reverted the change for now
            #        as SELinux xattrs break things in unexpected ways and the tar feature
            #        to allow selective inclusion is broken
            # TODO: Follow up with tar maintainers and docker image creators to find out what
            #       if any xattrs we really need to capture here
            tarcmd = [ 'tar',  '-cf', builder.target_image.data, '-C', tempdir ]
            # User may pass in a comma separated list of additional options to the tar command
            tar_options = parameters.get('tar_options', None)
            if tar_options:
                tar_options_list=tar_options.split(',')
                for option in tar_options_list:
                    tarcmd.append(option.strip())
            # User may pass in a comma separated list of excludes to override this
            # Default to ./etc/fstab as many people have complained this does not belong in Docker images
            tar_excludes = parameters.get('tar_excludes', './etc/fstab').split(',')
            for exclude in tar_excludes:
                tarcmd.append('--exclude=%s' % (exclude.strip()))
            tarcmd.append('./')
            self.log.debug("Command: %s" % (str(tarcmd)))
            subprocess.check_call(tarcmd)
            if wrap_metadata:
                self.log.debug("Estimating size of tar contents to include in Docker metadata")
                size = 0
                for root, dirs, files in os.walk(tempdir):
                    for name in files:
                        fp = os.path.join(root,name)
                        if os.path.isfile(fp) and not os.path.islink(fp):
                            size += os.path.getsize(fp)
                self.log.debug("Total real file content size (%d)" % (size))
        except Exception, e:
            self.log.exception(e)
            raise
        finally:
            if tempdir:
                try:
                    subprocess.check_call( ['umount', '-f', tempdir] )
                    os.rmdir(tempdir)
                except Exception, e:
                    self.log.exception(e)
                    self.log.error("WARNING: Could not unmount guest at (%s) - may still be mounted" % (tempdir) )
            if fuse_thread:
                fuse_thread.join(30.0)
                if fuse_thread.isAlive():
                    self.log.error("Guestfs local mount thread is still active - FUSE filesystem still mounted at (%s)" % (tempdir) )

        if wrap_metadata:
            # Get any parameters and if they are not set, create our defaults
            # Docker image names should not have uppercase characters 
            # https://fedorahosted.org/cloud/ticket/131
            repository = parameters.get('repository',tdlobj.name).lower()
            tag = parameters.get('tag','latest')
            docker_image_id = parameters.get('docker_image_id', self._generate_docker_id())
            cmd = parameters.get('docker_cmd', 'null')
            env = parameters.get('docker_env', 'null')
            label = parameters.get('docker_label', 'null')
            rdict = { repository: { tag: docker_image_id } }
                       
            dockerversion = parameters.get('dockerversion', '0.11.1')
            if not dockerversion in self.docker_templates_dict:
                raise Exception("No docker JSON template available for specified docker version (%s)" % (dockerversion))
            docker_json_template=self.docker_templates_dict[dockerversion]

            arch = tdlobj.arch
            if arch == "x86_64":
                arch = "amd64"
            elif arch == "armv7hl":
                arch = "armhfp"
            tdict = { }
            tdict['commentstring'] = parameters.get('comment', 'Created by Image Factory')
            tdict['os'] = parameters.get('os', 'linux')
            tdict['createdtime'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
            tdict['arch'] = arch
            tdict['idstring'] = docker_image_id
            tdict['cmd'] = cmd
            tdict['env'] = env
            tdict['label'] = label
            tdict['size'] = size

            image_json = docker_json_template.format(**tdict) 

            # v2 images
            # TODO: Something significantly less hacky looking.....
            if dockerversion == "1.10.1":
                shasum = self._file_sha256(builder.target_image.data)
                image_v2_config = json.loads(image_json)
                # The new top level JSON file is a light modification of the layer JSON
                del image_v2_config['Size']
                del image_v2_config['id']
                image_v2_config['history'] = [ { 'comment': image_v2_config['comment'],
                                               'created': image_v2_config['created'] } ]
                image_v2_config['rootfs'] = { 'diff_ids': [ "sha256:%s" % (shasum) ],
                                            'type': 'layers' }

                # Docker wants this config file to be named after its own sha256 sum
                image_v2_config_id = hashlib.sha256(json.dumps(image_v2_config)).hexdigest()

                image_v2_manifest = [ { "Config": "%s.json" % (image_v2_config_id),
                                        "Layers": [ "%s/layer.tar" % (docker_image_id) ],
                                        "RepoTags": [ "%s:%s" % (repository, tag) ] } ]

            # Create directory
            storagedir = os.path.dirname(builder.target_image.data)
            tempdir = None
            try:
                tempdir = tempfile.mkdtemp(dir=storagedir)
                self.log.debug("Creating docker image directory structure in (%s)" % (tempdir))

                repositories_path = os.path.join(tempdir,'repositories')
                repositories = open(repositories_path,"w")
                json.dump(rdict, repositories)
                repositories.close()

                if dockerversion == "1.10.1":
                    config_path = os.path.join(tempdir, '%s.json' % (image_v2_config_id))
                    config = open(config_path, "w")
                    json.dump(image_v2_config, config)
                    config.close()

                    manifest_path = os.path.join(tempdir, 'manifest.json')
                    manifest = open(manifest_path, "w")
                    json.dump(image_v2_manifest, manifest)
                    manifest.close()

                imagedir = os.path.join(tempdir, docker_image_id)
                os.mkdir(imagedir)

                jsonfile_path = os.path.join(imagedir,'json')
                jsonfile = open(jsonfile_path,'w')
                jsonfile.write(image_json)
                jsonfile.close()

                versionfile_path = os.path.join(imagedir,'VERSION')
                versionfile = open(versionfile_path, 'w')
                # TODO - Track version developments and compatibility
                versionfile.write("1.0")
                versionfile.close()

                layerfile_path = os.path.join(imagedir,'layer.tar')
                shutil.move(builder.target_image.data, layerfile_path)

                outtar = tarfile.TarFile(name=builder.target_image.data, mode="w")
                # It turns out that in at least some configurations or versions, Docker will
                # complain if the repositories file is not the last file in the archive
                # we add our single image directory first and then the repositories file to
                # avoid this
                outtar.add(imagedir, arcname=docker_image_id)
                outtar.add(repositories_path, arcname='repositories')
                if dockerversion == "1.10.1":
                    outtar.add(config_path, arcname='%s.json' % (image_v2_config_id))
                    outtar.add(manifest_path, arcname='manifest.json')
                outtar.close()
            finally:
                if tempdir:
                    try:
                        shutil.rmtree(tempdir)
                    except:
                        self.log.warning("Error encountered when removing temp dir (%s) - may not have been deleted" % (tempdir))

        if compress_command:
            self.log.debug("Compressing tar file using %s" % (compress_type))
            rawimage =  builder.target_image.data
            compimage =  builder.target_image.data + ".tmp.%s" % (compress_type)
            result = subprocess.call(compress_command % ( rawimage, compimage), shell = True)
            if result:
                raise Exception("Compression of image failed")
            self.log.debug("Compression complete, replacing original")
            os.unlink(rawimage)
            os.rename(compimage, rawimage)
            self.log.debug("Done")
        return False

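The Docker plugin passes dir=storagedir so the scratch directory is created next to the target image rather than under the default temp location; that keeps the later shutil.move of layer.tar on the same filesystem. A minimal sketch of the dir= keyword, with storagedir standing in for os.path.dirname(builder.target_image.data):

import shutil
import tempfile

storagedir = tempfile.gettempdir()   # stand-in for the image storage directory

tempdir = None
try:
    # the new directory is created inside storagedir, so moves between the
    # two stay on one filesystem
    tempdir = tempfile.mkdtemp(dir=storagedir)
    print(tempdir)
finally:
    if tempdir:
        shutil.rmtree(tempdir)
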
Example 36

Project: bleachbit
Source File: Cleaner.py
View license
    def get_commands(self, option_id):
        # This variable will collect fully expanded file names, and
        # at the end of this function, they will be checked they exist
        # and processed through Command.Delete().
        files = []

        # cache
        if 'posix' == os.name and 'cache' == option_id:
            dirname = os.path.expanduser("~/.cache/")
            for filename in children_in_directory(dirname, True):
                if self.whitelisted(filename):
                    continue
                files += [filename]

        # custom
        if 'custom' == option_id:
            for (c_type, c_path) in options.get_custom_paths():
                if 'file' == c_type:
                    files += [c_path]
                elif 'folder' == c_type:
                    files += [c_path]
                    for path in children_in_directory(c_path, True):
                        files += [path]
                else:
                    raise RuntimeError(
                        'custom folder has invalid type %s' % c_type)

        # menu
        menu_dirs = ['~/.local/share/applications',
                     '~/.config/autostart',
                     '~/.gnome/apps/',
                     '~/.gnome2/panel2.d/default/launchers',
                     '~/.gnome2/vfolders/applications/',
                     '~/.kde/share/apps/RecentDocuments/',
                     '~/.kde/share/mimelnk',
                     '~/.kde/share/mimelnk/application/ram.desktop',
                     '~/.kde2/share/mimelnk/application/',
                     '~/.kde2/share/applnk']

        if 'posix' == os.name and 'desktop_entry' == option_id:
            for dirname in menu_dirs:
                for filename in [fn for fn in children_in_directory(dirname, False)
                                 if fn.endswith('.desktop')]:
                    if Unix.is_broken_xdg_desktop(filename):
                        yield Command.Delete(filename)

        # unwanted locales
        if 'posix' == os.name and 'localizations' == option_id:
            for path in Unix.locales.localization_paths(locales_to_keep=options.get_languages()):
                if os.path.isdir(path):
                    for f in FileUtilities.children_in_directory(path, True):
                        yield Command.Delete(f)
                yield Command.Delete(path)

        # Windows logs
        if 'nt' == os.name and 'logs' == option_id:
            paths = (
                '$ALLUSERSPROFILE\\Application Data\\Microsoft\\Dr Watson\\*.log',
                '$ALLUSERSPROFILE\\Application Data\\Microsoft\\Dr Watson\\user.dmp',
                '$LocalAppData\\Microsoft\\Windows\\WER\\ReportArchive\\*\\*',
                '$LocalAppData\\Microsoft\\Windows\\WER\\ReportQueue\\*\\*',
                '$programdata\\Microsoft\\Windows\\WER\\ReportArchive\\*\\*',
                '$programdata\\Microsoft\\Windows\\WER\\ReportQueue\\*\\*',
                '$localappdata\\Microsoft\\Internet Explorer\\brndlog.bak',
                '$localappdata\\Microsoft\\Internet Explorer\\brndlog.txt',
                '$windir\\*.log',
                '$windir\\imsins.BAK',
                '$windir\\OEWABLog.txt',
                '$windir\\SchedLgU.txt',
                '$windir\\ntbtlog.txt',
                '$windir\\setuplog.txt',
                '$windir\\REGLOCS.OLD',
                '$windir\\Debug\\*.log',
                '$windir\\Debug\\Setup\\UpdSh.log',
                '$windir\\Debug\\UserMode\\*.log',
                '$windir\\Debug\\UserMode\\ChkAcc.bak',
                '$windir\\Debug\\UserMode\\userenv.bak',
                '$windir\\Microsoft.NET\\Framework\\*\\*.log',
                '$windir\\pchealth\\helpctr\\Logs\\hcupdate.log',
                '$windir\\security\\logs\\*.log',
                '$windir\\security\\logs\\*.old',
                '$windir\\SoftwareDistribution\\*.log',
                '$windir\\SoftwareDistribution\\DataStore\\Logs\\*',
                '$windir\\system32\\TZLog.log',
                '$windir\\system32\\config\\systemprofile\\Application Data\\Microsoft\\Internet Explorer\\brndlog.bak',
                '$windir\\system32\\config\\systemprofile\\Application Data\\Microsoft\\Internet Explorer\\brndlog.txt',
                '$windir\\system32\\LogFiles\\AIT\\AitEventLog.etl.???',
                '$windir\\system32\\LogFiles\\Firewall\\pfirewall.log*',
                '$windir\\system32\\LogFiles\\Scm\\SCM.EVM*',
                '$windir\\system32\\LogFiles\\WMI\\Terminal*.etl',
                '$windir\\system32\\LogFiles\\WMI\\RTBackup\\EtwRT.*etl',
                '$windir\\system32\\wbem\\Logs\\*.lo_',
                '$windir\\system32\\wbem\\Logs\\*.log', )

            for path in paths:
                expanded = expandvars(path)
                for globbed in glob.iglob(expanded):
                    files += [globbed]

        # memory
        if sys.platform.startswith('linux') and 'memory' == option_id:
            yield Command.Function(None, Memory.wipe_memory, _('Memory'))

        # memory dump
        # how to manually create this file
        # http://www.pctools.com/guides/registry/detail/856/
        if 'nt' == os.name and 'memory_dump' == option_id:
            fname = expandvars('$windir\\memory.dmp')
            if os.path.exists(fname):
                files += [fname]
            for fname in glob.iglob(expandvars('$windir\\Minidump\\*.dmp')):
                files += [fname]

        # most recently used documents list
        if 'posix' == os.name and 'recent_documents' == option_id:
            files += [os.path.expanduser("~/.recently-used")]
            # GNOME 2.26 (as seen on Ubuntu 9.04) will retain the list
            # in memory if it is simply deleted, so it must be shredded
            # (or at least truncated).
            #
            # GNOME 2.28.1 (Ubuntu 9.10) and 2.30 (10.04) do not re-read
            # the file after truncation, but do re-read it after
            # shredding.
            #
            # https://bugzilla.gnome.org/show_bug.cgi?id=591404
            for pathname in ["~/.recently-used.xbel", "~/.local/share/recently-used.xbel"]:
                pathname = os.path.expanduser(pathname)
                if os.path.lexists(pathname):
                    yield Command.Shred(pathname)
                    if HAVE_GTK:
                        gtk.RecentManager().purge_items()

        if 'posix' == os.name and 'rotated_logs' == option_id:
            for path in Unix.rotated_logs():
                yield Command.Delete(path)

        # temporary files
        if 'posix' == os.name and 'tmp' == option_id:
            dirnames = ['/tmp', '/var/tmp']
            for dirname in dirnames:
                for path in children_in_directory(dirname, True):
                    is_open = FileUtilities.openfiles.is_open(path)
                    ok = not is_open and os.path.isfile(path) and \
                        not os.path.islink(path) and \
                        FileUtilities.ego_owner(path) and \
                        not self.whitelisted(path)
                    if ok:
                        yield Command.Delete(path)

        # temporary files
        if 'nt' == os.name and 'tmp' == option_id:
            dirname = expandvars(
                "$USERPROFILE\\Local Settings\\Temp\\")
            # whitelist the folder %TEMP%\Low but not its contents
            # https://bugs.launchpad.net/bleachbit/+bug/1421726
            low = os.path.join(dirname, 'low').lower()
            for filename in children_in_directory(dirname, True):
                if not low == filename.lower():
                    yield Command.Delete(filename)
            dirname = expandvars("$windir\\temp\\")
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)

        # trash
        if 'posix' == os.name and 'trash' == option_id:
            dirname = os.path.expanduser("~/.Trash")
            for filename in children_in_directory(dirname, False):
                yield Command.Delete(filename)
            # fixme http://www.ramendik.ru/docs/trashspec.html
            # http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html
            # ~/.local/share/Trash
            # * GNOME 2.22, Fedora 9
            # * KDE 4.1.3, Ubuntu 8.10
            dirname = os.path.expanduser("~/.local/share/Trash/files")
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)
            dirname = os.path.expanduser("~/.local/share/Trash/info")
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)
            dirname = os.path.expanduser("~/.local/share/Trash/expunged")
            # [email protected] tells me that the trash
            # backend puts files in here temporarily, but in some situations
            # the files are stuck.
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)

        # clipboard
        if HAVE_GTK and 'clipboard' == option_id:
            def clear_clipboard():
                gtk.gdk.threads_enter()
                clipboard = gtk.clipboard_get()
                clipboard.set_text("")
                gtk.gdk.threads_leave()
                return 0
            yield Command.Function(None, clear_clipboard, _('Clipboard'))

        # overwrite free space
        shred_drives = options.get_list('shred_drives')
        if 'free_disk_space' == option_id and shred_drives:
            for pathname in shred_drives:
                # TRANSLATORS: 'Free' means 'unallocated.'
                # %s expands to a path such as C:\ or /tmp/
                display = _("Overwrite free disk space %s") % pathname

                def wipe_path_func():
                    for ret in FileUtilities.wipe_path(pathname, idle=True):
                        # Yield control to GTK idle because this process
                        # is very slow.  Also display progress.
                        yield ret
                    yield 0
                yield Command.Function(None, wipe_path_func, display)

        # MUICache
        if 'nt' == os.name and 'muicache' == option_id:
            keys = (
                'HKCU\\Software\\Microsoft\\Windows\\ShellNoRoam\\MUICache',
                'HKCU\\Software\\Classes\\Local Settings\\Software\\Microsoft\\Windows\\Shell\\MuiCache')
            for key in keys:
                yield Command.Winreg(key, None)

        # prefetch
        if 'nt' == os.name and 'prefetch' == option_id:
            for path in glob.iglob(expandvars('$windir\\Prefetch\\*.pf')):
                yield Command.Delete(path)

        # recycle bin
        if 'nt' == os.name and 'recycle_bin' == option_id:
            # This method allows shredding
            for path in Windows.get_recycle_bin():
                yield Command.Delete(path)
            # If there were any files deleted, Windows XP will show the
            # wrong icon for the recycle bin indicating it is not empty.
            # The icon will be incorrect until logging in to Windows again
            # or until it is emptied using the Windows API call for emptying
            # the recycle bin.

            # Windows 10 refreshes the recycle bin icon when the user
            # opens the recycle bin folder.

            # This is a hack to refresh the icon.
            import tempfile
            tmpdir = tempfile.mkdtemp()
            Windows.move_to_recycle_bin(tmpdir)
            try:
                Windows.empty_recycle_bin(None, True)
            except:
                logger = logging.getLogger(__name__)
                logger.info('error in empty_recycle_bin()', exc_info=True)

        # Windows Updates
        if 'nt' == os.name and 'updates' == option_id:
            for wu in Windows.delete_updates():
                yield wu

        # return queued files
        for filename in files:
            if os.path.lexists(filename):
                yield Command.Delete(filename)

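The recycle-bin hack above only needs a throwaway directory that is guaranteed to exist and to belong to the current user, which is exactly what mkdtemp provides: a uniquely named directory that, on POSIX platforms, is created with mode 0700. A quick check of that behaviour (the POSIX assumption is noted in the comment):

import os
import stat
import tempfile

tmpdir = tempfile.mkdtemp()
# on POSIX the directory is created with mode 0700, i.e. accessible only
# to the creating user; Windows uses ACLs instead
print(oct(stat.S_IMODE(os.stat(tmpdir).st_mode)))
os.rmdir(tmpdir)
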
Example 37

Project: livecd-tools
Source File: mkbiarch.py
View license
def main():


    def usage():
        usage = 'usage: mkbiarch.py <x86 Live ISO File> <x64 Live ISO File> <Target Multi Arch Image File>'
        print >> sys.stdout, usage


    def mount(src, dst, options=None):
        if os.path.exists(src):
            if not os.path.exists(dst):
                os.makedirs(dst)
            if options is None:
                args = ("/bin/mount", src, dst)
            else:
                args = ("/bin/mount", options, src, dst)
            rc = subprocess.call(args)
            return rc
        return


    def umount(src):
        if os.path.exists(src):
                args = ("/bin/umount", src)
                rc = subprocess.call(args)
                return rc
        return


    def copy(src, dst):
        if os.path.exists(src):
            if not os.path.exists(dst):
                if not os.path.isfile(src):
                    mkdir(dst)
            shutil.copy(src, dst)


    def move(src, dst):
        if os.path.exists(src):
            shutil.move(src, dst)

    def mkdir(dir=None):
        if dir is None:
            tmp = tempfile.mkdtemp()
            return tmp
        else:
            args = ("/bin/mkdir", "-p", dir)
            rc = subprocess.call(args)


    def losetup(src, dst, offset=None):
        if os.path.exists(src):
            if os.path.exists(dst):
                if offset is None:
                    args = ("/sbin/losetup", src, dst)
                else:
                    args = ("/sbin/losetup", "-o", str(offset), src, dst)
                rc = subprocess.call(args)
        return rc

    def lounset(device):
        args = ("/sbin/losetup", "-d", device)
        rc = subprocess.call(args) 

    def null():
        fd = open(os.devnull, 'w')
        return fd

    def dd(file, target):
        args = ("/bin/dd", "if=%s"%file, "of=%s"%target)
        rc = subprocess.call(args)

    def lo():
        args = ("/sbin/losetup", "--find")
        rc = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0].rstrip()
        return rc

    def lodev(file):
        args = ("/sbin/losetup", "-j", file)
        rc = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0].split(":")
        return rc[0]


    def mkimage(bs, count):
        tmp = tempfile.mkstemp()
        image = tmp[1]
        args = ("/bin/dd", "if=/dev/zero",
                 "of=%s"%image, "bs=%s"%bs,
                 "count=%s"%count)
        rc = subprocess.call(args)
        return image


    def size(ent):
        if os.path.exists(ent):
            return os.stat(ent).st_size

    def bs(size):
        return size / 2048

    def partition(device):
        dev = parted.Device(path=device)
        disk = parted.freshDisk(dev, 'msdos')
        constraint = parted.Constraint(device=dev)

        new_geom = parted.Geometry(device=dev,
                                   start=1,
                                   end=(constraint.maxSize - 1))
        filesystem = parted.FileSystem(type="ext2",
                                       geometry=new_geom)
        partition = parted.Partition(disk=disk,
                                     fs=filesystem,
                                     type=parted.PARTITION_NORMAL,
                                     geometry=new_geom)
        constraint = parted.Constraint(exactGeom=new_geom)
        partition.setFlag(parted.PARTITION_BOOT)
        disk.addPartition(partition=partition,
                          constraint=constraint)
        
        disk.commit()

    def format(partition):
        args = ("/sbin/mke2fs", "-j", partition)
        rc = subprocess.call(args)

    def mbr(target):
        mbr = "/usr/share/syslinux/mbr.bin"
        dd(mbr, target)

    def getuuid(device):
        args = ("/sbin/blkid", "-s", "UUID", "-o", "value", device)
        rc = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0].rstrip()
        return rc

    def syslinux(multitmp, config, **args):
        arg = ("/sbin/extlinux", "--install", multitmp + "/extlinux/")
        rc = subprocess.call(arg)

        content = """
        default vesamenu.c32
        timeout 100

        menu background splash.jpg
        menu title Welcome to Fedora 13
        menu color border 0 #ffffffff #00000000
        menu color sel 7 #ffffffff #ff000000
        menu color title 0 #ffffffff #00000000
        menu color tabmsg 0 #ffffffff #00000000
        menu color unsel 0 #ffffffff #00000000
        menu color hotsel 0 #ff000000 #ffffffff
        menu color hotkey 7 #ffffffff #ff000000
        menu color timeout_msg 0 #ffffffff #00000000
        menu color timeout 0 #ffffffff #00000000
        menu color cmdline 0 #ffffffff #00000000
        menu hidden
        menu hiddenrow 5

        label Fedora-13-x86
        menu label Fedora-13-x86
        kernel vmlinuz0
        append initrd=initrd0.img root=UUID=%(uuid)s rootfstype=auto ro live_dir=/x86/LiveOS liveimg
        
        label Fedora-13-x64
        menu label Fedora-13-x64
        kernel vmlinuz1
        append initrd=initrd1.img root=UUID=%(uuid)s rootfstype=auto ro live_dir=/x64/LiveOS liveimg
        """ % args
        fd = open(config, 'w')
        fd.write(content)
        fd.close()

    def verify():
        # use md5 module to verify image files
        pass

    def setup(x86, x64, multi):

        sz = size(x86) + size(x64)
        count = bs(sz)
        blsz = str(2048)

        count = count + 102400

        multi = mkimage(blsz, count)    
        losetup(lo(), multi)
 
        mbr(lodev(multi))
        partition(lodev(multi))
 
        lounset(lodev(multi))
     
        losetup(lo(), multi, offset=512)
        format(lodev(multi))

        multitmp = mkdir()
        mount(lodev(multi), multitmp)

        losetup(lo(), x86)
        losetup(lo(), x64)
 
        x86tmp = mkdir()
        x64tmp = mkdir()

        mount(lodev(x86), x86tmp)
        mount(lodev(x64), x64tmp)


        dirs = ("/extlinux/", "/x86/", "/x64/")
        for dir in dirs:
            mkdir(multitmp + dir)
        dirs = ("/x86/", "/x64/")
        for dir in dirs:
            mkdir(multitmp + dir + "/LiveOS/")

        intermediate = tempfile.mkdtemp() # loopdev performance is slow
                                          # copy to here first then back
                                          # to multitmp + dir which is loopback also

        imgs = ("squashfs.img", "osmin.img")
        for img in imgs:
            copy(x86tmp + "/LiveOS/" + img, intermediate)
            copy(intermediate + "/" + img, multitmp + "/x86/LiveOS/")
        for img in imgs:
            copy(x64tmp + "/LiveOS/" + img, intermediate)
            copy(intermediate + "/" + img, multitmp + "/x64/LiveOS/")

        for file in os.listdir(x86tmp + "/isolinux/"):
            copy(x86tmp + "/isolinux/" + file, multitmp + "/extlinux/")

        copy(x64tmp + "/isolinux/vmlinuz0", multitmp + "/extlinux/vmlinuz1")
        copy(x64tmp + "/isolinux/initrd0.img", multitmp + "/extlinux/initrd1.img")
            

       
        uuid = getuuid(lodev(multi))

  
        config = (multitmp + "/extlinux/extlinux.conf")
        syslinux(multitmp,
                 config,
                 uuid=uuid)



        umount(x86tmp)
        umount(x64tmp)
        umount(multitmp)

        lounset(lodev(x86))
        lounset(lodev(x64))
        lounset(lodev(multi))

        shutil.rmtree(x86tmp)
        shutil.rmtree(x64tmp)
        shutil.rmtree(multitmp)
        shutil.rmtree(intermediate)   
        


        if os.path.exists(sys.argv[3]):
            os.unlink(sys.argv[3])
        move(multi, sys.argv[3])
 

    def parse(x86, x64, multi):
        for file in x86, x64:
            if os.path.exists(file):
                pass
            else:
                usage()
        if not multi:
            usage()
        setup(x86, x64, multi)





    try: 
        parse(sys.argv[1], sys.argv[2], sys.argv[3])
    except:
        usage()

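mkbiarch.py routes every mount point (multitmp, x86tmp, x64tmp, intermediate) through mkdtemp, relying on the fact that each call returns a fresh directory with a distinct random name, so the mount points can never collide. A tiny sketch of that guarantee:

import os
import tempfile

# every call yields a new, uniquely named directory
a = tempfile.mkdtemp()
b = tempfile.mkdtemp()
assert a != b
os.rmdir(a)
os.rmdir(b)
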
Example 38

Project: lorax
Source File: __init__.py
View license
    def run(self, dbo, product, version, release, variant="", bugurl="",
            isfinal=False, workdir=None, outputdir=None, buildarch=None, volid=None,
            domacboot=True, doupgrade=True, remove_temp=False,
            installpkgs=None,
            size=2,
            add_templates=None,
            add_template_vars=None,
            add_arch_templates=None,
            add_arch_template_vars=None,
            verify=True):

        assert self._configured

        installpkgs = installpkgs or []

        # get lorax version
        try:
            import pylorax.version
        except ImportError:
            vernum = "devel"
        else:
            vernum = pylorax.version.num

        if domacboot:
            try:
                runcmd(["rpm", "-q", "hfsplus-tools"])
            except CalledProcessError:
                logger.critical("you need to install hfsplus-tools to create mac images")
                sys.exit(1)

        # set up work directory
        self.workdir = workdir or tempfile.mkdtemp(prefix="pylorax.work.")
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)

        # set up log directory
        logdir = self.conf.get("lorax", "logdir")
        if not os.path.isdir(logdir):
            os.makedirs(logdir)

        self.init_stream_logging()
        self.init_file_logging(logdir)

        logger.debug("version is %s", vernum)
        logger.debug("using work directory %s", self.workdir)
        logger.debug("using log directory %s", logdir)

        # set up output directory
        self.outputdir = outputdir or tempfile.mkdtemp(prefix="pylorax.out.")
        if not os.path.isdir(self.outputdir):
            os.makedirs(self.outputdir)
        logger.debug("using output directory %s", self.outputdir)

        # do we have root privileges?
        logger.info("checking for root privileges")
        if not os.geteuid() == 0:
            logger.critical("no root privileges")
            sys.exit(1)

        # is selinux disabled?
        # With selinux in enforcing mode the rpcbind package required for
        # dracut nfs module, which is in turn required by anaconda module,
        # will not get installed, because it's preinstall scriptlet fails,
        # resulting in an incomplete initial ramdisk image.
        # The reason is that the scriptlet runs tools from the shadow-utils
        # package in chroot, particularly groupadd and useradd to add the
        # required rpc group and rpc user. This operation fails, because
        # the selinux context on files in the chroot, that the shadow-utils
        # tools need to access (/etc/group, /etc/passwd, /etc/shadow etc.),
        # is wrong and selinux therefore disallows access to these files.
        logger.info("checking the selinux mode")
        if selinux.is_selinux_enabled() and selinux.security_getenforce():
            logger.critical("selinux must be disabled or in Permissive mode")
            sys.exit(1)

        # do we have a proper dnf base object?
        logger.info("checking dnf base object")
        if not isinstance(dbo, dnf.Base):
            logger.critical("no dnf base object")
            sys.exit(1)
        self.inroot = dbo.conf.installroot
        logger.debug("using install root: %s", self.inroot)

        if not buildarch:
            buildarch = get_buildarch(dbo)

        logger.info("setting up build architecture")
        self.arch = ArchData(buildarch)
        for attr in ('buildarch', 'basearch', 'libdir'):
            logger.debug("self.arch.%s = %s", attr, getattr(self.arch,attr))

        logger.info("setting up build parameters")
        self.product = DataHolder(name=product, version=version, release=release,
                                 variant=variant, bugurl=bugurl, isfinal=isfinal)
        logger.debug("product data: %s", self.product)

        # NOTE: if you change isolabel, you need to change pungi to match, or
        # the pungi images won't boot.
        isolabel = volid or "%s-%s-%s" % (self.product.name, self.product.version, self.arch.basearch)

        if len(isolabel) > 32:
            logger.fatal("the volume id cannot be longer than 32 characters")
            sys.exit(1)

        # NOTE: rb.root = dbo.conf.installroot (== self.inroot)
        rb = RuntimeBuilder(product=self.product, arch=self.arch,
                            dbo=dbo, templatedir=self.templatedir,
                            installpkgs=installpkgs,
                            add_templates=add_templates,
                            add_template_vars=add_template_vars)

        logger.info("installing runtime packages")
        rb.install()

        # write .buildstamp
        buildstamp = BuildStamp(self.product.name, self.product.version,
                                self.product.bugurl, self.product.isfinal, self.arch.buildarch)

        buildstamp.write(joinpaths(self.inroot, ".buildstamp"))

        if self.debug:
            rb.writepkglists(joinpaths(logdir, "pkglists"))
            rb.writepkgsizes(joinpaths(logdir, "original-pkgsizes.txt"))

        logger.info("doing post-install configuration")
        rb.postinstall()

        # write .discinfo
        discinfo = DiscInfo(self.product.release, self.arch.basearch)
        discinfo.write(joinpaths(self.outputdir, ".discinfo"))

        logger.info("backing up installroot")
        installroot = joinpaths(self.workdir, "installroot")
        linktree(self.inroot, installroot)

        logger.info("generating kernel module metadata")
        rb.generate_module_data()

        logger.info("cleaning unneeded files")
        rb.cleanup()

        if verify:
            logger.info("verifying the installroot")
            if not rb.verify():
                sys.exit(1)
        else:
            logger.info("Skipping verify")

        if self.debug:
            rb.writepkgsizes(joinpaths(logdir, "final-pkgsizes.txt"))

        logger.info("creating the runtime image")
        runtime = "images/install.img"
        compression = self.conf.get("compression", "type")
        compressargs = self.conf.get("compression", "args").split()     # pylint: disable=no-member
        if self.conf.getboolean("compression", "bcj"):
            if self.arch.bcj:
                compressargs += ["-Xbcj", self.arch.bcj]
            else:
                logger.info("no BCJ filter for arch %s", self.arch.basearch)
        rb.create_runtime(joinpaths(installroot,runtime),
                          compression=compression, compressargs=compressargs,
                          size=size)
        rb.finished()

        logger.info("preparing to build output tree and boot images")
        treebuilder = TreeBuilder(product=self.product, arch=self.arch,
                                  inroot=installroot, outroot=self.outputdir,
                                  runtime=runtime, isolabel=isolabel,
                                  domacboot=domacboot, doupgrade=doupgrade,
                                  templatedir=self.templatedir,
                                  add_templates=add_arch_templates,
                                  add_template_vars=add_arch_template_vars,
                                  workdir=self.workdir)

        logger.info("rebuilding initramfs images")
        dracut_args = ["--xz", "--install", "/.buildstamp", "--no-early-microcode"]
        anaconda_args = dracut_args + ["--add", "anaconda pollcdrom qemu qemu-net"]

        # ppc64 cannot boot an initrd > 32MiB so remove some drivers
        if self.arch.basearch in ("ppc64", "ppc64le"):
            dracut_args.extend(["--omit-drivers", REMOVE_PPC64_DRIVERS])

            # Only omit dracut modules from the initrd so that they're kept for
            # upgrade.img
            anaconda_args.extend(["--omit", REMOVE_PPC64_MODULES])

        treebuilder.rebuild_initrds(add_args=anaconda_args)

        logger.info("populating output tree and building boot images")
        treebuilder.build()

        # write .treeinfo file and we're done
        treeinfo = TreeInfo(self.product.name, self.product.version,
                            self.product.variant, self.arch.basearch)
        for section, data in treebuilder.treeinfo_data.items():
            treeinfo.add_section(section, data)
        treeinfo.write(joinpaths(self.outputdir, ".treeinfo"))

        # cleanup
        if remove_temp:
            remove(self.workdir)

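Lorax uses the workdir or tempfile.mkdtemp(...) idiom for both the work and output directories: a caller-supplied path wins, and only in the fallback case is a temporary directory created (which the remove_temp flag later cleans up). A minimal sketch of that fallback, with a hypothetical helper name:

import tempfile

def pick_workdir(workdir=None):
    # honour a caller-supplied directory; otherwise create a fresh one,
    # which the caller is responsible for removing later
    return workdir or tempfile.mkdtemp(prefix='pylorax.work.')

print(pick_workdir())              # e.g. /tmp/pylorax.work.abc123
print(pick_workdir('/srv/work'))   # returned as-is; nothing is created
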
Example 39

Project: attention-lvcsr
Source File: svhn.py
View license
@check_exists(required_files=FORMAT_1_FILES)
def convert_svhn_format_1(directory, output_directory,
                          output_filename='svhn_format_1.hdf5'):
    """Converts the SVHN dataset (format 1) to HDF5.

    This method assumes the existence of the files
    `{train,test,extra}.tar.gz`, which are accessible through the
    official website [SVHNSITE].

    .. [SVHNSITE] http://ufldl.stanford.edu/housenumbers/

    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'svhn_format_1.hdf5'.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.

    """
    try:
        output_path = os.path.join(output_directory, output_filename)
        h5file = h5py.File(output_path, mode='w')
        TMPDIR = tempfile.mkdtemp()

        # Every image has three channels (RGB) and variable height and width.
        # It features a variable number of bounding boxes that identify the
        # location and label of digits. The bounding box location is specified
        # using the x and y coordinates of its top left corner along with its
        # width and height.
        BoundingBoxes = namedtuple(
            'BoundingBoxes', ['labels', 'heights', 'widths', 'lefts', 'tops'])
        sources = ('features',) + tuple('bbox_{}'.format(field)
                                        for field in BoundingBoxes._fields)
        source_dtypes = dict([(source, 'uint8') for source in sources[:2]] +
                             [(source, 'uint16') for source in sources[2:]])
        source_axis_labels = {
            'features': ('channel', 'height', 'width'),
            'bbox_labels': ('bounding_box', 'index'),
            'bbox_heights': ('bounding_box', 'height'),
            'bbox_widths': ('bounding_box', 'width'),
            'bbox_lefts': ('bounding_box', 'x'),
            'bbox_tops': ('bounding_box', 'y')}

        # The dataset is split into three sets: the training set, the test set
        # and an extra set of examples that are somewhat less difficult but
        # can be used as extra training data. These sets are stored separately
        # as 'train.tar.gz', 'test.tar.gz' and 'extra.tar.gz'. Each file
        # contains a directory named after the split it stores. The examples
        # are stored in that directory as PNG images. The directory also
        # contains a 'digitStruct.mat' file with all the bounding box and
        # label information.
        splits = ('train', 'test', 'extra')
        file_paths = dict(zip(splits, FORMAT_1_FILES))
        for split, path in file_paths.items():
            file_paths[split] = os.path.join(directory, path)
        digit_struct_paths = dict(
            [(split, os.path.join(TMPDIR, split, 'digitStruct.mat'))
             for split in splits])

        # We first extract the data files in a temporary directory. While doing
        # that, we also count the number of examples for each split. Files are
        # extracted individually, which allows to display a progress bar. Since
        # the splits will be concatenated in the HDF5 file, we also compute the
        # start and stop intervals of each split within the concatenated array.
        def extract_tar(split):
            with tarfile.open(file_paths[split], 'r:gz') as f:
                members = f.getmembers()
                num_examples = sum(1 for m in members if '.png' in m.name)
                progress_bar_context = progress_bar(
                    name='{} file'.format(split), maxval=len(members),
                    prefix='Extracting')
                with progress_bar_context as bar:
                    for i, member in enumerate(members):
                        f.extract(member, path=TMPDIR)
                        bar.update(i)
            return num_examples

        examples_per_split = OrderedDict(
            [(split, extract_tar(split)) for split in splits])
        cumulative_num_examples = numpy.cumsum(
            [0] + list(examples_per_split.values()))
        num_examples = cumulative_num_examples[-1]
        intervals = zip(cumulative_num_examples[:-1],
                        cumulative_num_examples[1:])
        split_intervals = dict(zip(splits, intervals))

        # The start and stop indices are used to create a split dict that will
        # be parsed into the split array required by the H5PYDataset interface.
        # The split dict is organized as follows:
        #
        #     dict(split -> dict(source -> (start, stop)))
        #
        split_dict = OrderedDict([
            (split, OrderedDict([(s, split_intervals[split])
                                 for s in sources]))
            for split in splits])
        h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)

        # We then prepare the HDF5 dataset. This involves creating datasets to
        # store data sources and datasets to store auxiliary information
        # (namely the shapes for variable-length axes, and labels to indicate
        # what these variable-length axes represent).
        def make_vlen_dataset(source):
            # Create a variable-length 1D dataset
            dtype = h5py.special_dtype(vlen=numpy.dtype(source_dtypes[source]))
            dataset = h5file.create_dataset(
                source, (num_examples,), dtype=dtype)
            # Create a dataset to store variable-length shapes.
            axis_labels = source_axis_labels[source]
            dataset_shapes = h5file.create_dataset(
                '{}_shapes'.format(source), (num_examples, len(axis_labels)),
                dtype='uint16')
            # Create a dataset to store labels for variable-length axes.
            dataset_vlen_axis_labels = h5file.create_dataset(
                '{}_vlen_axis_labels'.format(source), (len(axis_labels),),
                dtype='S{}'.format(
                    numpy.max([len(label) for label in axis_labels])))
            # Fill variable-length axis labels
            dataset_vlen_axis_labels[...] = [
                label.encode('utf8') for label in axis_labels]
            # Attach auxiliary datasets as dimension scales of the
            # variable-length 1D dataset. This is in accordance with the
            # H5PYDataset interface.
            dataset.dims.create_scale(dataset_shapes, 'shapes')
            dataset.dims[0].attach_scale(dataset_shapes)
            dataset.dims.create_scale(dataset_vlen_axis_labels, 'shape_labels')
            dataset.dims[0].attach_scale(dataset_vlen_axis_labels)
            # Tag fixed-length axis with its label
            dataset.dims[0].label = 'batch'

        for source in sources:
            make_vlen_dataset(source)

        # The "fun" part begins: we extract the bounding box and label
        # information contained in 'digitStruct.mat'. This is a version 7.3
        # Matlab file, which uses HDF5 under the hood, albeit with a very
        # convoluted layout.
        def get_boxes(split):
            boxes = []
            with h5py.File(digit_struct_paths[split], 'r') as f:
                bar_name = '{} digitStruct'.format(split)
                bar_maxval = examples_per_split[split]
                with progress_bar(bar_name, bar_maxval) as bar:
                    for image_number in range(examples_per_split[split]):
                        # The 'digitStruct' group is the main group of the HDF5
                        # file. It contains two datasets: 'bbox' and 'name'.
                        # The 'name' dataset isn't of interest to us, as it
                        # stores file names and there's already a one-to-one
                        # mapping between row numbers and image names (e.g.
                        # row 0 corresponds to '1.png', row 1 corresponds to
                        # '2.png', and so on).
                        main_group = f['digitStruct']
                        # The 'bbox' dataset contains the bounding box and
                        # label information we're after. It has as many rows
                        # as there are images, and one column. Elements of the
                        # 'bbox' dataset are object references that point to
                        # (yet another) group that contains the information
                        # for the corresponding image.
                        image_reference = main_group['bbox'][image_number, 0]

                        # There are five datasets contained in that group:
                        # 'label', 'height', 'width', 'left' and 'top'. Each of
                        # those datasets has as many rows as there are bounding
                        # boxes in the corresponding image, and one column.
                        def get_dataset(name):
                            return main_group[image_reference][name][:, 0]
                        names = ('label', 'height', 'width', 'left', 'top')
                        datasets = dict(
                            [(name, get_dataset(name)) for name in names])

                        # If there is only one bounding box, the information is
                        # stored directly in the datasets. If there are
                        # multiple bounding boxes, elements of those datasets
                        # are object references pointing to 1x1 datasets that
                        # store the information (fortunately, it's the last
                        # hop we need to make).
                        def get_elements(dataset):
                            if len(dataset) > 1:
                                return [int(main_group[reference][0, 0])
                                        for reference in dataset]
                            else:
                                return [int(dataset[0])]
                        # Names are pluralized in the BoundingBox named tuple.
                        kwargs = dict(
                            [(name + 's', get_elements(dataset))
                             for name, dataset in iteritems(datasets)])
                        boxes.append(BoundingBoxes(**kwargs))
                        if bar:
                            bar.update(image_number)
            return boxes

        split_boxes = dict([(split, get_boxes(split)) for split in splits])

        # The final step is to fill the HDF5 file.
        def fill_split(split, bar=None):
            for image_number in range(examples_per_split[split]):
                image_path = os.path.join(
                    TMPDIR, split, '{}.png'.format(image_number + 1))
                image = numpy.asarray(
                    Image.open(image_path)).transpose(2, 0, 1)
                bounding_boxes = split_boxes[split][image_number]
                num_boxes = len(bounding_boxes.labels)
                index = image_number + split_intervals[split][0]

                h5file['features'][index] = image.flatten()
                h5file['features'].dims[0]['shapes'][index] = image.shape
                for field in BoundingBoxes._fields:
                    name = 'bbox_{}'.format(field)
                    h5file[name][index] = numpy.maximum(0,
                                                        getattr(bounding_boxes,
                                                                field))
                    h5file[name].dims[0]['shapes'][index] = [num_boxes, 1]

                # Replace label '10' with '0'.
                labels = h5file['bbox_labels'][index]
                labels[labels == 10] = 0
                h5file['bbox_labels'][index] = labels

                if image_number % 1000 == 0:
                    h5file.flush()
                if bar:
                    bar.update(index)

        with progress_bar('SVHN format 1', num_examples) as bar:
            for split in splits:
                fill_split(split, bar=bar)
    finally:
        if os.path.isdir(TMPDIR):
            shutil.rmtree(TMPDIR)
        h5file.flush()
        h5file.close()

    return (output_path,)
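
The converter above stages all of its intermediate files under a scratch directory and removes it in a finally block even when conversion fails. A minimal, generic sketch of that pattern (the file name is illustrative, not taken from the project):

import os
import shutil
import tempfile

# Create a scratch directory, work inside it, and always clean it up,
# even if an exception is raised while producing the output.
tmpdir = tempfile.mkdtemp()
try:
    scratch = os.path.join(tmpdir, "intermediate.dat")
    with open(scratch, "w") as handle:
        handle.write("intermediate data\n")
finally:
    if os.path.isdir(tmpdir):
        shutil.rmtree(tmpdir)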

Example 40

Project: batch-shipyard
Source File: mnist_replica.py
View license
def main(unused_argv):
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
  if FLAGS.download_only:
    sys.exit(0)

  if FLAGS.job_name is None or FLAGS.job_name == "":
    raise ValueError("Must specify an explicit `job_name`")
  if FLAGS.task_index is None or FLAGS.task_index == "":
    raise ValueError("Must specify an explicit `task_index`")

  print("job name = %s" % FLAGS.job_name)
  print("task index = %d" % FLAGS.task_index)

  #Construct the cluster and start the server
  ps_spec = FLAGS.ps_hosts.split(",")
  worker_spec = FLAGS.worker_hosts.split(",")

  # Get the number of workers
  num_workers = len(worker_spec)

  cluster = tf.train.ClusterSpec({
      "ps": ps_spec,
      "worker": worker_spec})
  server = tf.train.Server(cluster,
                           job_name=FLAGS.job_name,
                           task_index=FLAGS.task_index)

  if FLAGS.job_name == "ps":
    server.join()
  elif FLAGS.job_name == "worker":
    is_chief = (FLAGS.task_index == 0)
    if FLAGS.num_gpus > 0:
      if FLAGS.num_gpus < num_workers:
        raise ValueError("number of gpus is less than number of workers")
      # Avoid gpu allocation conflict: now allocate task_num -> #gpu
      # for each worker in the corresponding machine
      gpu = (FLAGS.task_index % FLAGS.num_gpus)
      worker_device = "/job:worker/task:%d/gpu:%d" % (FLAGS.task_index, gpu)
    elif FLAGS.num_gpus == 0:
      # Just allocate the CPU to worker server
      cpu = 0
      worker_device = "/job:worker/task:%d/cpu:%d" % (FLAGS.task_index, cpu)
    # The device setter will automatically place Variables ops on separate
    # parameter servers (ps). The non-Variable ops will be placed on the workers.
    # The ps use CPU and workers use corresponding GPU
    with tf.device(tf.train.replica_device_setter(
        worker_device=worker_device,
        cluster=cluster)):
      global_step = tf.Variable(0, name="global_step", trainable=False)

      # Variables of the hidden layer
      hid_w = tf.Variable(
          tf.truncated_normal([IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
                              stddev=1.0 / IMAGE_PIXELS), name="hid_w")
      hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")

      # Variables of the softmax layer
      sm_w = tf.Variable(
          tf.truncated_normal([FLAGS.hidden_units, 10],
                              stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
          name="sm_w")
      sm_b = tf.Variable(tf.zeros([10]), name="sm_b")

      # Ops: located on the worker specified with FLAGS.task_index
      x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
      y_ = tf.placeholder(tf.float32, [None, 10])

      hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
      hid = tf.nn.relu(hid_lin)

      y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
      cross_entropy = -tf.reduce_sum(y_ *
                                     tf.log(tf.clip_by_value(y, 1e-10, 1.0)))

      opt = tf.train.AdamOptimizer(FLAGS.learning_rate)

      if FLAGS.sync_replicas:
        if FLAGS.replicas_to_aggregate is None:
          replicas_to_aggregate = num_workers
        else:
          replicas_to_aggregate = FLAGS.replicas_to_aggregate

        opt = tf.train.SyncReplicasOptimizer(
            opt,
            replicas_to_aggregate=replicas_to_aggregate,
            total_num_replicas=num_workers,
            replica_id=FLAGS.task_index,
            name="mnist_sync_replicas")

      train_step = opt.minimize(cross_entropy,
                                global_step=global_step)

      if FLAGS.sync_replicas and is_chief:
        # Initial token and chief queue runners required by the sync_replicas mode
        chief_queue_runner = opt.get_chief_queue_runner()
        init_tokens_op = opt.get_init_tokens_op()

      init_op = tf.initialize_all_variables()
      train_dir = tempfile.mkdtemp()
      sv = tf.train.Supervisor(is_chief=is_chief,
                               logdir=train_dir,
                               init_op=init_op,
                               recovery_wait_secs=1,
                               global_step=global_step)

      sess_config = tf.ConfigProto(
          allow_soft_placement=True,
          log_device_placement=False,
          device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index])

      # The chief worker (task_index==0) session will prepare the session,
      # while the remaining workers will wait for the preparation to complete.
      if is_chief:
        print("Worker %d: Initializing session..." % FLAGS.task_index)
      else:
        print("Worker %d: Waiting for session to be initialized..." %
              FLAGS.task_index)

      sess = sv.prepare_or_wait_for_session(server.target,
                                            config=sess_config)

      print("Worker %d: Session initialization complete." % FLAGS.task_index)

      if FLAGS.sync_replicas and is_chief:
        # Chief worker will start the chief queue runner and call the init op
        print("Starting chief queue runner and running init_tokens_op")
        sv.start_queue_runners(sess, [chief_queue_runner])
        sess.run(init_tokens_op)

      # Perform training
      time_begin = time.time()
      print("Training begins @ %f" % time_begin)

      local_step = 0
      while True:
        # Training feed
        batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
        train_feed = {x: batch_xs,
                      y_: batch_ys}

        _, step = sess.run([train_step, global_step], feed_dict=train_feed)
        local_step += 1

        now = time.time()
        print("%f: Worker %d: training step %d done (global step: %d)" %
              (now, FLAGS.task_index, local_step, step))

        if step >= FLAGS.train_steps:
          break

      time_end = time.time()
      print("Training ends @ %f" % time_end)
      training_time = time_end - time_begin
      print("Training elapsed time: %f s" % training_time)

      # Validation feed
      val_feed = {x: mnist.validation.images,
                  y_: mnist.validation.labels}
      val_xent = sess.run(cross_entropy, feed_dict=val_feed)
      print("After %d training step(s), validation cross entropy = %g" %
            (FLAGS.train_steps, val_xent))
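
In the TensorFlow example above, mkdtemp() only supplies a unique, private log directory for the Supervisor; nothing removes it afterwards. A small sketch of that use, with an illustrative prefix that is not in the original code:

import tempfile

# mkdtemp() creates a directory readable, writable and searchable only by
# the creating user and returns its absolute path. Here it simply provides
# a unique per-run log directory; cleanup is left to the operating system.
train_dir = tempfile.mkdtemp(prefix="mnist_train_")
print("writing checkpoints under", train_dir)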

Example 42

Project: puzzle
Source File: view.py
View license
@base.command()
@click.argument('variant-source', type=click.Path(exists=True), required=False)
@click.option('--host', default='0.0.0.0', show_default=True)
@click.option('--port', default=5000, show_default=True)
@click.option('--debug', is_flag=True)
@click.option('-p', '--pattern', default='*', show_default=True)
@click.option('--no-browser', is_flag=True, help='Prevent auto-opening browser')
@phenomizer
@family_file
@family_type
@version
@root
@click.pass_context
def view(ctx, host, port, debug, pattern, family_file, family_type,
         variant_source, root, no_browser, phenomizer):
    """Visualize DNA variant resources.

    1. Look for variant source(s) to visualize and inst. the right plugin
    """
    main_loop = (not debug) or (os.environ.get('WERKZEUG_RUN_MAIN') == 'true')
    root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle")
    phenomizer_auth = phenomizer or ctx.obj.get('phenomizer_auth')
    BaseConfig.PHENOMIZER_AUTH = True if ctx.obj.get('phenomizer_auth') else False
    BaseConfig.STORE_ENABLED = True

    if variant_source is None:
        logger.info("Root directory is: {}".format(root))

        db_path = os.path.join(root, 'puzzle_db.sqlite3')
        logger.info("db path is: {}".format(db_path))
        if not os.path.exists(db_path):
            logger.warn("database not initialized, run 'puzzle init'")
            ctx.abort()

        if os.path.isfile(root):
            logger.error("'root' can't be a file")
            ctx.abort()

        store = SqlStore(db_path, phenomizer_auth=phenomizer_auth)
        for case_obj in store.cases():
            if case_obj.variant_mode == 'gemini':
                if not GEMINI:
                    logger.error("Need to have gemini instaled to view gemini database")
                    ctx.abort()

    else:
        logger.info("Using in memory database")
        tmpdir = tempfile.mkdtemp()
        tmpdb = os.path.join(tmpdir, 'puzzle.sqlite3')
        logger.info("building database: {}".format(tmpdb))
        store = SqlStore("sqlite:///{}".format(tmpdb),
                         phenomizer_auth=phenomizer_auth)
        if main_loop:
            store.set_up()
            cases = []
            if os.path.isfile(variant_source):
                file_type = get_file_type(variant_source)
                #Test if gemini is installed
                if file_type == 'unknown':
                    logger.error("File has to be vcf or gemini db")
                    ctx.abort()
                elif file_type == 'gemini':
                    #Check if gemini is installed
                    if not GEMINI:
                        logger.error("Need to have gemini installed to use gemini plugin")
                        ctx.abort()
                variant_type = get_variant_type(variant_source)
                cases = get_cases(
                    variant_source=variant_source,
                    case_lines=family_file,
                    case_type=family_type,
                    variant_type=variant_type,
                    variant_mode=file_type
                )
            else:
                for file in path(variant_source).walkfiles(pattern):
                    file_type = get_file_type(file)
                    if file_type != 'unknown':
                        variant_type = get_variant_type(file)
                        #Test if gemini is installed
                        if file_type == 'gemini':
                            if not GEMINI:
                                logger.error("Need to have gemini installed to use gemini plugin")
                                ctx.abort()

                        for case in get_cases(
                            variant_source=file,
                            case_type=family_type,
                            variant_type=variant_type,
                            variant_mode=file_type):

                            cases.append(case)

            for case_obj in cases:
                if store.case(case_obj.case_id) is not None:
                    logger.warn("{} already exists in the database"
                                .format(case_obj.case_id))
                    continue

                # extract case information
                logger.debug("adding case: {}".format(case_obj.case_id))
                store.add_case(case_obj, vtype=case_obj.variant_type, mode=case_obj.variant_mode)

    logger.debug("Plugin setup was succesfull")
    BaseConfig.PUZZLE_BACKEND = store
    BaseConfig.UPLOAD_DIR = os.path.join(root, 'resources')

    app = create_app(config_obj=BaseConfig)

    if no_browser is False:
        webbrowser.open_new_tab("http://{}:{}".format(host, port))

    app.run(host=host, port=port, debug=debug)
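
When no variant source database exists yet, the command above builds a throwaway SQLite file inside a fresh temporary directory. A minimal sketch of that idea using only the standard library (the table name is illustrative):

import os
import sqlite3
import tempfile

# Place a scratch SQLite database in a brand-new temporary directory,
# mirroring the "in memory database" branch above.
tmpdir = tempfile.mkdtemp()
tmpdb = os.path.join(tmpdir, "scratch.sqlite3")
conn = sqlite3.connect(tmpdb)
conn.execute("CREATE TABLE cases (case_id TEXT PRIMARY KEY)")
conn.commit()
conn.close()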

Example 43

Project: pyfrc
Source File: cli_deploy.py
View license
    def run(self, options, robot_class, **static_options):
        
        from .. import config
        config.mode = 'upload'
        
        # run the test suite before uploading
        if not options.skip_tests:
            from .cli_test import PyFrcTest
            
            tester = PyFrcTest()
            
            retval = tester.run_test([], robot_class, options.builtin, ignore_missing_test=True)
            if retval != 0:
                print_err("ERROR: Your robot tests failed, aborting upload.")
                if not sys.stdin.isatty():
                    print_err("- Use --skip-tests if you want to upload anyways")
                    return retval
                
                print()
                if not yesno('- Upload anyways?'):
                    return retval
                
                if not yesno('- Are you sure? Your robot code may crash!'):
                    return retval
                
                print()
                print("WARNING: Uploading code against my better judgement...")
        
        # upload all files in the robot.py source directory
        robot_file = abspath(inspect.getfile(robot_class))
        robot_path = dirname(robot_file)
        robot_filename = basename(robot_file)
        cfg_filename = join(robot_path, '.deploy_cfg')
        
        if not options.nonstandard and robot_filename != 'robot.py':
            print_err("ERROR: Your robot code must be in a file called robot.py (launched from %s)!" % robot_filename)
            print_err()
            print_err("If you really want to do this, then specify the --nonstandard argument")
            return 1
        
        # This probably should be configurable... oh well
        
        deploy_dir = '/home/lvuser'
        py_deploy_dir = '%s/py' % deploy_dir
        
        # note below: it appears that deployed_cmd can only be a single line
        
        # In 2015, there were stdout/stderr issues. In 2016, they seem to
        # have been fixed, but need to use -u for it to really work properly
        
        if options.debug:
            deployed_cmd = 'env LD_LIBRARY_PATH=/usr/local/frc/rpath-lib/ /usr/local/frc/bin/netconsole-host /usr/local/bin/python3 -u %s/%s -v run' % (py_deploy_dir, robot_filename)
            deployed_cmd_fname = 'robotDebugCommand'
            extra_cmd = 'touch /tmp/frcdebug; chown lvuser:ni /tmp/frcdebug'
        else:
            deployed_cmd = 'env LD_LIBRARY_PATH=/usr/local/frc/rpath-lib/ /usr/local/frc/bin/netconsole-host /usr/local/bin/python3 -u -O %s/%s run' % (py_deploy_dir, robot_filename)
            deployed_cmd_fname = 'robotCommand'
            extra_cmd = ''

        if options.in_place:
            del_cmd = ''
        else:
            del_cmd = "[ -d %(py_deploy_dir)s ] && rm -rf %(py_deploy_dir)s"

        del_cmd %= {"py_deploy_dir": py_deploy_dir}
        
        check_version = '/usr/local/bin/python3 -c "exec(open(\\"$SITEPACKAGES/wpilib/version.py\\", \\"r\\").read(), globals()); print(\\"WPILib version on robot is \\" + __version__);exit(0) if __version__ == \\"%s\\" else exit(89)"' % wpilib.__version__
        if options.no_version_check:
            check_version = ''
        
        # This is a nasty bit of code now...
        sshcmd = inspect.cleandoc("""
            /bin/bash -ce '[ -x /usr/local/bin/python3 ] || exit 87
            SITEPACKAGES=$(/usr/local/bin/python3 -c "import site; print(site.getsitepackages()[0])")
            [ -f $SITEPACKAGES/wpilib/version.py ] || exit 88
            %(check_version)s
            %(del_cmd)s
            echo "%(cmd)s" > %(deploy_dir)s/%(cmd_fname)s
            %(extra_cmd)s'
        """)
              
        sshcmd %= {
            'del_cmd': del_cmd,
            'deploy_dir': deploy_dir,
            'cmd': deployed_cmd,
            'cmd_fname': deployed_cmd_fname,
            'extra_cmd': extra_cmd,
            'check_version': check_version
        }
        
        sshcmd = re.sub("\n+", ";", sshcmd)
        
        nc_thread = None
        
        try:
            controller = installer.ssh_from_cfg(cfg_filename,
                                                username='lvuser',
                                                password='',
                                                hostname=options.robot,
                                                allow_mitm=True,
                                                no_resolve=options.no_resolve)
            
            # Housekeeping first
            logger.debug('SSH: %s', sshcmd)
            controller.ssh(sshcmd)
            
            # Copy the files over, copy to a temporary directory first
            # -> this is inefficient, but it's easier in sftp
            tmp_dir = tempfile.mkdtemp()
            py_tmp_dir = join(tmp_dir, 'py')
                    
            try:
                self._copy_to_tmpdir(py_tmp_dir, robot_path)
                controller.sftp(py_tmp_dir, deploy_dir, mkdir=not options.in_place)
            finally:
                shutil.rmtree(tmp_dir)
            
            # start the netconsole listener now if requested, *before* we
            # actually start the robot code, so we can see all messages
            if options.nc:
                from netconsole import run
                nc_event = threading.Event()
                nc_thread = threading.Thread(target=run,
                                             kwargs={'init_event': nc_event},
                                             daemon=True)
                nc_thread.start()
                nc_event.wait(5)
                logger.info("Netconsole is listening...")
            
            if not options.in_place:
                # Restart the robot code and we're done!
                sshcmd = "/bin/bash -ce '" + \
                         '. /etc/profile.d/natinst-path.sh; ' + \
                         'chown -R lvuser:ni %s; ' + \
                         '/usr/local/frc/bin/frcKillRobot.sh -t -r' + \
                         "'"
            
                sshcmd %= (py_deploy_dir)
            
                logger.debug('SSH: %s', sshcmd)
                controller.ssh(sshcmd)
            
        except installer.SshExecError as e:
            if e.retval == 87:
                print_err("ERROR: python3 was not found on the roboRIO: have you installed robotpy?")
            elif e.retval == 88:
                print_err("ERROR: WPILib was not found on the roboRIO: have you installed robotpy?")
            elif e.retval == 89:
                print_err("ERROR: expected WPILib version %s" % wpilib.__version__)
                print_err()
                print_err("You should either:")
                print_err("- If the robot version is older, upgrade the RobotPy on your robot")
                print_err("- Otherwise, upgrade pyfrc on your computer")
                print_err()
                print_err("Alternatively, you can specify --no-version-check to skip this check")
            else:
                print_err("ERROR: %s" % e)
            return 1
        except installer.Error as e:
            print_err("ERROR: %s" % e)
            return 1
        else:
            print("\nSUCCESS: Deploy was successful!")
        
        if nc_thread is not None:
            nc_thread.join()
        
        return 0
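
The deploy command stages the robot code in a temporary directory before transferring it over SFTP, then removes the staging area in a finally block. A generic sketch of that staging pattern, where upload() is a placeholder for whatever transfer step is used:

import os
import shutil
import tempfile

def stage_and_upload(source_dir, upload):
    """Copy source_dir into a temporary staging area, hand the staged copy
    to upload(), and always remove the staging area afterwards."""
    tmp_dir = tempfile.mkdtemp()
    staged = os.path.join(tmp_dir, "py")
    try:
        shutil.copytree(source_dir, staged)
        upload(staged)
    finally:
        shutil.rmtree(tmp_dir)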

Example 44

Project: rootpy
Source File: plot_contour_matrix.py
View license
def plot_contour_matrix(arrays,
                        fields,
                        filename,
                        weights=None,
                        sample_names=None,
                        sample_lines=None,
                        sample_colors=None,
                        color_map=None,
                        num_bins=20,
                        num_contours=3,
                        cell_width=2,
                        cell_height=2,
                        cell_margin_x=0.05,
                        cell_margin_y=0.05,
                        dpi=100,
                        padding=0,
                        animate_field=None,
                        animate_steps=10,
                        animate_delay=20,
                        animate_loop=0):
    """
    Create a matrix of contour plots showing all possible 2D projections of a
    multivariate dataset. You may optionally animate the contours as a cut on
    one of the fields is increased. ImageMagick must be installed to produce
    animations.

    Parameters
    ----------

    arrays : list of arrays of shape [n_samples, n_fields]
        A list of 2D NumPy arrays for each sample. All arrays must have the
        same number of columns.

    fields : list of strings
        A list of the field names.

    filename : string
        The output filename. If animation is enabled
        (``animate_field is not None``) then ``filename`` must have the .gif
        extension.

    weights : list of arrays, optional (default=None)
        List of 1D NumPy arrays of sample weights corresponding to the arrays
        in ``arrays``.

    sample_names : list of strings, optional (default=None)
        A list of the sample names for the legend. If None, then no legend will
        be shown.

    sample_lines : list of strings, optional (default=None)
        A list of matplotlib line styles for each sample. If None then line
        styles will cycle through 'dashed', 'solid', 'dashdot', and 'dotted'.
        Elements of this list may also be a list of line styles which will be
        cycled through for the contour lines of the corresponding sample.

    sample_colors : list of matplotlib colors, optional (default=None)
        The color of the contours for each sample. If None, then colors will be
        selected according to regular intervals along the ``color_map``.

    color_map : a matplotlib color map, optional (default=None)
        If ``sample_colors is None`` then select colors according to regular
        intervals along this matplotlib color map. If ``color_map`` is None,
        then the spectral color map is used.

    num_bins : int, optional (default=20)
        The number of bins along both axes of the 2D histograms.

    num_contours : int, optional (default=3)
        The number of contour lines to show for each sample.

    cell_width : float, optional (default=2)
        The width, in inches, of each subplot in the matrix.

    cell_height : float, optional (default=2)
        The height, in inches, of each subplot in the matrix.

    cell_margin_x : float, optional (default=0.05)
        The horizontal margin between adjacent subplots, as a fraction
        of the subplot size.

    cell_margin_y : float, optional (default=0.05)
        The vertical margin between adjacent subplots, as a fraction
        of the subplot size.

    dpi : int, optional (default=100)
        The number of pixels per inch.

    padding : float, optional (default=0)
        The padding to guarantee around each sample's contour plot, as a
        fraction of the range of values along each axis.

    animate_field : string, optional (default=None)
        The field to animate a cut along. By default no animation is produced.
        If ``animate_field is not None`` then ``filename`` must end in the .gif
        extension and an animated GIF is produced.

    animate_steps : int, optional (default=10)
        The number of frames in the animation, corresponding to the number of
        regularly spaced cut values to show along the range of the
        ``animate_field``.

    animate_delay : int, optional (default=20)
        The duration that each frame is shown in the animation as a multiple of
        1 / 100 of a second.

    animate_loop : int, optional (default=0)
        The number of times to loop the animation. If zero, then loop forever.

    Notes
    -----

    NumPy and matplotlib are required

    """
    import numpy as np
    from .. import root2matplotlib as r2m
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator
    from matplotlib import cm
    from matplotlib.lines import Line2D

    # we must have at least two fields (columns)
    num_fields = len(fields)
    if num_fields < 2:
        raise ValueError(
            "record arrays must have at least two fields")
    # check that all arrays have the same number of columns
    for array in arrays:
        if array.shape[1] != num_fields:
            raise ValueError(
                "number of array columns does not match number of fields")

    if sample_colors is None:
        if color_map is None:
            color_map = cm.spectral
        steps = np.linspace(0, 1, len(arrays) + 2)[1:-1]
        sample_colors = [color_map(s) for s in steps]

    # determine range of each field
    low = np.vstack([a.min(axis=0) for a in arrays]).min(axis=0)
    high = np.vstack([a.max(axis=0) for a in arrays]).max(axis=0)
    width = np.abs(high - low)
    width *= padding
    low -= width
    high += width

    def single_frame(arrays, filename, label=None):

        # create the canvas and divide into matrix
        fig, axes = plt.subplots(
            nrows=num_fields,
            ncols=num_fields,
            figsize=(cell_width * num_fields, cell_height * num_fields))
        fig.subplots_adjust(hspace=cell_margin_y, wspace=cell_margin_x)

        for ax in axes.flat:
            # only show the left and bottom axes ticks and labels
            if ax.is_last_row() and not ax.is_last_col():
                ax.xaxis.set_visible(True)
                ax.xaxis.set_ticks_position('bottom')
                ax.xaxis.set_major_locator(MaxNLocator(4, prune='both'))
                for tick in ax.xaxis.get_major_ticks():
                    tick.label.set_rotation('vertical')
            else:
                ax.xaxis.set_visible(False)

            if ax.is_first_col() and not ax.is_first_row():
                ax.yaxis.set_visible(True)
                ax.yaxis.set_ticks_position('left')
                ax.yaxis.set_major_locator(MaxNLocator(4, prune='both'))
            else:
                ax.yaxis.set_visible(False)

        # turn off axes frames in upper triangular matrix
        for ix, iy in zip(*np.triu_indices_from(axes, k=0)):
            axes[ix, iy].axis('off')

        levels = np.linspace(0, 1, num_contours + 2)[1:-1]

        # plot the data
        for iy, ix in zip(*np.tril_indices_from(axes, k=-1)):
            ymin = float(low[iy])
            ymax = float(high[iy])
            xmin = float(low[ix])
            xmax = float(high[ix])
            for isample, a in enumerate(arrays):
                hist = Hist2D(
                    num_bins, xmin, xmax,
                    num_bins, ymin, ymax)
                if weights is not None:
                    hist.fill_array(a[:, [ix, iy]], weights[isample])
                else:
                    hist.fill_array(a[:, [ix, iy]])
                # normalize so maximum is 1.0
                _max = hist.GetMaximum()
                if _max != 0:
                    hist /= _max
                r2m.contour(hist,
                    axes=axes[iy, ix],
                    levels=levels,
                    linestyles=sample_lines[isample] if sample_lines else LINES,
                    colors=sample_colors[isample])

        # label the diagonal subplots
        for i, field in enumerate(fields):
            axes[i, i].annotate(field,
                (0.1, 0.2),
                rotation=45,
                xycoords='axes fraction',
                ha='left', va='center')

        # make proxy artists for legend
        lines = []
        for color in sample_colors:
            lines.append(Line2D([0, 0], [0, 0], color=color))

        if sample_names is not None:
            # draw the legend
            leg = fig.legend(lines, sample_names, loc=(0.65, 0.8))
            leg.set_frame_on(False)

        if label is not None:
            axes[0, 0].annotate(label, (0, 1),
                ha='left', va='top',
                xycoords='axes fraction')

        fig.savefig(filename, bbox_inches='tight', dpi=dpi)
        plt.close(fig)

    if animate_field is not None:
        _, ext = os.path.splitext(filename)
        if ext != '.gif':
            raise ValueError(
                "animation is only supported for .gif files")
        field_idx = fields.index(animate_field)
        cuts = np.linspace(
            low[field_idx],
            high[field_idx],
            animate_steps + 1)[:-1]
        gif = GIF()
        temp_dir = tempfile.mkdtemp()
        for i, cut in enumerate(cuts):
            frame_filename = os.path.join(temp_dir, 'frame_{0:d}.png'.format(i))
            label = '{0} > {1:.2f}'.format(animate_field, cut)
            log.info("creating frame for {0} ...".format(label))
            new_arrays = []
            for array in arrays:
                new_arrays.append(array[array[:, field_idx] > cut])
            single_frame(new_arrays,
                filename=frame_filename,
                label=label)
            gif.add_frame(frame_filename)
        gif.write(filename, delay=animate_delay, loop=animate_loop)
        shutil.rmtree(temp_dir)
    else:
        single_frame(arrays, filename=filename)
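
For the animated case, the frames are rendered into a temporary directory that is deleted once the GIF has been written. Since Python 3.2, tempfile.TemporaryDirectory wraps the same mkdtemp/rmtree pairing in a context manager; a minimal sketch with placeholder frame contents:

import os
import tempfile

with tempfile.TemporaryDirectory() as temp_dir:
    frame_paths = []
    for i in range(3):
        frame_path = os.path.join(temp_dir, 'frame_{0:d}.png'.format(i))
        with open(frame_path, 'wb') as frame:
            frame.write(b'')  # stand-in for a rendered frame
        frame_paths.append(frame_path)
# temp_dir and its frames no longer exist at this point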

Example 45

Project: bloom
Source File: import_upstream.py
View license
def import_upstream(tarball_path, patches_path, version, name, replace):
    # Check for a url and download it
    url = urlparse(tarball_path)
    if url.scheme:  # Some scheme like http, https, or file...
        tmp_dir = tempfile.mkdtemp()
        try:
            info("Fetching file from url: '{0}'".format(tarball_path))
            req = load_url_to_file_handle(tarball_path)
            tarball_path = os.path.join(tmp_dir, os.path.basename(url.path))
            with open(tarball_path, 'wb') as f:
                chunk_size = 16 * 1024
                while True:
                    chunk = req.read(chunk_size)
                    if not chunk:
                        break
                    f.write(chunk)
            return import_upstream(tarball_path, patches_path, version, name, replace)
        finally:
            shutil.rmtree(tmp_dir)

    # If there is no tarball at the given path, fail
    if not os.path.exists(tarball_path):
        error("Specified archive does not exist: '{0}'".format(tarball_path),
              exit=True)

    # If either version or name are not provided, guess from archive name
    if not version or not name:
        # Parse tarball name
        tarball_file = os.path.basename(tarball_path)
        ending = None
        if tarball_file.endswith('.tar.gz'):
            ending = '.tar.gz'
        elif tarball_file.endswith('.zip'):
            ending = '.zip'
        else:
            error("Cannot detect type of archive: '{0}'"
                  .format(tarball_file), exit=True)
        tarball_file = tarball_file[:-len(ending)]
        split_tarball_file = tarball_file.split('-')
        if len(split_tarball_file) < 2 and not version or len(split_tarball_file) < 1:
            error("Cannot detect name and/or version from archive: '{0}'"
                  .format(tarball_file), exit=True)
    if not name and len(split_tarball_file) == 1:
        name = split_tarball_file[0]
    elif not name and len(split_tarball_file) > 1:
        name = '-'.join(split_tarball_file[:-1])
    if not version and len(split_tarball_file) < 2:
        error("Cannot detect version from archive: '{0}'"
              .format(tarball_file) + " and the version was not specified.",
              exit=True)
    version = version if version else split_tarball_file[-1]

    # Check if the patches_path (if given) exists
    patches_path_dict = None
    if patches_path:
        patches_path_dict = ls_tree(BLOOM_CONFIG_BRANCH, patches_path)
        if not patches_path_dict:
            error("Given patches path '{0}' does not exist in bloom branch."
                  .format(patches_path), exit=True)

    # Do version checking
    version_check(version)

    # Check for existing tags
    upstream_tag = 'upstream/{0}'.format(version)
    if tag_exists(upstream_tag):
        if not replace:
            error("Tag '{0}' already exists, use --replace to override it."
                  .format(upstream_tag), exit=True)
        warning("Removing tag: '{0}'".format(upstream_tag))
        delete_tag(upstream_tag)
        if not get_git_clone_state():
            delete_remote_tag(upstream_tag)
    name_tag = '{0}/{1}'.format(name or 'upstream', version)
    if name_tag != upstream_tag and tag_exists(name_tag):
        if not replace:
            error("Tag '{0}' already exists, use --replace to override it."
                  .format(name_tag), exit=True)
        warning("Removing tag: '{0}'".format(name_tag))
        delete_tag(name_tag)
        if not get_git_clone_state():
            delete_remote_tag(name_tag)

    # If there is no upstream branch, create one
    if not branch_exists('upstream'):
        info("Creating upstream branch.")
        create_branch('upstream', orphaned=True)
    else:
        track_branches(['upstream'])

    # Import the given tarball
    info("Importing archive into upstream branch...")
    import_tarball(tarball_path, 'upstream', version, name)

    # Handle patches_path
    if patches_path:
        import_patches(patches_path, patches_path_dict, 'upstream', version)

    # Create tags
    with inbranch('upstream'):
        # Assert packages in upstream are the correct version
        _, actual_version, _ = get_package_data('upstream')
        if actual_version != version:
            error("The package(s) in upstream are version '{0}', but the version to be released is '{1}', aborting."
                  .format(actual_version, version), exit=True)
        # Create the tag
        info("Creating tag: '{0}'".format(upstream_tag))
        create_tag(upstream_tag)
        if name_tag != upstream_tag:
            info("Creating tag: '{0}'".format(name_tag))
            create_tag(name_tag)
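
When the archive argument is a URL, import_upstream first downloads it chunk by chunk into a temporary directory, recurses with the local path, and removes the directory in a finally block. A simplified sketch of that download step (error handling and URL parsing are reduced, the helper name is mine, and the caller must remove the returned file's directory):

import os
import tempfile
from urllib.request import urlopen

def fetch_to_tempdir(url):
    """Download url into a fresh temporary directory in 16 KiB chunks
    and return the local file path."""
    tmp_dir = tempfile.mkdtemp()
    local_path = os.path.join(tmp_dir, os.path.basename(url))
    with urlopen(url) as response, open(local_path, 'wb') as out:
        while True:
            chunk = response.read(16 * 1024)
            if not chunk:
                break
            out.write(chunk)
    return local_path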

Example 46

Project: mock
Source File: mockchain.py
View license
def main(args):
    opts, args = parse_args(args)
    # take mock config + list of pkgs
    cfg = opts.chroot
    pkgs = args[1:]

    global config_opts
    config_opts = mockbuild.util.load_config(mockconfig_path, cfg, None, __VERSION__, PKGPYTHONDIR)

    if not opts.tmp_prefix:
        try:
            opts.tmp_prefix = os.getlogin()
        except OSError as e:
            print("Could not find login name for tmp dir prefix add --tmp_prefix")
            sys.exit(1)
    pid = os.getpid()
    opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid)

    # create a tempdir for our local info
    if opts.localrepo:
        local_tmp_dir = os.path.abspath(opts.localrepo)
        if not os.path.exists(local_tmp_dir):
            os.makedirs(local_tmp_dir)
            os.chmod(local_tmp_dir, 0o755)
    else:
        pre = 'mock-chain-%s-' % opts.uniqueext
        local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp')
        os.chmod(local_tmp_dir, 0o755)

    if opts.logfile:
        opts.logfile = os.path.join(local_tmp_dir, opts.logfile)
        if os.path.exists(opts.logfile):
            os.unlink(opts.logfile)

    log(opts.logfile, "starting logfile: %s" % opts.logfile)
    opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + config_opts['chroot_name'] + '/')

    if not os.path.exists(opts.local_repo_dir):
        os.makedirs(opts.local_repo_dir, mode=0o755)

    local_baseurl = "file://%s" % opts.local_repo_dir
    log(opts.logfile, "results dir: %s" % opts.local_repo_dir)
    opts.config_path = os.path.normpath(local_tmp_dir + '/configs/' + config_opts['chroot_name'] + '/')

    if not os.path.exists(opts.config_path):
        os.makedirs(opts.config_path, mode=0o755)

    log(opts.logfile, "config dir: %s" % opts.config_path)

    my_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(config_opts['chroot_name']))

    # modify with localrepo
    res, msg = add_local_repo(config_opts['config_file'], my_mock_config, local_baseurl, 'local_build_repo')
    if not res:
        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
        sys.exit(1)

    for baseurl in opts.repos:
        res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl)
        if not res:
            log(opts.logfile, "Error: Could not add: %s to yum config in mock chroot: %s" % (baseurl, msg))
            sys.exit(1)

    # these files needed from the mock.config dir to make mock run
    for fn in ['site-defaults.cfg', 'logging.ini']:
        pth = mockconfig_path + '/' + fn
        shutil.copyfile(pth, opts.config_path + '/' + fn)

    # createrepo on it
    err = createrepo(opts.local_repo_dir)[1]
    if err.strip():
        log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
        log(opts.logfile, "Err: %s" % err)
        sys.exit(1)

    download_dir = tempfile.mkdtemp()
    downloaded_pkgs = {}
    built_pkgs = []
    try_again = True
    to_be_built = pkgs
    return_code = 0
    num_of_tries = 0
    while try_again:
        num_of_tries += 1
        failed = []
        for pkg in to_be_built:
            if not pkg.endswith('.rpm'):
                log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
                failed.append(pkg)
                continue

            elif pkg.startswith('http://') or pkg.startswith('https://') or pkg.startswith('ftp://'):
                url = pkg
                try:
                    log(opts.logfile, 'Fetching %s' % url)
                    r = requests.get(url)
                    # pylint: disable=no-member
                    if r.status_code == requests.codes.ok:
                        fn = urlsplit(r.url).path.rsplit('/', 1)[1]
                        if 'content-disposition' in r.headers:
                            _, params = cgi.parse_header(r.headers['content-disposition'])
                            if 'filename' in params and params['filename']:
                                fn = params['filename']
                        pkg = download_dir + '/' + fn
                        with open(pkg, 'wb') as fd:
                            for chunk in r.iter_content(4096):
                                fd.write(chunk)
                except Exception as e:
                    log(opts.logfile, 'Error Downloading %s: %s' % (url, str(e)))
                    failed.append(url)
                    continue
                else:
                    downloaded_pkgs[pkg] = url
            log(opts.logfile, "Start build: %s" % pkg)
            ret = do_build(opts, config_opts['chroot_name'], pkg)[0]
            log(opts.logfile, "End build: %s" % pkg)
            if ret == 0:
                failed.append(pkg)
                log(opts.logfile, "Error building %s." % os.path.basename(pkg))
                if opts.recurse:
                    log(opts.logfile, "Will try to build again (if some other package will succeed).")
                else:
                    log(opts.logfile, "See logs/results in %s" % opts.local_repo_dir)
            elif ret == 1:
                log(opts.logfile, "Success building %s" % os.path.basename(pkg))
                built_pkgs.append(pkg)
                # createrepo with the new pkgs
                err = createrepo(opts.local_repo_dir)[1]
                if err.strip():
                    log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
                    log(opts.logfile, "Err: %s" % err)
            elif ret == 2:
                log(opts.logfile, "Skipping already built pkg %s" % os.path.basename(pkg))

        if failed and opts.recurse:
            if len(failed) != len(to_be_built):
                to_be_built = failed
                try_again = True
                log(opts.logfile, 'Some package succeeded, some failed.')
                log(opts.logfile, 'Trying to rebuild %s failed pkgs, because --recurse is set.' % len(failed))
            else:
                log(opts.logfile, "Tried %s times - following pkgs could not be successfully built:" % num_of_tries)
                for pkg in failed:
                    msg = pkg
                    if pkg in downloaded_pkgs:
                        msg = downloaded_pkgs[pkg]
                    log(opts.logfile, msg)
                try_again = False
        else:
            try_again = False
            if failed:
                return_code = 2

    # cleaning up our download dir
    shutil.rmtree(download_dir, ignore_errors=True)

    log(opts.logfile, "Results out to: %s" % opts.local_repo_dir)
    log(opts.logfile, "Pkgs built: %s" % len(built_pkgs))
    if built_pkgs:
        if failed:
            if len(built_pkgs):
                log(opts.logfile, "Some packages successfully built in this order:")
        else:
            log(opts.logfile, "Packages successfully built in this order:")
        for pkg in built_pkgs:
            log(opts.logfile, pkg)
    return return_code
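
mockchain passes prefix and dir keywords so its working directory is created under /var/tmp with a recognizable name. A short sketch of those keywords (the uniqueext value is made up, and /var/tmp must exist, as it does on typical Linux systems):

import tempfile

# mkdtemp() accepts suffix, prefix and dir arguments; here the directory
# lands under /var/tmp with an identifiable prefix, as in main() above.
uniqueext = 'demo-1234'
local_tmp_dir = tempfile.mkdtemp(prefix='mock-chain-%s-' % uniqueext,
                                 dir='/var/tmp')
print(local_tmp_dir)  # e.g. /var/tmp/mock-chain-demo-1234-XXXXXXXX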

Example 47

Project: fb2mobi
Source File: fb2mobi.py
View license
def process_file(config, infile, outfile=None):
    critical_error = False

    start_time = time.clock()
    temp_dir = tempfile.mkdtemp()

    if not os.path.exists(infile):
        config.log.critical('File {0} not found'.format(infile))
        return

    config.log.info('Converting "{0}"...'.format(os.path.split(infile)[1]))
    config.log.info('Using profile "{0}".'.format(config.current_profile['name']))

    # Validate the parameters
    if infile:
        if not infile.lower().endswith(('.fb2', '.fb2.zip', '.zip', '.epub')):
            config.log.critical('"{0}" not *.fb2, *.fb2.zip, *.zip or *.epub'.format(infile))
            return

    if not config.current_profile['css'] and not infile.lower().endswith(('.epub')):
        config.log.warning('Profile does not have link to css file.')

    if 'xslt' in config.current_profile and not os.path.exists(config.current_profile['xslt']):
        config.log.critical('Transformation file {0} not found'.format(config.current_profile['xslt']))
        return

    if config.kindle_compression_level < 0 or config.kindle_compression_level > 2:
        config.log.warning('Parameter kindleCompressionLevel should be between 0 and 2, using default value (1).')
        config.kindle_compression_level = 1

    # If no output file name was given, derive one
    if not outfile:

        outdir, outputfile = os.path.split(infile)
        outputfile = get_mobi_filename(outputfile, config.transliterate)

        if config.output_dir:
            if not os.path.exists(config.output_dir):
                os.makedirs(config.output_dir)
            if config.input_dir and config.save_structure:
                rel_path = os.path.join(config.output_dir, os.path.split(os.path.relpath(infile, config.input_dir))[0])
                if not os.path.exists(rel_path):
                    os.makedirs(rel_path)
                outfile = os.path.join(rel_path, outputfile)
            else:
                outfile = os.path.join(config.output_dir, outputfile)
        else:
            outfile = os.path.join(outdir, outputfile)
    else:
        _output_format = os.path.splitext(outfile)[1].lower()[1:]
        if _output_format not in ('mobi', 'azw3', 'epub'):
            config.log.critical('Unknown output format: {0}'.format(_output_format))
            return -1
        else:
            if not config.mhl:
                config.output_format = _output_format
            outfile = '{0}.{1}'.format(os.path.splitext(outfile)[0], config.output_format)

    if config.output_format.lower() == 'epub':
        # For epub, always split into chapters
        config.current_profile['chapterOnNewPage'] = True

    debug_dir = os.path.abspath(os.path.splitext(infile)[0])
    if os.path.splitext(debug_dir)[1].lower() == '.fb2':
        debug_dir = os.path.splitext(debug_dir)[0]

    input_epub = False

    if os.path.splitext(infile)[1].lower() == '.zip':
        config.log.info('Unpacking...')
        tmp_infile = infile
        try:
            infile = unzip(infile, temp_dir)
        except:
            config.log.critical('Error unpacking file "{0}".'.format(tmp_infile))
            return

        if not infile:
            config.log.critical('Error unpacking file "{0}".'.format(tmp_infile))
            return

    elif os.path.splitext(infile)[1].lower() == '.epub':
        config.log.info('Unpacking epub...')
        tmp_infile = infile
        try:
            infile = unzip_epub(infile, temp_dir)
        except:
            config.log.critical('Error unpacking file "{0}".'.format(tmp_infile))
            return

        if not infile:
            config.log.critical('Error unpacking file "{0}".'.format(tmp_infile))
            return

        input_epub = True

    if input_epub:
        # Let's see what we could do
        config.log.info('Processing epub...')
        epubparser = EpubProc(infile, config)
        epubparser.process()
        document_id = epubparser.book_uuid
    else:
        # Convert fb2 to html
        config.log.info('Converting fb2 to html...')
        try:
            fb2parser = Fb2XHTML(infile, outfile, temp_dir, config)
            fb2parser.generate()
            document_id = fb2parser.book_uuid
            infile = os.path.join(temp_dir, 'OEBPS', 'content.opf')
        except:
            config.log.critical('Error while converting file "{0}"'.format(infile))
            config.log.debug('Getting details', exc_info=True)
            return

    config.log.info('Processing took {0} sec.'.format(round(time.clock() - start_time, 2)))

    if config.output_format.lower() in ('mobi', 'azw3'):
        # Run kindlegen
        application_path = get_executable_path()
        if sys.platform == 'win32':
            if os.path.exists(os.path.join(application_path, 'kindlegen.exe')):
                kindlegen_cmd = os.path.join(application_path, 'kindlegen.exe')
            else:
                kindlegen_cmd = 'kindlegen.exe'
        else:
            if os.path.exists(os.path.join(application_path, 'kindlegen')):
                kindlegen_cmd = os.path.join(application_path, 'kindlegen')
            else:
                kindlegen_cmd = 'kindlegen'

        try:
            config.log.info('Running kindlegen...')
            kindlegen_cmd_pars = '-c{0}'.format(config.kindle_compression_level)

            startupinfo = None
            if os.name == 'nt':
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW

            with subprocess.Popen([kindlegen_cmd, infile, kindlegen_cmd_pars, '-locale', 'en'], stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT, startupinfo=startupinfo) as result:
                config.log.debug(str(result.stdout.read(), 'utf-8', errors='replace'))

        except OSError as e:
            if e.errno == os.errno.ENOENT:
                config.log.critical('{0} not found'.format(kindlegen_cmd))
                critical_error = True
            else:
                config.log.critical(e.winerror)
                config.log.critical(e.strerror)
                config.log.debug('Getting details', exc_info=True, stack_info=True)
                raise e

    elif config.output_format.lower() == 'epub':
        # Build the epub
        outfile = os.path.splitext(outfile)[0] + '.epub'
        config.log.info('Creating epub...')
        create_epub(temp_dir, outfile)

    if config.debug:
        # In debug mode, copy the intermediate files to the output directory
        config.log.info('Copying intermediate files to {0}...'.format(debug_dir))
        if os.path.exists(debug_dir):
            rm_tmp_files(debug_dir)
        shutil.copytree(temp_dir, debug_dir)

    # Copy the mobi (azw3) from the temporary directory to the output directory
    if not critical_error:
        ext = config.output_format.lower()
        if ext in ('mobi', 'azw3'):
            result_book = infile.replace('.opf', '.mobi')
            if not os.path.isfile(result_book):
                config.log.critical('kindlegen error, conversion interrupted.')
                critical_error = True
            else:
                try:
                    remove_personal = config.current_profile['kindleRemovePersonalLabel'] if not ext in ('mobi') or not config.send_to_kindle['send'] else False
                    if ext in ('mobi') and config.noMOBIoptimization:
                        config.log.info('Copying resulting file...')
                        shutil.copyfile(result_book, outfile)
                    else:
                        config.log.info('Optimizing resulting file...')
                        splitter = mobi_split(result_book, document_id, remove_personal, ext)
                        open(os.path.splitext(outfile)[0] + '.' + ext, 'wb').write(splitter.getResult() if ext == 'mobi' else splitter.getResult8())
                except:
                    config.log.critical('Error optimizing file, conversion interrupted.')
                    config.log.debug('Getting details', exc_info=True, stack_info=True)
                    critical_error = True

    if not critical_error:
        config.log.info('Book conversion completed in {0} sec.\n'.format(round(time.clock() - start_time, 2)))

        if config.send_to_kindle['send']:
            if config.output_format.lower() != 'mobi':
                config.log.warning('Kindle Personal Documents Service only accepts personal mobi files')
            else:
                config.log.info('Sending book...')
                try:
                    kindle = SendToKindle()
                    kindle.smtp_server = config.send_to_kindle['smtpServer']
                    kindle.smtp_port = config.send_to_kindle['smtpPort']
                    kindle.smtp_login = config.send_to_kindle['smtpLogin']
                    kindle.smtp_password = config.send_to_kindle['smtpPassword']
                    kindle.user_email = config.send_to_kindle['fromUserEmail']
                    kindle.kindle_email = config.send_to_kindle['toKindleEmail']
                    kindle.convert = False
                    kindle.send_mail([outfile])

                    config.log.info('Book has been sent to "{0}"'.format(config.send_to_kindle['toKindleEmail']))

                    if config.send_to_kindle['deleteSendedBook']:
                        try:
                            os.remove(outfile)
                        except:
                            config.log.error('Unable to remove file "{0}".'.format(outfile))
                            return -1

                except KeyboardInterrupt:
                    print('User interrupt. Exiting...')
                    sys.exit(-1)

                except:
                    config.log.error('Error sending file')
                    config.log.debug('Getting details', exc_info=True, stack_info=True)

    # Clean up the temporary files
    rm_tmp_files(temp_dir)
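
process_file unpacks zipped fb2 and epub input into its temporary directory before converting it. A reduced sketch of that unpacking step using the standard zipfile module (the helper name is mine; cleanup stays with the caller, as above):

import tempfile
import zipfile

def unzip_to_tempdir(archive_path):
    """Extract a zip-based archive into a fresh temporary directory
    and return that directory's path."""
    temp_dir = tempfile.mkdtemp()
    with zipfile.ZipFile(archive_path) as archive:
        archive.extractall(temp_dir)
    return temp_dir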

Example 48

Project: broc
Source File: Syntax.py
View license
def DIRECTORY(v): 
    """
    Add sub directory
    Args:
       v : the name of subdirectory, v is relative path
    """ 
    # gather all dependent modules
    env = Environment.GetCurrent()
    child_broc_dir = os.path.abspath(os.path.join(env.ModulePath(), v))
    if env.ModulePath() not in child_broc_dir:
        raise BrocArgumentIllegalError("DIRECTORY(%s) is wrong: %s not in %s" % \
                                       (v, child_broc_dir, env.ModulePath()))

    if sys.argv[0] == 'PLANISH':
        parent = sys.argv[1]
        child_broc_file = os.path.join(parent.module.root_path, v, 'BROC')
        if not os.path.exists(child_broc_file):
            raise BrocArgumentIllegalError('Not found %s in Tag Directory(%s)' % (child_broc_file, v))
        try:
            execfile(child_broc_file)
        except BaseException as err:
            traceback.print_exc()
            raise BrocArgumentIllegalError(err)
    else: # find all targets to build
        child_broc_file = os.path.join(child_broc_dir, 'BROC')
        if not os.path.exists(child_broc_file):
            raise BrocArgumentIllegalError('Not found %s in Tag Directory(%s)' % (child_broc_file, v))
        # Log.Log().LevPrint("INFO", 'add sub directory (%s) for module %s' % (v, env._module.module_cvspath))
        env.AddSubDir(v)

def PUBLISH(srcs, out_dir):
    """
    copy srcs to out_dir
    Args:
        srcs: the files to copy; they must belong to the module
        out_dir: the destination directory, which must start with $OUT
        if an argument is illegal, raise BrocArgumentIllegalError
    """
    if sys.argv[0] == 'PLANISH':
        return
    env = Environment.GetCurrent()
    if not out_dir.strip().startswith('$OUT'):
        raise BrocArgumentIllegalError("PUBLISH argument dst(%s) must start with $OUT \
                                         in %s " % (out_dir, env.BrocPath()))
    src_lists = srcs.split()
    for s in src_lists:
        abs_s = os.path.normpath(os.path.join(env.BrocDir(), s))
        if env.ModulePath() not in abs_s:
            raise NotInSelfModuleError(abs_s, env.ModulePath())

    env.AddPublish(srcs, out_dir)


def SVN_PATH():
    """
    return local path of module
    """
    if sys.argv[0] == 'PLANISH':
        return
    env = Environment.GetCurrent()
    return env.SvnPath()


def SVN_URL():
    """
    return url of module
    """
    if sys.argv[0] == 'PLANISH':
        return
    env = Environment.GetCurrent()
    return env.SvnUrl()


def SVN_REVISION():
    """
    return revision of module
    """
    if sys.argv[0] == 'PLANISH':
        return
    env = Environment.GetCurrent()
    return env.SvnRevision()


def SVN_LAST_CHANGED_REV():
    """
    return last changed rev
    """
    if sys.argv[0] == 'PLANISH':
        return
    env = Environment.GetCurrent()
    return env.SvnLastChangedRev()


def GIT_PATH():
    """
    return local path of module
    """
    if sys.argv[0] == 'PLANISH':
        return
    env = Environment.GetCurrent()
    return env.GitPath()
        
    
def GIT_URL():
    """
    return url of module
    """
    if sys.argv[0] == 'PLANISH':
        return
    env = Environment.GetCurrent()
    return env.GitUrl()


def GIT_BRANCH():
    """
    return the branch name of module
    """
    if sys.argv[0] == 'PLANISH':
        return
    env = Environment.GetCurrent()
    return env.GitBranch()


def GIT_COMMIT_ID():
    """
    return the commit id of module
    """
    if sys.argv[0] == 'PLANISH':
        return
    env = Environment.GetCurrent()
    return env.GitCommitID()


def GIT_TAG():
    """
    return the tag of module
    """
    if sys.argv[0] == 'PLANISH':
        return
    env = Environment.GetCurrent()
    return env.GitTag()

class BrocLoader(object):
    """
    the class loading BROC file
    """
    class __impl(object):
        """
        the implementation of singleton interface
        """
        def __init__(self):
            """
            """
            self._root = None
            self._nodes = dict()                   # module
            self._checked_configs = set()          # storing content of tag CONFIGS
            self._broc_dir = tempfile.mkdtemp()    # the temporary directory storing all BROC files 
            self._queue = Queue.Queue()
            self._lack_broc = set()                # the set of modules that lack a BROC file
    
        def Id(self):
            """
            test method, return singleton id
            """
            return id(self)

        def SetRoot(self, root):
            """
            Args:
                root : the BrocNode object
            """
            if not self._root:
                self._root = root
                BrocTree.BrocTree().SetRoot(root)
                self._queue.put(root)

        def AddNode(self, node):
            """
            add new node
            Args:
                node : the object of BrocNode
            """
            if node.module.module_cvspath not in self._nodes:
                self._nodes[node.module.module_cvspath] = []
            
            self._nodes[node.module.module_cvspath].append(node)

        def AllNodes(self):
            """
            """
            return self._nodes

        def LackBrocModules(self):
            """
            return the set of modules that lack a BROC file
            """
            return self._lack_broc

        def LoadBROC(self):
            """
            run the main module's BROC file
            """
            # main thread to load BROC
            # first node is root node representing main module
            while not self._queue.empty():
                parent = self._queue.get()
                sys.argv = ['PLANISH', parent]
                broc_file = self._download_broc(parent)
                if not broc_file:
                    self._lack_broc.add(parent.module.origin_config)
                    continue
                try:
                    execfile(broc_file)
                except BaseException as err:
                    traceback.print_exc()
            # print dependent tree
            BrocTree.BrocTree().Dump()

        def handle_configs(self, s, parent):
            """
            Args:
                s : the dependency config string set at tag CONFIGS
                parent : the BrocNode object
            """
            if s in self._checked_configs:
                return 
            tree = BrocTree.BrocTree()
            repo_domain = BrocConfig.BrocConfig().RepoDomain(parent.module.repo_kind)
            postfix_branch = BrocConfig.BrocConfig().SVNPostfixBranch()
            postfix_tag = BrocConfig.BrocConfig().SVNPostfixTag()
            child_module = PlanishUtil.ParseConfig(s, 
                                           parent.module.workspace, 
                                           parent.module.dep_level + 1, 
                                           parent.module.repo_kind, 
                                           repo_domain, 
                                           postfix_branch, 
                                           postfix_tag) 
            # Log.Log().LevPrint("MSG", 'create node(%s), level %d' % (s, child_module.dep_level)) 
            child_node = BrocTree.BrocNode(child_module, parent, False)
            parent.AddChild(child_node)
            self.AddNode(child_node)
            self._queue.put(child_node)
            self._checked_configs.add(s)
            
        def _download_broc(self, node):
            """
            download BROC file from repository
            Args:
                node : the BrocNode object
            Returns:
                return the abs path of the BROC file if the download succeeds
                return None if the download fails
            """
            broc_path = None
            cmd = None
            # for svn 
            # Log.Log().LevPrint("MSG", 'download BROC %s' % node.module.url)
            if node.module.repo_kind == BrocModule_pb2.Module.SVN:
                hash_value = Function.CalcHash(node.module.url)
                broc_url = os.path.join(node.module.url, 'BROC')
                broc_path = os.path.join(self._broc_dir, "%s_BROC" % hash_value)
                if node.module.revision:
                    broc_url = "%s -r %s" % (broc_url, node.module.revision)
                cmd = "svn export %s %s" % (broc_url, broc_path)
            else:
                # for GIT
                broc_path = os.path.join(node.module.workspace, node.module.module_cvspath, 'BROC')
                broc_dir = os.path.dirname(broc_path)
                if not os.path.exists(broc_path):
                    cmd += "git clone %s %s &&" \
                          % ("%s.git" % node.module.url, "%s" % broc_dir)

                    if node.module.br_name and node.module.br_name != 'master':
                        br_name = node.module.br_name
                        cmd += "cd %s && (git checkout %s || (git fetch origin %s:%s && git checkout %s))" \
                               % (broc_dir, br_name, br_name, br_name, br_name)
                    elif node.module.tag_name:
                        tag_name = node.module.tag_name
                        cmd += "cd %s && (git checkout %s || (git fetch origin %s:%s && git checkout %s))" \
                               % (broc_dir, tag_name, tag_name, tag_name, tag_name)

            if cmd: 
                Log.Log().LevPrint("MSG", "Getting BROC(%s) ..." % cmd)
                ret, msg = Function.RunCommand(cmd) 
                if ret != 0:
                    Log.Log().LevPrint("ERROR", msg)
                    return None

            return broc_path

    # class BrocLoader
    __instance = None
    def __init__(self):
        """ Create singleton instance """
        # Check whether we already have an instance
        if BrocLoader.__instance is None:
            # Create and remember instance
            BrocLoader.__instance = BrocLoader.__impl()

        # Store instance reference as the only member in the handle
        self.__dict__['_BrocLoader__instance'] = BrocLoader.__instance

    def __getattr__(self, attr):
        """ Delegate access to implementation """
        return getattr(self.__instance, attr)

    def __setattr__(self, attr, value):
        """ Delegate access to implementation """
        return setattr(self.__instance, attr, value)
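
BrocLoader's __impl above keeps one mkdtemp() directory for the whole run and derives a unique file name per module URL via Function.CalcHash(). A minimal sketch of that caching idea on its own, using hashlib.md5 as a stand-in hash (Function.CalcHash is broc-specific and may differ):

import hashlib
import os
import tempfile

class ScratchStore(object):
    """One shared scratch directory; each URL maps to a distinct file in it."""

    def __init__(self):
        # Created once per process, like BrocLoader's self._broc_dir.
        self._dir = tempfile.mkdtemp()

    def path_for(self, url):
        # Hash the URL so different modules never collide on disk.
        digest = hashlib.md5(url.encode('utf-8')).hexdigest()
        return os.path.join(self._dir, '%s_BROC' % digest)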

Example 49

View license
def test_Entity():
    # Update the project
    project_name = str(uuid.uuid4())
    project = Project(name=project_name)
    project = syn.store(project)
    schedule_for_cleanup(project)
    project = syn.getEntity(project)
    assert project.name == project_name
    
    # Create and get a Folder
    folder = Folder('Test Folder', parent=project, description='A place to put my junk', foo=1000)
    folder = syn.createEntity(folder)
    folder = syn.getEntity(folder)
    assert folder.name == 'Test Folder'
    assert folder.parentId == project.id
    assert folder.description == 'A place to put my junk'
    assert folder.foo[0] == 1000
    
    # Update and get the Folder
    folder.pi = 3.14159265359
    folder.description = 'The rejects from the other folder'
    folder = syn.store(folder)
    folder = syn.get(folder)
    assert folder.name == 'Test Folder'
    assert folder.parentId == project.id
    assert folder.description == 'The rejects from the other folder'
    assert folder.pi[0] == 3.14159265359

    # Test CRUD on Files, check unicode
    path = utils.make_bogus_data_file()
    schedule_for_cleanup(path)
    a_file = File(path, parent=folder, description=u'Description with funny characters: Déjà vu, ประเทศไทย, 中国',
                  contentType='text/flapdoodle',
                  foo='An arbitrary value',
                  bar=[33,44,55],
                  bday=Datetime(2013,3,15),
                  band=u"Motörhead",
                  lunch=u"すし")
    a_file = syn.store(a_file)
    assert a_file.path == path

    a_file = syn.getEntity(a_file)
    assert a_file.description == u'Description with funny characters: Déjà vu, ประเทศไทย, 中国', u'description= %s' % a_file.description
    assert a_file['foo'][0] == 'An arbitrary value', u'foo= %s' % a_file['foo'][0]
    assert a_file['bar'] == [33,44,55]
    assert a_file['bday'][0] == Datetime(2013,3,15)
    assert a_file.contentType == 'text/flapdoodle', u'contentType= %s' % a_file.contentType
    assert a_file['band'][0] == u"Motörhead", u'band= %s' % a_file['band'][0]
    assert a_file['lunch'][0] == u"すし", u'lunch= %s' % a_file['lunch'][0]
    
    a_file = syn.downloadEntity(a_file)
    assert filecmp.cmp(path, a_file.path)

    assert_raises(ValueError,File,a_file.path,parent=folder,dataFileHandleId=56456)
    b_file = File(name="blah",parent=folder,dataFileHandleId=a_file.dataFileHandleId)
    b_file = syn.store(b_file)

    assert b_file.dataFileHandleId == a_file.dataFileHandleId
    # Update the File
    a_file.path = path
    a_file['foo'] = 'Another arbitrary chunk of text data'
    a_file['new_key'] = 'A newly created value'
    a_file = syn.updateEntity(a_file)
    assert a_file['foo'][0] == 'Another arbitrary chunk of text data'
    assert a_file['bar'] == [33,44,55]
    assert a_file['bday'][0] == Datetime(2013,3,15)
    assert a_file.new_key[0] == 'A newly created value'
    assert a_file.path == path
    assert a_file.versionNumber == 1, "unexpected version number: " +  str(a_file.versionNumber)

    #Test create, store, get Links
    link = Link(a_file['id'], 
                targetVersion=a_file.versionNumber,
                parent=project)
    link = syn.store(link)
    assert link['linksTo']['targetId'] == a_file['id']
    assert link['linksTo']['targetVersionNumber'] == a_file.versionNumber
    assert link['linksToClassName'] == a_file['concreteType']
    
    testLink = syn.get(link)
    assert testLink == link

    link = syn.get(link,followLink= True)
    assert link['foo'][0] == 'Another arbitrary chunk of text data'
    assert link['bar'] == [33,44,55]
    assert link['bday'][0] == Datetime(2013,3,15)
    assert link.new_key[0] == 'A newly created value'
    assert utils.equal_paths(link.path, path)
    assert link.versionNumber == 1, "unexpected version number: " +  str(a_file.versionNumber)

    # Upload a new File and verify
    new_path = utils.make_bogus_data_file()
    schedule_for_cleanup(new_path)
    a_file = syn.uploadFile(a_file, new_path)
    a_file = syn.downloadEntity(a_file)
    assert filecmp.cmp(new_path, a_file.path)
    assert a_file.versionNumber == 2

    # Make sure we can still get the older version of file
    old_random_data = syn.get(a_file.id, version=1)
    assert filecmp.cmp(old_random_data.path, path)

    tmpdir = tempfile.mkdtemp()
    schedule_for_cleanup(tmpdir)

    ## test file name override
    a_file.fileNameOverride = "peaches_en_regalia.zoinks"
    syn.store(a_file)
    ## TODO We haven't defined how filename override interacts with
    ## TODO previously cached files so, side-step that for now by
    ## TODO making sure the file is not in the cache!
    syn.cache.remove(a_file.dataFileHandleId, delete=True)
    a_file_retrieved = syn.get(a_file, downloadLocation=tmpdir)
    assert os.path.basename(a_file_retrieved.path) == a_file.fileNameOverride, os.path.basename(a_file_retrieved.path)

    ## test getting the file from the cache with downloadLocation parameter (SYNPY-330)
    a_file_cached = syn.get(a_file.id, downloadLocation=tmpdir)
    assert a_file_cached.path is not None
    assert os.path.basename(a_file_cached.path) == a_file.fileNameOverride, a_file_cached.path

    print("\n\nList of files in project:\n")
    syn._list(project, recursive=True)
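
schedule_for_cleanup() above is part of the synapseclient test harness, not the standard library. A minimal sketch of the same effect for the mkdtemp() directory using only stdlib pieces (atexit is my substitution, not what the test actually uses):

import atexit
import shutil
import tempfile

def make_cleanup_dir():
    # Directory is removed automatically when the test process exits,
    # roughly what schedule_for_cleanup(tmpdir) arranges above.
    tmpdir = tempfile.mkdtemp()
    atexit.register(shutil.rmtree, tmpdir, True)  # True -> ignore_errors
    return tmpdir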

Example 50

Project: saxo
Source File: script.py
View license
@action
def test(args):
    if args.directory is not None:
        common.error("Tests cannot be run in conjunction with a directory")

    import queue
    import shutil
    import socket
    import subprocess
    import tempfile

    # Save PEP 3122!
    if "." in __name__:
        from . import saxo
    else:
        import saxo

    saxo_script = sys.modules["__main__"].__file__
    saxo_test_server = os.path.join(saxo.path, "test", "server.py")

    tmp = tempfile.mkdtemp()
    outgoing = queue.Queue()

    if not sys.executable:
        common.error("Couldn't find the python executable")

    if not os.path.isdir(tmp):
        common.error("There is no %s directory" % tmp)

    print("python executable:", sys.executable)
    print("saxo path:", saxo.path)
    print("saxo script:", saxo_script)
    print("saxo test server:", saxo_test_server)
    print()
    sys.stdout.flush()

    def run_server():
        server = subprocess.Popen([sys.executable, "-u", saxo_test_server],
            stdout=subprocess.PIPE)

        for line in server.stdout:
            line = line.decode("utf-8", "replace")
            line = line.rstrip("\n")
            outgoing.put("S: " + line)

        outgoing.put("Server finished")

    def run_client():
        saxo_test = os.path.join(tmp, "saxo-test")
        outgoing.put("Running in %s" % saxo_test)

        cmd = [sys.executable, saxo_script, "create", saxo_test]
        code = subprocess.call(cmd)
        if code:
            print("Error creating the client configuration")
            sys.exit(1)

        test_config = os.path.join(saxo.path, "test", "config")
        saxo_test_config = os.path.join(saxo_test, "config")
        with open(test_config) as f:
            with open(saxo_test_config, "w") as w:
                for line in f:
                    line = line.replace("localhost", socket.gethostname())
                    w.write(line)
        # shutil.copy2(test_config, saxo_test_config)

        client = subprocess.Popen([sys.executable, "-u",
                saxo_script, "-f", "start", saxo_test],
            stdout=subprocess.PIPE)

        for line in client.stdout:
            line = line.decode("utf-8", "replace")
            line = line.rstrip("\n")
            outgoing.put("C: " + line)

        manifest01 = {"commands", "config", "database.sqlite3",
            "pid", "plugins"}
        manifest02 = manifest01 | {"client.sock"}

        if set(os.listdir(saxo_test)) <= manifest01:
            shutil.rmtree(saxo_test)
        elif set(os.listdir(saxo_test)) <= manifest02:
            outgoing.put("Warning: client.sock had not been removed")
            shutil.rmtree(saxo_test)
        else:
            outgoing.put("Refusing to delete the saxo test directory")
            outgoing.put("Data was found which does not match the manifest")
            outgoing.put(saxo_test)

    common.thread(run_server)
    common.thread(run_client)

    error = False
    completed = False
    client_buffer = []
    while True:
        line = outgoing.get()

        if line.startswith("S: "):
            print(line)
            if line.startswith("S: ERROR"):
               error = True
            if line.startswith("S: Tests complete"):
               completed = True
            if not line.startswith("S: Test"):
                for c in client_buffer:
                    print(c)
            del client_buffer[:]

        elif line.startswith("C: "):
            client_buffer.append(line)

        else:
            print(line)

        sys.stdout.flush()

        if line == "Server finished":
            break

    if not os.listdir(tmp):
        os.rmdir(tmp)
    else:
        print("Warning: Did not remove:", tmp)

    if completed and (not error):
        sys.exit(0)
    else:
        sys.exit(1)
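
The example above removes the mkdtemp() directory only if it is empty, warning otherwise. When unconditional removal is acceptable, tempfile.TemporaryDirectory (Python 3.2+) handles the cleanup itself; a minimal sketch, with run_client_and_server() as a hypothetical stand-in for the client/server threads above:

import tempfile

with tempfile.TemporaryDirectory() as tmp:
    # tmp and everything inside it are deleted when the block exits,
    # even if run_client_and_server() raises.
    run_client_and_server(tmp)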