os.environ.get

Here are examples of the Python API os.environ.get, taken from open source projects.

200 Examples
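
Before the examples, a quick reference sketch of the call's semantics: os.environ.get(key, default=None) returns the variable's value as a string, or the default when the variable is unset, whereas indexing os.environ raises KeyError.

import os

# get() never raises: it returns None when the variable is unset ...
home = os.environ.get('HOME')

# ... or a caller-supplied default.
editor = os.environ.get('EDITOR', 'vi')

# Indexing, by contrast, raises KeyError for unset variables.
try:
    shell = os.environ['SHELL']
except KeyError:
    shell = '/bin/sh'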

Example 1

Project: avocado
Source File: output.py
def reconfigure(args):
    """
    Adjust logging handlers according to app args and re-log messages.
    """
    # Reconfigure stream loggers
    enabled = getattr(args, "show", None)
    if not isinstance(enabled, list):
        enabled = ["app"]
        args.show = enabled
    if getattr(args, "show_job_log", False):
        del enabled[:]
        enabled.append("test")
    if getattr(args, "silent", False):
        del enabled[:]
    # "silent" is incompatible with "paginator"
    elif getattr(args, "paginator", False) == "on" and TERM_SUPPORT.enabled:
        STD_OUTPUT.enable_paginator()
    if "none" in enabled:
        del enabled[:]
    elif "all" in enabled:
        enabled.extend([_ for _ in BUILTIN_STREAMS if _ not in enabled])
    if os.environ.get("AVOCADO_LOG_EARLY") and "early" not in enabled:
        enabled.append("early")
    if os.environ.get("AVOCADO_LOG_DEBUG") and "debug" not in enabled:
        enabled.append("debug")
    # TODO: Avocado relies on stdout/stderr on some places, re-log them here
    # for now. This should be removed once we replace them with logging.
    if enabled:
        STD_OUTPUT.enable_outputs()
    else:
        STD_OUTPUT.enable_stderr()
    STD_OUTPUT.print_records()
    app_logger = logging.getLogger("avocado.app")
    if "app" in enabled:
        app_handler = ProgressStreamHandler()
        app_handler.setFormatter(logging.Formatter("%(message)s"))
        app_handler.addFilter(FilterInfoAndLess())
        app_handler.stream = STD_OUTPUT.stdout
        app_logger.addHandler(app_handler)
        app_logger.propagate = False
        app_logger.level = logging.DEBUG
    else:
        disable_log_handler("avocado.app")
    app_err_handler = ProgressStreamHandler()
    app_err_handler.setFormatter(logging.Formatter("%(message)s"))
    app_err_handler.addFilter(FilterWarnAndMore())
    app_err_handler.stream = STD_OUTPUT.stderr
    app_logger.addHandler(app_err_handler)
    app_logger.propagate = False
    if not os.environ.get("AVOCADO_LOG_EARLY"):
        logging.getLogger("avocado.test.stdout").propagate = False
        logging.getLogger("avocado.test.stderr").propagate = False
        if "early" in enabled:
            add_log_handler("", logging.StreamHandler, STD_OUTPUT.stdout,
                            logging.DEBUG)
            add_log_handler("avocado.test", logging.StreamHandler,
                            STD_OUTPUT.stdout, logging.DEBUG)
        else:
            disable_log_handler("")
            disable_log_handler("avocado.test")
    if "remote" in enabled:
        add_log_handler("avocado.fabric", stream=STD_OUTPUT.stdout,
                        level=logging.DEBUG)
        add_log_handler("paramiko", stream=STD_OUTPUT.stdout,
                        level=logging.DEBUG)
    else:
        disable_log_handler("avocado.fabric")
        disable_log_handler("paramiko")
    # Not enabled by env
    if not os.environ.get('AVOCADO_LOG_DEBUG'):
        if "debug" in enabled:
            add_log_handler("avocado.app.debug", stream=STD_OUTPUT.stdout)
        else:
            disable_log_handler("avocado.app.debug")

    # Add custom loggers
    for name in [_ for _ in enabled if _ not in BUILTIN_STREAMS.iterkeys()]:
        stream_level = re.split(r'(?<!\\):', name, maxsplit=1)
        name = stream_level[0]
        if len(stream_level) == 1:
            level = logging.DEBUG
        else:
            level = (int(stream_level[1]) if stream_level[1].isdigit()
                     else logging.getLevelName(stream_level[1].upper()))
        try:
            add_log_handler(name, logging.StreamHandler, STD_OUTPUT.stdout,
                            level)
        except ValueError, details:
            app_logger.error("Failed to set logger for --show %s:%s: %s.",
                             name, level, details)
            sys.exit(exit_codes.AVOCADO_FAIL)
    # Remove the in-memory handlers
    for handler in list(logging.root.handlers):
        if isinstance(handler, MemStreamHandler):
            logging.root.handlers.remove(handler)

    # Log early_messages
    for record in MemStreamHandler.log:
        logging.getLogger(record.name).handle(record)
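
Example 1 enables the "early" and "debug" streams whenever AVOCADO_LOG_EARLY or AVOCADO_LOG_DEBUG is set to any non-empty value, so even AVOCADO_LOG_DEBUG=0 counts as enabled. A minimal sketch of a stricter boolean interpretation (the variable name MY_APP_DEBUG is hypothetical):

import os

def env_flag(name, default=False):
    """Interpret an environment variable as a boolean flag."""
    value = os.environ.get(name)
    if value is None:
        return default
    return value.strip().lower() not in ('', '0', 'no', 'false', 'off')

# MY_APP_DEBUG=0 reads as disabled here; a plain truthiness test
# like the one above would treat it as enabled.
if env_flag('MY_APP_DEBUG'):
    print('debug output enabled')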

Example 2

Project: radical.pilot
Source File: shell.py
    def initialize_child(self):

        from .... import pilot as rp

        self.declare_input (rps.EXECUTING_PENDING, rpc.AGENT_EXECUTING_QUEUE)
        self.declare_worker(rps.EXECUTING_PENDING, self.work)

        self.declare_output(rps.AGENT_STAGING_OUTPUT_PENDING, rpc.AGENT_STAGING_OUTPUT_QUEUE)

        self.declare_publisher ('unschedule', rpc.AGENT_UNSCHEDULE_PUBSUB)
        self.declare_publisher ('state',      rpc.AGENT_STATE_PUBSUB)

        # all components use the command channel for control messages
        self.declare_publisher ('command', rpc.AGENT_COMMAND_PUBSUB)
        self.declare_subscriber('command', rpc.AGENT_COMMAND_PUBSUB, self.command_cb)

        # Mimic what virtualenv's "deactivate" would do
        self._deactivate = "# deactivate pilot virtualenv\n"

        old_path  = os.environ.get('_OLD_VIRTUAL_PATH',       None)
        old_ppath = os.environ.get('_OLD_VIRTUAL_PYTHONPATH', None)
        old_home  = os.environ.get('_OLD_VIRTUAL_PYTHONHOME', None)
        old_ps1   = os.environ.get('_OLD_VIRTUAL_PS1',        None)

        if old_path : self._deactivate += 'export PATH="%s"\n'        % old_path
        if old_ppath: self._deactivate += 'export PYTHONPATH="%s"\n'  % old_ppath
        if old_home : self._deactivate += 'export PYTHON_HOME="%s"\n' % old_home
        if old_ps1  : self._deactivate += 'export PS1="%s"\n'         % old_ps1

        self._deactivate += 'unset VIRTUAL_ENV\n\n'

        # FIXME: we should not alter the environment of the running agent, but
        #        only make sure that the CU finds a pristine env.  That also
        #        holds for the unsetting below -- AM
        if old_path : os.environ['PATH']        = old_path
        if old_ppath: os.environ['PYTHONPATH']  = old_ppath
        if old_home : os.environ['PYTHON_HOME'] = old_home
        if old_ps1  : os.environ['PS1']         = old_ps1

        if 'VIRTUAL_ENV' in os.environ :
            del(os.environ['VIRTUAL_ENV'])

        # simplify shell startup / prompt detection
        os.environ['PS1'] = '$ '

        # FIXME:
        #
        # The AgentExecutingComponent needs the LaunchMethods to construct
        # commands.  Those need the scheduler for some lookups and helper
        # methods, and the scheduler needs the LRMS.  The LRMS can in general
        # only be initialized in the original agent environment -- which ultimately
        # limits our ability to place the CU execution on other nodes.
        #
        # As a temporary workaround we pass a None-Scheduler -- this will only
        # work for some launch methods, and specifically not for ORTE, DPLACE
        # and RUNJOB.
        #
        # The clean solution seems to be to make sure that, on 'allocating', the
        # scheduler derives all information needed to use the allocation and
        # attaches them to the CU, so that the launch methods don't need to look
        # them up again.  This will make the 'opaque_slots' more opaque -- but
        # that is the reason for their existence (and opaqueness) in the first
        # place...

        self._task_launcher = rp.agent.LM.create(
                name   = self._cfg['task_launch_method'],
                cfg    = self._cfg,
                logger = self._log)

        self._mpi_launcher = rp.agent.LM.create(
                name   = self._cfg['mpi_launch_method'],
                cfg    = self._cfg,
                logger = self._log)

        # TODO: test that this actually works
        # Remove the configured set of environment variables from the
        # environment that we pass to Popen.
        env_removables = list()
        if self._mpi_launcher : env_removables += self._mpi_launcher.env_removables
        if self._task_launcher: env_removables += self._task_launcher.env_removables
        for e in list(os.environ.keys()):
            for r in env_removables:
                if e.startswith(r):
                    os.environ.pop(e, None)

        # if we need to transplant any original env into the CU, we dig the
        # respective keys from the dump made by bootstrap_1.sh
        self._env_cu_export = dict()
        if self._cfg.get('export_to_cu'):
            with open('env.orig', 'r') as f:
                for line in f.readlines():
                    if '=' in line:
                        k,v = line.split('=', 1)
                        key = k.strip()
                        val = v.strip()
                        if key in self._cfg['export_to_cu']:
                            self._env_cu_export[key] = val

        # the registry keeps track of units to watch, indexed by their shell
        # spawner process ID.  As the registry is shared between the spawner and
        # watcher thread, we use a lock while accessing it.
        self._registry      = dict()
        self._registry_lock = threading.RLock()

        self._cus_to_cancel  = list()
        self._cancel_lock    = threading.RLock()

        self._cached_events = list() # keep monitoring events for pid's which
                                     # are not yet known

        # get some threads going -- those will do all the work.
        import saga.utils.pty_shell as sups
        self.launcher_shell = sups.PTYShell("fork://localhost/")
        self.monitor_shell  = sups.PTYShell("fork://localhost/")

        # run the spawner on the shells
        # tmp = tempfile.gettempdir()
        # Moving back to shared file system again, until it reaches maturity,
        # as this breaks launch methods with a hop, e.g. ssh.
        tmp = os.getcwd() # FIXME: see #658
        self._pilot_id    = self._cfg['pilot_id']
        self._spawner_tmp = "/%s/%s-%s" % (tmp, self._pilot_id, self._cname)

        ret, out, _  = self.launcher_shell.run_sync \
                           ("/bin/sh %s/agent/radical-pilot-spawner.sh %s" \
                           % (os.path.dirname (rp.__file__), self._spawner_tmp))
        if  ret != 0 :
            raise RuntimeError ("failed to bootstrap launcher: (%s)(%s)", ret, out)

        ret, out, _  = self.monitor_shell.run_sync \
                           ("/bin/sh %s/agent/radical-pilot-spawner.sh %s" \
                           % (os.path.dirname (rp.__file__), self._spawner_tmp))
        if  ret != 0 :
            raise RuntimeError ("failed to bootstrap monitor: (%s)(%s)", ret, out)

        # run watcher thread
        self._terminate = threading.Event()
        self._watcher   = threading.Thread(target=self._watch, name="Watcher")
        self._watcher.daemon = True
        self._watcher.start ()

        self._prof.prof('run setup done', uid=self._pilot_id)

        # communicate successful startup
        self.publish('command', {'cmd' : 'alive',
                                 'arg' : self.cname})
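
The FIXME in this example notes that the agent's own environment should not be altered. One way to achieve that, sketched here under the assumption that only the child process needs the pristine environment, is to build a modified copy and hand it to the child:

import os
import subprocess

# Build a child environment instead of editing os.environ in place.
child_env = os.environ.copy()
old_path = child_env.pop('_OLD_VIRTUAL_PATH', None)
if old_path:
    child_env['PATH'] = old_path
child_env.pop('VIRTUAL_ENV', None)
child_env['PS1'] = '$ '

# The parent process keeps its environment untouched.
subprocess.call(['/bin/sh', '-c', 'echo $PATH'], env=child_env)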

Example 3

Project: radical.pilot
Source File: loadleveler.py
    def _configure(self):

        loadl_node_list = None
        loadl_cpus_per_node = None

        # Determine method for determining hosts,
        # either through hostfile or BG/Q environment.
        loadl_hostfile = os.environ.get('LOADL_HOSTFILE')
        self.loadl_bg_block = os.environ.get('LOADL_BG_BLOCK')
        if loadl_hostfile is None and self.loadl_bg_block is None:
            msg = "Neither $LOADL_HOSTFILE or $LOADL_BG_BLOCK set!"
            self._log.error(msg)
            raise RuntimeError(msg)

        # Determine the size of the pilot allocation
        if loadl_hostfile is not None:
            # Non Blue Gene Load Leveler installation.

            loadl_total_tasks_str = os.environ.get('LOADL_TOTAL_TASKS')
            if loadl_total_tasks_str is None:
                msg = "$LOADL_TOTAL_TASKS not set!"
                self._log.error(msg)
                raise RuntimeError(msg)
            else:
                loadl_total_tasks = int(loadl_total_tasks_str)

            # Construct the host list
            loadl_nodes = [line.strip() for line in open(loadl_hostfile)]
            self._log.info("Found LOADL_HOSTFILE %s. Expanded to: %s",
                          loadl_hostfile, loadl_nodes)
            loadl_node_list = list(set(loadl_nodes))

            # Verify that $LOADL_TOTAL_TASKS == len($LOADL_HOSTFILE)
            if loadl_total_tasks != len(loadl_nodes):
                self._log.error("$LLOAD_TOTAL_TASKS(%d) != len($LOADL_HOSTFILE)(%d)",
                               loadl_total_tasks, len(loadl_nodes))

            # Determine the number of cpus per node.  Assume:
            # cores_per_node = len(nodefile) / len(unique_nodes_in_nodefile)
            loadl_cpus_per_node = len(loadl_nodes) / len(loadl_node_list)

        elif self.loadl_bg_block is not None:
            # Blue Gene specific.
            loadl_bg_midplane_list_str = None
            loadl_bg_block_size_str = None

            loadl_job_name = os.environ.get('LOADL_JOB_NAME')
            if loadl_job_name is None:
                msg = "$LOADL_JOB_NAME not set!"
                self._log.error(msg)
                raise RuntimeError(msg)

            # Get the board list and block shape from 'llq -l' output
            output = subprocess.check_output(["llq", "-l", loadl_job_name])
            loadl_bg_board_list_str = None
            loadl_bg_block_shape_str = None
            for line in output.splitlines():
                # Detect BG board list
                if "BG Node Board List: " in line:
                    loadl_bg_board_list_str = line.split(':')[1].strip()
                elif "BG Midplane List: " in line:
                    loadl_bg_midplane_list_str = line.split(':')[1].strip()
                elif "BG Shape Allocated: " in line:
                    loadl_bg_block_shape_str = line.split(':')[1].strip()
                elif "BG Size Allocated: " in line:
                    loadl_bg_block_size_str = line.split(':')[1].strip()
            if not loadl_bg_board_list_str:
                msg = "No board list found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("BG Node Board List: %s" % loadl_bg_board_list_str)
            if not loadl_bg_midplane_list_str:
                msg = "No midplane list found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("BG Midplane List: %s" % loadl_bg_midplane_list_str)
            if not loadl_bg_block_shape_str:
                msg = "No board shape found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("BG Shape Allocated: %s" % loadl_bg_block_shape_str)
            if not loadl_bg_block_size_str:
                msg = "No board size found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            loadl_bg_block_size = int(loadl_bg_block_size_str)
            self._log.debug("BG Size Allocated: %d" % loadl_bg_block_size)

            # Build nodes data structure to be handled by Torus Scheduler
            try:
                self.torus_block = self._bgq_construct_block(
                    loadl_bg_block_shape_str, loadl_bg_board_list_str,
                    loadl_bg_block_size, loadl_bg_midplane_list_str)
            except Exception as e:
                msg = "Couldn't construct block: %s" % e.message
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("Torus block constructed:")
            for e in self.torus_block:
                self._log.debug("%s %s %s %s" %
                                (e[0], [e[1][key] for key in sorted(e[1])], e[2], e[3]))

            try:
                loadl_node_list = [entry[SchedulerTorus.TORUS_BLOCK_NAME] for entry in self.torus_block]
            except Exception as e:
                msg = "Couldn't construct node list."
                self._log.error(msg)
                raise RuntimeError(msg)
            #self._log.debug("Node list constructed: %s" % loadl_node_list)

            # Construct sub-block table
            try:
                self.shape_table = self._bgq_create_sub_block_shape_table(loadl_bg_block_shape_str)
            except Exception as e:
                msg = "Couldn't construct shape table: %s" % e.message
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("Shape table constructed: ")
            for (size, dim) in [(key, self.shape_table[key]) for key in sorted(self.shape_table)]:
                self._log.debug("%s %s" % (size, [dim[key] for key in sorted(dim)]))

            # Determine the number of cpus per node
            loadl_cpus_per_node = self.BGQ_CORES_PER_NODE

            # BGQ Specific Torus labels
            self.torus_dimension_labels = self.BGQ_DIMENSION_LABELS

        self.node_list = loadl_node_list
        self.cores_per_node = loadl_cpus_per_node

        self._log.debug("Sleeping for #473 ...")
        time.sleep(5)
        self._log.debug("Configure done")

Example 4

Project: radical.pilot
Source File: session.py
    def __init__ (self, database_url=None, database_name=None, name=None):
        """Creates a new session.

        If called without a name, a new Session instance is created and
        stored in the database. If a name is given, we try to re-connect
        to an existing session instead of creating a new one.

        **Arguments:**
            * **database_url** (`string`): The MongoDB URL.  If none is given,
              RP uses the environment variable RADICAL_PILOT_DBURL.  If that is
              not set either, a default URL is used (with a warning).

            * **database_name** (`string`): An alternative database name
              (default: 'radicalpilot').

            * **name** (`string`): An optional human readable name; if set, it
              is also used as the session ID.

        **Returns:**
            * A new Session instance.

        **Raises:**
            * :class:`radical.pilot.DatabaseError`

        """

        logger = ru.get_logger('radical.pilot')

        if database_name:
            logger.warning("The 'database_name' parameter is deprecated - please specify an URL path")
        else:
            database_name = 'radicalpilot'

        # init the base class inits
        saga.Session.__init__ (self)
        self._dh        = ru.DebugHelper()
        self._valid     = False
        self._terminate = threading.Event()
        self._terminate.clear()

        # before doing anything else, set up the debug helper for the lifetime
        # of the session.
        self._debug_helper = ru.DebugHelper ()

        # Dictionaries holding all manager objects created during the session.
        self._pilot_manager_objects = dict()
        self._unit_manager_objects  = dict()

        # The resource configuration dictionary associated with the session.
        self._resource_configs = {}

        if  not database_url:
            database_url = os.environ.get("RADICAL_PILOT_DBURL")

        if  not database_url:
            logger.warning('using default dburl %s', default_dburl)
            database_url = default_dburl

        dburl = ru.Url(database_url)

        # if the database url contains a path element, we interpret that as
        # database name (without the leading slash)
        if  not dburl.path         or \
            dburl.path[0]   != '/' or \
            len(dburl.path) <=  1  :
            logger.warning("incomplete URLs are deprecated -- missing database name!")
            dburl.path = database_name # defaults to 'radicalpilot'

        logger.info("using database %s" % dburl)

        # ----------------------------------------------------------------------
        # create new session
        self._dbs       = None
        self._uid       = None
        self._connected = None
        self._dburl     = None

        try:
            if name :
                uid = name
                ru.reset_id_counters(prefix=['pmgr', 'umgr', 'pilot', 'unit', 'unit.%(counter)06d'])
            else :
                uid = ru.generate_id ('rp.session', mode=ru.ID_PRIVATE)
                ru.reset_id_counters(prefix=['pmgr', 'umgr', 'pilot', 'unit', 'unit.%(counter)06d'])

            # initialize profiling
            self.prof = ru.Profiler('%s' % uid)
            self.prof.prof('start session', uid=uid)

            logger.report.info ('<<new session: ')
            logger.report.plain('[%s]' % uid)
            logger.report.info ('<<database   : ')
            logger.report.plain('[%s]' % dburl)

            self._dbs = dbSession(sid   = uid,
                                  name  = name,
                                  dburl = dburl)

            # only now the session should have an uid
            self._dburl = self._dbs._dburl
            self._name  = name
            self._uid   = uid

            # from here on we should be able to close the session again
            self._valid = True
            logger.info("New Session created: %s." % str(self))

        except Exception, ex:
            logger.report.error(">>err\n")
            logger.exception ('session create failed')
            raise PilotException("Couldn't create new session (database URL '%s' incorrect?): %s" \
                            % (dburl, ex))  

        # Loading all "default" resource configurations
        module_path  = os.path.dirname(os.path.abspath(__file__))
        default_cfgs = "%s/configs/resource_*.json" % module_path
        config_files = glob.glob(default_cfgs)

        for config_file in config_files:

            try :
                logger.info("Load resource configurations from %s" % config_file)
                rcs = ResourceConfig.from_file(config_file)
            except Exception as e :
                logger.error ("skip config file %s: %s" % (config_file, e))
                continue

            for rc in rcs:
                logger.info("Load resource configurations for %s" % rc)
                self._resource_configs[rc] = rcs[rc].as_dict() 

        user_cfgs     = "%s/.radical/pilot/configs/resource_*.json" % os.environ.get ('HOME')
        config_files  = glob.glob(user_cfgs)

        for config_file in config_files:

            try :
                rcs = ResourceConfig.from_file(config_file)
            except Exception as e :
                logger.error ("skip config file %s: %s" % (config_file, e))
                continue

            for rc in rcs:
                logger.info("Load resource configurations for %s" % rc)

                if  rc in self._resource_configs :
                    # config exists -- merge user config into it
                    ru.dict_merge (self._resource_configs[rc],
                                   rcs[rc].as_dict(),
                                   policy='overwrite')
                else :
                    # new config -- add as is
                    self._resource_configs[rc] = rcs[rc].as_dict() 

        default_aliases = "%s/configs/resource_aliases.json" % module_path
        self._resource_aliases = ru.read_json_str (default_aliases)['aliases']

        self.prof.prof('configs parsed', uid=self._uid)

        _rec = os.environ.get('RADICAL_PILOT_RECORD_SESSION')
        if _rec:
            self._rec = "%s/%s" % (_rec, self._uid)
            os.system('mkdir -p %s' % self._rec)
            ru.write_json({'dburl' : str(self._dburl)}, "%s/session.json" % self._rec)
            logger.info("recording session in %s" % self._rec)
        else:
            self._rec = None

        logger.report.ok('>>ok\n')
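
Two details above are worth noting: the database URL is resolved through an argument -> environment -> default chain, and the user config path is built from os.environ.get('HOME'), which can be None on some platforms. A sketch of both, with the default URL as a placeholder value:

import os

def resolve_dburl(database_url=None):
    # Argument first, then the environment, then a hard-coded fallback.
    return (database_url
            or os.environ.get('RADICAL_PILOT_DBURL')
            or 'mongodb://localhost:27017/')  # placeholder default

# expanduser('~') degrades more gracefully than os.environ.get('HOME'),
# which would yield the literal string 'None' in the "%s" formatting above.
user_cfgs = os.path.join(os.path.expanduser('~'),
                         '.radical', 'pilot', 'configs')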

Example 5

def merge_master_to_feature():
    ORG_NAME=os.environ.get('GITHUB_ORG_NAME')
    REPO_NAME=os.environ.get('GITHUB_REPO_NAME')
    MASTER_BRANCH=os.environ.get('MASTER_BRANCH','master')
    USERNAME=os.environ.get('GITHUB_USERNAME')
    PASSWORD=os.environ.get('GITHUB_PASSWORD')
    BUILD_COMMIT=os.environ.get('BUILD_COMMIT', None)
    
    g = Github(USERNAME,PASSWORD)

    try:
        org = g.get_organization(ORG_NAME)
    except:
        org = g.get_user(ORG_NAME)
    repo = org.get_repo(REPO_NAME)
    
    master = repo.get_branch(MASTER_BRANCH)
    
    exception = None
    
    pulls = repo.get_pulls()
    
    for branch in repo.get_branches():
        # Skip any branches which don't start with feature/
        if not branch.name.startswith('feature/'):
            print 'Skipping branch %s: does not start with feature/' % branch.name
            continue
    
        # Skip the master branch
        if branch.name == master.name:
            print 'Skipping branch %s: is master branch' % branch.name
            continue
    
        # Skip branches which are not behind master
        # Changed to check if the files list is empty since merge creates a new
        # commit on master, which makes the merged feature branch behind by a
        # commit but with no files.
    
        # Get a comparison of master vs branch.  compare.ahead_by means master is ahead of the branch.
        # This orientation is necessary so the compare.files list lists files changed in master but not
        # in the branch.
        if BUILD_COMMIT:
            compare = repo.compare(branch.commit.sha, BUILD_COMMIT)
        else:
            compare = repo.compare(branch.commit.sha, master.commit.sha)
        if not compare.files:
            print 'Skipping branch %s: branch has no files different than %s' % (branch.name, master.name)
            continue
    
        # Try to do a merge directly in Github
        try:
            merge = repo.merge(branch.name, master.name)
            print 'Merged %s commits into %s (%s)' % (compare.ahead_by, branch.name, merge.sha)
        except GithubException, e:
            # Auto-merge failed
            if e.data.get('message') == u'Merge conflict':
                existing_pull = None
                for pull in pulls:
                    if pull.base.ref == branch.name:
                        existing_pull = pull
                        print 'Skipping branch %s: pull request already exists' % branch.name
    
                if not existing_pull:
                    # If the failure was due to merge conflict, create a pull request
                    pull = repo.create_pull(
                        title="Merge conflict merging %s into %s" % (master.name, branch.name),
                        body="mrbelvedere tried to merge new commits to %s but hit a merge conflict.  Please resolve manually" % master.name,
                        base=branch.name,
                        head=master.name,
                    )
                    print 'Created pull request %s to resolve merge conflict in %s' % (pull.number, branch.name)
    
                    # Assign pull request to branch committers
                    commits = repo.get_commits(sha = branch.commit.sha)
                    assignee = None
                    # Find the most recent committer who is not the user used by this script
                    # NOTE: This presumes the user being used by this script is a robot user, not a real dev
                    for commit in commits:
                        try:
                            # Sometimes commit.committer.login does not exist in the response.  If so, continue 
                            if commit.committer.login != USERNAME:
                                assignee = commit.committer
                                break
                        except AttributeError:
                            pass
    
                    if assignee:
                        repo.get_issue(pull.number).edit(assignee = assignee)
    
            else:
                # For other types of failures, store the last exception and raise at the end
                exception = e
    
    if exception:
        # If an exception other than a merge conflict was encountered, raise it
        raise exception
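
merge_master_to_feature() passes the credentials straight to Github(), so unset variables only surface later as authentication errors. A sketch of an up-front check that fails fast instead (the REQUIRED tuple simply mirrors the variables read above):

import os

REQUIRED = ('GITHUB_ORG_NAME', 'GITHUB_REPO_NAME',
            'GITHUB_USERNAME', 'GITHUB_PASSWORD')

missing = [name for name in REQUIRED if not os.environ.get(name)]
if missing:
    raise SystemExit('Missing environment variables: %s' % ', '.join(missing))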

Example 6

Project: khard
Source File: config.py
        def __init__(self):
            self.config = None
            self.address_book_list = []
            self.original_uid_dict = {}
            self.uid_dict = {}

            # set locale
            locale.setlocale(locale.LC_ALL, '')

            # load config file
            xdg_config_home = os.environ.get("XDG_CONFIG_HOME") or \
                os.path.expanduser("~/.config")
            config_file = os.environ.get("KHARD_CONFIG") or \
                os.path.join(xdg_config_home, "khard", "khard.conf")
            if not os.path.exists(config_file):
                print("Config file %s not available" % config_file)
                sys.exit(2)

            # parse config file contents
            try:
                self.config = configobj.ConfigObj(
                    config_file, interpolation=False)
            except configobj.ParseError as err:
                print("Error in config file\n%s" % err)
                sys.exit(2)

            # general settings
            if "general" not in self.config:
                print('Error in config file\n'
                      'Missing main section "[general]".')
                sys.exit(2)

            # debug
            if 'debug' not in self.config['general']:
                self.config['general']['debug'] = False
            elif self.config['general']['debug'] == "yes":
                self.config['general']['debug'] = True
            elif self.config['general']['debug'] == "no":
                self.config['general']['debug'] = False
            else:
                print("Error in config file\n"
                      "Invalid value for debug parameter\n"
                      "Possible values: yes, no")
                sys.exit(2)

            # editor
            self.config['general']['editor'] = \
                self.config['general'].get("editor") \
                or os.environ.get("EDITOR")
            if self.config['general']['editor'] is None:
                print("Error in config file\n"
                      "Set path to your preferred text editor in khard's "
                      "config file or the $EDITOR shell variable\n"
                      "Example for khard.conf: editor = vim")
                sys.exit(2)
            self.config['general']['editor'] = find_executable(
                os.path.expanduser(self.config['general']['editor']))
            if self.config['general']['editor'] is None:
                print("Error in config file\n"
                      "Invalid editor path or executable not found.")
                sys.exit(2)

            # merge editor
            self.config['general']['merge_editor'] = \
                self.config['general'].get("merge_editor") \
                or os.environ.get("MERGE_EDITOR")
            if self.config['general']['merge_editor'] is None:
                print("Error in config file\nSet path to your preferred text "
                      "merge editor in khard's config file or the "
                      "$MERGE_EDITOR shell variable\n"
                      "Example for khard.conf: merge_editor = vimdiff")
                sys.exit(2)
            self.config['general']['merge_editor'] = find_executable(
                os.path.expanduser(self.config['general']['merge_editor']))
            if self.config['general']['merge_editor'] is None:
                print("Error in config file\n"
                      "Invalid merge editor path or executable not found.")
                sys.exit(2)

            # default action
            if "default_action" not in self.config['general']:
                print("Error in config file\n"
                      "Missing default action parameter.")
                sys.exit(2)
            elif self.config['general']['default_action'] not in \
                    Actions.get_list_of_all_actions():
                print("Error in config file\nInvalid value for default_action "
                      "parameter\nPossible values: %s" % ', '.join(
                          sorted(Actions.get_list_of_all_actions())))
                sys.exit(2)

            # contact table settings
            if "contact table" not in self.config:
                self.config['contact table'] = {}

            # sort contact table by first or last name
            if "sort" not in self.config['contact table']:
                self.config['contact table']['sort'] = "first_name"
            elif self.config['contact table']['sort'] not in \
                    ["first_name", "last_name"]:
                print("Error in config file\n"
                      "Invalid value for sort parameter\n"
                      "Possible values: first_name, last_name")
                sys.exit(2)

            # display names in contact table by first or last name
            if "display" not in self.config['contact table']:
                # if display by name attribute is not present in the config
                # file use the sort attribute value for backwards compatibility
                self.config['contact table']['display'] = \
                        self.config['contact table']['sort']
            elif self.config['contact table']['display'] not in \
                    ["first_name", "last_name"]:
                print("Error in config file\n"
                      "Invalid value for display parameter\n"
                      "Possible values: first_name, last_name")
                sys.exit(2)

            # reverse contact table
            if 'reverse' not in self.config['contact table']:
                self.config['contact table']['reverse'] = False
            elif self.config['contact table']['reverse'] == "yes":
                self.config['contact table']['reverse'] = True
            elif self.config['contact table']['reverse'] == "no":
                self.config['contact table']['reverse'] = False
            else:
                print("Error in config file\n"
                      "Invalid value for reverse parameter\n"
                      "Possible values: yes, no")
                sys.exit(2)

            # group contact table by address book
            if "group_by_addressbook" not in self.config['contact table']:
                self.config['contact table']['group_by_addressbook'] = False
            elif self.config['contact table']['group_by_addressbook'] == "yes":
                self.config['contact table']['group_by_addressbook'] = True
            elif self.config['contact table']['group_by_addressbook'] == "no":
                self.config['contact table']['group_by_addressbook'] = False
            else:
                print("Error in config file\n"
                      "Invalid value for group_by_addressbook parameter\n"
                      "Possible values: yes, no")
                sys.exit(2)

            # nickname
            if "show_nicknames" not in self.config['contact table']:
                self.config['contact table']['show_nicknames'] = False
            elif self.config['contact table']['show_nicknames'] == "yes":
                self.config['contact table']['show_nicknames'] = True
            elif self.config['contact table']['show_nicknames'] == "no":
                self.config['contact table']['show_nicknames'] = False
            else:
                print("Error in config file\n"
                      "Invalid value for show_nicknames parameter\n"
                      "Possible values: yes, no")
                sys.exit(2)

            # show uids
            if "show_uids" not in self.config['contact table']:
                self.config['contact table']['show_uids'] = True
            elif self.config['contact table']['show_uids'] == "yes":
                self.config['contact table']['show_uids'] = True
            elif self.config['contact table']['show_uids'] == "no":
                self.config['contact table']['show_uids'] = False
            else:
                print("Error in config file\n"
                      "Invalid value for show_uids parameter\n"
                      "Possible values: yes, no")
                sys.exit(2)

            # vcard settings
            if "vcard" not in self.config:
                self.config['vcard'] = {}

            # get supported private objects
            if "private_objects" not in self.config['vcard']:
                self.config['vcard']['private_objects'] = []
            else:
                # check if object only contains letters, digits or -
                for object in self.config['vcard']['private_objects']:
                    if object != re.sub("[^a-zA-Z0-9-]", "", object):
                        print("Error in config file\n"
                              "private object %s may only contain letters, "
                              "digits and the \"-\" character." % object)
                        sys.exit(2)
                    if object == re.sub("[^-]", "", object) \
                            or object.startswith("-") \
                            or object.endswith("-"):
                        print("Error in config file\n"
                              "A \"-\" in a private object label must be "
                              "at least surrounded by one letter or digit.")
                        sys.exit(2)

            # preferred vcard version
            if "preferred_version" not in self.config['vcard']:
                self.config['vcard']['preferred_version'] = "3.0"
            elif self.config['vcard']['preferred_version'] not in \
                    self.get_supported_vcard_versions():
                print("Error in config file\n"
                      "Invalid value for preferred_version parameter\n"
                      "Possible values: %s"
                      % self.get_supported_vcard_versions())
                sys.exit(2)

            # speed up program by pre-searching in the vcard source files
            if 'search_in_source_files' not in self.config['vcard']:
                self.config['vcard']['search_in_source_files'] = False
            elif self.config['vcard']['search_in_source_files'] == "yes":
                self.config['vcard']['search_in_source_files'] = True
            elif self.config['vcard']['search_in_source_files'] == "no":
                self.config['vcard']['search_in_source_files'] = False
            else:
                print("Error in config file\n"
                      "Invalid value for search_in_source_files parameter\n"
                      "Possible values: yes, no")
                sys.exit(2)

            # skip unparsable vcards
            if 'skip_unparsable' not in self.config['vcard']:
                self.config['vcard']['skip_unparsable'] = False
            elif self.config['vcard']['skip_unparsable'] == "yes":
                self.config['vcard']['skip_unparsable'] = True
            elif self.config['vcard']['skip_unparsable'] == "no":
                self.config['vcard']['skip_unparsable'] = False
            else:
                print("Error in config file\n"
                      "Invalid value for skip_unparsable parameter\n"
                      "Possible values: yes, no")
                sys.exit(2)

            # load address books
            if "addressbooks" not in self.config:
                print('Error in config file\n'
                      'Missing main section "[addressbooks]".')
                sys.exit(2)
            if len(self.config['addressbooks'].keys()) == 0:
                print("Error in config file\n"
                      "No address book entries available.")
                sys.exit(2)
            for name in self.config['addressbooks'].keys():
                # create address book object
                try:
                    address_book = AddressBook(
                        name, self.config['addressbooks'][name]['path'])
                except KeyError as e:
                    print("Error in config file\n"
                          "Missing path to the \"%s\" address book." % name)
                    sys.exit(2)
                except IOError as e:
                    print("Error in config file\n%s" % e)
                    sys.exit(2)
                else:
                    # add address book to list
                    self.address_book_list.append(address_book)
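
The config file lookup above implements the XDG base-directory convention by hand. A reusable sketch of the same fallback (xdg_config_path is a hypothetical helper name):

import os

def xdg_config_path(*parts):
    """Resolve a path under $XDG_CONFIG_HOME, defaulting to ~/.config."""
    base = os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config')
    return os.path.join(base, *parts)

# Equivalent to the khard lookup above; `or` (rather than a get() default)
# also covers the case where the variable is set but empty.
config_file = os.environ.get('KHARD_CONFIG') or xdg_config_path('khard', 'khard.conf')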

Example 7

Project: st2
Source File: client.py
    def __init__(self, base_url=None, auth_url=None, api_url=None, stream_url=None,
                 api_version=None, cacert=None, debug=False, token=None, api_key=None):
        # Get CLI options. If not given, then try to get it from the environment.
        self.endpoints = dict()

        # Populate the endpoints
        if base_url:
            self.endpoints['base'] = base_url
        else:
            self.endpoints['base'] = os.environ.get('ST2_BASE_URL', DEFAULT_BASE_URL)

        api_version = api_version or os.environ.get('ST2_API_VERSION', DEFAULT_API_VERSION)

        if api_url:
            self.endpoints['api'] = api_url
        else:
            self.endpoints['api'] = os.environ.get(
                'ST2_API_URL', '%s:%s/%s' % (self.endpoints['base'], DEFAULT_API_PORT, api_version))

        if auth_url:
            self.endpoints['auth'] = auth_url
        else:
            self.endpoints['auth'] = os.environ.get(
                'ST2_AUTH_URL', '%s:%s' % (self.endpoints['base'], DEFAULT_AUTH_PORT))

        if stream_url:
            self.endpoints['stream'] = stream_url
        else:
            self.endpoints['stream'] = os.environ.get(
                'ST2_STREAM_URL', '%s:%s/%s' %
                                  (self.endpoints['base'], DEFAULT_STREAM_PORT, api_version))

        if cacert is not None:
            self.cacert = cacert
        else:
            self.cacert = os.environ.get('ST2_CACERT', None)

        # Note: boolean is also a valid value for "cacert"
        is_cacert_string = isinstance(self.cacert, six.string_types)
        if (self.cacert and is_cacert_string and not os.path.isfile(self.cacert)):
            raise ValueError('CA cert file "%s" does not exist.' % (self.cacert))

        self.debug = debug

        # Note: This is a nasty hack for now, but we need to get rid of the decorator abuse
        if token:
            os.environ['ST2_AUTH_TOKEN'] = token

        self.token = token

        if api_key:
            os.environ['ST2_API_KEY'] = api_key

        self.api_key = api_key

        # Instantiate resource managers and assign appropriate API endpoint.
        self.managers = dict()
        self.managers['Token'] = ResourceManager(
            models.Token, self.endpoints['auth'], cacert=self.cacert, debug=self.debug)
        self.managers['RunnerType'] = ResourceManager(
            models.RunnerType, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Action'] = ResourceManager(
            models.Action, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['ActionAlias'] = ActionAliasResourceManager(
            models.ActionAlias, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['ActionAliasExecution'] = ResourceManager(
            models.ActionAliasExecution, self.endpoints['api'],
            cacert=self.cacert, debug=self.debug)
        self.managers['ApiKey'] = ResourceManager(
            models.ApiKey, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Config'] = ConfigManager(
            models.Config, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['ConfigSchema'] = ResourceManager(
            models.ConfigSchema, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['LiveAction'] = LiveActionResourceManager(
            models.LiveAction, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Pack'] = PackResourceManager(
            models.Pack, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Policy'] = ResourceManager(
            models.Policy, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['PolicyType'] = ResourceManager(
            models.PolicyType, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Rule'] = ResourceManager(
            models.Rule, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Sensor'] = ResourceManager(
            models.Sensor, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['TriggerType'] = ResourceManager(
            models.TriggerType, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Trigger'] = ResourceManager(
            models.Trigger, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['TriggerInstance'] = TriggerInstanceResourceManager(
            models.TriggerInstance, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['KeyValuePair'] = ResourceManager(
            models.KeyValuePair, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Webhook'] = ResourceManager(
            models.Webhook, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Timer'] = ResourceManager(
            models.Timer, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Trace'] = ResourceManager(
            models.Trace, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['RuleEnforcement'] = ResourceManager(
            models.RuleEnforcement, self.endpoints['api'], cacert=self.cacert, debug=self.debug)
        self.managers['Stream'] = StreamManager(
            self.endpoints['stream'], cacert=self.cacert, debug=self.debug)
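
Each endpoint above follows the same shape: prefer the constructor argument, otherwise consult an ST2_* variable with a built-in default. A generic sketch of that pattern (resolve is a hypothetical helper, and the default URL is a placeholder):

import os

def resolve(cli_value, env_name, default=None):
    """Prefer an explicit argument, then the environment, then a default."""
    if cli_value:
        return cli_value
    return os.environ.get(env_name, default)

# Mirrors the endpoint resolution above.
base_url = resolve(None, 'ST2_BASE_URL', 'http://127.0.0.1')  # placeholder default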

Example 8

Project: Django-facebook
Source File: basecommand.py
    def main(self, complete_args, args, initial_options):
        options, args = self.parser.parse_args(args)
        self.merge_options(initial_options, options)

        level = 1 # Notify
        level += options.verbose
        level -= options.quiet
        level = logger.level_for_integer(4-level)
        complete_log = []
        logger.consumers.extend(
            [(level, sys.stdout),
             (logger.DEBUG, complete_log.append)])
        if options.log_explicit_levels:
            logger.explicit_levels = True

        self.setup_logging()

        if options.require_venv and not options.venv:
            # If a venv is required check if it can really be found
            if not os.environ.get('VIRTUAL_ENV'):
                logger.fatal('Could not find an activated virtualenv (required).')
                sys.exit(3)
            # Automatically install in currently activated venv if required
            options.respect_venv = True

        if args and args[-1] == '___VENV_RESTART___':
            ## FIXME: We don't do anything with this value yet:
            args = args[:-2]
            options.venv = None
        else:
            # If given the option to respect the activated environment
            # check if no venv is given as a command line parameter
            if options.respect_venv and os.environ.get('VIRTUAL_ENV'):
                if options.venv and os.path.exists(options.venv):
                    # Make sure command line venv and environmental are the same
                    if (os.path.realpath(os.path.expanduser(options.venv)) !=
                            os.path.realpath(os.environ.get('VIRTUAL_ENV'))):
                        logger.fatal("Given virtualenv (%s) doesn't match "
                                     "currently activated virtualenv (%s)."
                                     % (options.venv, os.environ.get('VIRTUAL_ENV')))
                        sys.exit(3)
                else:
                    options.venv = os.environ.get('VIRTUAL_ENV')
                    logger.info('Using already activated environment %s' % options.venv)
        if options.venv:
            logger.info('Running in environment %s' % options.venv)
            site_packages = bool(options.site_packages)
            restart_in_venv(options.venv, options.venv_base, site_packages,
                            complete_args)
            # restart_in_venv should actually never return, but for clarity...
            return

        ## FIXME: not sure if this should come before or after venv restart
        if options.log:
            log_fp = open_logfile(options.log, 'a')
            logger.consumers.append((logger.DEBUG, log_fp))
        else:
            log_fp = None

        socket.setdefaulttimeout(options.timeout or None)

        urlopen.setup(proxystr=options.proxy, prompting=not options.no_input)

        exit = 0
        try:
            self.run(options, args)
        except (InstallationError, UninstallationError):
            e = sys.exc_info()[1]
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            exit = 1
        except BadCommand:
            e = sys.exc_info()[1]
            logger.fatal(str(e))
            logger.info('Exception information:\n%s' % format_exc())
            exit = 1
        except:
            logger.fatal('Exception:\n%s' % format_exc())
            exit = 2

        if log_fp is not None:
            log_fp.close()
        if exit:
            log_fn = options.log_file
            text = '\n'.join(complete_log)
            logger.fatal('Storing complete log in %s' % log_fn)
            log_fp = open_logfile(log_fn, 'w')
            log_fp.write(text)
            log_fp.close()
        return exit
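
The virtualenv checks above reduce to normalizing paths and comparing them against $VIRTUAL_ENV; a standalone sketch of that comparison:

import os

def active_virtualenv():
    """Return the activated virtualenv's real path, or None."""
    venv = os.environ.get('VIRTUAL_ENV')
    return os.path.realpath(os.path.expanduser(venv)) if venv else None

def matches_active_virtualenv(path):
    """Check whether a directory is the currently activated virtualenv."""
    active = active_virtualenv()
    return (active is not None and
            os.path.realpath(os.path.expanduser(path)) == active)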

Example 9

Project: python-coloredlogs
Source File: __init__.py
def install(level=None, **kw):
    """
    Enable colored terminal output for Python's :mod:`logging` module.

    :param level: The default logging level (an integer or a string with a
                  level name, defaults to :data:`logging.INFO`).
    :param logger: The logger to which the stream handler should be attached (a
                   :class:`~logging.Logger` object, defaults to the root logger).
    :param fmt: Set the logging format (a string like those accepted by
                :class:`~logging.Formatter`, defaults to
                :data:`DEFAULT_LOG_FORMAT`).
    :param datefmt: Set the date/time format (a string, defaults to
                    :data:`DEFAULT_DATE_FORMAT`).
    :param level_styles: A dictionary with custom level styles (defaults to
                         :data:`DEFAULT_LEVEL_STYLES`).
    :param field_styles: A dictionary with custom field styles (defaults to
                         :data:`DEFAULT_FIELD_STYLES`).
    :param stream: The stream where log messages should be written to (a
                   file-like object, defaults to :data:`sys.stderr`).
    :param isatty: :data:`True` to use a :class:`ColoredFormatter`,
                   :data:`False` to use a normal :class:`~logging.Formatter`
                   (defaults to auto-detection using
                   :func:`~humanfriendly.terminal.terminal_supports_colors()`).
    :param reconfigure: If :data:`True` (the default) multiple calls to
                        :func:`coloredlogs.install()` will each override
                        the previous configuration.
    :param use_chroot: Refer to :class:`HostNameFilter`.
    :param programname: Refer to :class:`ProgramNameFilter`.
    :param syslog: If :data:`True` then :func:`~coloredlogs.syslog.enable_system_logging()`
                   will be called without arguments (defaults to :data:`False`).

    The :func:`coloredlogs.install()` function is similar to
    :func:`logging.basicConfig()`, both functions take a lot of optional
    keyword arguments but try to do the right thing by default:

    1. If `reconfigure` is :data:`True` (it is by default) and an existing
       :class:`~logging.StreamHandler` is found that is connected to either
       :data:`~sys.stdout` or :data:`~sys.stderr` the handler will be removed.
       This means that first calling :func:`logging.basicConfig()` and then
       calling :func:`coloredlogs.install()` will replace the stream handler
       instead of adding a duplicate stream handler. If `reconfigure` is
       :data:`False` and an existing handler is found no further steps are
       taken (to avoid installing a duplicate stream handler).

    2. A :class:`~logging.StreamHandler` is created and connected to the stream
       given by the `stream` keyword argument (:data:`sys.stderr` by
       default). The stream handler's level is set to the value of the `level`
       keyword argument.

    3. A :class:`ColoredFormatter` is created if the `isatty` keyword argument
       allows it (or auto-detection allows it), otherwise a normal
       :class:`~logging.Formatter` is created. The formatter is initialized
       with the `fmt` and `datefmt` keyword arguments (or their computed
       defaults).

    4. :func:`HostNameFilter.install()` and :func:`ProgramNameFilter.install()`
       are called to enable the use of additional fields in the log format.

    5. The formatter is added to the handler and the handler is added to the
       logger. The logger's level is set to :data:`logging.NOTSET` so that each
       handler gets to decide which records they filter. This makes it possible
       to have controllable verbosity on the terminal while logging at full
       verbosity to the system log or a file.
    """
    logger = kw.get('logger') or logging.getLogger()
    reconfigure = kw.get('reconfigure', True)
    stream = kw.get('stream', sys.stderr)
    # Remove any existing stream handler that writes to stdout or stderr, even
    # if the stream handler wasn't created by coloredlogs because multiple
    # stream handlers (in the same hierarchy) writing to stdout or stderr would
    # create duplicate output.
    standard_streams = (sys.stdout, sys.stderr)
    match_streams = standard_streams if stream in standard_streams else (stream,)
    match_handler = lambda handler: match_stream_handler(handler, match_streams)
    handler, logger = replace_handler(logger, match_handler, reconfigure)
    # Make sure reconfiguration is allowed or not relevant.
    if not (handler and not reconfigure):
        # Make it easy to enable system logging.
        if kw.get('syslog', False):
            from coloredlogs import syslog
            syslog.enable_system_logging()
        # Figure out whether we can use ANSI escape sequences.
        use_colors = kw.get('isatty', None)
        if use_colors or use_colors is None:
            if NEED_COLORAMA:
                if HAVE_COLORAMA:
                    # On Windows we can only use ANSI escape
                    # sequences if Colorama is available.
                    colorama.init()
                    use_colors = True
                else:
                    # If Colorama isn't available then we specifically
                    # shouldn't emit ANSI escape sequences!
                    use_colors = False
            elif use_colors is None:
                # Auto-detect terminal support on other platforms.
                use_colors = terminal_supports_colors(stream)
        # Create a stream handler.
        handler = logging.StreamHandler(stream)
        if level is None:
            level = os.environ.get('COLOREDLOGS_LOG_LEVEL') or 'INFO'
        handler.setLevel(level_to_number(level))
        # Prepare the arguments to the formatter. The caller is
        # allowed to customize `fmt' and/or `datefmt' as desired.
        formatter_options = dict(fmt=kw.get('fmt'), datefmt=kw.get('datefmt'))
        # Come up with a default log format?
        if not formatter_options['fmt']:
            # Use the log format defined by the environment variable
            # $COLOREDLOGS_LOG_FORMAT or fall back to the default.
            formatter_options['fmt'] = os.environ.get('COLOREDLOGS_LOG_FORMAT') or DEFAULT_LOG_FORMAT
        # If the caller didn't specify a date/time format we'll use the format
        # defined by the environment variable $COLOREDLOGS_DATE_FORMAT (or fall
        # back to the default).
        if not formatter_options['datefmt']:
            formatter_options['datefmt'] = os.environ.get('COLOREDLOGS_DATE_FORMAT') or DEFAULT_DATE_FORMAT
        # Do we need to make %(hostname) available to the formatter?
        HostNameFilter.install(
            handler=handler,
            fmt=formatter_options['fmt'],
            use_chroot=kw.get('use_chroot', True),
        )
        # Do we need to make %(programname) available to the formatter?
        ProgramNameFilter.install(
            handler=handler,
            fmt=formatter_options['fmt'],
            programname=kw.get('programname'),
        )
        # Inject additional formatter arguments specific to ColoredFormatter?
        if use_colors:
            for name, environment_name in (('field_styles', 'COLOREDLOGS_FIELD_STYLES'),
                                           ('level_styles', 'COLOREDLOGS_LEVEL_STYLES')):
                value = kw.get(name)
                if value is None:
                    # If no styles have been specified we'll fall back
                    # to the styles defined by the environment variable.
                    environment_value = os.environ.get(environment_name)
                    if environment_value is not None:
                        value = parse_encoded_styles(environment_value)
                if value is not None:
                    formatter_options[name] = value
        # Create a (possibly colored) formatter.
        formatter_type = ColoredFormatter if use_colors else logging.Formatter
        handler.setFormatter(formatter_type(**formatter_options))
        # Install the stream handler.
        logger.setLevel(logging.NOTSET)
        logger.addHandler(handler)
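
As a quick usage sketch (not from the project): the level argument above falls back to $COLOREDLOGS_LOG_LEVEL and then to 'INFO', so a caller can control verbosity purely through the environment. The 'myapp' logger name below is illustrative.

import logging
import os

import coloredlogs

# The level falls back to $COLOREDLOGS_LOG_LEVEL (and then 'INFO')
# when no explicit level is passed, mirroring the lookup above.
os.environ.setdefault('COLOREDLOGS_LOG_LEVEL', 'DEBUG')

logger = logging.getLogger('myapp')  # illustrative logger name
coloredlogs.install(level=None, logger=logger)
logger.debug('colored output enabled via the environment default')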

Example 10

Project: stacks
Source File: main.py
View license
def main():
    for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
        signal.signal(sig, handler)

    parser, args = cli.parse_options()

    if not args.subcommand:
        parser.print_help()
        sys.exit(0)

    config_file = vars(args).get('config', None)
    config_dir = vars(args).get('config_dir', None)
    env = vars(args).get('env', None)
    config = config_load(env, config_file, config_dir)

    if args.subcommand == 'config':
        print_config(config, args.property_name, output_format=args.output_format)
        sys.exit(0)

    config['get_ami_id'] = aws.get_ami_id
    config['get_vpc_id'] = aws.get_vpc_id
    config['get_zone_id'] = aws.get_zone_id
    config['get_stack_output'] = aws.get_stack_output
    config['get_stack_resource'] = aws.get_stack_resource

    # Figure out profile value in the following order
    # - cli arg
    # - env variable
    # - default profile if exists
    if args.profile:
        if not profile_exists(args.profile):
            print('Profile "{}" does not exist.'.format(args.profile))
            sys.exit(1)
        profile = args.profile
    elif os.environ.get('AWS_DEFAULT_PROFILE'):
        profile = os.environ.get('AWS_DEFAULT_PROFILE')
    elif profile_exists('default'):
        profile = 'default'
    else:
        profile = None

    # Figure out region value in the following order
    # - cli arg
    # - env variable
    # - region from config
    if args.region:
        region = args.region
    elif os.environ.get('AWS_DEFAULT_REGION'):
        region = os.environ.get('AWS_DEFAULT_REGION')
    else:
        region = get_region_name(profile)
        if not region:
            print('Region is not specified.')
            sys.exit(1)
    config['region'] = region

    # Not great, but try to catch everything. The above should be refactored
    # into a function which handles setting up connections to different AWS services.
    try:
        ec2_conn = boto.ec2.connect_to_region(region, profile_name=profile)
        vpc_conn = boto.vpc.connect_to_region(region, profile_name=profile)
        cf_conn = boto.cloudformation.connect_to_region(region, profile_name=profile)
        r53_conn = boto.route53.connect_to_region(region, profile_name=profile)
        s3_conn = boto.s3.connect_to_region(region, profile_name=profile)
        config['ec2_conn'] = ec2_conn
        config['vpc_conn'] = vpc_conn
        config['cf_conn'] = cf_conn
        config['r53_conn'] = r53_conn
        config['s3_conn'] = s3_conn
    except Exception:
        print(sys.exc_info()[1])
        sys.exit(1)

    if args.subcommand == 'resources':
        output = cf.stack_resources(cf_conn, args.name, args.logical_id)
        if output:
            print(output)
        cf_conn.close()

    if args.subcommand == 'outputs':
        output = cf.stack_outputs(cf_conn, args.name, args.output_name)
        if output:
            print(output)
        cf_conn.close()

    if args.subcommand == 'list':
        output = cf.list_stacks(cf_conn, args.name, args.verbose)
        if output:
            print(output)
        cf_conn.close()

    if args.subcommand == 'create' or args.subcommand == 'update':
        if args.property:
            properties = validate_properties(args.property)
            config.update(properties)

        if args.subcommand == 'create':
            cf.create_stack(cf_conn, args.name, args.template, config,
                            dry=args.dry_run, follow=args.events_follow)
        else:
            cf.create_stack(cf_conn, args.name, args.template, config,
                            update=True, dry=args.dry_run,
                            follow=args.events_follow, create_on_update=args.create_on_update)

    if args.subcommand == 'delete':
        cf.delete_stack(cf_conn, args.name, region, profile, args.yes)
        if args.events_follow:
            cf.get_events(cf_conn, args.name, args.events_follow, 10)

    if args.subcommand == 'events':
        cf.get_events(cf_conn, args.name, args.events_follow, args.lines)
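
The profile and region lookups above implement a CLI-argument, then environment-variable, then fallback precedence. Here is a minimal self-contained sketch of the same pattern; resolve_profile is a hypothetical helper, not part of stacks:

import os


def resolve_profile(cli_profile=None):
    # Hypothetical helper mirroring the precedence used above:
    # explicit CLI argument, then $AWS_DEFAULT_PROFILE, then None.
    if cli_profile:
        return cli_profile
    return os.environ.get('AWS_DEFAULT_PROFILE')


print(resolve_profile('staging'))  # 'staging' (CLI argument wins)
print(resolve_profile())           # value of $AWS_DEFAULT_PROFILE, or None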

Example 11

Project: typhoon-blade
Source File: rules_generator.py
View license
    def generate_compliation_flags(self):
        """Generates compliation flags. """
        toolchain_dir = os.environ.get('TOOLCHAIN_DIR', '')
        if toolchain_dir and not toolchain_dir.endswith('/'):
            toolchain_dir += '/'
        cpp_str = toolchain_dir + os.environ.get('CPP', 'cpp')
        cc_str = toolchain_dir + os.environ.get('CC', 'gcc')
        cxx_str = toolchain_dir + os.environ.get('CXX', 'g++')
        nvcc_str = toolchain_dir + os.environ.get('NVCC', 'nvcc')
        ld_str = toolchain_dir + os.environ.get('LD', 'g++')
        console.info('CPP=%s' % cpp_str)
        console.info('CC=%s' % cc_str)
        console.info('CXX=%s' % cxx_str)
        console.info('NVCC=%s' % nvcc_str)
        console.info('LD=%s' % ld_str)

        self.ccflags_manager.set_cpp_str(cpp_str)

        # To modify CC, CXX, LD according to the building environment and
        # project configuration
        build_with_distcc = (self.distcc_enabled and
                             self.build_environment.distcc_env_prepared)
        cc_str = self._append_prefix_to_building_var(
                         prefix='distcc',
                         building_var=cc_str,
                         condition=build_with_distcc)

        cxx_str = self._append_prefix_to_building_var(
                         prefix='distcc',
                         building_var=cxx_str,
                         condition=build_with_distcc)

        build_with_ccache = self.build_environment.ccache_installed
        cc_str = self._append_prefix_to_building_var(
                         prefix='ccache',
                         building_var=cc_str,
                         condition=build_with_ccache)

        cxx_str = self._append_prefix_to_building_var(
                         prefix='ccache',
                         building_var=cxx_str,
                         condition=build_with_ccache)

        build_with_dccc = (self.dccc_enabled and
                           self.build_environment.dccc_env_prepared)
        ld_str = self._append_prefix_to_building_var(
                        prefix='dccc',
                        building_var=ld_str,
                        condition=build_with_dccc)

        cc_env_str = 'CC="%s", CXX="%s"' % (cc_str, cxx_str)
        ld_env_str = 'LINK="%s"' % ld_str
        nvcc_env_str = 'NVCC="%s"' % nvcc_str

        cc_config = configparse.blade_config.get_config('cc_config')
        extra_incs = cc_config['extra_incs']
        extra_incs_str = ', '.join(['"%s"' % inc for inc in extra_incs])
        if not extra_incs_str:
            extra_incs_str = '""'

        (cppflags_except_warning, linkflags) = self.ccflags_manager.get_flags_except_warning()

        builder_list = []
        cuda_incs_str = ' '.join(['-I%s' % inc for inc in self.cuda_inc])
        self._add_rule(
            'nvcc_object_bld = Builder(action = MakeAction("%s -ccbin g++ %s '
            '$NVCCFLAGS -o $TARGET -c $SOURCE", compile_source_message))' % (
                    nvcc_str, cuda_incs_str))
        builder_list.append('BUILDERS = {"NvccObject" : nvcc_object_bld}')

        self._add_rule(
            'nvcc_binary_bld = Builder(action = MakeAction("%s %s '
            '$NVCCFLAGS -o $TARGET ", link_program_message))' % (
                    nvcc_str, cuda_incs_str))
        builder_list.append('BUILDERS = {"NvccBinary" : nvcc_binary_bld}')

        for builder in builder_list:
            self._add_rule('top_env.Append(%s)' % builder)

        self._add_rule('top_env.Replace(%s, %s, '
                       'CPPPATH=[%s, "%s", "%s"], '
                       'CPPFLAGS=%s, CFLAGS=%s, CXXFLAGS=%s, '
                       '%s, LINKFLAGS=%s)' %
                       (cc_env_str, nvcc_env_str,
                        extra_incs_str, self.build_dir, self.python_inc,
                        cc_config['cppflags'] + cppflags_except_warning,
                        cc_config['cflags'],
                        cc_config['cxxflags'],
                        ld_env_str, linkflags))

        self._setup_cache()

        if build_with_distcc:
            self.build_environment.setup_distcc_env()

        for rule in self.build_environment.get_rules():
            self._add_rule(rule)

        self._setup_warnings()
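
The toolchain lookups above lean on os.environ.get defaults so that unset variables silently fall back to the stock tools. A minimal sketch of that idea, runnable on its own:

import os

# Each tool falls back to a standard default when its environment
# variable is unset, exactly as in the rules generator above.
toolchain_dir = os.environ.get('TOOLCHAIN_DIR', '')
if toolchain_dir and not toolchain_dir.endswith('/'):
    toolchain_dir += '/'

cc = toolchain_dir + os.environ.get('CC', 'gcc')
cxx = toolchain_dir + os.environ.get('CXX', 'g++')
print('CC=%s CXX=%s' % (cc, cxx))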

Example 12

Project: turbolift
Source File: __init__.py
View license
def auth_plugins(auth_plugins=None):
    """Authentication plugins.

    Usage: add any plugin here that will serve as a rapid means to
    authenticate to an OpenStack environment.

    The syntax is as follows:
    >>> __auth_plugins__ = {
    ...     'new_plugin_name': {
    ...         'os_auth_url': 'https://localhost:5000/v2.0/tokens',
    ...         'os_prefix': {
    ...             'os_apikey': 'apiKeyCredentials',
    ...             'os_password': 'passwordCredentials'
    ...         },
    ...         'args': {
    ...             'commands': [
    ...                 '--new-plugin-name-auth'
    ...             ],
    ...             'choices': [
    ...                 'RegionOne'
    ...             ],
    ...             'help': 'Authentication plugin for New Plugin Name',
    ...             'default': os.environ.get('OS_NEW_PLUGIN_AUTH', None),
    ...             'metavar': '[REGION]'
    ...         }
    ...     }
    ... }

    If the subdomain is part of the auth URL, as is the case with HP, add
    "%(region)s" to the "os_auth_url" value. The region value from the list of
    choices will be used as the string replacement. Note that if the
    `os_prefix` key is added the system will override the authentication body
    prefix with the string provided. At this time the choices are os_apikey,
    os_password, and os_token. All key entries are optional; if one is not
    specified for a given credential type, a `NotImplementedError` will be
    raised.

    :param auth_plugins: Additional plugins to add in
    :type auth_plugins: ``dict``
    :returns: ``dict``
    """

    __auth_plugins__ = {
        'os_rax_auth': {
            'os_auth_url': 'https://identity.api.rackspacecloud.com/v2.0/'
                           'tokens',
            'os_prefix': {
                'os_apikey': 'RAX-KSKEY:apiKeyCredentials',
                'os_password': 'passwordCredentials'
            },
            'args': {
                'commands': [
                    '--os-rax-auth'
                ],
                'choices': [
                    'dfw',
                    'ord',
                    'iad',
                    'syd',
                    'hkg',
                    'lon'
                ],
                'help': 'Authentication Plugin for Rackspace Cloud'
                        ' env[OS_RAX_AUTH]',
                'default': os.environ.get('OS_RAX_AUTH', None),
                'metavar': '[REGION]'
            }
        },
        'rax_auth_v1': {
            'os_auth_version': 'v1.0',
            'os_auth_url': 'https://identity.api.rackspacecloud.com/v1.0',
            'args': {
                'commands': [
                    '--rax-auth-v1'
                ],
                'action': 'store_true',
                'help': 'Authentication Plugin for Rackspace Cloud V1'
            }
        },
        'os_rax_auth_lon': {
            'os_auth_url': 'https://lon.identity.api.rackspacecloud.com/'
                           'v2.0/tokens',
            'os_prefix': {
                'os_apikey': 'RAX-KSKEY:apiKeyCredentials',
                'os_password': 'passwordCredentials'
            },
            'args': {
                'commands': [
                    '--os-rax-auth-lon'
                ],
                'choices': [
                    'lon'
                ],
                'help': 'Authentication Plugin for Rackspace Cloud'
                        ' env[OS_RAX_AUTH_LON]',
                'default': os.environ.get('OS_RAX_AUTH_LON', None),
                'metavar': '[REGION]'
            }
        },
        'os_hp_auth': {
            'os_auth_url': 'https://%(region)s.identity.hpcloudsvc.com:35357/'
                           'v2.0/tokens',
            'os_prefix': {
                'os_password': 'passwordCredentials'
            },
            'args': {
                'commands': [
                    '--os-hp-auth'
                ],
                'choices': [
                    'region-b.geo-1',
                    'region-a.geo-1'
                ],
                'help': 'Authentication Plugin for HP Cloud'
                        ' env[OS_HP_AUTH]',
                'default': os.environ.get('OS_HP_AUTH', None),
                'metavar': '[REGION]'
            }
        }
    }
    if auth_plugins:
        __auth_plugins__.update(auth_plugins)

    return __auth_plugins__
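
As the docstring explains, callers can extend the built-in table by passing their own dict. A minimal sketch, assuming auth_plugins is importable from turbolift; the plugin name and OS_EXAMPLE_AUTH variable are made up for illustration:

import os

from turbolift import auth_plugins  # assumes the function above is importable

extra = {
    'os_example_auth': {  # made-up plugin name for illustration
        'os_auth_url': 'https://identity.example.com/v2.0/tokens',
        'args': {
            'commands': ['--os-example-auth'],
            'choices': ['region-1'],
            'help': 'Authentication plugin for Example Cloud'
                    ' env[OS_EXAMPLE_AUTH]',
            'default': os.environ.get('OS_EXAMPLE_AUTH', None),
            'metavar': '[REGION]'
        }
    }
}

plugins = auth_plugins(auth_plugins=extra)
assert 'os_rax_auth' in plugins and 'os_example_auth' in plugins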

Example 13

Project: awsmfa
Source File: __main__.py
View license
def parse_args(args):
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser(
        prog='awsmfa',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--version',
                        action='version',
                        version=VERSION,
                        help='Display version number and exit.')
    parser.add_argument('role_to_assume',
                        nargs='?',
                        metavar='role-to-assume',
                        default=os.environ.get('AWS_MFA_ROLE_TO_ASSUME'),
                        help='Full ARN of the role you wish to assume. If not '
                             'provided, the temporary credentials will '
                             'inherit the user\'s policies. The temporary '
                             'credentials will also satisfy the '
                             'aws:MultiFactorAuthPresent condition variable. '
                             'If the AWS_MFA_ROLE_TO_ASSUME environment '
                             'variable is set, it will be used as the default '
                             'value.')
    parser.add_argument('--aws-credentials',
                        default=os.path.join(os.path.expanduser('~'),
                                             '.aws/credentials'),
                        help='Full path to the ~/.aws/credentials file.')
    parser.add_argument('-d', '--duration',
                        type=int,
                        default=int(os.environ.get('AWS_MFA_DURATION',
                                                   SIX_HOURS_IN_SECONDS)),
                        help='The number of seconds that you wish the '
                             'temporary credentials to be valid for. For role '
                             'assumption, this will be limited to an hour. If '
                             'the AWS_MFA_DURATION environment variable is '
                             'set, it will be used as the default value.')
    parser.add_argument('-i', '--identity-profile',
                        default=os.environ.get('AWS_MFA_IDENTITY_PROFILE',
                                               'identity'),
                        help='Name of the section in the credentials file '
                             'representing your long-lived credentials. '
                             'All values in this section '
                             '(including custom parameters such as "region" '
                             'or "s3") will be copied to the '
                             '--target-profile, with the access key, secret '
                             'key, and session key replaced by the temporary '
                             'credentials. If the AWS_MFA_IDENTITY_PROFILE '
                             'environment variable is set, it will be used as '
                             'the default value.')
    parser.add_argument('--serial-number',
                        default=os.environ.get('AWS_MFA_SERIAL_NUMBER', None),
                        help='Full ARN of the MFA device. If not provided, '
                             'this will be read from the '
                             'AWS_MFA_SERIAL_NUMBER environment variable or '
                             'queried from IAM automatically. For automatic '
                             'detection to work, your identity profile must '
                             'have IAM policies that allow "aws iam '
                             'get-user" and "aws iam list-mfa-devices".')
    parser.add_argument('-t', '--target-profile',
                        default=os.environ.get('AWS_MFA_TARGET_PROFILE',
                                               'default'),
                        help='Name of the section in the credentials file to '
                             'overwrite with temporary credentials. This '
                             'defaults to "default" because most tools read '
                             'that profile. The existing values in this '
                             'profile will be overwritten. If the '
                             'AWS_MFA_TARGET_PROFILE environment variable is '
                             'set, it will be used as the default value.')
    parser.add_argument('--role-session-name',
                        default='awsmfa_%s' % datetime.datetime.now().strftime(
                            '%Y%m%dT%H%M%S'),
                        help='The name of the temporary session. Applies only '
                             'when assuming a role.')
    parser.add_argument('-c', '--token-code',
                        default=os.environ.get('AWS_MFA_TOKEN_CODE'),
                        help='The 6 digit numeric MFA code generated by your '
                             'device, or "skip". If the AWS_MFA_TOKEN_CODE '
                             'environment variable is set, it will be used as '
                             'the default value. If this is \"skip\", '
                             'temporary credentials will still be acquired '
                             'but they will not satisfy the '
                             'sts:MultiFactorAuthPresent condition.')
    parser.add_argument('--rotate-identity-keys',
                        default=safe_bool(os.environ.get(
                            'AWS_MFA_ROTATE_IDENTITY_KEYS', False)),
                        action='store_true',
                        help='Rotate the identity profile access keys '
                             'immediately upon successful acquisition of '
                             'temporary credentials. This deletes your '
                             'identity profile access keys from the '
                             '--aws-credentials file and from AWS using the '
                             'IAM DeleteAccessKey API, and then writes a new '
                             'identity access key pair using the results of '
                             'IAM CreateAccessKey. If the '
                             'AWS_MFA_ROTATE_IDENTITY_KEYS environment '
                             'variable is set to True, this behavior is '
                             'enabled by default.')
    parser.add_argument('--env',
                        default=safe_bool(os.environ.get(
                            'AWS_MFA_ENV', False)),
                        action='store_true',
                        help='Print the AWS_ACCESS_KEY_ID, '
                             'AWS_SECRET_ACCESS_KEY, and AWS_SESSION_TOKEN '
                             'environment variables in a form suitable for '
                             'evaluation in a shell.')
    args = parser.parse_args(args)
    return args
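
The recurring pattern in this parser is seeding argparse defaults from environment variables, so the environment acts as a per-user default layer that explicit flags still override. A stripped-down sketch; the MYTOOL_PROFILE name is illustrative:

import argparse
import os

parser = argparse.ArgumentParser(prog='mytool')
parser.add_argument('--profile',
                    # The environment variable supplies the default;
                    # an explicit --profile flag still overrides it.
                    default=os.environ.get('MYTOOL_PROFILE', 'default'),
                    help='profile name (env: MYTOOL_PROFILE)')
args = parser.parse_args([])
print(args.profile)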

Example 14

Project: DIRAC
Source File: LSFTimeLeft.py
View license
  def __init__( self ):
    """ Standard constructor
    """
    self.log = gLogger.getSubLogger( 'LSFTimeLeft' )
    self.jobID = os.environ.get( 'LSB_JOBID' )
    self.queue = os.environ.get( 'LSB_QUEUE' )
    self.bin = os.environ.get( 'LSF_BINDIR' )
    self.host = os.environ.get( 'LSB_HOSTS' )
    self.year = time.strftime( '%Y', time.gmtime() )
    self.log.verbose( 'LSB_JOBID=%s, LSB_QUEUE=%s, LSF_BINDIR=%s, LSB_HOSTS=%s' % ( self.jobID,
                                                                                    self.queue,
                                                                                    self.bin,
                                                                                    self.host ) )

    self.cpuLimit = None
    self.cpuRef = None
    self.normRef = None
    self.wallClockLimit = None
    self.hostNorm = None

    cmd = '%s/bqueues -l %s' % ( self.bin, self.queue )
    result = runCommand( cmd )
    if not result['OK']:
      return

    lines = str( result['Value'] ).split( '\n' )
    self.log.debug( 'From %s' % cmd, '\n'.join( [line if len( line ) <= 128 else line[:128] + ' [...]' for line in lines] ) )
    for i in xrange( len( lines ) ):
      if re.search( '.*CPULIMIT.*', lines[i] ):
        info = lines[i + 1].split()
        if len( info ) >= 4:
          self.cpuLimit = float( info[0] ) * 60
          self.cpuRef = info[3]
        elif len( info ) == 2 and info[1] == "min":
          self.cpuLimit = float( info[0] ) * 60
          self.cpuRef = None
        else:
          self.log.warn( 'Problem parsing "%s" for CPU limit' % lines[i + 1] )
          self.cpuLimit = -1
      elif re.search( '.*RUNLIMIT.*', lines[i] ):
        info = lines[i + 1].split()
        if len( info ) >= 1:
          self.wallClockLimit = float( info[0] ) * 60
        else:
          self.log.warn( 'Problem parsing "%s" for wall clock limit' % lines[i + 1] )
          self.wallClockLimit = -1

    modelMaxNorm = 0
    if self.cpuRef:
      # Now try to get the CPU_FACTOR for this reference CPU,
      # it must be either a Model, a Host or the largest Model

      cmd = '%s/lshosts -w %s' % ( self.bin, self.cpuRef )
      result = runCommand( cmd )
      if result['OK']:
        # At CERN this command will return an error since there is no host defined
        # with the name of the reference Host.
        lines = str( result['Value'] ).split( '\n' )
        l1 = lines[0].split()
        l2 = lines[1].split()
        if len( l1 ) > len( l2 ):
          self.log.error( "Failed lshost command", "%s:\n %s\n %s" % ( cmd, lines[0], lines[0] ) )
        else:
          for i in xrange( len( l1 ) ):
            if l1[i] == 'cpuf':
              try:
                self.normRef = float( l2[i] )
                self.log.info( 'Reference Normalization taken from Host', '%s: %s' % ( self.cpuRef, self.normRef ) )
              except ValueError as e:
                self.log.exception( 'Exception parsing lshosts output', '', e )

      if not self.normRef:
        # Check whether there is a model defined with the name of cpuRef
        cmd = '%s/lsinfo -m' % ( self.bin )
        result = runCommand( cmd )
        if result['OK']:
          lines = str( result['Value'] ).split( '\n' )
          for line in lines[1:]:
            words = line.split()
            if len( words ) > 1:
              try:
                norm = float( words[1] )
                if norm > modelMaxNorm:
                  modelMaxNorm = norm
                if words[0].find( self.cpuRef ) > -1:
                  self.normRef = norm
                  self.log.info( 'Reference Normalization taken from Host Model',
                                 '%s: %s' % ( self.cpuRef, self.normRef ) )
              except ValueError as e:
                self.log.exception( 'Exception parsing lsinfo output', '', e )

      if not self.normRef:
        # Now parse LSF configuration files
        if not os.path.isfile( './lsf.sh' ):
          os.symlink( os.path.join( os.environ['LSF_ENVDIR'], 'lsf.conf' ) , './lsf.sh' )
        # As the variables are not exported, we must force it
        ret = sourceEnv( 10, ['./lsf', '&& export LSF_CONFDIR' ] )
        if ret['OK']:
          lsfEnv = ret['outputEnv']
          shared = None
          try:
            egoShared = os.path.join( lsfEnv['LSF_CONFDIR'], 'ego.shared' )
            lsfShared = os.path.join( lsfEnv['LSF_CONFDIR'], 'lsf.shared' )
            if os.path.exists( egoShared ):
              shared = egoShared
            elif os.path.exists( lsfShared ):
              shared = lsfShared
          except KeyError as e:
            self.log.exception( 'Exception getting LSF configuration', '', e )
          if shared:
            with open( shared ) as f:
              hostModelSection = False
              for line in f.readlines():
                if line.find( 'Begin HostModel' ) == 0:
                  hostModelSection = True
                  continue
                if not hostModelSection:
                  continue
                if line.find( 'End HostModel' ) == 0:
                  break
                line = line.strip()
                if line and line.split()[0] == self.cpuRef:
                  try:
                    self.normRef = float( line.split()[1] )
                    self.log.info( 'Reference Normalization taken from Configuration File',
                                   '(%s) %s: %s' % ( shared, self.cpuRef, self.normRef ) )
                  except ValueError as e:
                    self.log.exception( 'Exception reading LSF configuration', '', e )
          else:
            self.log.warn( 'Could not find LSF configuration' )
        else:
          self.log.error( 'Cannot source the LSF environment', ret['Message'] )
    if not self.normRef:
      # If nothing works take this as the unit
      self.normRef = 1.
      # If nothing worked, take the maximum defined for a Model
      # if modelMaxNorm:
      #  self.normRef = modelMaxNorm
      #  self.log.info( 'Reference Normalization taken from Max Model:', self.normRef )

    # Now get the Normalization for the current Host
    if self.host:
      cmd = '%s/lshosts -w %s' % ( self.bin, self.host )
      result = runCommand( cmd )
      if result['OK']:
        lines = str( result['Value'] ).split( '\n' )
        l1 = lines[0].split()
        l2 = lines[1].split()
        if len( l1 ) > len( l2 ):
          self.log.error( "Failed lshost command", "%s:\n %s\n %s" % ( cmd, lines[0], lines[0] ) )
        else:
          for i in xrange( len( l1 ) ):
            if l1[i] == 'cpuf':
              try:
                self.hostNorm = float( l2[i] )
                self.log.info( 'Host Normalization', '%s: %s' % ( self.host, self.hostNorm ) )
              except ValueError as e:
                self.log.exception( 'Exception parsing lshosts output', l1, e )
              finally:
                break

      if self.hostNorm and self.normRef:
        self.hostNorm /= self.normRef
        self.log.info( 'CPU power w.r.t. batch unit', self.hostNorm )

      if self.hostNorm:
        # Set the limits in real seconds; guard against limits that
        # could not be parsed from the bqueues output above
        if self.cpuLimit is not None:
          self.cpuLimit /= self.hostNorm
        if self.wallClockLimit is not None:
          self.wallClockLimit /= self.hostNorm
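
Note that the LSB_*/LSF_* lookups in the constructor use os.environ.get without a default, so each attribute is simply None outside an LSF batch job. A quick sketch of that behavior:

import os

# Without a default, os.environ.get returns None for unset variables
# instead of raising KeyError the way os.environ[...] would.
job_id = os.environ.get('LSB_JOBID')
if job_id is None:
    print('not running under LSF')
else:
    print('LSF job %s' % job_id)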

Example 15

Project: twarc
Source File: twarc.py
View license
def main():
    """
    The twarc command line.
    """
    parser = argparse.ArgumentParser("twarc")
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s {version}'.format(
                            version=__version__))
    parser.add_argument("--search", dest="search",
                        help="search for tweets matching a query")
    parser.add_argument("--max_id", dest="max_id",
                        help="maximum tweet id to search for")
    parser.add_argument("--since_id", dest="since_id",
                        help="smallest id to search for")
    parser.add_argument("--result_type", dest="result_type",
                        choices=["mixed", "recent", "popular"],
                        default="recent", help="search result type")
    parser.add_argument("--lang", dest="lang",
                        help="limit to ISO 639-1 language code"),
    parser.add_argument("--geocode", dest="geocode",
                        help="limit by latitude,longitude,radius")
    parser.add_argument("--track", dest="track",
                        help="stream tweets matching track filter")
    parser.add_argument("--follow", dest="follow",
                        help="stream tweets from user ids")
    parser.add_argument("--locations", dest="locations",
                        help="stream tweets from a particular location")
    parser.add_argument("--sample", action="store_true",
                        help="stream sample live tweets")
    parser.add_argument("--timeline", dest="timeline",
                        help="get user timeline for a screen name")
    parser.add_argument("--timeline_user_id", dest="timeline_user_id",
                        help="get user timeline for a user id")
    parser.add_argument("--lookup_screen_names", dest="lookup_screen_names",
                        nargs='+', help="look up users by screen name; \
                                         returns user objects")
    parser.add_argument("--lookup_user_ids", dest="lookup_user_ids", nargs='+',
                        help="look up users by user id; returns user objects")
    parser.add_argument("--follower_ids", dest="follower_ids", nargs=1,
                        help="retrieve follower lists; returns follower ids")
    parser.add_argument("--friend_ids", dest="friend_ids", nargs=1,
                        help="retrieve friend (following) list; returns friend ids")
    parser.add_argument("--hydrate", action="append", dest="hydrate",
                        help="rehydrate tweets from a file of tweet ids, \
                              use - for stdin")
    parser.add_argument("--trends_available", action="store_true",
                        help="show all regions available for trend summaries")
    parser.add_argument("--trends_place", dest="trends_place", nargs=1,
                        type=int, metavar="WOEID",
                        help="recent trends for WOEID specified")
    parser.add_argument("--trends_closest", dest="trends_closest", nargs=1,
                        metavar="LAT,LONG",
                        help="show available trend regions for LAT,LONG")
    parser.add_argument("--trends_place_exclude",
                        dest="trends_place_exclude", nargs=1,
                        type=int, metavar="WOEID",
                        help="recent trends for WOEID specified sans hashtags")
    parser.add_argument("--log", dest="log",
                        default="twarc.log", help="log file")
    parser.add_argument("--consumer_key",
                        default=None, help="Twitter API consumer key")
    parser.add_argument("--consumer_secret",
                        default=None, help="Twitter API consumer secret")
    parser.add_argument("--access_token",
                        default=None, help="Twitter API access key")
    parser.add_argument("--access_token_secret",
                        default=None, help="Twitter API access token secret")
    parser.add_argument('-c', '--config',
                        default=default_config_filename(),
                        help="Config file containing Twitter keys and secrets")
    parser.add_argument('-p', '--profile', default='main',
                        help="Name of a profile in your configuration file")
    parser.add_argument('-w', '--warnings', action='store_true',
                        help="Include warning messages in output")
    parser.add_argument("--connection_errors", type=int, default="0",
                        help="Number of connection errors before giving up. Default is to keep trying.")
    parser.add_argument("--http_errors", type=int, default="0",
                        help="Number of http errors before giving up. Default is to keep trying.")

    args = parser.parse_args()

    logging.basicConfig(
        filename=args.log,
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s"
    )

    consumer_key = args.consumer_key or os.environ.get('CONSUMER_KEY')
    consumer_secret = args.consumer_secret or os.environ.get('CONSUMER_SECRET')
    access_token = args.access_token or os.environ.get('ACCESS_TOKEN')
    access_token_secret = args.access_token_secret or os.environ.get('ACCESS_TOKEN_SECRET')

    if not (consumer_key and consumer_secret and
            access_token and access_token_secret):
        credentials = load_config(args.config, args.profile)
        if credentials:
            consumer_key = credentials['consumer_key']
            consumer_secret = credentials['consumer_secret']
            access_token = credentials['access_token']
            access_token_secret = credentials['access_token_secret']
        else:
            print("Please enter Twitter authentication credentials")
            consumer_key = get_input('consumer key: ')
            consumer_secret = get_input('consumer secret: ')
            access_token = get_input('access_token: ')
            access_token_secret = get_input('access token secret: ')
            save_keys(args.profile, consumer_key, consumer_secret,
                      access_token, access_token_secret)

    t = Twarc(consumer_key=consumer_key,
              consumer_secret=consumer_secret,
              access_token=access_token,
              access_token_secret=access_token_secret,
              connection_errors=args.connection_errors,
              http_errors=args.http_errors)

    tweets = []
    users = []
    user_ids = []
    trends_json = []

    # Calls that return tweets
    if args.search or args.geocode:
        tweets = t.search(
            args.search,
            since_id=args.since_id,
            max_id=args.max_id,
            lang=args.lang,
            result_type=args.result_type,
            geocode=args.geocode
        )
    elif args.track or args.follow or args.locations:
        tweets = t.filter(track=args.track, follow=args.follow,
                          locations=args.locations)
    elif args.hydrate:
        input_iterator = fileinput.FileInput(
            args.hydrate,
            mode='rU',
            openhook=fileinput.hook_compressed,
        )
        tweets = t.hydrate(input_iterator)
    elif args.sample:
        tweets = t.sample()
    elif args.timeline:
        tweets = t.timeline(screen_name=args.timeline)
    elif args.timeline_user_id:
        tweets = t.timeline(user_id=args.timeline_user_id)

    # Calls that return user profile objects
    elif args.lookup_user_ids:
        users = t.user_lookup(user_ids=args.lookup_user_ids)
    elif args.lookup_screen_names:
        users = t.user_lookup(screen_names=args.lookup_screen_names)

    # Calls that return lists of user ids
    elif args.follower_ids:
        # Note: only one at a time, so assume exactly one
        user_ids = t.follower_ids(screen_name=args.follower_ids[0])
    elif args.friend_ids:
        # Note: same here, only one at a time, so assume exactly one
        user_ids = t.friend_ids(screen_name=args.friend_ids[0])

    # Calls that return JSON relating to trends
    elif args.trends_available:
        trends_json = t.trends_available()
    elif args.trends_place:
        trends_json = t.trends_place(args.trends_place)
    elif args.trends_place_exclude:
        trends_json = t.trends_place(args.trends_place_exclude,
                                     exclude='hashtags')
    elif args.trends_closest:
        # Note: using "lon" as var name instead of restricted "long"
        try:
            lat, lon = [float(s.strip())
                        for s in args.trends_closest[0].split(',')]
            if lat > 180 or lat < -180 or lon > 180 or lon < -180:
                raise ValueError("Unacceptable values")
        except Exception:
            parser.error('LAT and LONG must be floats within [-180.0, 180.0]')
        trends_json = t.trends_closest(lat, lon)

    else:
        raise argparse.ArgumentTypeError(
            'must supply one of:  --search --track --follow --locations'
            ' --timeline --timeline_user_id'
            ' --lookup_screen_names --lookup_user_ids'
            ' --follower_ids --friend_ids'
            ' --trends_available --trends_closest'
            ' --trends_place --trends_place_exclude'
            ' --sample --hydrate')

    # iterate through the tweets and write them to stdout
    for tweet in tweets:
        # include warnings in output only if they asked for it
        if 'id_str' in tweet or args.warnings:
            print(json.dumps(tweet))

        # add some info to the log
        if 'id_str' in tweet:
            if 'user' in tweet:
                logging.info("archived https://twitter.com/%s/status/%s",
                             tweet['user']['screen_name'], tweet['id_str'])
        elif 'limit' in tweet:
            # use a separate name so the Twarc client `t` is not shadowed
            when = datetime.datetime.utcfromtimestamp(
                float(tweet['limit']['timestamp_ms']) / 1000)
            when = when.isoformat("T") + "Z"
            logging.warn("%s tweets undelivered at %s",
                         tweet['limit']['track'], when)
        elif 'warning' in tweet:
            logging.warn(tweet['warning']['message'])
        else:
            logging.warn(json.dumps(tweet))

    # iterate through the user objects and write them to stdout
    for user in users:
        # include warnings in output only if they asked for it
        if 'id_str' in user or args.warnings:
            print(json.dumps(user))

            # add some info to the log
            if 'screen_name' in user:
                logging.info("archived user profile for @%s / id_str=%s",
                             user['screen_name'], user['id_str'])
        else:
            logging.warn(json.dumps(user))

    # iterate through the user ids and write them to stdout
    for user_id in user_ids:
        print(str(user_id))

    # iterate through trend JSON and write each to stdout
    for trend_info in trends_json:
        print(json.dumps(trend_info))
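
The credential handling above chains three sources: CLI flags, environment variables, then a config file. The core idiom is `cli_value or os.environ.get(NAME)`; a minimal sketch with a hypothetical helper:

import os


def resolve_credential(cli_value, env_name):
    # Hypothetical helper: the CLI flag wins, otherwise fall back to
    # the environment; returns None when neither source is set.
    return cli_value or os.environ.get(env_name)


print(resolve_credential('abc123', 'CONSUMER_KEY'))  # 'abc123'
print(resolve_credential(None, 'CONSUMER_KEY'))      # $CONSUMER_KEY or None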

Example 16

Project: pyxero
Source File: runserver.py
View license
    def do_GET(self):
        """
        Handle GET request
        """
        consumer_key = os.environ.get('XERO_CONSUMER_KEY')
        consumer_secret = os.environ.get('XERO_CONSUMER_SECRET')
        private_key_path = os.environ.get('XERO_RSA_CERT_KEY_PATH')
        entrust_cert_path = os.environ.get('XERO_ENTRUST_CERT_PATH')
        entrust_private_key_path = os.environ.get('XERO_ENTRUST_PRIVATE_KEY_PATH')

        if consumer_key is None or consumer_secret is None:
            raise ValueError(
                'Please define both XERO_CONSUMER_KEY and XERO_CONSUMER_SECRET environment variables')

        if not private_key_path:
            raise ValueError(
                'Use the XERO_RSA_CERT_KEY_PATH env variable to specify the path to your RSA '
                'certificate private key file')

        if not entrust_cert_path:
            raise ValueError(
                'Use the XERO_ENTRUST_CERT_PATH env variable to specify the path to your Entrust '
                'certificate')

        if not entrust_private_key_path:
            raise ValueError(
                'Use the XERO_ENTRUST_PRIVATE_KEY_PATH env variable to specify the path to your '
                'Entrust private no-pass key')

        with open(private_key_path, 'r') as f:
            rsa_key = f.read()

        client_cert = (entrust_cert_path, entrust_private_key_path)

        print("Serving path: {}".format(self.path))
        path = urlparse(self.path)

        if path.path == '/do-auth':
            client_cert = (entrust_cert_path, entrust_private_key_path)
            credentials = PartnerCredentials(
                consumer_key, consumer_secret, rsa_key, client_cert,
                callback_uri='http://localhost:8000/oauth')

            # Save generated credentials details to persistent storage
            for key, value in credentials.state.items():
                OAUTH_PERSISTENT_SERVER_STORAGE.update({key: value})

            # Redirect to Xero at url provided by credentials generation
            self.redirect_response(credentials.url)
            return

        elif path.path == '/oauth':
            params = dict(parse_qsl(path.query))
            if 'oauth_token' not in params or 'oauth_verifier' not in params or 'org' not in params:
                self.send_error(500, message='Missing required parameters.')
                return

            stored_values = OAUTH_PERSISTENT_SERVER_STORAGE
            client_cert = (entrust_cert_path, entrust_private_key_path)
            stored_values.update({'rsa_key': rsa_key, 'client_cert': client_cert})

            credentials = PartnerCredentials(**stored_values)

            try:
                credentials.verify(params['oauth_verifier'])

                # Resave our verified credentials
                for key, value in credentials.state.items():
                    OAUTH_PERSISTENT_SERVER_STORAGE.update({key: value})

            except XeroException as e:
                self.send_error(500, message='{}: {}'.format(e.__class__, e.message))
                return

            # Once verified, api can be invoked with xero = Xero(credentials)
            self.redirect_response('/verified')
            return

        elif path.path == '/verified':

            stored_values = OAUTH_PERSISTENT_SERVER_STORAGE
            stored_values.update({'rsa_key': rsa_key, 'client_cert': client_cert})
            credentials = PartnerCredentials(**stored_values)

            # Partner credentials expire after 30 minutes. Here's how to re-activate on expiry
            if credentials.expired():
                credentials.refresh()

            try:
                xero = Xero(credentials)

            except XeroException as e:
                self.send_error(500, message='{}: {}'.format(e.__class__, e.message))
                return

            page_body = 'Your contacts:<br><br>'

            contacts = xero.contacts.all()

            if contacts:
                page_body += '<br>'.join([str(contact) for contact in contacts])
            else:
                page_body += 'No contacts'
            self.page_response(title='Xero Contacts', body=page_body)
            return

        SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
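
This handler treats several environment variables as required and fails fast with a descriptive ValueError. The same guard can be factored into a small helper; require_env below is hypothetical, not part of pyxero:

import os


def require_env(name, hint):
    # Hypothetical helper: fetch a required environment variable or
    # fail fast with an actionable error, as the handler above does.
    value = os.environ.get(name)
    if not value:
        raise ValueError('Use the %s env variable to %s' % (name, hint))
    return value


consumer_key = require_env('XERO_CONSUMER_KEY', 'set your Xero consumer key')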

Example 17

Project: python-compat-runtime
Source File: vmstub.py
View license
  def _MakeCallImpl(self):
    """Makes an asynchronous API call over the service bridge.

    For this to work the following must be set:
      self.package: the API package name;
      self.call: the name of the API call/method to invoke;
      self.request: the API request body as a serialized protocol buffer.

    The actual API call is made by requests.post via a thread pool
    (multiprocessing.dummy.Pool). The thread pool restricts the number of
    concurrent requests to MAX_CONCURRENT_API_CALLS, so this method will
    block if that limit is exceeded, until other asynchronous calls resolve.

    If the main thread holds the import lock, waiting on thread work can cause
    a deadlock:
    https://docs.python.org/2/library/threading.html#importing-in-threaded-code

    Therefore, we try to detect this error case and fall back to sync calls.
    """
    assert self._state == apiproxy_rpc.RPC.IDLE, self._state

    self.lock = threading.Lock()
    self.event = threading.Event()

    if VMStub.ShouldUseRequestSecurityTicketForThread():
      ticket = os.environ.get(TICKET_HEADER,
                              os.environ.get(DEV_TICKET_HEADER,
                                             self.stub.DefaultTicket()))
    else:
      ticket = self.stub.DefaultTicket()

    request = remote_api_pb.Request()
    request.set_service_name(self.package)
    request.set_method(self.call)
    request.set_request_id(ticket)
    request.set_request(self.request.SerializeToString())

    deadline = self.deadline or DEFAULT_TIMEOUT

    body_data = request.SerializeToString()
    headers = {
        SERVICE_DEADLINE_HEADER: deadline,
        SERVICE_ENDPOINT_HEADER: SERVICE_ENDPOINT_NAME,
        SERVICE_METHOD_HEADER: APIHOST_METHOD,
        'Content-type': RPC_CONTENT_TYPE,
    }

    dapper_header_value = os.environ.get(DAPPER_ENV_KEY)
    if dapper_header_value:
      headers[DAPPER_HEADER] = dapper_header_value

    api_host = os.environ.get('API_HOST', SERVICE_BRIDGE_HOST)
    api_port = os.environ.get('API_PORT', API_PORT)

    endpoint_url = urlparse.urlunparse(
        ('http', '%s:%s' % (api_host, api_port), PROXY_PATH,
         '', '', ''))

    self._state = apiproxy_rpc.RPC.RUNNING

    request_kwargs = dict(url=endpoint_url,
                          timeout=DEADLINE_DELTA_SECONDS + deadline,
                          headers=headers, data=body_data)

    if imp.lock_held() and not app_is_loaded:
      try:
        value = CaptureStacktrace(requests.post, **request_kwargs)
        success = True
      except Exception as e:
        value = e
        success = False
      self._result_future = SyncResult(value, success)

    else:
      self._result_future = self.stub.thread_pool.apply_async(
          CaptureStacktrace, args=[requests.post], kwds=request_kwargs)
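
The ticket lookup above nests two os.environ.get calls to express a three-level fallback: production header, then development header, then a computed default. A self-contained sketch of the same chain, with illustrative variable names in place of the real TICKET_HEADER/DEV_TICKET_HEADER constants:

import os

# Illustrative names; the real code uses the TICKET_HEADER and
# DEV_TICKET_HEADER constants plus a computed default ticket.
ticket = os.environ.get('TICKET',
                        os.environ.get('DEV_TICKET', 'default-ticket'))
print(ticket)  # first match wins: TICKET, then DEV_TICKET, then the default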

Example 18

Project: pyhackedit
Source File: __init__.py
View license
def install(level=None, **kw):
    """
    Enable colored terminal output for Python's :mod:`logging` module.

    :param level: The default logging level (an integer or a string with a
                  level name, defaults to :data:`logging.INFO`).
    :param logger: The logger to which the stream handler should be attached (a
                   :class:`~logging.Logger` object, defaults to the root logger).
    :param fmt: Set the logging format (a string like those accepted by
                :class:`~logging.Formatter`, defaults to
                :data:`DEFAULT_LOG_FORMAT`).
    :param datefmt: Set the date/time format (a string, defaults to
                    :data:`DEFAULT_DATE_FORMAT`).
    :param level_styles: A dictionary with custom level styles (defaults to
                         :data:`DEFAULT_LEVEL_STYLES`).
    :param field_styles: A dictionary with custom field styles (defaults to
                         :data:`DEFAULT_FIELD_STYLES`).
    :param stream: The stream where log messages should be written to (a
                   file-like object, defaults to :data:`sys.stderr`).
    :param isatty: :data:`True` to use a :class:`ColoredFormatter`,
                   :data:`False` to use a normal :class:`~logging.Formatter`
                   (defaults to auto-detection using
                   :func:`~humanfriendly.terminal.terminal_supports_colors()`).
    :param reconfigure: If :data:`True` (the default) multiple calls to
                        :func:`coloredlogs.install()` will each override
                        the previous configuration.
    :param use_chroot: Refer to :class:`HostNameFilter`.
    :param programname: Refer to :class:`ProgramNameFilter`.
    :param syslog: If :data:`True` then :func:`~coloredlogs.syslog.enable_system_logging()`
                   will be called without arguments (defaults to :data:`False`).

    The :func:`coloredlogs.install()` function is similar to
    :func:`logging.basicConfig()`, both functions take a lot of optional
    keyword arguments but try to do the right thing by default:

    1. If `reconfigure` is :data:`True` (it is by default) and an existing
       :class:`~logging.StreamHandler` is found that is connected to either
       :data:`~sys.stdout` or :data:`~sys.stderr` the handler will be removed.
       This means that first calling :func:`logging.basicConfig()` and then
       calling :func:`coloredlogs.install()` will replace the stream handler
       instead of adding a duplicate stream handler. If `reconfigure` is
       :data:`False` and an existing handler is found no further steps are
       taken (to avoid installing a duplicate stream handler).

    2. A :class:`~logging.StreamHandler` is created and connected to the stream
       given by the `stream` keyword argument (:data:`sys.stderr` by
       default). The stream handler's level is set to the value of the `level`
       keyword argument.

    3. A :class:`ColoredFormatter` is created if the `isatty` keyword argument
       allows it (or auto-detection allows it), otherwise a normal
       :class:`~logging.Formatter` is created. The formatter is initialized
       with the `fmt` and `datefmt` keyword arguments (or their computed
       defaults).

    4. :func:`HostNameFilter.install()` and :func:`ProgramNameFilter.install()`
       are called to enable the use of additional fields in the log format.

    5. The formatter is added to the handler and the handler is added to the
       logger. The logger's level is set to :data:`logging.NOTSET` so that each
       handler gets to decide which records it filters. This makes it possible
       to have controllable verbosity on the terminal while logging at full
       verbosity to the system log or a file.
    """
    logger = kw.get('logger') or logging.getLogger()
    reconfigure = kw.get('reconfigure', True)
    stream = kw.get('stream', sys.stderr)
    # Remove any existing stream handler that writes to stdout or stderr, even
    # if the stream handler wasn't created by coloredlogs because multiple
    # stream handlers (in the same hierarchy) writing to stdout or stderr would
    # create duplicate output.
    standard_streams = (sys.stdout, sys.stderr)
    match_streams = standard_streams if stream in standard_streams else (stream,)
    match_handler = lambda handler: match_stream_handler(handler, match_streams)
    handler, logger = replace_handler(logger, match_handler, reconfigure)
    # Make sure reconfiguration is allowed or not relevant.
    if not (handler and not reconfigure):
        # Make it easy to enable system logging.
        if kw.get('syslog', False):
            from coloredlogs import syslog
            syslog.enable_system_logging()
        # Figure out whether we can use ANSI escape sequences.
        use_colors = kw.get('isatty', None)
        if use_colors or use_colors is None:
            if NEED_COLORAMA:
                if HAVE_COLORAMA:
                    # On Windows we can only use ANSI escape
                    # sequences if Colorama is available.
                    colorama.init()
                    use_colors = True
                else:
                    # If Colorama isn't available then we specifically
                    # shouldn't emit ANSI escape sequences!
                    use_colors = False
            elif use_colors is None:
                # Auto-detect terminal support on other platforms.
                use_colors = terminal_supports_colors(stream)
        # Create a stream handler.
        handler = logging.StreamHandler(stream)
        if level is None:
            level = os.environ.get('COLOREDLOGS_LOG_LEVEL') or 'INFO'
        handler.setLevel(level_to_number(level))
        # Prepare the arguments to the formatter. The caller is
        # allowed to customize `fmt' and/or `datefmt' as desired.
        formatter_options = dict(fmt=kw.get('fmt'), datefmt=kw.get('datefmt'))
        # Come up with a default log format?
        if not formatter_options['fmt']:
            # Use the log format defined by the environment variable
            # $COLOREDLOGS_LOG_FORMAT or fall back to the default.
            formatter_options['fmt'] = os.environ.get('COLOREDLOGS_LOG_FORMAT') or DEFAULT_LOG_FORMAT
        # If the caller didn't specify a date/time format we'll use the format
        # defined by the environment variable $COLOREDLOGS_DATE_FORMAT (or fall
        # back to the default).
        if not formatter_options['datefmt']:
            formatter_options['datefmt'] = os.environ.get('COLOREDLOGS_DATE_FORMAT') or DEFAULT_DATE_FORMAT
        # Do we need to make %(hostname) available to the formatter?
        HostNameFilter.install(
            handler=handler,
            fmt=formatter_options['fmt'],
            use_chroot=kw.get('use_chroot', True),
        )
        # Do we need to make %(programname) available to the formatter?
        ProgramNameFilter.install(
            handler=handler,
            fmt=formatter_options['fmt'],
            programname=kw.get('programname'),
        )
        # Inject additional formatter arguments specific to ColoredFormatter?
        if use_colors:
            for name, environment_name in (('field_styles', 'COLOREDLOGS_FIELD_STYLES'),
                                           ('level_styles', 'COLOREDLOGS_LEVEL_STYLES')):
                value = kw.get(name)
                if value is None:
                    # If no styles have been specified we'll fall back
                    # to the styles defined by the environment variable.
                    environment_value = os.environ.get(environment_name)
                    if environment_value is not None:
                        value = parse_encoded_styles(environment_value)
                if value is not None:
                    formatter_options[name] = value
        # Create a (possibly colored) formatter.
        formatter_type = ColoredFormatter if use_colors else logging.Formatter
        handler.setFormatter(formatter_type(**formatter_options))
        # Install the stream handler.
        logger.setLevel(logging.NOTSET)
        logger.addHandler(handler)

Example 19

Project: easybuild-easyblocks
Source File: openfoam.py
View license
    def configure_step(self):
        """Configure OpenFOAM build by setting appropriate environment variables."""
        # compiler & compiler flags
        comp_fam = self.toolchain.comp_family()

        extra_flags = ''
        if comp_fam == toolchain.GCC:  # @UndefinedVariable
            if LooseVersion(get_software_version('GCC')) >= LooseVersion('4.8'):
                # make sure non-gold version of ld is used, since OpenFOAM requires it
                # see http://www.openfoam.org/mantisbt/view.php?id=685
                extra_flags = '-fuse-ld=bfd'

            # older versions of OpenFOAM-Extend require -fpermissive
            if 'extend' in self.name.lower() and LooseVersion(self.version) < LooseVersion('2.0'):
                extra_flags += ' -fpermissive'

        elif comp_fam == toolchain.INTELCOMP:  # @UndefinedVariable
            # make sure -no-prec-div is used with Intel compilers
            extra_flags = '-no-prec-div'

        for env_var in ['CFLAGS', 'CXXFLAGS']:
            env.setvar(env_var, "%s %s" % (os.environ.get(env_var, ''), extra_flags))

        # patch out hardcoding of WM_* environment variables
        # for example, replace 'export WM_COMPILER=Gcc' with ': ${WM_COMPILER:=Gcc}; export WM_COMPILER'
        for script in [os.path.join(self.builddir, self.openfoamdir, x) for x in ['etc/bashrc', 'etc/cshrc']]:
            self.log.debug("Patching out hardcoded $WM_* env vars in %s", script)
            # disable any third party stuff, we use EB controlled builds
            regex_subs = [(r"^(setenv|export) WM_THIRD_PARTY_USE_.*[ =].*$", r"# \g<0>")]
            WM_env_var = ['WM_COMPILER', 'WM_MPLIB', 'WM_THIRD_PARTY_DIR']
            # OpenFOAM >= 3.0.0 can use 64 bit integers
            if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
                WM_env_var.append('WM_LABEL_SIZE')
            for env_var in WM_env_var:
                regex_subs.append((r"^(setenv|export) (?P<var>%s)[ =](?P<val>.*)$" % env_var,
                                   r": ${\g<var>:=\g<val>}; export \g<var>"))

            apply_regex_substitutions(script, regex_subs)

        # inject compiler variables into wmake/rules files
        ldirs = glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'linux*'))
        langs = ['c', 'c++']
        suffixes = ['', 'Opt']
        wmake_rules_files = [os.path.join(ldir, lang + suff) for ldir in ldirs for lang in langs for suff in suffixes]

        mpicc = os.environ['MPICC']
        mpicxx = os.environ['MPICXX']
        cc_seq = os.environ.get('CC_SEQ', os.environ['CC'])
        cxx_seq = os.environ.get('CXX_SEQ', os.environ['CXX'])

        if self.toolchain.mpi_family() == toolchain.OPENMPI:
            # no -cc/-cxx flags supported in OpenMPI compiler wrappers
            c_comp_cmd = 'OMPI_CC="%s" %s' % (cc_seq, mpicc)
            cxx_comp_cmd = 'OMPI_CXX="%s" %s' % (cxx_seq, mpicxx)
        else:
            # -cc/-cxx should work for all MPICH-based MPIs (including Intel MPI)
            c_comp_cmd = '%s -cc="%s"' % (mpicc, cc_seq)
            cxx_comp_cmd = '%s -cxx="%s"' % (mpicxx, cxx_seq)

        comp_vars = {
            # specify MPI compiler wrappers and compiler commands + sequential compiler that should be used by them
            'cc': c_comp_cmd,
            'CC': cxx_comp_cmd,
            'cOPT': os.environ['CFLAGS'],
            'c++OPT': os.environ['CXXFLAGS'],
        }
        for wmake_rules_file in wmake_rules_files:
            fullpath = os.path.join(self.builddir, self.openfoamdir, wmake_rules_file)
            self.log.debug("Patching compiler variables in %s", fullpath)
            regex_subs = []
            for comp_var, newval in comp_vars.items():
                regex_subs.append((r"^(%s\s*=\s*).*$" % re.escape(comp_var), r"\1%s" % newval))
            apply_regex_substitutions(fullpath, regex_subs)

        # enable verbose build for debug purposes
        # starting with openfoam-extend 3.2, PS1 also needs to be set
        env.setvar("FOAM_VERBOSE", '1')

        # installation directory
        env.setvar("FOAM_INST_DIR", self.installdir)

        # third party directory
        self.thrdpartydir = "ThirdParty-%s" % self.version
        # only if third party stuff is actually installed
        if os.path.exists(self.thrdpartydir):
            os.symlink(os.path.join("..", self.thrdpartydir), self.thrdpartydir)
            env.setvar("WM_THIRD_PARTY_DIR", os.path.join(self.installdir, self.thrdpartydir))

        env.setvar("WM_COMPILER", self.wm_compiler)
        env.setvar("WM_MPLIB", self.wm_mplib)

        # parallel build spec
        env.setvar("WM_NCOMPPROCS", str(self.cfg['parallel']))

        # OpenFOAM >= 3.0.0 can use 64 bit integers
        if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
            if self.toolchain.options['i8']:
                env.setvar("WM_LABEL_SIZE", '64')
            else:
                env.setvar("WM_LABEL_SIZE", '32')

        # make sure lib/include dirs for dependencies are found
        openfoam_extend_v3 = 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0')
        if LooseVersion(self.version) < LooseVersion("2") or openfoam_extend_v3:
            self.log.debug("List of deps: %s" % self.cfg.dependencies())
            for dep in self.cfg.dependencies():
                dep_name = dep['name'].upper()
                dep_root = get_software_root(dep['name'])
                env.setvar("%s_SYSTEM" % dep_name, "1")
                dep_vars = {
                    "%s_DIR": "%s",
                    "%s_BIN_DIR": "%s/bin",
                    "%s_LIB_DIR": "%s/lib",
                    "%s_INCLUDE_DIR": "%s/include",
                }
                for var, val in dep_vars.iteritems():
                    env.setvar(var % dep_name, val % dep_root)
        else:
            for depend in ['SCOTCH', 'METIS', 'CGAL', 'Paraview']:
                dependloc = get_software_root(depend)
                if dependloc:
                    if depend == 'CGAL' and get_software_root('Boost'):
                        env.setvar("CGAL_ROOT", dependloc)
                        env.setvar("BOOST_ROOT", get_software_root('Boost'))
                    else:
                        env.setvar("%s_ROOT" % depend.upper(), dependloc)

Example 20

Project: pymux
Source File: run_pymux.py
View license
def run():
    a = docopt.docopt(__doc__)
    socket_name = a['<socket>'] or os.environ.get('PYMUX')
    socket_name_from_env = not a['<socket>'] and os.environ.get('PYMUX')
    filename = a['<file>']
    command = a['<command>']
    true_color = a['--truecolor']
    ansi_colors_only = a['--ansicolor'] or \
        bool(os.environ.get('PROMPT_TOOLKIT_ANSI_COLORS_ONLY', False))

    # Parse pane_id from socket_name. It looks like "socket_name,pane_id".
    if socket_name and ',' in socket_name:
        socket_name, pane_id = socket_name.rsplit(',', 1)
    else:
        pane_id = None

    # Expand socket name. (Make it possible to just accept numbers.)
    if socket_name and socket_name.isdigit():
        socket_name = '%s/pymux.sock.%s.%s' % (
            tempfile.gettempdir(), getpass.getuser(), socket_name)

    # Configuration filename.
    default_config = os.path.abspath(os.path.expanduser('~/.pymux.conf'))
    if not filename and os.path.exists(default_config):
        filename = default_config

    if filename:
        filename = os.path.abspath(os.path.expanduser(filename))

    # Create 'Pymux'.
    mux = Pymux(source_file=filename, startup_command=command)

    # Setup logging.
    if a['<logfile>']:
        logging.basicConfig(filename=a['<logfile>'], level=logging.DEBUG)

    if a['standalone']:
        mux.run_standalone(true_color=true_color, ansi_colors_only=ansi_colors_only)

    elif a['list-sessions'] or a['<command>'] in ('ls', 'list-sessions'):
        for c in list_clients():
            print(c.socket_name)

    elif a['start-server']:
        if socket_name_from_env:
            _socket_from_env_warning()
            sys.exit(1)

        # Log to stdout.
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

        # Run server.
        socket_name = mux.listen_on_socket()
        try:
            mux.run_server()
        except KeyboardInterrupt:
            sys.exit(1)

    elif a['attach']:
        if socket_name_from_env:
            _socket_from_env_warning()
            sys.exit(1)

        detach_other_clients = a['-d']

        if socket_name:
            Client(socket_name).attach(
                detach_other_clients=detach_other_clients,
                true_color=true_color,
                ansi_colors_only=ansi_colors_only)
        else:
            # Connect to the first server.
            for c in list_clients():
                c.attach(detach_other_clients=detach_other_clients,
                         true_color=true_color,
                         ansi_colors_only=ansi_colors_only)
                break
            else:  # Nobreak.
                print('No pymux instance found.')
                sys.exit(1)

    elif a['<command>'] and socket_name:
        Client(socket_name).run_command(a['<command>'], pane_id)

    elif not socket_name:
        # Run client/server combination.
        socket_name = mux.listen_on_socket(socket_name)
        pid = daemonize()

        if pid > 0:
            # Create window. It is important that this happens in the daemon,
            # because the parent of the process running inside should be this
            # daemon. (Otherwise the `waitpid` call won't work.)
            mux.run_server()
        else:
            Client(socket_name).attach(
                true_color=true_color, ansi_colors_only=ansi_colors_only)

    else:
        if socket_name_from_env:
            _socket_from_env_warning()
            sys.exit(1)
        else:
            print('Invalid command.')
            sys.exit(1)
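
The socket resolution at the top of run() is worth isolating: the CLI argument takes precedence over $PYMUX, and a second flag records whether the value came from the environment so that commands like start-server can refuse to run inside an existing session. A minimal sketch of that pattern:

import os

def resolve_socket(cli_socket):
    # Prefer the explicit CLI argument; remember when the value was
    # inherited from the environment so callers can warn about nesting.
    socket_name = cli_socket or os.environ.get('PYMUX')
    from_env = not cli_socket and bool(os.environ.get('PYMUX'))
    return socket_name, from_env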

Example 22

Project: nbviewer
Source File: app.py
View license
def make_app():
    # NBConvert config
    config = Config()
    config.NbconvertApp.fileext = 'html'
    config.CSSHTMLHeaderTransformer.enabled = False
    # don't strip the files prefix - we use it for redirects
    # config.Exporter.filters = {'strip_files_prefix': lambda s: s}

    # DEBUG env implies both autoreload and debug-level logging
    if os.environ.get("DEBUG"):
        options.debug = True
        logging.getLogger().setLevel(logging.DEBUG)

    # setup memcache
    mc_pool = ThreadPoolExecutor(options.mc_threads)

    # setup formats
    formats = configure_formats(options, config, log.app_log)

    if options.processes:
        pool = ProcessPoolExecutor(options.processes)
    else:
        pool = ThreadPoolExecutor(options.threads)

    memcache_urls = os.environ.get('MEMCACHIER_SERVERS',
        os.environ.get('MEMCACHE_SERVERS')
    )

    # Handle linked Docker containers
    if os.environ.get('NBCACHE_PORT'):
        tcp_memcache = os.environ.get('NBCACHE_PORT')
        memcache_urls = tcp_memcache.split('tcp://')[1]

    if os.environ.get('NBINDEX_PORT'):
        log.app_log.info("Indexing notebooks")
        tcp_index = os.environ.get('NBINDEX_PORT')
        index_url = tcp_index.split('tcp://')[1]
        index_host, index_port = index_url.split(":")
        indexer = ElasticSearch(index_host, index_port)
    else:
        log.app_log.info("Not indexing notebooks")
        indexer = NoSearch()

    if options.no_cache:
        log.app_log.info("Not using cache")
        cache = MockCache()
    elif pylibmc and memcache_urls:
        kwargs = dict(pool=mc_pool)
        username = os.environ.get('MEMCACHIER_USERNAME', '')
        password = os.environ.get('MEMCACHIER_PASSWORD', '')
        if username and password:
            kwargs['binary'] = True
            kwargs['username'] = username
            kwargs['password'] = password
            log.app_log.info("Using SASL memcache")
        else:
            log.app_log.info("Using plain memecache")

        cache = AsyncMultipartMemcache(memcache_urls.split(','), **kwargs)
    else:
        log.app_log.info("Using in-memory cache")
        cache = DummyAsyncCache()

    # setup tornado handlers and settings

    template_paths = pjoin(here, 'templates')

    if options.template_path is not None:
        log.app_log.info("Using custom template path {}".format(
            options.template_path)
        )
        template_paths = [options.template_path, template_paths]

    static_path = pjoin(here, 'static')
    env = Environment(
        loader=FileSystemLoader(template_paths),
        autoescape=True
    )
    env.filters['markdown'] = markdown.markdown
    try:
        git_data = git_info(here)
    except Exception as e:
        app_log.error("Failed to get git info: %s", e)
        git_data = {}
    else:
        git_data['msg'] = escape(git_data['msg'])


    if options.no_cache:
        # force jinja to recompile template every time
        env.globals.update(cache_size=0)
    env.globals.update(nrhead=nrhead, nrfoot=nrfoot, git_data=git_data,
        jupyter_info=jupyter_info(), len=len,
    )
    AsyncHTTPClient.configure(HTTPClientClass)
    client = AsyncHTTPClient()
    client.cache = cache

    # load frontpage sections
    with io.open(options.frontpage, 'r') as f:
        frontpage_sections = json.load(f)

    # cache frontpage links for the maximum allowed time
    max_cache_uris = {''}
    for section in frontpage_sections:
        for link in section['links']:
            max_cache_uris.add('/' + link['target'])

    fetch_kwargs = dict(connect_timeout=10)
    if options.proxy_host:
        fetch_kwargs.update(dict(proxy_host=options.proxy_host,
                                 proxy_port=options.proxy_port))

        log.app_log.info("Using web proxy {proxy_host}:{proxy_port}."
                         "".format(**fetch_kwargs))

    if options.no_check_certificate:
        fetch_kwargs.update(dict(validate_cert=False))

        log.app_log.info("Not validating SSL certificates")

    # prefer the jhub defined service prefix over the CLI
    base_url = os.getenv('JUPYTERHUB_SERVICE_PREFIX', options.base_url)

    settings = dict(
        log_function=log_request,
        jinja2_env=env,
        static_path=static_path,
        static_url_prefix=url_path_join(base_url, '/static/'),
        client=client,
        formats=formats,
        default_format=options.default_format,
        providers=options.providers,
        provider_rewrites=options.provider_rewrites,
        config=config,
        index=indexer,
        cache=cache,
        cache_expiry_min=options.cache_expiry_min,
        cache_expiry_max=options.cache_expiry_max,
        max_cache_uris=max_cache_uris,
        frontpage_sections=frontpage_sections,
        pool=pool,
        gzip=True,
        render_timeout=options.render_timeout,
        localfile_path=os.path.abspath(options.localfiles),
        fetch_kwargs=fetch_kwargs,
        mathjax_url=options.mathjax_url,
        statsd_host=options.statsd_host,
        statsd_port=options.statsd_port,
        statsd_prefix=options.statsd_prefix,
        base_url=base_url,
        hub_api_token=os.getenv('JUPYTERHUB_API_TOKEN'),
        hub_api_url=os.getenv('JUPYTERHUB_API_URL'),
        hub_base_url=os.getenv('JUPYTERHUB_BASE_URL'),
    )

    # handle handlers
    handlers = init_handlers(formats, options.providers, base_url)

    if options.localfiles:
        log.app_log.warning("Serving local notebooks in %s, this can be a security risk", options.localfiles)
        # use absolute or relative paths:
        local_handlers = [(url_path_join(base_url, r'/localfile/?(.*)'), LocalFileHandler)]
        handlers = (
            local_handlers +
            format_handlers(formats, local_handlers) +
            handlers
        )

    # create the app
    return web.Application(handlers, debug=options.debug, **settings)
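
The memcache lookup shows the nested-fallback form of os.environ.get: the inner get() supplies the default for the outer one, so the first variable that is set wins and the result is None only when both are missing. Isolated:

import os

# MEMCACHIER_SERVERS takes precedence; MEMCACHE_SERVERS is only consulted
# when the first variable is unset.
memcache_urls = os.environ.get('MEMCACHIER_SERVERS',
                               os.environ.get('MEMCACHE_SERVERS'))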

Example 23

View license
def main():

    vm = None

    module = AnsibleModule(
        argument_spec=dict(
            hostname=dict(
                type='str',
                default=os.environ.get('VMWARE_HOST')
            ),
            username=dict(
                type='str',
                default=os.environ.get('VMWARE_USER')
            ),
            password=dict(
                type='str', no_log=True,
                default=os.environ.get('VMWARE_PASSWORD')
            ),
            state=dict(
                required=False,
                choices=[
                    'poweredon',
                    'poweredoff',
                    'present',
                    'absent',
                    'restarted',
                    'reconfigured'
                ],
                default='present'),
            validate_certs=dict(required=False, type='bool', default=True),
            template_src=dict(required=False, type='str', aliases=['template']),
            name=dict(required=True, type='str'),
            name_match=dict(required=False, type='str', default='first'),
            uuid=dict(required=False, type='str'),
            folder=dict(required=False, type='str', default='/vm', aliases=['folder']),
            disk=dict(required=False, type='list'),
            nic=dict(required=False, type='list'),
            hardware=dict(required=False, type='dict', default={}),
            force=dict(required=False, type='bool', default=False),
            datacenter=dict(required=False, type='str', default=None),
            esxi_hostname=dict(required=False, type='str', default=None),
            cluster=dict(required=False, type='str', default=None),
            wait_for_ip_address=dict(required=False, type='bool', default=True)
        ),
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[
            ['state', 'force'],
            ['template'],
        ],
    )

    pyv = PyVmomiHelper(module)

    # Check if the VM exists before continuing
    vm = pyv.getvm(name=module.params['name'], 
                   folder=module.params['folder'], 
                   uuid=module.params['uuid'], 
                   name_match=module.params['name_match'])

    # VM already exists
    if vm:

        if module.params['state'] == 'absent':
            # destroy it
            if module.params['force']:
                # has to be poweredoff first
                result = pyv.set_powerstate(vm, 'poweredoff', module.params['force'])
            result = pyv.remove_vm(vm)
        elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted']:
            # set powerstate
            result = pyv.set_powerstate(vm, module.params['state'], module.params['force'])
        else:
            # Run for facts only
            try:
                module.exit_json(instance=pyv.gather_facts(vm))
            except Exception:
                e = get_exception()
                module.fail_json(
                    msg="Fact gather failed with exception %s" % e)

    # VM doesn't exist
    else:
        create_states = ['poweredon', 'poweredoff', 'present', 'restarted']
        if module.params['state'] in create_states:
            poweron = (module.params['state'] != 'poweredoff')
            # Create it ...
            result = pyv.deploy_template(
                        poweron=poweron, 
                        wait_for_ip=module.params['wait_for_ip_address']
                     )
            result['changed'] = True
        elif module.params['state'] == 'absent':
            result = {'changed': False, 'failed': False}
        else:
            result = {'changed': False, 'failed': False}

    # FIXME
    if 'failed' not in result:
        result['failed'] = False

    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)
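
Here os.environ.get supplies argument-spec defaults, so $VMWARE_HOST and friends act as fallbacks for options the playbook leaves out. The get() calls run once, when the spec is built, and an unset variable simply yields None rather than raising KeyError. A sketch of the idea outside Ansible:

import os

def connection_defaults():
    # Missing variables become None defaults instead of KeyError, which
    # lets later validation decide whether they are actually required.
    return {
        'hostname': os.environ.get('VMWARE_HOST'),
        'username': os.environ.get('VMWARE_USER'),
        'password': os.environ.get('VMWARE_PASSWORD'),
    }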

Example 24

Project: ansible-modules-extras
Source File: dnsimple.py
View license
def main():
    module = AnsibleModule(
        argument_spec = dict(
            account_email     = dict(required=False),
            account_api_token = dict(required=False, no_log=True),
            domain            = dict(required=False),
            record            = dict(required=False),
            record_ids        = dict(required=False, type='list'),
            type              = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']),
            ttl               = dict(required=False, default=3600, type='int'),
            value             = dict(required=False),
            priority          = dict(required=False, type='int'),
            state             = dict(required=False, choices=['present', 'absent']),
            solo              = dict(required=False, type='bool'),
        ),
        required_together = [
            ['record', 'value']
        ],
        supports_check_mode = True,
    )

    if not HAS_DNSIMPLE:
        module.fail_json(msg="dnsimple required for this module")

    account_email     = module.params.get('account_email')
    account_api_token = module.params.get('account_api_token')
    domain            = module.params.get('domain')
    record            = module.params.get('record')
    record_ids        = module.params.get('record_ids')
    record_type       = module.params.get('type')
    ttl               = module.params.get('ttl')
    value             = module.params.get('value')
    priority          = module.params.get('priority')
    state             = module.params.get('state')
    is_solo           = module.params.get('solo')

    if account_email and account_api_token:
        client = DNSimple(email=account_email, api_token=account_api_token)
    elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
        client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
    else:
        client = DNSimple()

    try:
        # Let's figure out what operation we want to do

        # No domain, return a list
        if not domain:
            domains = client.domains()
            module.exit_json(changed=False, result=[d['domain'] for d in domains])

        # Domain & No record
        if domain and record is None and not record_ids:
            domains = [d['domain'] for d in client.domains()]
            if domain.isdigit():
                dr = next((d for d in domains if d['id'] == int(domain)), None)
            else:
                dr = next((d for d in domains if d['name'] == domain), None)
            if state == 'present':
                if dr:
                    module.exit_json(changed=False, result=dr)
                else:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
            elif state == 'absent':
                if dr:
                    if not module.check_mode:
                        client.delete(domain)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

        # need the not none check since record could be an empty string
        if domain and record is not None:
            records = [r['record'] for r in client.records(str(domain))]

            if not record_type:
                module.fail_json(msg="Missing the record type")

            if not value:
                module.fail_json(msg="Missing the record value")

            rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None)

            if state == 'present':
                changed = False
                if is_solo:
                    # delete any records that have the same name and record type
                    same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type]
                    if rr:
                        same_type = [rid for rid in same_type if rid != rr['id']]
                    if same_type:
                        if not module.check_mode:
                            for rid in same_type:
                                client.delete_record(str(domain), rid)
                        changed = True
                if rr:
                    # check if we need to update
                    if rr['ttl'] != ttl or rr['prio'] != priority:
                        data = {}
                        if ttl:
                            data['ttl'] = ttl
                        if priority:
                            data['prio'] = priority
                        if module.check_mode:
                            module.exit_json(changed=True)
                        else:
                            module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
                    else:
                        module.exit_json(changed=changed, result=rr)
                else:
                    # create it
                    data = {
                        'name':        record,
                        'record_type': record_type,
                        'content':     value,
                    }
                    if ttl:
                        data['ttl'] = ttl
                    if priority:
                        data['prio'] = priority
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
            elif state == 'absent':
                if rr:
                    if not module.check_mode:
                        client.delete_record(str(domain), rr['id'])
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

        # Make sure these record_ids either all exist or none
        if domain and record_ids:
            current_records = [str(r['record']['id']) for r in client.records(str(domain))]
            wanted_records  = [str(r) for r in record_ids]
            if state == 'present':
                difference = list(set(wanted_records) - set(current_records))
                if difference:
                    module.fail_json(msg="Missing the following records: %s" % difference)
                else:
                    module.exit_json(changed=False)
            elif state == 'absent':
                difference = list(set(wanted_records) & set(current_records))
                if difference:
                    if not module.check_mode:
                        for rid in difference:
                            client.delete_record(str(domain), rid)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

    except DNSimpleException:
        e = get_exception()
        module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)

    module.fail_json(msg="Unknown what you wanted me to do")

Example 26

Project: faxrobot
Source File: jobs.py
View license
def send_fax(id):

    from boto.s3.connection import S3Connection
    from boto.s3.key import Key
    from datetime import date
    from subprocess import check_output, CalledProcessError, STDOUT
    import stripe
    import traceback
    import json
    from library.mailer import email_recharge_payment, email_success
    from rq import Worker

    device = Worker.MODEM_DEVICE
    caller_id = Worker.CALLER_ID

    stripe.api_key = os.environ.get('STRIPE_SECRET_KEY')

    job = session.query(Job).get(id)

    if job.status not in ('ready', 'queued'):
        return fail('JOBS_CANNOT_SEND_NOW', job, db)

    if job.data_deleted:
        return fail('JOBS_FAIL_DATA_DELETED', job, db)

    cost = job.cost if not job.cover else job.cost + job.cover_cost

    ########################################################################
    #   MAKE SURE THE CUSTOMER ACTUALLY HAS MONEY PHASE
    ########################################################################

    if os.environ.get('REQUIRE_PAYMENTS') == 'on':
        if job.account.credit - cost < 0 and not job.account.allow_overflow:
            if job.account.stripe_card and job.account.auto_recharge:
                try:
                    charge = stripe.Charge.create(
                        amount=1000,
                        currency="usd",
                        customer=job.account.stripe_token,
                        description="Auto-recharging account %s"% job.account.id
                    )
                    data = {
                        'account_id':       job.account.id,
                        'amount':           10,
                        'source':           'stripe',
                        'source_id':        charge["id"],
                        'job_id':           job.id,
                        'job_destination':  job.destination,
                        'ip_address':       job.ip_address,
                        'initial_balance':  job.account.credit,
                        'trans_type':       'auto_recharge'
                    }
                    trans = Transaction(**data)
                    session.add(trans)
                    session.commit()

                    job.account.add_credit(10, session)
                    email_recharge_payment(job.account, 10, trans.id,
                        charge.source.last4)

                except:
                    payment = {'_DEBUG': traceback.format_exc()}
                    data = {
                        'amount':       10,
                        'account_id':   job.account.id,
                        'source':       'stripe',
                        'debug':        json.dumps(payment),
                        'ip_address':   job.ip_address,
                        'payment_type': 'auto_recharge'
                    }
                    failed_payment = FailedPayment(**data)
                    session.add(failed_payment)
                    session.commit()
                    # JL TODO ~ Notify customer that the card was declined
                    return fail('JOBS_CARD_DECLINED', job, db)    
            else:
                return fail('JOBS_INSUFFICIENT_CREDIT', job, db)

    job.mod_date = datetime.now()
    job.start_date = datetime.now()
    job.attempts = job.attempts + 1
    job.device = device
    job.status = 'started'
    session.commit()

    files_to_send = []

    ########################################################################
    #   COVER SHEET GENERATION PHASE
    ########################################################################

    path = './tmp/' + job.access_key
    o('Touching temporary directory: %s' % path) ###########################

    try:
        if not os.path.exists(path):
            os.makedirs(path)
    except:
        return fail('JOBS_CREATE_DIR_FAIL', job, db)

    if job.cover:
        o('Generating cover sheet') ########################################

        try:
            o('Generating cover.png') ######################################

            cmd = ["convert", "-density", "400", "-flatten",
                   "./media/cover_sheets/default.pdf", "-gravity",
                   "None"]
            v = 300

            if job.cover_name:
                cmd.extend(["-annotate", "+468+%s" % v, job.cover_name])
                cmd.extend(["-annotate", "+2100+1215", job.cover_name])
                v = v + 80

            if job.cover_address:
                cmd.extend(["-annotate", "+468+%s" % v, job.cover_address])
                v = v + 80

            if job.cover_city or job.cover_state or job.cover_zip:
                cmd.extend(["-annotate", "+468+%s" % v, 
                    "%s, %s %s" % (job.cover_city, job.cover_state,
                    job.cover_zip)])
                v = v + 80

            if job.cover_country:
                cmd.extend(["-annotate", "+468+%s" % v, job.cover_country])
                v = v + 80

            if job.cover_phone:
                cmd.extend(["-annotate", "+468+%s" % v, job.cover_phone])
                v = v + 80

            if job.cover_email:
                cmd.extend(["-annotate", "+468+%s" % v, job.cover_email])
                v = v + 80

            if job.cover_to_name:
                cmd.extend(["-annotate", "+800+1215", job.cover_to_name])

            if job.cover_subject:
                cmd.extend(["-annotate", "+800+1340", job.cover_subject])

            cmd.extend(["-annotate", "+2100+1340", "%s" % job.num_pages])
            cmd.extend(["-annotate", "+800+1465", "%s" % date.today()])

            if job.cover_cc:
                cmd.extend(["-annotate", "+2100+1465", job.cover_cc])

            if "urgent" in job.cover_status:
                cmd.extend(["-annotate", "+473+1740", "X"])

            if "review" in job.cover_status:
                cmd.extend(["-annotate", "+825+1740", "X"])

            if "comment" in job.cover_status:
                cmd.extend(["-annotate", "+1285+1740", "X"])

            if "reply" in job.cover_status:
                cmd.extend(["-annotate", "+1910+1740", "X"])

            if "shred" in job.cover_status:
                cmd.extend(["-annotate", "+2420+1740", "X"])

            cmd.extend([
                "-pointsize",
                "11",
                "./tmp/" + job.access_key + "/cover.png"
            ])
            output = check_output(cmd)
        except CalledProcessError as e:
            return fail('JOBS_COVER_MAIN_FAIL', job, db, str(e))

        if job.cover_company:
            try:
                o('Generating company.png') ################################

                cmd = ["convert", "-density", "400", "-gravity", "Center",
                       "-background", "black", "-fill", "white",
                       "-pointsize", "20", "-size", "1400x",
                       "caption:%s" % job.cover_company, "-bordercolor",
                       "black", "-border", "30",
                       "./tmp/" + job.access_key + "/company.png"]
                output = check_output(cmd)
            except CalledProcessError as e:
                return fail('JOBS_COVER_COMPANY_FAIL', job, db, str(e))

            try:
                o('Overlaying company.png on cover.png') ###################

                cmd = ["composite", "-density", "400", "-gravity",
                       "NorthEast", "-geometry", "+300+200",
                       "./tmp/" + job.access_key + "/company.png",
                       "./tmp/" + job.access_key + "/cover.png",
                       "./tmp/" + job.access_key + "/cover.png"]
                output = check_output(cmd)
            except CalledProcessError as e:
                return fail('JOBS_COVER_OVERLAY_FAIL', job, db, str(e))

        if job.cover_comments:
            try:
                o('Generating comments.png') ###############################

                cmd = ["convert", "-density", "400", "-gravity",
                       "NorthWest", "-background", "white", "-fill",
                       "black", "-pointsize", "11", "-size", "2437x2000",
                       "-font", "Liberation-Mono-Regular",
                       "caption:%s" % job.cover_comments,
                       "./tmp/" + job.access_key + "/comments.png"]
                output = check_output(cmd)
            except CalledProcessError as e:
                return fail('JOBS_COVER_COMMENTS_FAIL', job, db, str(e))

            try:
                o('Overlaying comments.png on cover.png') ##################

                cmd = ["composite", "-density", "400", "-gravity", "None",
                       "-geometry", "+468+2000",
                       "./tmp/" + job.access_key + "/comments.png",
                       "./tmp/" + job.access_key + "/cover.png",
                       "./tmp/" + job.access_key + "/cover.png"]
                output = check_output(cmd)
            except CalledProcessError as e:
                return fail('JOBS_COVER_OVERLAY_FAIL', job, db, str(e))

        try:
            o('Converting cover.png to G3 .tiff') ##########################

            cmd = ["convert", "-scale", "50%",
                   "./tmp/" + job.access_key + "/cover.png",
                   "fax:./tmp/" + job.access_key + "/cover.tiff"]
            output = check_output(cmd)
        except CalledProcessError as e:
            return fail('JOBS_COVER_TIFF_FAIL', job, db, str(e))

        files_to_send.append(u'%s/cover.tiff' % path)

    ########################################################################
    #   LOAD FILES PHASE
    ########################################################################

    filename = job.filename if job.filename else "fax.txt"

    if os.environ.get('AWS_STORAGE') == "on":
        o('Connecting to S3') ##################################################
        try:
            conn = S3Connection(os.environ.get('AWS_ACCESS_KEY'),
                                os.environ.get('AWS_SECRET_KEY'))
            bucket = conn.get_bucket(os.environ.get('AWS_S3_BUCKET'))
        except:
            return fail('JOBS_CONNECT_S3_FAIL', job, db)

        try:
            for i in range(0, job.num_pages):

                num = ("0%s" % i) if i < 10 else "%s" % i

                o('Download: %s/%s.%s.tiff' % (job.access_key, filename, num))

                k = Key(bucket)
                k.key = 'fax/%s/%s.%s.tiff' %(job.access_key, filename, num)
                k.get_contents_to_filename('%s/%s.%s.tiff' % (path,
                        filename, num))

                files_to_send.append('%s/%s.%s.tiff' %(path, filename, num))
        except:
            return fail('JOBS_DOWNLOAD_S3_FAIL', job, db)
    else:
        for i in range(0, job.num_pages):
            num = ("0%s" % i) if i < 10 else "%s" % i
            files_to_send.append('%s/%s.%s.tiff' %(path, filename, num))

    ########################################################################
    #   SEND FAX PHASE
    ########################################################################
    try:
        o('Modem dialing: 1%s' % job.destination)

        cmd = ["efax", "-d", device, "-o1flll ", "-vchewmainrft ", "-l",
               caller_id, "-t", "1%s" % job.destination]
        cmd.extend(files_to_send)
        output = check_output(cmd, stderr=STDOUT)
        o('%s' % output)

    except CalledProcessError as e:

        output = str(e.output)

        if "No Answer" in output:
            if os.environ.get('REQUIRE_PAYMENTS') == 'on':
                o('No answer. Charge the customer anyway for wasting our time.')
                o('Debiting $%s on account ID %s' % (cost, job.account.id))
                commit_transaction(job, cost, 'no_answer_charge')            

            return fail('JOBS_TRANSMIT_NO_ANSWER', job, db, output)

        elif "number busy or modem in use" in output:
            o('Line busy.')
            return fail('JOBS_TRANSMIT_BUSY', job, db, output)

        else:
            o('Transmit error: %s' % output)
            return fail('JOBS_TRANSMIT_FAIL', job, db, output)

    o('Job completed without error!')
    job.debug = output
    job.mod_date = datetime.now()
    job.end_date = datetime.now()
    job.status = 'sent'
    session.commit()

    o('Deleting job data.')
    job.delete_data(session)

    if job.account.email_success:
        email_success(job)

    if job.callback_url:
        send_job_callback(job, db)

    if os.environ.get('REQUIRE_PAYMENTS') == 'on':
        o('Debiting $%s on account ID %s' % (cost, job.account.id))
        commit_transaction(job, cost, 'job_complete')
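
$REQUIRE_PAYMENTS and $AWS_STORAGE are used as string-valued feature flags here, compared against the literal 'on'. That comparison is strict: 'ON', '1', 'true', or an unset variable (None) all read as off. A small sketch of the flag check:

import os

def flag_enabled(name):
    # Only the exact string 'on' enables the feature; anything else,
    # including an unset variable, disables it.
    return os.environ.get(name) == 'on'

REQUIRE_PAYMENTS = flag_enabled('REQUIRE_PAYMENTS')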

Example 27

Project: faxrobot
Source File: jobs.py
View license
def send_fax(id):

    from boto.s3.connection import S3Connection
    from boto.s3.key import Key
    from datetime import date
    from subprocess import check_output, CalledProcessError, STDOUT
    import stripe
    import traceback
    import json
    from library.mailer import email_recharge_payment, email_success
    from rq import Worker

    device = Worker.MODEM_DEVICE
    caller_id = Worker.CALLER_ID

    stripe.api_key = os.environ.get('STRIPE_SECRET_KEY')

    job = session.query(Job).get(id)

    if not job.status == 'ready' and not job.status == 'queued':
        return fail('JOBS_CANNOT_SEND_NOW', job, db)

    if job.data_deleted:
        return fail('JOBS_FAIL_DATA_DELETED', job, db)

    cost = job.cost if not job.cover else job.cost + job.cover_cost

    ########################################################################
    #   MAKE SURE THE CUSTOMER ACTUALLY HAS MONEY PHASE
    ########################################################################

    if os.environ.get('REQUIRE_PAYMENTS') == 'on':
        if job.account.credit - cost < 0 and not job.account.allow_overflow:
            if job.account.stripe_card and job.account.auto_recharge:
                try:
                    charge = stripe.Charge.create(
                        amount=1000,
                        currency="usd",
                        customer=job.account.stripe_token,
                        description="Auto-recharging account %s"% job.account.id
                    )
                    data = {
                        'account_id':       job.account.id,
                        'amount':           10,
                        'source':           'stripe',
                        'source_id':        charge["id"],
                        'job_id':           job.id,
                        'job_destination':  job.destination,
                        'ip_address':       job.ip_address,
                        'initial_balance':  job.account.credit,
                        'trans_type':       'auto_recharge'
                    }
                    trans = Transaction(**data)
                    session.add(trans)
                    session.commit()

                    job.account.add_credit(10, session)
                    email_recharge_payment(job.account, 10, trans.id,
                        charge.source.last4)

                except:
                    payment = {'_DEBUG': traceback.format_exc()}
                    data = {
                        'amount':       10,
                        'account_id':   job.account.id,
                        'source':       'stripe',
                        'debug':        json.dumps(payment),
                        'ip_address':   job.ip_address,
                        'payment_type': 'auto_recharge'
                    }
                    failed_payment = FailedPayment(**data)
                    session.add(failed_payment)
                    session.commit()
                    # JL TODO ~ Notify customer that the card was declined
                    return fail('JOBS_CARD_DECLINED', job, db)    
            else:
                return fail('JOBS_INSUFFICIENT_CREDIT', job, db)

    job.mod_date = datetime.now()
    job.start_date = datetime.now()
    job.attempts = job.attempts + 1
    job.device = device
    job.status = 'started'
    session.commit()

    files_to_send = []

    ########################################################################
    #   COVER SHEET GENERATION PHASE
    ########################################################################

    path = './tmp/' + job.access_key
    o('Touching temporary directory: %s' % path) ###########################

    try:
        if not os.path.exists(path):
            os.makedirs(path)
    except:
        return fail('JOBS_CREATE_DIR_FAIL', job, db)

    if job.cover:
        o('Generating cover sheet') ########################################

        try:
            o('Generating cover.png') ######################################

            cmd = ["convert", "-density", "400", "-flatten",
                   "./media/cover_sheets/default.pdf", "-gravity",
                   "None"]
            v = 300

            if job.cover_name:
                cmd.extend(["-annotate", "+468+%s" % v, job.cover_name])
                cmd.extend(["-annotate", "+2100+1215", job.cover_name])
                v = v + 80

            if job.cover_address:
                cmd.extend(["-annotate", "+468+%s" % v, job.cover_address])
                v = v + 80

            if job.cover_city or job.cover_state or job.cover_zip:
                cmd.extend(["-annotate", "+468+%s" % v, 
                    "%s, %s %s" % (job.cover_city, job.cover_state,
                    job.cover_zip)])
                v = v + 80

            if job.cover_country:
                cmd.extend(["-annotate", "+468+%s" % v, job.cover_country])
                v = v + 80

            if job.cover_phone:
                cmd.extend(["-annotate", "+468+%s" % v, job.cover_phone])
                v = v + 80

            if job.cover_email:
                cmd.extend(["-annotate", "+468+%s" % v, job.cover_email])
                v = v + 80

            if job.cover_to_name:
                cmd.extend(["-annotate", "+800+1215", job.cover_to_name])

            if job.cover_subject:
                cmd.extend(["-annotate", "+800+1340", job.cover_subject])

            cmd.extend(["-annotate", "+2100+1340", "%s" % job.num_pages])
            cmd.extend(["-annotate", "+800+1465", "%s" % date.today()])

            if job.cover_cc:
                cmd.extend(["-annotate", "+2100+1465", job.cover_cc])

            if "urgent" in job.cover_status:
                cmd.extend(["-annotate", "+473+1740", "X"])

            if "review" in job.cover_status:
                cmd.extend(["-annotate", "+825+1740", "X"])

            if "comment" in job.cover_status:
                cmd.extend(["-annotate", "+1285+1740", "X"])

            if "reply" in job.cover_status:
                cmd.extend(["-annotate", "+1910+1740", "X"])

            if "shred" in job.cover_status:
                cmd.extend(["-annotate", "+2420+1740", "X"])

            cmd.extend([
                "-pointsize",
                "11",
                "./tmp/" + job.access_key + "/cover.png"
            ])
            output = check_output(cmd)
        except CalledProcessError, e:
            return fail('JOBS_COVER_MAIN_FAIL', job, db, str(e))

        if job.cover_company:
            try:
                o('Generating company.png') ################################

                cmd = ["convert", "-density", "400", "-gravity", "Center",
                       "-background", "black", "-fill", "white",
                       "-pointsize", "20", "-size", "1400x",
                       "caption:%s" % job.cover_company, "-bordercolor",
                       "black", "-border", "30",
                       "./tmp/" + job.access_key + "/company.png"]
                output = check_output(cmd)
            except CalledProcessError, e:
                return fail('JOBS_COVER_COMPANY_FAIL', job, db, str(e))

            try:
                o('Overlaying company.png on cover.png') ###################

                cmd = ["composite", "-density", "400", "-gravity",
                       "NorthEast", "-geometry", "+300+200",
                       "./tmp/" + job.access_key + "/company.png",
                       "./tmp/" + job.access_key + "/cover.png",
                       "./tmp/" + job.access_key + "/cover.png"]
                output = check_output(cmd)
            except CalledProcessError, e:
                return fail('JOBS_COVER_OVERLAY_FAIL', job, db, str(e))

        if job.cover_comments:
            try:
                o('Generating comments.png') ###############################

                cmd = ["convert", "-density", "400", "-gravity",
                       "NorthWest", "-background", "white", "-fill",
                       "black", "-pointsize", "11", "-size", "2437x2000",
                       "-font", "Liberation-Mono-Regular",
                       "caption:%s" % job.cover_comments,
                       "./tmp/" + job.access_key + "/comments.png"]
                output = check_output(cmd)
            except CalledProcessError, e:
                return fail('JOBS_COVER_COMMENTS_FAIL', job, db, str(e))

            try:
                o('Overlaying comments.png on cover.png') ##################

                cmd = ["composite", "-density", "400", "-gravity", "None",
                       "-geometry", "+468+2000",
                       "./tmp/" + job.access_key + "/comments.png",
                       "./tmp/" + job.access_key + "/cover.png",
                       "./tmp/" + job.access_key + "/cover.png"]
                output = check_output(cmd)
            except CalledProcessError, e:
                return fail('JOBS_COVER_OVERLAY_FAIL', job, db, str(e))

        try:
            o('Converting cover.png to G3 .tiff') ##########################

            cmd = ["convert", "-scale", "50%",
                   "./tmp/" + job.access_key + "/cover.png",
                   "fax:./tmp/" + job.access_key + "/cover.tiff"]
            output = check_output(cmd)
        except CalledProcessError, e:
            return fail('JOBS_COVER_TIFF_FAIL', job, db, str(e))

        files_to_send.append(u'%s/cover.tiff' % path)

    ########################################################################
    #   LOAD FILES PHASE
    ########################################################################

    filename = job.filename if job.filename else "fax.txt"

    if os.environ.get('AWS_STORAGE') == "on":
        o('Connecting to S3') ##################################################
        try:
            conn = S3Connection(os.environ.get('AWS_ACCESS_KEY'),
                                os.environ.get('AWS_SECRET_KEY'))
            bucket = conn.get_bucket(os.environ.get('AWS_S3_BUCKET'))
        except Exception:
            return fail('JOBS_CONNECT_S3_FAIL', job, db)

        try:
            for i in range(0, job.num_pages):

                num = ("0%s" % i) if i < 10 else "%s" % i

                o('Download: %s/%s.%s.tiff' % (job.access_key, filename, num))

                k = Key(bucket)
                k.key = 'fax/%s/%s.%s.tiff' %(job.access_key, filename, num)
                k.get_contents_to_filename('%s/%s.%s.tiff' % (path,
                        filename, num))

                files_to_send.append('%s/%s.%s.tiff' %(path, filename, num))
        except Exception:
            return fail('JOBS_DOWNLOAD_S3_FAIL', job, db)
    else:
        for i in range(0, job.num_pages):
            num = ("0%s" % i) if i < 10 else "%s" % i
            files_to_send.append('%s/%s.%s.tiff' %(path, filename, num))

    ########################################################################
    #   SEND FAX PHASE
    ########################################################################
    try:
        o('Modem dialing: 1%s' % job.destination)

        cmd = ["efax", "-d", device, "-o1flll ", "-vchewmainrft ", "-l",
               caller_id, "-t", "1%s" % job.destination]
        cmd.extend(files_to_send)
        output = check_output(cmd, stderr=STDOUT)
        o('%s' % output)

    except CalledProcessError, e:

        output = str(e.output)

        if "No Answer" in output:
            if os.environ.get('REQUIRE_PAYMENTS') == 'on':
                o('No answer. Charge the customer anyway for wasting our time.')
                o('Debiting $%s on account ID %s' % (cost, job.account.id))
                commit_transaction(job, cost, 'no_answer_charge')            

            return fail('JOBS_TRANSMIT_NO_ANSWER', job, db, output)

        elif "number busy or modem in use" in output:
            o('Line busy.')
            return fail('JOBS_TRANSMIT_BUSY', job, db, output)

        else:
            o('Transmit error: %s' % output)
            return fail('JOBS_TRANSMIT_FAIL', job, db, output)

    o('Job completed without error!')
    job.debug = output
    job.mod_date = datetime.now()
    job.end_date = datetime.now()
    job.status = 'sent'
    session.commit()

    o('Deleting data lol.')
    job.delete_data(session)

    if job.account.email_success:
        email_success(job)

    if job.callback_url:
        send_job_callback(job, db)

    if os.environ.get('REQUIRE_PAYMENTS') == 'on':
        o('Debiting $%s on account ID %s' % (cost, job.account.id))
        commit_transaction(job, cost, 'job_complete')
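
A side note on the environment checks above: AWS_STORAGE and REQUIRE_PAYMENTS are read with os.environ.get and compared against the literal string "on", so an unset variable simply disables the feature. A minimal sketch of that flag idiom (env_flag is illustrative, not part of the project):

import os

def env_flag(name, default="off"):
    # Enabled only when the variable is exactly "on", mirroring the
    # AWS_STORAGE and REQUIRE_PAYMENTS checks in the job runner above.
    return os.environ.get(name, default) == "on"

if env_flag("AWS_STORAGE"):
    pass  # e.g. fetch the page images from S3 instead of ./tmp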

Example 28

View license
	def addRomCollections(self, id, configObj, consoleList, isUpdate):
		
		romCollections = {}
		dialog = xbmcgui.Dialog()
		
		#scraping scenario
		scenarioIndex = dialog.select(util.localize(32173), [util.localize(32174), util.localize(32175)])
		Logutil.log('scenarioIndex: ' +str(scenarioIndex), util.LOG_LEVEL_INFO)
		if(scenarioIndex == -1):
			del dialog
			Logutil.log('No scenario selected. Action canceled.', util.LOG_LEVEL_INFO)
			return False, romCollections
		
		autoconfig = EmulatorAutoconfig(util.getEmuAutoConfigPath())
		
		while True:
					
			fileTypeList, errorMsg = self.buildMediaTypeList(configObj, isUpdate)
			romCollection = RomCollection()
			
			#console
			platformIndex = dialog.select(util.localize(32176), consoleList)
			Logutil.log('platformIndex: ' +str(platformIndex), util.LOG_LEVEL_INFO)
			if(platformIndex == -1):
				Logutil.log('No Platform selected. Action canceled.', util.LOG_LEVEL_INFO)
				break
			else:
				console = consoleList[platformIndex]
				if(console =='Other'):				
					keyboard = xbmc.Keyboard()
					keyboard.setHeading(util.localize(32177))			
					keyboard.doModal()
					if (keyboard.isConfirmed()):
						console = keyboard.getText()
						Logutil.log('Platform entered manually: ' +console, util.LOG_LEVEL_INFO)
					else:
						Logutil.log('No Platform entered. Action canceled.', util.LOG_LEVEL_INFO)
						break
				else:
					consoleList.remove(console)
					Logutil.log('selected platform: ' +console, util.LOG_LEVEL_INFO)
			
			romCollection.name = console
			romCollection.id = id
			id = id +1
			
			
			#check if we have general RetroPlayer support
			if(helper.isRetroPlayerSupported()):
				supportsRetroPlayer = True
				#if we have full python integration we can also check if specific platform supports RetroPlayer
				if(helper.retroPlayerSupportsPythonIntegration()):
					supportsRetroPlayer = False
					success, installedAddons = helper.readLibretroCores("all", True, romCollection.name)
					if(success and len(installedAddons) > 0):
						supportsRetroPlayer = True
					else:
						success, installedAddons = helper.readLibretroCores("uninstalled", False, romCollection.name)
						if(success and len(installedAddons) > 0):
							supportsRetroPlayer = True
					
				if(supportsRetroPlayer):
					retValue = dialog.yesno(util.localize(32999), util.localize(32198))
					if(retValue == True):
						romCollection.useBuiltinEmulator = True
			
			#only ask for emulator and params if we don't use builtin emulator
			if(not romCollection.useBuiltinEmulator):
				
				#maybe there is autoconfig support
				preconfiguredEmulator = None
				
				#emulator
				#xbox games on xbox will be launched directly
				if (os.environ.get( "OS", "xbox" ) == "xbox" and romCollection.name == 'Xbox'):
					romCollection.emulatorCmd = '%ROM%'
					Logutil.log('emuCmd set to "%ROM%" on Xbox.', util.LOG_LEVEL_INFO)
				#check for standalone games
				elif (romCollection.name == 'Linux' or romCollection.name == 'Macintosh' or romCollection.name == 'Windows'):
					romCollection.emulatorCmd = '"%ROM%"'
					Logutil.log('emuCmd set to "%ROM%" for standalone games.', util.LOG_LEVEL_INFO)
				else:
					#TODO: Windows and Linux support
					#xbmc.getCondVisibility('System.Platform.Windows')
					#xbmc.getCondVisibility('System.Platform.Linux')
					if(xbmc.getCondVisibility('System.Platform.Android')):
						Logutil.log('Running on Android. Trying to find emulator per autoconfig.', util.LOG_LEVEL_INFO)
						emulators = autoconfig.findEmulators('Android', romCollection.name, True)
						emulist = []
						for emulator in emulators:
							if(emulator.isInstalled):
								emulist.append(util.localize(32202) %emulator.name)
							else:
								emulist.append(emulator.name)
						if(len(emulist) > 0):
							emuIndex = dialog.select(util.localize(32203), emulist)
							Logutil.log('emuIndex: ' +str(emuIndex), util.LOG_LEVEL_INFO)
							if(emuIndex == -1):
								Logutil.log('No Emulator selected.', util.LOG_LEVEL_INFO)
							else:
								preconfiguredEmulator = emulators[emuIndex]
							
					if(preconfiguredEmulator):
						romCollection.emulatorCmd = preconfiguredEmulator.emuCmd
					else:
						consolePath = dialog.browse(1, util.localize(32178) %console, 'files')
						Logutil.log('consolePath: ' +str(consolePath), util.LOG_LEVEL_INFO)
						if(consolePath == ''):
							Logutil.log('No consolePath selected. Action canceled.', util.LOG_LEVEL_INFO)
							break
						romCollection.emulatorCmd = consolePath
				
				#params
				#on xbox we will create .cut files without params
				if (os.environ.get( "OS", "xbox" ) == "xbox"):
					romCollection.emulatorParams = ''
					Logutil.log('emuParams set to "" on Xbox.', util.LOG_LEVEL_INFO)
				elif (romCollection.name == 'Linux' or romCollection.name == 'Macintosh' or romCollection.name == 'Windows'):
					romCollection.emulatorParams = ''
					Logutil.log('emuParams set to "" for standalone games.', util.LOG_LEVEL_INFO)
				else:
					defaultParams = '"%ROM%"'
					if(preconfiguredEmulator):
						defaultParams = preconfiguredEmulator.emuParams
											
					keyboard = xbmc.Keyboard()
					keyboard.setDefault(defaultParams)
					keyboard.setHeading(util.localize(32179))			
					keyboard.doModal()
					if (keyboard.isConfirmed()):
						emuParams = keyboard.getText()
						Logutil.log('emuParams: ' +str(emuParams), util.LOG_LEVEL_INFO)
					else:
						Logutil.log('No emuParams selected. Action canceled.', util.LOG_LEVEL_INFO)
						break
					romCollection.emulatorParams = emuParams
			
			#roms
			romPath = dialog.browse(0, util.localize(32180) %console, 'files')
			if(romPath == ''):
				Logutil.log('No romPath selected. Action canceled.', util.LOG_LEVEL_INFO)
				break
									
			#TODO: find out how to deal with non-ascii characters
			try:
				unicode(romPath)
			except:
				Logutil.log("RCB can't acces your Rom Path. Make sure it does not contain any non-ascii characters.", util.LOG_LEVEL_INFO)
				xbmcgui.Dialog().ok(util.SCRIPTNAME, util.localize(32041), errorMsg)
				break
					
			#filemask
			
			#xbox games always use default.xbe as executable
			if (os.environ.get( "OS", "xbox" ) == "xbox" and romCollection.name == 'Xbox'):
				Logutil.log('filemask "default.xbe" for Xbox games on Xbox.', util.LOG_LEVEL_INFO)
				romPathComplete = util.joinPath(romPath, 'default.xbe')					
				romCollection.romPaths = []
				romCollection.romPaths.append(romPathComplete)
			else:
				keyboard = xbmc.Keyboard()
				keyboard.setHeading(util.localize(32181))			
				keyboard.doModal()
				if (keyboard.isConfirmed()):					
					fileMaskInput = keyboard.getText()
					Logutil.log('fileMask: ' +str(fileMaskInput), util.LOG_LEVEL_INFO)
					fileMasks = fileMaskInput.split(',')
					romCollection.romPaths = []
					for fileMask in fileMasks:
						romPathComplete = util.joinPath(romPath, fileMask.strip())					
						romCollection.romPaths.append(romPathComplete)
				else:
					Logutil.log('No fileMask selected. Action canceled.', util.LOG_LEVEL_INFO)
					break
	
			if (os.environ.get( "OS", "xbox" ) == "xbox"):
				romCollection.xboxCreateShortcut = True
				romCollection.xboxCreateShortcutAddRomfile = True
				romCollection.xboxCreateShortcutUseShortGamename = False
				
				#TODO use flags for complete platform list (not only xbox)
				if(romCollection.name == 'Xbox'):
					romCollection.useFoldernameAsGamename = True
					romCollection.searchGameByCRC = False
					romCollection.maxFolderDepth = 1
			
			
			if(scenarioIndex == 0):
				artworkPath = dialog.browse(0, util.localize(32193) %console, 'files', '', False, False, romPath)
				Logutil.log('artworkPath: ' +str(artworkPath), util.LOG_LEVEL_INFO)				
				#TODO: find out how to deal with non-ascii characters
				try:
					unicode(artworkPath)
				except:
					Logutil.log("RCB can't acces your artwork path. Make sure it does not contain any non-ascii characters.", util.LOG_LEVEL_INFO)
					xbmcgui.Dialog().ok(util.SCRIPTNAME, util.localize(32042), errorMsg)
					break
				
				if(artworkPath == ''):
					Logutil.log('No artworkPath selected. Action canceled.', util.LOG_LEVEL_INFO)
					break
				
				romCollection.descFilePerGame= True
				
				#mediaPaths
				romCollection.mediaPaths = []
				
				if(romCollection.name == 'MAME'):
					romCollection.mediaPaths.append(self.createMediaPath('boxfront', artworkPath, scenarioIndex))
					romCollection.mediaPaths.append(self.createMediaPath('action', artworkPath, scenarioIndex))
					romCollection.mediaPaths.append(self.createMediaPath('title', artworkPath, scenarioIndex))
					romCollection.mediaPaths.append(self.createMediaPath('cabinet', artworkPath, scenarioIndex))
					romCollection.mediaPaths.append(self.createMediaPath('marquee', artworkPath, scenarioIndex))					
				else:
					romCollection.mediaPaths.append(self.createMediaPath('boxfront', artworkPath, scenarioIndex))
					romCollection.mediaPaths.append(self.createMediaPath('boxback', artworkPath, scenarioIndex))
					romCollection.mediaPaths.append(self.createMediaPath('cartridge', artworkPath, scenarioIndex))
					romCollection.mediaPaths.append(self.createMediaPath('screenshot', artworkPath, scenarioIndex))
					romCollection.mediaPaths.append(self.createMediaPath('fanart', artworkPath, scenarioIndex))
				
				#other MAME specific properties
				if(romCollection.name == 'MAME'):
					romCollection.imagePlacingMain = ImagePlacing()
					romCollection.imagePlacingMain.name = 'gameinfomamecabinet'
					
					#MAME zip files contain several files but they must be passed to the emu as zip file
					romCollection.doNotExtractZipFiles = True
					
					#create MAWS scraper
					site = Site()
					site.name = 'maws.mameworld.info'
					scrapers = []
					scraper = Scraper()
					scraper.parseInstruction = '06 - maws.xml'
					scraper.source = 'http://maws.mameworld.info/maws/romset/%GAME%'
					scrapers.append(scraper)
					site.scrapers = scrapers
					romCollection.scraperSites = []
					romCollection.scraperSites.append(site)
			else:
				
				if(romCollection.name == 'MAME'):
					romCollection.imagePlacingMain = ImagePlacing()
					romCollection.imagePlacingMain.name = 'gameinfomamecabinet'
					#MAME zip files contain several files but they must be passed to the emu as zip file
					romCollection.doNotExtractZipFiles = True
				
				
				romCollection.mediaPaths = []
				
				lastArtworkPath = ''
				while True:
					
					fileTypeIndex = dialog.select(util.localize(32183), fileTypeList)
					Logutil.log('fileTypeIndex: ' +str(fileTypeIndex), util.LOG_LEVEL_INFO)					
					if(fileTypeIndex == -1):
						Logutil.log('No fileTypeIndex selected.', util.LOG_LEVEL_INFO)
						break
					
					fileType = fileTypeList[fileTypeIndex]
					fileTypeList.remove(fileType)
					
					if(lastArtworkPath == ''):					
						artworkPath = dialog.browse(0, util.localize(32182) %(console, fileType), 'files', '', False, False, romPath)
					else:
						artworkPath = dialog.browse(0, util.localize(32182) %(console, fileType), 'files', '', False, False, lastArtworkPath)
					
					try:
						unicode(artworkPath)
					except:				
						Logutil.log("RCB can't acces your artwork path. Make sure it does not contain any non-ascii characters.", util.LOG_LEVEL_INFO)
						xbmcgui.Dialog().ok(util.SCRIPTNAME, util.localize(32042), errorMsg)
						break
					
					lastArtworkPath = artworkPath
					Logutil.log('artworkPath: ' +str(artworkPath), util.LOG_LEVEL_INFO)
					if(artworkPath == ''):
						Logutil.log('No artworkPath selected.', util.LOG_LEVEL_INFO)
						break
					
					romCollection.mediaPaths.append(self.createMediaPath(fileType, artworkPath, scenarioIndex))
					
					retValue = dialog.yesno(util.localize(32999), util.localize(32184))
					if(retValue == False):
						break
				
				descIndex = dialog.select(util.localize(32185), [util.localize(32186), util.localize(32187), util.localize(32188)])
				Logutil.log('descIndex: ' +str(descIndex), util.LOG_LEVEL_INFO)
				if(descIndex == -1):
					Logutil.log('No descIndex selected. Action canceled.', util.LOG_LEVEL_INFO)
					break
				
				romCollection.descFilePerGame = (descIndex != 1)
				
				if(descIndex == 2):
					#leave scraperSites empty - they will be filled in configwriter
					pass
				
				else:
					descPath = ''
					
					if(romCollection.descFilePerGame):
						#get path
						pathValue = dialog.browse(0, util.localize(32189) %console, 'files')
						if(pathValue == ''):
							break
						
						#get file mask
						keyboard = xbmc.Keyboard()
						keyboard.setHeading(util.localize(32190))
						keyboard.setDefault('%GAME%.txt')
						keyboard.doModal()
						if (keyboard.isConfirmed()):
							filemask = keyboard.getText()
						else:
							Logutil.log('No filemask entered. Action canceled.', util.LOG_LEVEL_INFO)
							break

						descPath = util.joinPath(pathValue, filemask.strip())
					else:
						descPath = dialog.browse(1, util.localize(32189) %console, 'files', '', False, False, lastArtworkPath)
					
					Logutil.log('descPath: ' +str(descPath), util.LOG_LEVEL_INFO)
					if(descPath == ''):
						Logutil.log('No descPath selected. Action canceled.', util.LOG_LEVEL_INFO)
						break
					
					parserPath = dialog.browse(1, util.localize(32191) %console, 'files', '', False, False, descPath)
					Logutil.log('parserPath: ' +str(parserPath), util.LOG_LEVEL_INFO)
					if(parserPath == ''):
						Logutil.log('No parserPath selected. Action canceled.', util.LOG_LEVEL_INFO)
						break
					
					#create scraper
					site = Site()
					site.name = console
					site.descFilePerGame = (descIndex == 0)
					site.searchGameByCRC = True
					scrapers = []
					scraper = Scraper()
					scraper.parseInstruction = parserPath
					scraper.source = descPath
					scraper.encoding = 'iso-8859-1'
					scrapers.append(scraper)
					site.scrapers = scrapers
					romCollection.scraperSites = []
					romCollection.scraperSites.append(site)
			
			romCollections[romCollection.id] = romCollection						
			
			retValue = dialog.yesno(util.localize(32999), util.localize(32192))
			if(retValue == False):
				break
		
		del dialog
		
		return True, romCollections
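
The repeated os.environ.get("OS", "xbox") checks above lean on the default argument: when OS is not defined, the fallback "xbox" is returned, so every platform that does not define an OS variable at all is treated as an Xbox build. A one-line sketch with the default inverted (an assumption about intent, not RCB's actual behavior):

import os

# With an empty-string default, only an explicit OS=xbox matches.
is_xbox = os.environ.get("OS", "") == "xbox"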

Example 30

Project: ArchiveBot
Source File: uploader.py
View license
def main():
    if len(sys.argv) > 1:
        directory = sys.argv[1]
    elif os.environ.get('FINISHED_WARCS_DIR') is not None:
        directory = os.environ['FINISHED_WARCS_DIR']
    else:
        raise RuntimeError('No directory specified (set FINISHED_WARCS_DIR '
                           'or specify directory on command line)')

    mode = None #modes: 'rsync', 's3'

    url = os.environ.get('RSYNC_URL')
    if url is not None:
        if '/localhost' in url or '/127.' in url:
            raise RuntimeError('Won\'t let you upload to localhost because I '
                               'remove files after uploading them, and you '
                               'might be uploading to the same directory')
        mode = 'rsync'

    if url is None:
        url = os.environ.get('S3_URL')
        if url is not None:
            mode = 's3'

    if url is None:
        raise RuntimeError('Neither RSYNC_URL nor S3_URL is set - nowhere to '
                           'upload to. Hint: use '
                           'S3_URL=https://s3.us.archive.org')

    if mode == 's3': #parse IA-S3-specific options
        ia_collection = os.environ.get('IA_COLLECTION')
        if ia_collection is None:
            raise RuntimeError('Must specify IA_COLLECTION if using IA S3 '
                               '(hint: ArchiveBot)')

        ia_item_title = os.environ.get('IA_ITEM_TITLE')
        if ia_item_title is None:
            raise RuntimeError('Must specify IA_ITEM_TITLE if using IA S3 '
                               '(hint: "Archiveteam: Archivebot $pipeline_name '
                               'GO Pack")')

        ia_auth = os.environ.get('IA_AUTH')
        if ia_auth is None:
            raise RuntimeError('Must specify IA_AUTH if using IA S3 '
                               '(hint: access_key:secret_key)')

        ia_item_prefix = os.environ.get('IA_ITEM_PREFIX')
        if ia_item_prefix is None:
            raise RuntimeError('Must specify IA_ITEM_PREFIX if using IA S3 '
                               '(hint: archiveteam_archivebot_go_$pipeline_name'
                               '_}')

        ia_access = os.environ.get('IA_ACCESS')
        if ia_access is None:
            raise RuntimeError('Must specify IA_ACCESS if using IA S3 '
                               '(hint: your access key)')

    print("CHECK THE UPLOAD TARGET: %s as %s endpoint" % (url, mode))
    print()
    print("Upload target must reliably store data")
    print("Each local file will removed after upload")
    print("Hit CTRL-C immediately if upload target is incorrect")
    print()

    uploading_dir = os.path.join(directory, "_uploading")
    try_mkdir(uploading_dir)

    while True:
        print("Waiting %d seconds" % (WAIT,))
        time.sleep(WAIT)

        fnames = sorted(list(f for f in os.listdir(directory) if should_upload(f)))
        if len(fnames):
            basename = fnames[0]
            fname_d = os.path.join(directory, basename)
            fname_u = os.path.join(uploading_dir, basename)
            if os.path.exists(fname_u):
                print("%r already exists - another uploader probably grabbed it" % (fname_u,))
                continue
            try:
                os.rename(fname_d, fname_u)
            except OSError:
                print("Could not rename %r - another uploader probably grabbed it" % (fname_d,))
            else:
                print("Uploading %r" % (fname_u,))

                item = parse_name(basename)

                if mode == 'rsync':
                    exit_code = subprocess.call([
                        "rsync", "-av", "--timeout=300", "--contimeout=300",
                        "--progress", fname_u, url])
                elif mode == 's3':
                    ia_upload_bucket = re.sub(r'[^0-9a-zA-Z-]+', '_', ia_item_prefix + '_' + item['dns'][-64:] + '_' + item['date'])
                    if ia_upload_allowed(url, ia_access, ia_upload_bucket): # IA is not throttling
                        # At some point, an ambitious person could try a file belonging in a different bucket if ia_upload_allowed denied this one
                        size_hint = str(os.stat(fname_u).st_size)
                        target = url + '/' + ia_upload_bucket + '/' + \
                                 re.sub(r'[^0-9a-zA-Z-.]+', '_', basename)[-64:]

                        exit_code = subprocess.call([
                            "curl", "-v", "--location", "--fail",
                            "--speed-limit", "1", "--speed-time", "900",
                            "--header", "x-archive-queue-derive:1",
                            "--header", "x-amz-auto-make-bucket:1",
                            "--header", "x-archive-meta-collection:" + ia_collection,
                            "--header", "x-archive-meta-mediatype:web",
                            "--header", "x-archive-meta-subject:archivebot",
                            "--header", "x-archive-meta-title:" + ia_item_title +
                            ' ' + item['dns'] + ' ' + item['date'],
                            "--header", "x-archive-meta-date:" +
                            item['date'][0:4] + '-' +
                            item['date'][4:6] + '-' +
                            item['date'][6:8],
                            "--header", "x-archive-size-hint:" + size_hint,
                            "--header", "authorization: LOW " + ia_auth,
                            "-o", "/dev/stdout",
                            "--upload-file", fname_u,
                            target])
                    else: # Cannot upload now, try again later
                        exit_code = 1
                else: #no upload mechanism available
                    exit_code = 1

                if exit_code == 0:
                    print("Removing %r" % (fname_u,))
                    os.remove(fname_u)
                else:
                    # Move it out of the _uploading directory so that this
                    # uploader (or another one) can try again.
                    os.rename(fname_u, fname_d)
        else:
            print("Nothing to upload")

Example 32

Project: nuxeo-drive
Source File: common.py
View license
    def setUp(self):
        # Check the Nuxeo server test environment
        self.nuxeo_url = os.environ.get('NXDRIVE_TEST_NUXEO_URL')
        self.admin_user = os.environ.get('NXDRIVE_TEST_USER')
        self.password = os.environ.get('NXDRIVE_TEST_PASSWORD')
        self.build_workspace = os.environ.get('WORKSPACE')

        # Take default parameter if none has been set
        if self.nuxeo_url is None:
            self.nuxeo_url = "http://localhost:8080/nuxeo"
        if self.admin_user is None:
            self.admin_user = "Administrator"
        if self.password is None:
            self.password = "Administrator"
        self.tmpdir = None
        if self.build_workspace is not None:
            self.tmpdir = os.path.join(self.build_workspace, "tmp")
            if not os.path.isdir(self.tmpdir):
                os.makedirs(self.tmpdir)

        if None in (self.nuxeo_url, self.admin_user, self.password):
            raise unittest.SkipTest(
                "No integration server configuration found in environment.")

        self.full_nuxeo_url = self.nuxeo_url
        if '#' in self.nuxeo_url:
            self.nuxeo_url = self.nuxeo_url.split('#')[0]
        # Check the local filesystem test environment
        self.local_test_folder_1 = tempfile.mkdtemp(u'drive-1', dir=self.tmpdir)
        self.local_test_folder_2 = tempfile.mkdtemp(u'drive-2', dir=self.tmpdir)

        self.local_nxdrive_folder_1 = os.path.join(
            self.local_test_folder_1, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_1)
        self.local_nxdrive_folder_2 = os.path.join(
            self.local_test_folder_2, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_2)

        self.nxdrive_conf_folder_1 = os.path.join(
            self.local_test_folder_1, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_1)

        self.nxdrive_conf_folder_2 = os.path.join(
            self.local_test_folder_2, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_2)

        self.version = nxdrive.__version__

        # Long timeout for the root client that is responsible for setting up
        # the test environment: this client makes the first query to the Nuxeo
        # server and might need to wait a long time, without failing, for
        # Nuxeo to finish initializing the repository on the first request
        # after startup.
        root_remote_client = RemoteDocumentClient(
            self.nuxeo_url, self.admin_user,
            u'nxdrive-test-administrator-device', self.version,
            password=self.password, base_folder=u'/', timeout=60)

        # Call the Nuxeo operation to setup the integration test environment
        credentials = root_remote_client.execute(
            "NuxeoDrive.SetupIntegrationTests",
            userNames="user_1, user_2", permission='ReadWrite')

        credentials = [c.strip().split(u":") for c in credentials.split(u",")]
        self.user_1, self.password_1 = credentials[0]
        self.user_2, self.password_2 = credentials[1]

        ws_info = root_remote_client.fetch(TEST_WORKSPACE_PATH)
        self.workspace = ws_info[u'uid']
        self.workspace_title = ws_info[u'title']

        # Document client to be used to create remote test documents
        # and folders
        self.upload_tmp_dir = tempfile.mkdtemp(u'-nxdrive-uploads', dir=self.tmpdir)
        remote_document_client_1 = RemoteDocumentClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, base_folder=self.workspace,
            upload_tmp_dir=self.upload_tmp_dir)

        remote_document_client_2 = RemoteDocumentClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, base_folder=self.workspace,
            upload_tmp_dir=self.upload_tmp_dir)

        self.remote_restapi_client_1 = RestAPIClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1
        )
        self.remote_restapi_client_2 = RestAPIClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2
        )

        # File system client to be used to create remote test documents
        # and folders
        remote_file_system_client_1 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, upload_tmp_dir=self.upload_tmp_dir)

        remote_file_system_client_2 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, upload_tmp_dir=self.upload_tmp_dir)

        self.root_remote_client = root_remote_client
        self.remote_document_client_1 = remote_document_client_1
        self.remote_document_client_2 = remote_document_client_2
        self.remote_file_system_client_1 = remote_file_system_client_1
        self.remote_file_system_client_2 = remote_file_system_client_2

        self.local_client_1 = LocalClient(os.path.join(self.local_nxdrive_folder_1, self.workspace_title))
        self.local_client_2 = LocalClient(os.path.join(self.local_nxdrive_folder_2, self.workspace_title))
        ndrive_path = os.path.dirname(nxdrive.__file__)
        self.ndrive_exec = os.path.join(ndrive_path, '..', 'scripts', 'ndrive.py')
        cmdline_options = '--log-level-console=%s' % DEFAULT_CONSOLE_LOG_LEVEL
        cmdline_options += ' --log-level-file=TRACE'
        cmdline_options += ' --nxdrive-home="%s"'
        if os.environ.get('PYDEV_DEBUG') == 'True':
            cmdline_options += ' --debug-pydev'
        self.ndrive_1_options = cmdline_options % self.nxdrive_conf_folder_1
        self.ndrive_2_options = cmdline_options % self.nxdrive_conf_folder_2
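
Note that setUp assigns the defaults before the "None in (...)" guard, so the SkipTest branch can never fire for those three values. A minimal sketch of the same defaults expressed through os.environ.get's second argument (behavior as assumed from the code above):

import os

# Passing the default straight to os.environ.get collapses the
# fetch-then-test-then-assign sequence into a single call.
nuxeo_url = os.environ.get('NXDRIVE_TEST_NUXEO_URL', 'http://localhost:8080/nuxeo')
admin_user = os.environ.get('NXDRIVE_TEST_USER', 'Administrator')
password = os.environ.get('NXDRIVE_TEST_PASSWORD', 'Administrator')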

Example 33

Project: nuxeo-drive
Source File: common_unit_test.py
View license
    def setUpApp(self, server_profile=None):
        # Check the Nuxeo server test environment
        self.nuxeo_url = os.environ.get('NXDRIVE_TEST_NUXEO_URL')
        self.admin_user = os.environ.get('NXDRIVE_TEST_USER')
        self.password = os.environ.get('NXDRIVE_TEST_PASSWORD')
        self.build_workspace = os.environ.get('WORKSPACE')
        self.result = None
        self.tearedDown = False

        # Take default parameter if none has been set
        if self.nuxeo_url is None:
            self.nuxeo_url = "http://localhost:8080/nuxeo"
        if self.admin_user is None:
            self.admin_user = "Administrator"
        if self.password is None:
            self.password = "Administrator"
        self.tmpdir = None
        if self.build_workspace is not None:
            self.tmpdir = os.path.join(self.build_workspace, "tmp")
            if not os.path.isdir(self.tmpdir):
                os.makedirs(self.tmpdir)
        self.upload_tmp_dir = tempfile.mkdtemp(u'-nxdrive-uploads', dir=self.tmpdir)

        if None in (self.nuxeo_url, self.admin_user, self.password):
            raise unittest.SkipTest(
                "No integration server configuration found in environment.")

        # Check the local filesystem test environment
        self.local_test_folder_1 = tempfile.mkdtemp(u'drive-1', dir=self.tmpdir)
        self.local_test_folder_2 = tempfile.mkdtemp(u'drive-2', dir=self.tmpdir)

        self.local_nxdrive_folder_1 = os.path.join(
            self.local_test_folder_1, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_1)
        self.local_nxdrive_folder_2 = os.path.join(
            self.local_test_folder_2, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_2)

        self.nxdrive_conf_folder_1 = os.path.join(
            self.local_test_folder_1, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_1)
        self.nxdrive_conf_folder_2 = os.path.join(
            self.local_test_folder_2, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_2)

        from mock import Mock
        options = Mock()
        options.debug = False
        options.delay = TEST_DEFAULT_DELAY
        options.force_locale = None
        options.proxy_server = None
        options.log_level_file = None
        options.update_site_url = None
        options.beta_update_site_url = None
        options.autolock_interval = 30
        options.nxdrive_home = self.nxdrive_conf_folder_1
        self.manager_1 = Manager(options)
        self.connected = False
        import nxdrive
        nxdrive_path = os.path.dirname(nxdrive.__file__)
        i18n_path = os.path.join(nxdrive_path, 'tests', 'resources', "i18n.js")
        Translator(self.manager_1, i18n_path)
        options.nxdrive_home = self.nxdrive_conf_folder_2
        Manager._singleton = None
        self.manager_2 = Manager(options)
        self.version = __version__
        url = self.nuxeo_url
        log.debug("Will use %s as url", url)
        if '#' in url:
            # Remove the engine type for the rest of the test
            self.nuxeo_url = url.split('#')[0]
        self.setUpServer(server_profile)

        self.engine_1 = self.manager_1.bind_server(self.local_nxdrive_folder_1, url, self.user_1,
                                                   self.password_1, start_engine=False)
        self.engine_2 = self.manager_2.bind_server(self.local_nxdrive_folder_2, url, self.user_2,
                                                   self.password_2, start_engine=False)
        self.engine_1.syncCompleted.connect(self.app.sync_completed)
        self.engine_1.get_remote_watcher().remoteScanFinished.connect(self.app.remote_scan_completed)
        self.engine_1.get_remote_watcher().changesFound.connect(self.app.remote_changes_found)
        self.engine_1.get_remote_watcher().noChangesFound.connect(self.app.no_remote_changes_found)
        self.engine_2.syncCompleted.connect(self.app.sync_completed)
        self.engine_2.get_remote_watcher().remoteScanFinished.connect(self.app.remote_scan_completed)
        self.engine_2.get_remote_watcher().changesFound.connect(self.app.remote_changes_found)
        self.engine_2.get_remote_watcher().noChangesFound.connect(self.app.no_remote_changes_found)
        self.queue_manager_1 = self.engine_1.get_queue_manager()
        self.queue_manager_2 = self.engine_2.get_queue_manager()

        self.sync_root_folder_1 = os.path.join(self.local_nxdrive_folder_1, self.workspace_title_1)
        self.sync_root_folder_2 = os.path.join(self.local_nxdrive_folder_2, self.workspace_title_2)

        self.local_root_client_1 = self.engine_1.get_local_client()
        self.local_root_client_2 = self.engine_2.get_local_client()

        self.local_client_1 = self.get_local_client(os.path.join(self.local_nxdrive_folder_1, self.workspace_title))
        self.local_client_2 = self.get_local_client(os.path.join(self.local_nxdrive_folder_2, self.workspace_title))

        # Document client to be used to create remote test documents
        # and folders
        remote_document_client_1 = RemoteDocumentClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, base_folder=self.workspace_1,
            upload_tmp_dir=self.upload_tmp_dir)

        remote_document_client_2 = RemoteDocumentClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, base_folder=self.workspace_2,
            upload_tmp_dir=self.upload_tmp_dir)
        # File system client to be used to create remote test documents
        # and folders
        remote_file_system_client_1 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, upload_tmp_dir=self.upload_tmp_dir)

        remote_file_system_client_2 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, upload_tmp_dir=self.upload_tmp_dir)

        self.remote_restapi_client_1 = RestAPIClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1
        )
        self.remote_restapi_client_2 = RestAPIClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2
        )
        self.remote_restapi_client_admin = RestAPIClient(
            self.nuxeo_url, self.admin_user, u'nxdrive-test-device-2',
            self.version,
            password=self.password
        )

        # Register root
        remote_document_client_1.register_as_root(self.workspace_1)
        remote_document_client_2.register_as_root(self.workspace_2)

        self.remote_document_client_1 = remote_document_client_1
        self.remote_document_client_2 = remote_document_client_2
        self.remote_file_system_client_1 = remote_file_system_client_1
        self.remote_file_system_client_2 = remote_file_system_client_2

        self._wait_sync = {self.engine_1.get_uid(): True, self.engine_2.get_uid(): True}
        self._wait_remote_scan = {self.engine_1.get_uid(): True, self.engine_2.get_uid(): True}
        self._remote_changes_count = {self.engine_1.get_uid(): 0, self.engine_2.get_uid(): 0}
        self._no_remote_changes = {self.engine_1.get_uid(): False, self.engine_2.get_uid(): False}
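
The setUpApp above skips the entire suite when the server settings read from the environment are missing: look the values up with os.environ.get, then raise unittest.SkipTest if any came back as None. A minimal, self-contained sketch of that guard, using hypothetical MYAPP_TEST_* variable names rather than the suite's real keys:

import os
import unittest

class IntegrationTest(unittest.TestCase):

    def setUp(self):
        # Hypothetical keys; the suite above reads NXDRIVE_TEST_* instead.
        self.server_url = os.environ.get('MYAPP_TEST_SERVER_URL')
        self.api_token = os.environ.get('MYAPP_TEST_API_TOKEN')
        if None in (self.server_url, self.api_token):
            raise unittest.SkipTest(
                'No integration server configuration found in environment.')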

Example 34

Project: nuxeo-drive
Source File: common.py
View license
    def setUp(self):
        # Check the Nuxeo server test environment
        self.nuxeo_url = os.environ.get('NXDRIVE_TEST_NUXEO_URL')
        self.admin_user = os.environ.get('NXDRIVE_TEST_USER')
        self.password = os.environ.get('NXDRIVE_TEST_PASSWORD')
        self.build_workspace = os.environ.get('WORKSPACE')

        # Take default parameters if none have been set
        if self.nuxeo_url is None:
            self.nuxeo_url = "http://localhost:8080/nuxeo"
        if self.admin_user is None:
            self.admin_user = "Administrator"
        if self.password is None:
            self.password = "Administrator"
        self.tmpdir = None
        if self.build_workspace is not None:
            self.tmpdir = os.path.join(self.build_workspace, "tmp")
            if not os.path.isdir(self.tmpdir):
                os.makedirs(self.tmpdir)

        if None in (self.nuxeo_url, self.admin_user, self.password):
            raise unittest.SkipTest(
                "No integration server configuration found in environment.")

        self.full_nuxeo_url = self.nuxeo_url
        if '#' in self.nuxeo_url:
            self.nuxeo_url = self.nuxeo_url.split('#')[0]
        # Check the local filesystem test environment
        self.local_test_folder_1 = tempfile.mkdtemp(u'drive-1', dir=self.tmpdir)
        self.local_test_folder_2 = tempfile.mkdtemp(u'drive-2', dir=self.tmpdir)

        self.local_nxdrive_folder_1 = os.path.join(
            self.local_test_folder_1, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_1)
        self.local_nxdrive_folder_2 = os.path.join(
            self.local_test_folder_2, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_2)

        self.nxdrive_conf_folder_1 = os.path.join(
            self.local_test_folder_1, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_1)

        self.nxdrive_conf_folder_2 = os.path.join(
            self.local_test_folder_2, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_2)

        self.version = nxdrive.__version__

        # Long timeout for the root client that is responsible for the test
        # environment setup: this client makes the first query to the Nuxeo
        # server and might need to wait for a long time, without failing, for
        # Nuxeo to finish initializing the repo on the first request after
        # startup.
        root_remote_client = RemoteDocumentClient(
            self.nuxeo_url, self.admin_user,
            u'nxdrive-test-administrator-device', self.version,
            password=self.password, base_folder=u'/', timeout=60)

        # Call the Nuxeo operation to setup the integration test environment
        credentials = root_remote_client.execute(
            "NuxeoDrive.SetupIntegrationTests",
            userNames="user_1, user_2", permission='ReadWrite')

        credentials = [c.strip().split(u":") for c in credentials.split(u",")]
        self.user_1, self.password_1 = credentials[0]
        self.user_2, self.password_2 = credentials[1]

        ws_info = root_remote_client.fetch(TEST_WORKSPACE_PATH)
        self.workspace = ws_info[u'uid']
        self.workspace_title = ws_info[u'title']

        # Document client to be used to create remote test documents
        # and folders
        self.upload_tmp_dir = tempfile.mkdtemp(u'-nxdrive-uploads', dir=self.tmpdir)
        remote_document_client_1 = RemoteDocumentClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, base_folder=self.workspace,
            upload_tmp_dir=self.upload_tmp_dir)

        remote_document_client_2 = RemoteDocumentClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, base_folder=self.workspace,
            upload_tmp_dir=self.upload_tmp_dir)

        self.remote_restapi_client_1 = RestAPIClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1
        )
        self.remote_restapi_client_2 = RestAPIClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2
        )

        # File system client to be used to create remote test documents
        # and folders
        remote_file_system_client_1 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, upload_tmp_dir=self.upload_tmp_dir)

        remote_file_system_client_2 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, upload_tmp_dir=self.upload_tmp_dir)

        self.root_remote_client = root_remote_client
        self.remote_document_client_1 = remote_document_client_1
        self.remote_document_client_2 = remote_document_client_2
        self.remote_file_system_client_1 = remote_file_system_client_1
        self.remote_file_system_client_2 = remote_file_system_client_2

        self.local_client_1 = LocalClient(os.path.join(self.local_nxdrive_folder_1, self.workspace_title))
        self.local_client_2 = LocalClient(os.path.join(self.local_nxdrive_folder_2, self.workspace_title))
        ndrive_path = os.path.dirname(nxdrive.__file__)
        self.ndrive_exec = os.path.join(ndrive_path, '..', 'scripts', 'ndrive.py')
        cmdline_options = '--log-level-console=%s' % DEFAULT_CONSOLE_LOG_LEVEL
        cmdline_options += ' --log-level-file=TRACE'
        cmdline_options += ' --nxdrive-home="%s"'
        if os.environ.get('PYDEV_DEBUG') == 'True':
            cmdline_options += ' --debug-pydev'
        self.ndrive_1_options = cmdline_options % self.nxdrive_conf_folder_1
        self.ndrive_2_options = cmdline_options % self.nxdrive_conf_folder_2
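
Note the strict comparison on PYDEV_DEBUG above: environment values are always strings, so only the exact spelling 'True' enables the flag. Distilled into a sketch (the helper name is hypothetical; the variable is the one used above):

import os

def pydev_debug_enabled():
    # Environment values are strings; only the literal 'True' matches,
    # so 'true', '1' or 'yes' would leave the flag disabled.
    return os.environ.get('PYDEV_DEBUG') == 'True'

if pydev_debug_enabled():
    print('would append --debug-pydev to the command line')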

Example 35

Project: nuxeo-drive
Source File: common_unit_test.py
View license
    def setUpApp(self, server_profile=None):
        # Check the Nuxeo server test environment
        self.nuxeo_url = os.environ.get('NXDRIVE_TEST_NUXEO_URL')
        self.admin_user = os.environ.get('NXDRIVE_TEST_USER')
        self.password = os.environ.get('NXDRIVE_TEST_PASSWORD')
        self.build_workspace = os.environ.get('WORKSPACE')
        self.result = None
        self.tearedDown = False

        # Take default parameters if none have been set
        if self.nuxeo_url is None:
            self.nuxeo_url = "http://localhost:8080/nuxeo"
        if self.admin_user is None:
            self.admin_user = "Administrator"
        if self.password is None:
            self.password = "Administrator"
        self.tmpdir = None
        if self.build_workspace is not None:
            self.tmpdir = os.path.join(self.build_workspace, "tmp")
            if not os.path.isdir(self.tmpdir):
                os.makedirs(self.tmpdir)
        self.upload_tmp_dir = tempfile.mkdtemp(u'-nxdrive-uploads', dir=self.tmpdir)

        if None in (self.nuxeo_url, self.admin_user, self.password):
            raise unittest.SkipTest(
                "No integration server configuration found in environment.")

        # Check the local filesystem test environment
        self.local_test_folder_1 = tempfile.mkdtemp(u'drive-1', dir=self.tmpdir)
        self.local_test_folder_2 = tempfile.mkdtemp(u'drive-2', dir=self.tmpdir)

        self.local_nxdrive_folder_1 = os.path.join(
            self.local_test_folder_1, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_1)
        self.local_nxdrive_folder_2 = os.path.join(
            self.local_test_folder_2, u'Nuxeo Drive')
        os.mkdir(self.local_nxdrive_folder_2)

        self.nxdrive_conf_folder_1 = os.path.join(
            self.local_test_folder_1, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_1)
        self.nxdrive_conf_folder_2 = os.path.join(
            self.local_test_folder_2, u'nuxeo-drive-conf')
        os.mkdir(self.nxdrive_conf_folder_2)

        from mock import Mock
        options = Mock()
        options.debug = False
        options.delay = TEST_DEFAULT_DELAY
        options.force_locale = None
        options.proxy_server = None
        options.log_level_file = None
        options.update_site_url = None
        options.beta_update_site_url = None
        options.autolock_interval = 30
        options.nxdrive_home = self.nxdrive_conf_folder_1
        self.manager_1 = Manager(options)
        self.connected = False
        import nxdrive
        nxdrive_path = os.path.dirname(nxdrive.__file__)
        i18n_path = os.path.join(nxdrive_path, 'tests', 'resources', "i18n.js")
        Translator(self.manager_1, i18n_path)
        options.nxdrive_home = self.nxdrive_conf_folder_2
        Manager._singleton = None
        self.manager_2 = Manager(options)
        self.version = __version__
        url = self.nuxeo_url
        log.debug("Will use %s as url", url)
        if '#' in url:
            # Remove the engine type for the rest of the test
            self.nuxeo_url = url.split('#')[0]
        self.setUpServer(server_profile)

        self.engine_1 = self.manager_1.bind_server(self.local_nxdrive_folder_1, url, self.user_1,
                                                   self.password_1, start_engine=False)
        self.engine_2 = self.manager_2.bind_server(self.local_nxdrive_folder_2, url, self.user_2,
                                                   self.password_2, start_engine=False)
        self.engine_1.syncCompleted.connect(self.app.sync_completed)
        self.engine_1.get_remote_watcher().remoteScanFinished.connect(self.app.remote_scan_completed)
        self.engine_1.get_remote_watcher().changesFound.connect(self.app.remote_changes_found)
        self.engine_1.get_remote_watcher().noChangesFound.connect(self.app.no_remote_changes_found)
        self.engine_2.syncCompleted.connect(self.app.sync_completed)
        self.engine_2.get_remote_watcher().remoteScanFinished.connect(self.app.remote_scan_completed)
        self.engine_2.get_remote_watcher().changesFound.connect(self.app.remote_changes_found)
        self.engine_2.get_remote_watcher().noChangesFound.connect(self.app.no_remote_changes_found)
        self.queue_manager_1 = self.engine_1.get_queue_manager()
        self.queue_manager_2 = self.engine_2.get_queue_manager()

        self.sync_root_folder_1 = os.path.join(self.local_nxdrive_folder_1, self.workspace_title_1)
        self.sync_root_folder_2 = os.path.join(self.local_nxdrive_folder_2, self.workspace_title_2)

        self.local_root_client_1 = self.engine_1.get_local_client()
        self.local_root_client_2 = self.engine_2.get_local_client()

        self.local_client_1 = self.get_local_client(os.path.join(self.local_nxdrive_folder_1, self.workspace_title))
        self.local_client_2 = self.get_local_client(os.path.join(self.local_nxdrive_folder_2, self.workspace_title))

        # Document client to be used to create remote test documents
        # and folders
        remote_document_client_1 = RemoteDocumentClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, base_folder=self.workspace_1,
            upload_tmp_dir=self.upload_tmp_dir)

        remote_document_client_2 = RemoteDocumentClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, base_folder=self.workspace_2,
            upload_tmp_dir=self.upload_tmp_dir)
        # File system client to be used to create remote test documents
        # and folders
        remote_file_system_client_1 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1, upload_tmp_dir=self.upload_tmp_dir)

        remote_file_system_client_2 = RemoteFileSystemClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2, upload_tmp_dir=self.upload_tmp_dir)

        self.remote_restapi_client_1 = RestAPIClient(
            self.nuxeo_url, self.user_1, u'nxdrive-test-device-1',
            self.version,
            password=self.password_1
        )
        self.remote_restapi_client_2 = RestAPIClient(
            self.nuxeo_url, self.user_2, u'nxdrive-test-device-2',
            self.version,
            password=self.password_2
        )
        self.remote_restapi_client_admin = RestAPIClient(
            self.nuxeo_url, self.admin_user, u'nxdrive-test-device-2',
            self.version,
            password=self.password
        )

        # Register root
        remote_document_client_1.register_as_root(self.workspace_1)
        remote_document_client_2.register_as_root(self.workspace_2)

        self.remote_document_client_1 = remote_document_client_1
        self.remote_document_client_2 = remote_document_client_2
        self.remote_file_system_client_1 = remote_file_system_client_1
        self.remote_file_system_client_2 = remote_file_system_client_2

        self._wait_sync = {self.engine_1.get_uid(): True, self.engine_2.get_uid(): True}
        self._wait_remote_scan = {self.engine_1.get_uid(): True, self.engine_2.get_uid(): True}
        self._remote_changes_count = {self.engine_1.get_uid(): 0, self.engine_2.get_uid(): 0}
        self._no_remote_changes = {self.engine_1.get_uid(): False, self.engine_2.get_uid(): False}
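
The get-then-check-None sequence at the top of setUpApp can also be written by passing a default as the second argument to os.environ.get; the two are equivalent when the variable is unset (though not when it is set to an empty string). A sketch using the same keys and defaults as above:

import os

nuxeo_url = os.environ.get('NXDRIVE_TEST_NUXEO_URL',
                           'http://localhost:8080/nuxeo')
admin_user = os.environ.get('NXDRIVE_TEST_USER', 'Administrator')
password = os.environ.get('NXDRIVE_TEST_PASSWORD', 'Administrator')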

Example 36

Project: opbeat_python
Source File: base.py
View license
    def __init__(self, organization_id=None, app_id=None, secret_token=None,
                 transport_class=None, include_paths=None, exclude_paths=None,
                 timeout=None, hostname=None, auto_log_stacks=None, key=None,
                 string_max_length=None, list_max_length=None, processors=None,
                 filter_exception_types=None, servers=None, api_path=None,
                 async=None, async_mode=None, traces_send_freq_secs=None,
                 transactions_ignore_patterns=None, framework_version='',
                 **kwargs):
        # configure loggers first
        cls = self.__class__
        self.logger = logging.getLogger('%s.%s' % (cls.__module__,
            cls.__name__))
        self.error_logger = logging.getLogger('opbeat.errors')
        self.state = ClientState()

        if organization_id is None and os.environ.get('OPBEAT_ORGANIZATION_ID'):
            msg = "Configuring opbeat from environment variable 'OPBEAT_ORGANIZATION_ID'"
            self.logger.info(msg)
            organization_id = os.environ['OPBEAT_ORGANIZATION_ID']

        if app_id is None and os.environ.get('OPBEAT_APP_ID'):
            msg = "Configuring opbeat from environment variable 'OPBEAT_APP_ID'"
            self.logger.info(msg)
            app_id = os.environ['OPBEAT_APP_ID']

        if secret_token is None and os.environ.get('OPBEAT_SECRET_TOKEN'):
            msg = "Configuring opbeat from environment variable 'OPBEAT_SECRET_TOKEN'"
            self.logger.info(msg)
            secret_token = os.environ['OPBEAT_SECRET_TOKEN']

        self.servers = servers or defaults.SERVERS
        if async is not None and async_mode is None:
            warnings.warn(
                'Usage of "async" argument is deprecated. Use "async_mode"',
                category=DeprecationWarning,
                stacklevel=2,
            )
            async_mode = async
        self.async_mode = (async_mode is True
                           or (defaults.ASYNC_MODE and async_mode is not False))
        if not transport_class:
            transport_class = (defaults.ASYNC_TRANSPORT_CLASS
                               if self.async_mode
                               else defaults.SYNC_TRANSPORT_CLASS)
        self._transport_class = import_string(transport_class)
        self._transports = {}

        # servers may be set to None (for Django)
        if self.servers and not (organization_id and app_id and secret_token):
            msg = 'Missing configuration for Opbeat client. Please see documentation.'
            self.logger.info(msg)

        self.is_send_disabled = (
            os.environ.get('OPBEAT_DISABLE_SEND', '').lower() in ('1', 'true')
        )
        if self.is_send_disabled:
            self.logger.info(
                'Not sending any data to Opbeat due to OPBEAT_DISABLE_SEND '
                'environment variable'
            )

        self.include_paths = set(include_paths or defaults.INCLUDE_PATHS)
        self.exclude_paths = set(exclude_paths or defaults.EXCLUDE_PATHS)
        self.timeout = int(timeout or defaults.TIMEOUT)
        self.hostname = six.text_type(hostname or defaults.HOSTNAME)
        self.auto_log_stacks = bool(auto_log_stacks or
                                    defaults.AUTO_LOG_STACKS)

        self.string_max_length = int(string_max_length or
                                     defaults.MAX_LENGTH_STRING)
        self.list_max_length = int(list_max_length or defaults.MAX_LENGTH_LIST)
        self.traces_send_freq_secs = (traces_send_freq_secs or
                                      defaults.TRACES_SEND_FREQ_SECS)

        self.organization_id = six.text_type(organization_id)
        self.app_id = six.text_type(app_id)
        self.secret_token = six.text_type(secret_token)

        self.filter_exception_types_dict = {}
        for exc_to_filter in (filter_exception_types or []):
            exc_to_filter_type = exc_to_filter.split(".")[-1]
            exc_to_filter_module = ".".join(exc_to_filter.split(".")[:-1])
            self.filter_exception_types_dict[exc_to_filter_type] = exc_to_filter_module

        if processors is None:
            self.processors = defaults.PROCESSORS
        else:
            self.processors = processors

        self._framework_version = framework_version

        self.module_cache = ModuleProxyCache()

        self.instrumentation_store = RequestsStore(
            lambda: self.get_stack_info_for_trace(iter_stack_frames(), False),
            self.traces_send_freq_secs,
            transactions_ignore_patterns
        )
        atexit_register(self.close)
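
Two idioms in this constructor are worth isolating: an explicitly passed argument always wins over its environment variable, and boolean flags are parsed by lower-casing the raw string. A condensed sketch (both helper names are hypothetical):

import os

def resolve_setting(explicit, env_key):
    # An explicit value wins; the environment is only a fallback.
    if explicit is None and os.environ.get(env_key):
        return os.environ[env_key]
    return explicit

def env_flag(env_key):
    # Accepts '1' or 'true' in any case, like OPBEAT_DISABLE_SEND above.
    return os.environ.get(env_key, '').lower() in ('1', 'true')

organization_id = resolve_setting(None, 'OPBEAT_ORGANIZATION_ID')
send_disabled = env_flag('OPBEAT_DISABLE_SEND')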

Example 37

Project: fuel-devops
Source File: shell.py
View license
    def get_params(self):
        name_parser = argparse.ArgumentParser(add_help=False)

        name_parser.add_argument('name', help='environment name',
                                 default=os.environ.get('ENV_NAME'),
                                 metavar='ENV_NAME')
        group_name_parser = argparse.ArgumentParser(add_help=False)

        group_name_parser.add_argument('--group-name', help='group name',
                                       default='default')
        env_config_name_parser = argparse.ArgumentParser(add_help=False)
        env_config_name_parser.add_argument('env_config_name',
                                            help='environment template name',
                                            default=os.environ.get(
                                                'DEVOPS_SETTINGS_TEMPLATE'))

        snapshot_name_parser = argparse.ArgumentParser(add_help=False)
        snapshot_name_parser.add_argument('snapshot_name',
                                          help='snapshot name',
                                          default=os.environ.get(
                                              'SNAPSHOT_NAME'))

        node_name_parser = argparse.ArgumentParser(add_help=False)
        node_name_parser.add_argument('--node-name', '-N',
                                      help='node name',
                                      default=None)
        no_timesync_parser = argparse.ArgumentParser(add_help=False)
        no_timesync_parser.add_argument('--no-timesync', dest='no_timesync',
                                        action='store_const', const=True,
                                        help='revert without timesync',
                                        default=False)

        list_ips_parser = argparse.ArgumentParser(add_help=False)
        list_ips_parser.add_argument('--ips', dest='list_ips',
                                     action='store_const', const=True,
                                     help='show admin node ip addresses',
                                     default=False)
        timestamps_parser = argparse.ArgumentParser(add_help=False)
        timestamps_parser.add_argument('--timestamps', dest='timestamps',
                                       action='store_const', const=True,
                                       help='show creation timestamps',
                                       default=False)
        iso_path_parser = argparse.ArgumentParser(add_help=False)
        iso_path_parser.add_argument('--iso-path', '-I', dest='iso_path',
                                     help='Set Fuel ISO path',
                                     required=True)
        admin_ram_parser = argparse.ArgumentParser(add_help=False)
        admin_ram_parser.add_argument('--admin-ram', dest='admin_ram_size',
                                      help='Select admin node RAM size (MB)',
                                      default=1536, type=int)
        admin_vcpu_parser = argparse.ArgumentParser(add_help=False)
        admin_vcpu_parser.add_argument('--admin-vcpu', dest='admin_vcpu_count',
                                       help='Select admin node VCPU count',
                                       default=2, type=int)
        change_admin_ram_parser = argparse.ArgumentParser(add_help=False)
        change_admin_ram_parser.add_argument('--admin-ram',
                                             dest='admin_ram_size',
                                             help='Select admin node RAM '
                                             'size (MB)',
                                             default=None, type=int)
        change_admin_vcpu_parser = argparse.ArgumentParser(add_help=False)
        change_admin_vcpu_parser.add_argument('--admin-vcpu',
                                              dest='admin_vcpu_count',
                                              help='Select admin node VCPU '
                                              'count',
                                              default=None, type=int)
        admin_disk_size_parser = argparse.ArgumentParser(add_help=False)
        admin_disk_size_parser.add_argument('--admin-disk-size',
                                            dest='admin_disk_size',
                                            help='Set admin node disk '
                                                 'size (GB)',
                                            default=50, type=int)
        admin_setup_iface_parser = argparse.ArgumentParser(add_help=False)
        admin_setup_iface_parser.add_argument('--iface',
                                              dest='iface',
                                              help='Static network interface '
                                                   'to use when configuring '
                                                   'the admin node. Should '
                                                   'be eth0 or enp0s3',
                                              default='enp0s3')
        admin_setup_boot_from_parser = argparse.ArgumentParser(add_help=False)
        admin_setup_boot_from_parser.add_argument(
            '--boot-from', dest='boot_from', default='cdrom',
            help='Set device to boot from for admin node. '
            'Should be cdrom or usb')
        ram_parser = argparse.ArgumentParser(add_help=False)
        ram_parser.add_argument('--ram', dest='ram_size',
                                help='Set node RAM size',
                                default=1024, type=int)
        vcpu_parser = argparse.ArgumentParser(add_help=False)
        vcpu_parser.add_argument('--vcpu', dest='vcpu_count',
                                 help='Set node VCPU count',
                                 default=1, type=int)

        change_ram_parser = argparse.ArgumentParser(add_help=False)
        change_ram_parser.add_argument('--ram', dest='ram_size',
                                       help='Set node RAM size',
                                       default=None, type=int)
        change_vcpu_parser = argparse.ArgumentParser(add_help=False)
        change_vcpu_parser.add_argument('--vcpu', dest='vcpu_count',
                                        help='Set node VCPU count',
                                        default=None, type=int)
        node_count = argparse.ArgumentParser(add_help=False)
        node_count.add_argument('--node-count', '-C', dest='node_count',
                                help='How many nodes will be created',
                                default=1, type=int)
        net_pool = argparse.ArgumentParser(add_help=False)
        net_pool.add_argument('--net-pool', '-P', dest='net_pool',
                              help='Set ip network pool (cidr)',
                              default="10.21.0.0/16:24", type=str)
        second_disk_size = argparse.ArgumentParser(add_help=False)
        second_disk_size.add_argument('--second-disk-size',
                                      dest='second_disk_size',
                                      help='Allocate second disk for node '
                                           'with selected size (GB). '
                                           'If set to 0, the disk will not be '
                                           'allocated',
                                      default=50, type=int)
        third_disk_size = argparse.ArgumentParser(add_help=False)
        third_disk_size.add_argument('--third-disk-size',
                                     dest='third_disk_size',
                                     help='Allocate the third disk for node '
                                          'with selected size (GB). '
                                          'If set to 0, the disk will not be '
                                          'allocated',
                                     default=50, type=int)
        parser = argparse.ArgumentParser(
            description="Manage virtual environments. "
                        "For additional help, use with -h/--help option")
        subparsers = parser.add_subparsers(title="Operation commands",
                                           help='available commands',
                                           dest='command')
        subparsers.add_parser('list',
                              parents=[list_ips_parser, timestamps_parser],
                              help="Show virtual environments",
                              description="Show virtual environments on host")
        subparsers.add_parser('show', parents=[name_parser],
                              help="Show VMs in environment",
                              description="Show VMs in environment")
        subparsers.add_parser('erase', parents=[name_parser],
                              help="Delete environment",
                              description="Delete environment and VMs on it")
        subparsers.add_parser('start', parents=[name_parser],
                              help="Start VMs",
                              description="Start VMs in selected environment")
        subparsers.add_parser('destroy', parents=[name_parser],
                              help="Destroy(stop) VMs",
                              description="Stop VMs in selected environment")
        subparsers.add_parser('suspend', parents=[name_parser],
                              help="Suspend VMs",
                              description="Suspend VMs in selected "
                              "environment")
        subparsers.add_parser('resume', parents=[name_parser],
                              help="Resume VMs",
                              description="Resume VMs in selected environment")
        subparsers.add_parser('revert',
                              parents=[name_parser, snapshot_name_parser],
                              help="Apply snapshot to environment",
                              description="Apply selected snapshot to "
                              "environment")
        subparsers.add_parser('snapshot',
                              parents=[name_parser, snapshot_name_parser],
                              help="Make environment snapshot",
                              description="Make environment snapshot")
        subparsers.add_parser('sync',
                              help="Synchronize environment and devops",
                              description="Synchronize environment "
                              "and devops")
        subparsers.add_parser('snapshot-list',
                              parents=[name_parser],
                              help="Show snapshots in environment",
                              description="Show snapshots in selected "
                              "environment")
        subparsers.add_parser('snapshot-delete',
                              parents=[name_parser, snapshot_name_parser],
                              help="Delete snapshot from environment",
                              description="Delete snapshot from selected "
                              "environment")
        subparsers.add_parser('net-list',
                              parents=[name_parser],
                              help="Show networks in environment",
                              description="Display allocated networks for "
                              "environment")
        subparsers.add_parser('time-sync',
                              parents=[name_parser, node_name_parser],
                              help="Sync time on all env nodes",
                              description="Sync time on all active nodes "
                                          "of environment starting from "
                                          "admin")
        subparsers.add_parser('revert-resume',
                              parents=[name_parser, snapshot_name_parser,
                                       node_name_parser, no_timesync_parser],
                              help="Revert, resume, sync time on VMs",
                              description="Revert and resume VMs in selected "
                                          "environment, then "
                                          "sync time on VMs")
        subparsers.add_parser('version',
                              help="Show devops version")
        subparsers.add_parser('create',
                              parents=[name_parser, vcpu_parser,
                                       node_count, ram_parser,
                                       net_pool, iso_path_parser,
                                       admin_disk_size_parser,
                                       admin_ram_parser,
                                       admin_vcpu_parser,
                                       second_disk_size,
                                       third_disk_size],
                              help="Create a new environment (DEPRECATED)",
                              description="Create an environment by using "
                                          "cli options")
        subparsers.add_parser('create-env',
                              parents=[env_config_name_parser],
                              help="Create a new environment",
                              description="Create an environment from a "
                                          "template file")
        subparsers.add_parser('slave-add',
                              parents=[name_parser, node_count,
                                       ram_parser, vcpu_parser,
                                       second_disk_size, third_disk_size,
                                       group_name_parser],
                              help="Add a node",
                              description="Add a new node to environment")
        subparsers.add_parser('slave-change',
                              parents=[name_parser, node_name_parser,
                                       change_ram_parser, change_vcpu_parser],
                              help="Change node VCPU and memory config",
                              description="Change count of VCPUs and memory")
        subparsers.add_parser('slave-remove',
                              parents=[name_parser, node_name_parser],
                              help="Remove node from environment",
                              description="Remove selected node from "
                              "environment")
        subparsers.add_parser('admin-setup',
                              parents=[name_parser, admin_setup_iface_parser,
                                       admin_setup_boot_from_parser],
                              help="Setup admin node",
                              description="Setup admin node from ISO")
        subparsers.add_parser('admin-change',
                              parents=[name_parser, change_admin_ram_parser,
                                       change_admin_vcpu_parser],
                              help="Change admin node VCPU and memory config",
                              description="Change count of VCPUs and memory "
                                          "for admin node")
        subparsers.add_parser('node-start',
                              parents=[name_parser, node_name_parser],
                              help="Start node in environment",
                              description="Start a separate node in "
                                          "environment")
        subparsers.add_parser('node-destroy',
                              parents=[name_parser, node_name_parser],
                              help="Destroy (power off) node in environment",
                              description="Destroy a separate node in "
                                          "environment")
        subparsers.add_parser('node-reset',
                              parents=[name_parser, node_name_parser],
                              help="Reset (restart) node in environment",
                              description="Reset a separate node in "
                                          "environment")
        if len(self.args) == 0:
            self.args = ['-h']
        return parser.parse_args(self.args)
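
The parsers above seed argument defaults straight from the environment (ENV_NAME, SNAPSHOT_NAME, DEVOPS_SETTINGS_TEMPLATE). One caveat when reusing the idiom: argparse only honours a default on a positional argument when nargs='?' is given, which this sketch adds as an assumption:

import argparse
import os

parser = argparse.ArgumentParser()
# nargs='?' lets the positional fall back to ENV_NAME when it is
# omitted on the command line.
parser.add_argument('name', nargs='?',
                    default=os.environ.get('ENV_NAME'),
                    metavar='ENV_NAME', help='environment name')
args = parser.parse_args()
print(args.name)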

Example 38

Project: python-novaclient
Source File: base.py
View license
    def setUp(self):
        super(ClientTestBase, self).setUp()

        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
                os.environ.get('OS_LOG_CAPTURE') != '0'):
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=self.log_format,
                                                   level=None))

        # Collecting of credentials:
        #
        # Grab the cloud config from a user's clouds.yaml file.
        # First look for a functional_admin cloud, as this is a cloud
        # that the user may have defined for functional testing that has
        # admin credentials.
        # If that is not found, get the devstack config and override the
        # username and project_name to be admin so that admin credentials
        # will be used.
        #
        # Finally, fall back to looking for environment variables to support
        # existing users running these tests the old way. We should deprecate
        # that as tox 2.0 blanks out the environment.
        #
        # TODO(sdague): while we collect this information in
        # tempest-lib, we do it in a way that's not available for top
        # level tests. Long term this probably needs to be in the base
        # class.
        openstack_config = os_client_config.config.OpenStackConfig()
        try:
            cloud_config = openstack_config.get_one_cloud('functional_admin')
        except os_client_config.exceptions.OpenStackConfigException:
            try:
                cloud_config = openstack_config.get_one_cloud(
                    'devstack', auth=dict(
                        username='admin', project_name='admin'))
            except os_client_config.exceptions.OpenStackConfigException:
                try:
                    cloud_config = openstack_config.get_one_cloud('envvars')
                except os_client_config.exceptions.OpenStackConfigException:
                    cloud_config = None

        if cloud_config is None:
            raise NoCloudConfigException(
                "Could not find a cloud named functional_admin or a cloud"
                " named devstack. Please check your clouds.yaml file and"
                " try again.")
        auth_info = cloud_config.config['auth']

        user = auth_info['username']
        passwd = auth_info['password']
        tenant = auth_info['project_name']
        auth_url = auth_info['auth_url']
        user_domain_id = auth_info['user_domain_id']
        self.project_domain_id = auth_info['project_domain_id']

        if 'insecure' in cloud_config.config:
            self.insecure = cloud_config.config['insecure']
        else:
            self.insecure = False

        auth = identity.Password(username=user,
                                 password=passwd,
                                 project_name=tenant,
                                 auth_url=auth_url,
                                 project_domain_id=self.project_domain_id,
                                 user_domain_id=user_domain_id)
        session = ksession.Session(auth=auth, verify=(not self.insecure))

        self.client = self._get_novaclient(session)

        self.glance = glanceclient.Client('2', session=session)

        # pick some reasonable flavor / image combo
        if "flavor" not in CACHE:
            CACHE["flavor"] = pick_flavor(self.client.flavors.list())
        if "image" not in CACHE:
            CACHE["image"] = pick_image(self.glance.images.list())
        self.flavor = CACHE["flavor"]
        self.image = CACHE["image"]

        if "network" not in CACHE:
            tested_api_version = self.client.api_version
            proxy_api_version = novaclient.api_versions.APIVersion('2.35')
            if tested_api_version > proxy_api_version:
                self.client.api_version = proxy_api_version
            try:
                # TODO(mriedem): Get the networks from neutron if using neutron
                CACHE["network"] = pick_network(self.client.networks.list())
            finally:
                self.client.api_version = tested_api_version
        self.network = CACHE["network"]

        # create a CLI client in case we'd like to do CLI
        # testing. tempest.lib does this really weird thing where it
        # builds a giant factory of all the CLIs that it knows
        # about. Eventually that should really be unwound into
        # something more sensible.
        cli_dir = os.environ.get(
            'OS_NOVACLIENT_EXEC_DIR',
            os.path.join(os.path.abspath('.'), '.tox/functional/bin'))

        self.cli_clients = tempest.lib.cli.base.CLIClient(
            username=user,
            password=passwd,
            tenant_name=tenant,
            uri=auth_url,
            cli_dir=cli_dir,
            insecure=self.insecure)

        self.keystone = keystoneclient.Client(session=session,
                                              username=user,
                                              password=passwd)
        self.cinder = cinderclient.Client(auth=auth, session=session)

        if "use_neutron" not in CACHE:
            # check to see if we're running with neutron or not
            for service in self.keystone.services.list():
                if service.type == 'network':
                    CACHE["use_neutron"] = True
                    break
            else:
                CACHE["use_neutron"] = False
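
OS_TEST_TIMEOUT above is read with a default of 0 and coerced with int(), falling back to 0 when the value is malformed. The same guard as a standalone helper (the function name is hypothetical):

import os

def env_int(key, default=0):
    # os.environ.get returns a string (or the default), so coerce
    # defensively: a malformed value falls back instead of crashing.
    try:
        return int(os.environ.get(key, default))
    except ValueError:
        return default

timeout = env_int('OS_TEST_TIMEOUT')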

Example 39

Project: swift
Source File: __init__.py
View license
def in_process_setup(the_object_server=object_server):
    _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
    _info('Using object_server class: %s' % the_object_server.__name__)
    conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
    show_debug_logs = os.environ.get('SWIFT_TEST_DEBUG_LOGS')

    if conf_src_dir is not None:
        if not os.path.isdir(conf_src_dir):
            msg = 'Config source %s is not a dir' % conf_src_dir
            raise InProcessException(msg)
        _info('Using config source dir: %s' % conf_src_dir)

    # If SWIFT_TEST_IN_PROCESS_CONF_DIR specifies a config source dir then
    # prefer config files from there, otherwise read config from source tree
    # sample files. A mixture of files from the two sources is allowed.
    proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
    _info('Using proxy config from %s' % proxy_conf)
    swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
    _info('Using swift config from %s' % swift_conf_src)

    monkey_patch_mimetools()

    global _testdir
    _testdir = os.path.join(mkdtemp(), 'tmp_functional')
    utils.mkdirs(_testdir)
    rmtree(_testdir)
    utils.mkdirs(os.path.join(_testdir, 'sda1'))
    utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))

    # Call the associated method for the value of
    # 'SWIFT_TEST_IN_PROCESS_CONF_LOADER', if one exists
    conf_loader_label = os.environ.get(
        'SWIFT_TEST_IN_PROCESS_CONF_LOADER')
    if conf_loader_label is not None:
        try:
            conf_loader = conf_loaders[conf_loader_label]
            _debug('Calling method %s mapped to conf loader %s' %
                   (conf_loader.__name__, conf_loader_label))
        except KeyError as missing_key:
            raise InProcessException('No function mapped for conf loader %s' %
                                     missing_key)

        try:
            # Pass-in proxy_conf
            proxy_conf = conf_loader(proxy_conf)
            _debug('Now using proxy conf %s' % proxy_conf)
        except Exception as err:  # noqa
            raise InProcessException(err)

    swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
    obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)

    global orig_swift_conf_name
    orig_swift_conf_name = utils.SWIFT_CONF_FILE
    utils.SWIFT_CONF_FILE = swift_conf
    constraints.reload_constraints()
    storage_policy.SWIFT_CONF_FILE = swift_conf
    storage_policy.reload_storage_policies()
    global config
    if constraints.SWIFT_CONSTRAINTS_LOADED:
        # Use the swift constraints that are loaded for the test framework
        # configuration
        _c = dict((k, str(v))
                  for k, v in constraints.EFFECTIVE_CONSTRAINTS.items())
        config.update(_c)
    else:
        # In-process swift constraints were not loaded, something's wrong
        raise SkipTest
    global orig_hash_path_suff_pref
    orig_hash_path_suff_pref = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
    utils.validate_hash_conf()

    global _test_socks
    _test_socks = []
    # We create the proxy server listening socket to get its port number so
    # that we can add it as the "auth_port" value for the functional test
    # clients.
    prolis = eventlet.listen(('localhost', 0))
    _test_socks.append(prolis)

    # The following set of configuration values is used both for the
    # functional test frame work and for the various proxy, account, container
    # and object servers.
    config.update({
        # Values needed by the various in-process swift servers
        'devices': _testdir,
        'swift_dir': _testdir,
        'mount_check': 'false',
        'client_timeout': '4',
        'allow_account_management': 'true',
        'account_autocreate': 'true',
        'allow_versions': 'True',
        'allow_versioned_writes': 'True',
        # Below are values used by the functional test framework, as well as
        # by the various in-process swift servers
        'auth_host': '127.0.0.1',
        'auth_port': str(prolis.getsockname()[1]),
        'auth_ssl': 'no',
        'auth_prefix': '/auth/',
        # Primary functional test account (needs admin access to the
        # account)
        'account': 'test',
        'username': 'tester',
        'password': 'testing',
        # User on a second account (needs admin access to the account)
        'account2': 'test2',
        'username2': 'tester2',
        'password2': 'testing2',
        # User on same account as first, but without admin access
        'username3': 'tester3',
        'password3': 'testing3',
        # Service user and prefix (emulates glance, cinder, etc. user)
        'account5': 'test5',
        'username5': 'tester5',
        'password5': 'testing5',
        'service_prefix': 'SERVICE',
        # For tempauth middleware. Update reseller_prefix
        'reseller_prefix': 'AUTH, SERVICE',
        'SERVICE_require_group': 'service',
        # Reseller admin user (needs reseller_admin_role)
        'account6': 'test6',
        'username6': 'tester6',
        'password6': 'testing6'
    })

    # If an env var explicitly specifies the proxy-server object_post_as_copy
    # option then use its value, otherwise leave default config unchanged.
    object_post_as_copy = os.environ.get(
        'SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY')
    if object_post_as_copy is not None:
        object_post_as_copy = config_true_value(object_post_as_copy)
        config['object_post_as_copy'] = str(object_post_as_copy)
        _debug('Setting object_post_as_copy to %r' % object_post_as_copy)

    acc1lis = eventlet.listen(('localhost', 0))
    acc2lis = eventlet.listen(('localhost', 0))
    con1lis = eventlet.listen(('localhost', 0))
    con2lis = eventlet.listen(('localhost', 0))
    _test_socks += [acc1lis, acc2lis, con1lis, con2lis] + obj_sockets

    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    with closing(GzipFile(account_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': acc1lis.getsockname()[1]},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': acc2lis.getsockname()[1]}], 30),
                    f)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    with closing(GzipFile(container_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': con1lis.getsockname()[1]},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': con2lis.getsockname()[1]}], 30),
                    f)

    eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging of requests by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_request = lambda *a: None
    logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
    # Redirect logging of other messages by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    # Default to only 4 seconds for in-process functional test runs
    eventlet.wsgi.WRITE_TIMEOUT = 4

    def get_logger_name(name):
        if show_debug_logs:
            return debug_logger(name)
        else:
            return None

    acc1srv = account_server.AccountController(
        config, logger=get_logger_name('acct1'))
    acc2srv = account_server.AccountController(
        config, logger=get_logger_name('acct2'))
    con1srv = container_server.ContainerController(
        config, logger=get_logger_name('cont1'))
    con2srv = container_server.ContainerController(
        config, logger=get_logger_name('cont2'))

    objsrvs = [
        (obj_sockets[index],
         the_object_server.ObjectController(
             config, logger=get_logger_name('obj%d' % (index + 1))))
        for index in range(len(obj_sockets))
    ]

    if show_debug_logs:
        logger = debug_logger('proxy')

    def get_logger(name, *args, **kwargs):
        return logger

    with mock.patch('swift.common.utils.get_logger', get_logger):
        with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
                        FakeMemcacheMiddleware):
            try:
                app = loadapp(proxy_conf, global_conf=config)
            except Exception as e:
                raise InProcessException(e)

    nl = utils.NullLogger()
    global proxy_srv
    proxy_srv = prolis
    prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
    acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl)
    acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
    con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
    con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)

    objspa = [eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl)
              for objsrv in objsrvs]

    global _test_coros
    _test_coros = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)

    # Create accounts "test" and "test2"
    def create_account(act):
        ts = utils.normalize_timestamp(time())
        account_ring = Ring(_testdir, ring_name='account')
        partition, nodes = account_ring.get_nodes(act)
        for node in nodes:
            # Note: we are just using the http_connect method in the object
            # controller here to talk to the account server nodes.
            conn = swift.proxy.controllers.obj.http_connect(
                node['ip'], node['port'], node['device'], partition, 'PUT',
                '/' + act, {'X-Timestamp': ts, 'x-trans-id': act})
            resp = conn.getresponse()
            assert(resp.status == 201)

    create_account('AUTH_test')
    create_account('AUTH_test2')
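
in_process_setup only overrides object_post_as_copy when the environment variable is actually present, which keeps "unset" distinct from "set to a falsy value". The decision condensed, with swift's config_true_value replaced by a hypothetical stand-in:

import os

config = {}

def truthy(value):
    # Hypothetical stand-in for swift's config_true_value helper.
    return str(value).lower() in ('1', 'true', 'yes', 'on')

raw = os.environ.get('SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY')
if raw is not None:
    # Only a present value overrides the default; absence leaves it alone.
    config['object_post_as_copy'] = str(truthy(raw))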

Example 40

Project: swift
Source File: __init__.py
View license
def setup_package():

    global policy_specified
    policy_specified = os.environ.get('SWIFT_TEST_POLICY')
    in_process_env = os.environ.get('SWIFT_TEST_IN_PROCESS')
    if in_process_env is not None:
        use_in_process = utils.config_true_value(in_process_env)
    else:
        use_in_process = None

    global in_process

    global config
    if use_in_process:
        # Explicitly set to True, so barrel on ahead with in-process
        # functional test setup.
        in_process = True
        # NOTE: No attempt is made to read a local test.conf file.
    else:
        if use_in_process is None:
            # Not explicitly set, default to using in-process functional tests
            # if the test.conf file is not found, or does not provide a usable
            # configuration.
            config.update(get_config('func_test'))
            if not config:
                in_process = True
            # else... leave in_process value unchanged. It may be that
            # setup_package is called twice, in which case in_process_setup may
            # have loaded config before we reach here a second time, so the
            # existence of config is not a reliable way to determine that
            # in_process should be False. Anyway, its default value is False.
        else:
            # Explicitly set to False, do not attempt to use in-process
            # functional tests, be sure we attempt to read from local
            # test.conf file.
            in_process = False
            config.update(get_config('func_test'))

    if in_process:
        in_mem_obj_env = os.environ.get('SWIFT_TEST_IN_MEMORY_OBJ')
        in_mem_obj = utils.config_true_value(in_mem_obj_env)
        try:
            in_process_setup(the_object_server=(
                mem_object_server if in_mem_obj else object_server))
        except InProcessException as exc:
            print(('Exception during in-process setup: %s'
                   % str(exc)), file=sys.stderr)
            raise

    global web_front_end
    web_front_end = config.get('web_front_end', 'integral')
    global normalized_urls
    normalized_urls = config.get('normalized_urls', False)

    global orig_collate
    orig_collate = locale.setlocale(locale.LC_COLLATE)
    locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))

    global insecure
    insecure = config_true_value(config.get('insecure', False))

    global swift_test_auth_version
    global swift_test_auth
    global swift_test_user
    global swift_test_key
    global swift_test_tenant
    global swift_test_perm
    global swift_test_domain
    global swift_test_service_prefix

    swift_test_service_prefix = None

    if config:
        swift_test_auth_version = str(config.get('auth_version', '1'))

        swift_test_auth = 'http'
        if config_true_value(config.get('auth_ssl', 'no')):
            swift_test_auth = 'https'
        if 'auth_prefix' not in config:
            config['auth_prefix'] = '/'
        try:
            suffix = '://%(auth_host)s:%(auth_port)s%(auth_prefix)s' % config
            swift_test_auth += suffix
        except KeyError:
            pass  # skip

        if 'service_prefix' in config:
            swift_test_service_prefix = utils.append_underscore(
                config['service_prefix'])

        if swift_test_auth_version == "1":
            swift_test_auth += 'v1.0'

            try:
                if 'account' in config:
                    swift_test_user[0] = '%(account)s:%(username)s' % config
                else:
                    swift_test_user[0] = '%(username)s' % config
                swift_test_key[0] = config['password']
            except KeyError:
                # bad config, no account/username configured, tests cannot be
                # run
                pass
            try:
                swift_test_user[1] = '%s%s' % (
                    '%s:' % config['account2'] if 'account2' in config else '',
                    config['username2'])
                swift_test_key[1] = config['password2']
            except KeyError:
                pass  # old config, no second account tests can be run
            try:
                swift_test_user[2] = '%s%s' % (
                    '%s:' % config['account'] if 'account'
                    in config else '', config['username3'])
                swift_test_key[2] = config['password3']
            except KeyError:
                pass  # old config, no third account tests can be run
            try:
                swift_test_user[4] = '%s%s' % (
                    '%s:' % config['account5'], config['username5'])
                swift_test_key[4] = config['password5']
                swift_test_tenant[4] = config['account5']
            except KeyError:
                pass  # no service token tests can be run

            for _ in range(3):
                swift_test_perm[_] = swift_test_user[_]

        else:
            swift_test_user[0] = config['username']
            swift_test_tenant[0] = config['account']
            swift_test_key[0] = config['password']
            swift_test_user[1] = config['username2']
            swift_test_tenant[1] = config['account2']
            swift_test_key[1] = config['password2']
            swift_test_user[2] = config['username3']
            swift_test_tenant[2] = config['account']
            swift_test_key[2] = config['password3']
            if 'username4' in config:
                swift_test_user[3] = config['username4']
                swift_test_tenant[3] = config['account4']
                swift_test_key[3] = config['password4']
                swift_test_domain[3] = config['domain4']
            if 'username5' in config:
                swift_test_user[4] = config['username5']
                swift_test_tenant[4] = config['account5']
                swift_test_key[4] = config['password5']
            if 'username6' in config:
                swift_test_user[5] = config['username6']
                swift_test_tenant[5] = config['account6']
                swift_test_key[5] = config['password6']

            for _ in range(5):
                swift_test_perm[_] = swift_test_tenant[_] + ':' \
                    + swift_test_user[_]

    global skip
    skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]])
    if skip:
        print('SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG', file=sys.stderr)

    global skip2
    skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]])
    if not skip and skip2:
        print('SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS '
              'DUE TO NO CONFIG FOR THEM', file=sys.stderr)

    global skip3
    skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]])
    if not skip and skip3:
        print('SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS '
              'DUE TO NO CONFIG FOR THEM', file=sys.stderr)

    global skip_if_not_v3
    skip_if_not_v3 = (swift_test_auth_version != '3'
                      or not all([not skip,
                                  swift_test_user[3],
                                  swift_test_key[3]]))
    if not skip and skip_if_not_v3:
        print('SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3',
              file=sys.stderr)

    global skip_service_tokens
    skip_service_tokens = not all([not skip, swift_test_user[4],
                                   swift_test_key[4], swift_test_tenant[4],
                                   swift_test_service_prefix])
    if not skip and skip_service_tokens:
        print(
            'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS',
            file=sys.stderr)

    if policy_specified:
        policies = FunctionalStoragePolicyCollection.from_info()
        for p in policies:
            # policy names are case-insensitive
            if policy_specified.lower() == p['name'].lower():
                _info('Using specified policy %s' % policy_specified)
                FunctionalStoragePolicyCollection.policy_specified = p
                Container.policy_specified = policy_specified
                break
        else:
            _info(
                'SKIPPING FUNCTIONAL TESTS: Failed to find specified policy %s'
                % policy_specified)
            raise Exception('Failed to find specified policy %s'
                            % policy_specified)

    global skip_if_no_reseller_admin
    skip_if_no_reseller_admin = not all([not skip, swift_test_user[5],
                                         swift_test_key[5],
                                         swift_test_tenant[5]])
    if not skip and skip_if_no_reseller_admin:
        print(
            'SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG FOR RESELLER ADMIN',
            file=sys.stderr)

    get_cluster_info()
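
The tri-state handling of SWIFT_TEST_IN_PROCESS above is worth calling out: an unset variable (os.environ.get returns None) is deliberately kept distinct from an explicitly set true/false value, so the code falls back to other heuristics only when nothing was specified. A minimal sketch of the same idea, with illustrative names:

import os

def tristate_flag(name):
    """True/False for an explicitly set variable, None when unset."""
    raw = os.environ.get(name)  # None if the variable is absent
    if raw is None:
        return None
    # roughly mirrors the config_true_value semantics used in the example
    return raw.lower() in ('1', 'true', 'yes', 'on')

# Unset -> None (caller picks a default); set -> an explicit bool.
use_in_process = tristate_flag('SWIFT_TEST_IN_PROCESS')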

Example 41

Project: ramona
Source File: config.py
View license
def read_config(configs=None, use_env=True):
	global config
	assert len(config.sections()) == 0

	# Prepare platform selector regex
	psrg = re.compile('^(.*)@(.*)$')

	# Load config_defaults
	psdefaults = []
	for section, items in config_defaults.iteritems():
		if not config.has_section(section):
			config.add_section(section)

		for key, val in items.iteritems():
			r = psrg.match(key)
			if r is None:
				config.set(section, key, val)
			else:
				if r.group(2) != config_platform_selector: continue
				psdefaults.append((section, r.group(1), val))

	# Handle platform selectors in config_defaults
	for section, key, val in psdefaults:
		config.set(section, key, val)


	# Load configuration files
	global config_files

	if configs is not None: configs = configs[:]
	else: configs = []
	if use_env:
		# Configs from environment variables
		config_envs = os.environ.get('RAMONA_CONFIG')
		if config_envs is not None:
			for config_file in config_envs.split(os.pathsep):
				configs.append(config_file)

	for cfile in configs:
		rfile = os.path.abspath(os.path.expanduser(cfile))
		if os.path.isfile(rfile):
			config_files.append(rfile)
		config.read([rfile])


	# Handle includes ...
	appname = config.get('general','appname')
	for _ in range(100):
		includes = config.get('general','include')
		if includes == '': break
		config.set('general','include','')
		includes = includes.split(';')
		for i in xrange(len(includes)-1,-1,-1):
			include = includes[i] = includes[i].strip()
			if include == '<siteconf>':
				# These are platform specific
				siteconfs = [
					'./site.conf',
					'./{}-site.conf'.format(appname),
					'/etc/{0}.conf'.format(appname),
					'~/.{0}.conf'.format(appname),
				]
				includes[i:i+1] = siteconfs
			elif include[:1] == '<':
				print('WARNING: Unknown include fragment: {0}'.format(include), file=sys.stderr)
				continue

		for include in includes:
			rinclude = os.path.abspath(os.path.expanduser(include))
			if os.path.isfile(rinclude):
				config_includes.append(rinclude)
				config.read([rinclude])

	else:
		raise RuntimeError("FATAL: It looks like we have loop in configuration includes!")

	# Treat platform selector alternatives
	if config_platform_selector is not None and config_platform_selector != '':
		for section in config.sections():
			for name, value in config.items(section):
				r = psrg.match(name)
				if r is None: continue
				if (r.group(2) != config_platform_selector): continue
				config.set(section, r.group(1), value)

	# Special treatment of some values
	if config.get('general', 'logdir') == '<env>':
		logdir = os.environ.get('LOGDIR')
		if logdir is None: logdir = os.curdir
		logdir = os.path.expanduser(logdir)
		config.set('general','logdir',logdir)
	elif config.get('general', 'logdir').strip()[:1] == '<':
		raise RuntimeError("FATAL: Unknown magic value in [general] logdir: '{}'".format(config.get('general', 'logdir')))
	
	for (sec, valname) in (("ramona:server", "consoleuri"), ("ramona:notify", "delivery")):
		if ";" in config.get(sec, valname):
			print(
				"WARNING: ';' character was found in URI: {}. Please note that ';' has been replaced '?' in Ramona 1.0. This can lead to Ramona apparent freeze during start.".format(
					config.get(sec, valname)
				),
				file=sys.stderr
			)

	stashdir = config.get('ramona:notify', 'stashdir')
	if stashdir != '<none>':
		if not os.path.isdir(stashdir):
			os.makedirs(stashdir)
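
read_config above accepts several configuration files in a single RAMONA_CONFIG variable by splitting on os.pathsep, the same separator used by PATH. A minimal sketch of that pattern (the function name is illustrative):

import os

def configs_from_env(var='RAMONA_CONFIG'):
    """Split a PATH-style variable into a list of config file names."""
    raw = os.environ.get(var)
    if raw is None:
        return []
    # os.pathsep is ':' on POSIX and ';' on Windows
    return [part for part in raw.split(os.pathsep) if part]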

Example 42

Project: authomatic
Source File: test_providers.py
View license
def login(request, browser, app, attempt=1):
    """Runs for each provider."""
    success = False
    provider_name, provider = request.param
    log(1, provider_name, 'Attempt {0}'.format(attempt))

    def wait(indent, seconds):
        seconds = seconds or 0
        seconds = seconds * config.WAIT_MULTIPLIER
        if seconds < config.MIN_WAIT:
            seconds = config.MIN_WAIT

        if seconds:
            log(indent, provider_name, u'(waiting {0} seconds)'.format(seconds))
            # log(0, provider_name, u' waiting {0} seconds '
            #     .format(seconds).center(60, '#'))
            time.sleep(seconds)

    def human_interaction_needed(xpath, seconds=0):
        log(2, provider_name, 'Checking if human interaction is needed')
        try:
            wait(2, seconds)
            el = browser.find_element_by_xpath(xpath)
            if el.is_displayed():
                print('Human interaction is needed (captcha or similar)!')
                print('Go to the browser, do the interaction and hit "c".')

                if os.environ.get('TRAVIS'):
                    message = ('Human interaction needed, '
                               'but not possible on Travis CI!')
                    log(3, provider_name, message)
                    pytest.fail(message)
                    return

                log(3, provider_name, 'Entering PDB for human interaction')
                import pdb; pdb.set_trace()
                log(3, provider_name, 'Returned from PDB')
                return
        except NoSuchElementException:
            pass

        log(3, provider_name, 'Not needed')

    try:
        provider['name'] = provider_name
        conf = fixtures.get_configuration(provider_name)

        # Andy types the login handler url to the address bar.
        url = parse.urljoin(app.check_url, 'login/' + provider['_path'])

        # Andy authenticates by the provider.
        login_url = provider.get('login_url')
        login_xpath = provider.get('login_xpath')
        password_xpath = provider.get('password_xpath')
        pre_login_xpaths = provider.get('pre_login_xpaths')

        # Go to login URL to log in
        if login_url:
            log(2, provider_name, 'Going to login URL: {0}'.format(login_url))
            browser.get(login_url)
        else:
            browser.get(url)

        # Handle alerts
        try:
            alert_wait = provider.get('alert_wait_seconds', 0)
            WebDriverWait(browser, alert_wait)\
                .until(expected_conditions.alert_is_present())

            if alert_wait:
                log(2, provider_name, 'Waiting {0} seconds for alert'
                    .format(alert_wait))

            alert = browser.switch_to_alert()
            log(2, provider_name, 'Accepting alert: {0}'.format(alert.text))
            alert.accept()
        except TimeoutException:
            pass

        # Pause for getting login and password xpaths
        if request.config.getoption("--pause"):
            log(2, provider_name, 'Pausing to pdb')
            import pdb; pdb.set_trace()

        if login_xpath:
            if pre_login_xpaths:
                for xpath in pre_login_xpaths:
                    log(2, provider_name,
                        'Finding pre-login element {0}'.format(xpath))
                    pre_login = browser.find_element_by_xpath(xpath)

                    log(3, provider_name,
                        'Clicking on pre-login element {0}'.format(xpath))
                    pre_login.click()

            log(2, provider_name, 'Finding login input {0}'.format(login_xpath))
            login_element = browser.find_element_by_xpath(login_xpath)

            log(3, provider_name, 'Filling out login')
            login_element.send_keys(conf.user_login)

            enter_after_login_input = provider.get('enter_after_login_input')
            if enter_after_login_input:
                log(3, provider_name, 'Hitting ENTER after login input')
                login_element.send_keys(Keys.ENTER)

            hi = provider.get('human_interaction_before_password')
            if hi:
                human_interaction_needed(*hi)

            log(2, provider_name,
                'Finding password input {0}'.format(password_xpath))
            password_element = browser.find_element_by_xpath(password_xpath)
            log(3, provider_name, 'Filling out password')
            password_element.send_keys(conf.user_password)

            wait(2, provider.get('before_login_enter_wait'))
            log(2, provider_name, 'Hitting ENTER')
            password_element.send_keys(Keys.ENTER)
            wait(2, provider.get('after_login_wait_seconds'))

        if login_url:
            # Return back from login URL
            log(2, provider_name, 'Going back from login URL to: {0}'
                .format(url))

            browser.get(url)

        # Andy authorizes this app to access his protected resources.
        consent_xpaths = provider.get('consent_xpaths')

        if consent_xpaths:
            for xpath in consent_xpaths:
                try:
                    wait(2, provider.get('consent_wait_seconds'))

                    log(2, provider_name,
                        'Finding consent button {0}'.format(xpath))
                    button = browser.find_element_by_xpath(xpath)

                    log(3, provider_name, 'Clicking consent button')
                    button.click()
                except NoSuchElementException as e:
                    log(3, provider_name,
                        'Consent button not found! '
                        '(provider probably remembers consent)')

        wait(2, provider.get('after_consent_wait_seconds'))

        try:
            log(2, provider_name, 'Finding result element')
            browser.find_element_by_id('login-result')
            log(3, provider_name, 'Result element found')
            success = True
        except NoSuchElementException:
            log(3, provider_name, 'Result element not found!')

    except WebDriverException as e:
        if request.config.getoption('--login-error-pdb'):
            log(2, provider_name, 'Entering PDB session')
            import pdb; pdb.set_trace()
        try:
            log(2, provider_name,
                'Finding result element after error {0}'.format(e.msg))
            browser.find_element_by_id('login-result')
            log(3, provider_name, 'Result element found')
            success = True
        except NoSuchElementException:
            log(3, provider_name, 'Result element not found!')

    if success:
        log(0, provider_name, 'SUCCESS')
    else:
        if attempt < config.MAX_LOGIN_ATTEMPTS:
            login(request, browser, app, attempt + 1)
        else:
            log(1, provider_name,
                'Giving up after {0} attempts!'.format(attempt))

            # import pdb; pdb.set_trace()

            pytest.fail('Login by provider "{0}" failed!'.format(provider_name))

    return provider
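
The human_interaction_needed helper above checks os.environ.get('TRAVIS') to detect that the test is running on CI, where dropping into PDB is pointless. Travis CI exports TRAVIS=true for every build, so a simple presence check suffices; a sketch (the extra CI variable is an assumption, not in the original):

import os

def on_ci():
    # Travis CI sets TRAVIS=true; many other CI systems set CI=true.
    return bool(os.environ.get('TRAVIS') or os.environ.get('CI'))

if on_ci():
    print('skipping steps that would need a human')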

Example 43

Project: pgmapcss
Source File: main.py
View license
def main():
    print('pgmapcss version %s' % pgmapcss.version.VERSION)
    args = parser.parse_args()

    style_id = args.style_id

    m = re.match('(.*)\.mapcss$', style_id)
    if m:
        style_id = m.group(1)

    file_name = style_id + '.mapcss'

    parameters = { }
    if args.parameters is not None:
        parameters = {
            p[0:p.find('=')]: p[p.find('=')+1:]
            for p in args.parameters
        }

    if args.lang:
        lang = args.lang
    elif 'lang' in parameters:
        pass
    else:
        lang = os.environ.get('LANG')
        if lang:
            m = re.match('(.*)_', lang)
            if m:
                lang = m.group(1)
        else:
            # default: english
            lang = 'en'

    stat = pgmapcss.compiler.stat._stat({
        'id': style_id,
        'config': {},
        'base_style': args.base_style,
        'icons_dir': style_id + '.icons',
        'global_data': None,
        'mode': args.mode,
        'args': args,
        'lang': lang,
        'parameters': parameters,
    })

    if args.config:
        for v in args.config:
            v = v.split("=")
            if len(v) > 1:
                stat['config'][v[0]] = v[1]
            else:
                stat['config'][v[0]] = True

    conn = pgmapcss.db.connect(args, stat)

    stat['database'] = conn.database

    if 'unit.srs' not in stat['config']:
        stat['config']['unit.srs'] = 900913
    if 'srs' not in stat['config']:
        if stat['mode'] == 'database-function':
            stat['config']['srs'] = 900913
        else:
            stat['config']['srs'] = 4326

    if stat['config'].get('offline', False) in (False, 'false', 'no') and args.database_update in ('init', 're-init'):
        print('* Re-initializing database')
        pgmapcss.db.db_init(conn, stat)

    if stat['config'].get('offline', False) not in (False, 'false', 'no'):
        print('* Using offline mode. Attention! Some functionality might be missing.')

    else:
        db_version = pgmapcss.db.db_version()
        if db_version is None:
            print('* DB functions not installed; installing')
            pgmapcss.db.db_init(conn, stat)
        else:
            db_check = pgmapcss.db.db_version_check()
            if db_check == 1 and args.database_update == 'auto':
                print('* Current DB version: {version} -> updating DB functions'.format(**db_version))
                pgmapcss.db.db_update(conn)

            elif db_check == 2:
                print('* Current DB version: {version}'.format(**db_version))
                print('pgmapcss version too new. Database needs to be re-initialized. Please re-run pgmapcss with parameter "-r init". All Mapnik styles need to be re-compiled afterwards.')
                sys.exit(1)

            elif args.database_update == 'update':
                pgmapcss.db.db_update(conn)

            else:
                print('* Current DB version: {version}'.format(**db_version))

    if args.eval_tests is not False:
        if len(args.eval_tests):
            pgmapcss.eval.functions(stat).test_all(args.eval_tests)
        else:
            pgmapcss.eval.functions(stat).test_all()

        print('* All tests completed successfully.')

    try:
        os.mkdir(stat['icons_dir'])
    except OSError:
        pass

    eval_functions = pgmapcss.eval.functions(stat).list()

    content = open(file_name).read()

    # check if file is XML -> extract MapCSS code
    tree = None
    if re.match('<\?xml', content):
        import xml.dom.minidom as dom
        tree = dom.parse(file_name)
        mapcss = tree.getElementsByTagName("style")
        if mapcss.length != 1:
            print("Require exactly one <style type='text/mapcss'> node")
            sys.exit(1)

        mapcss = mapcss.item(0)
        content = mapcss.firstChild.nodeValue

    try:
        pgmapcss.parser.parse_file(stat, filename=file_name, content=content, base_style=args.base_style, defaults=args.defaults)
    except pgmapcss.parser.ParseError as e:
        print(e)
        sys.exit(1)

    debug = open(style_id + '.output', 'w')

    pp = pprint.PrettyPrinter()

    debug.write("***** Structure of parsed MapCSS style *****\n")
    debug.write(pp.pformat(stat) + '\n')

    pgmapcss.renderer.init(stat)
    pgmapcss.icons.init(stat)
    pgmapcss.symbols.init(stat)

    try:
        style = pgmapcss.compiler.compile_style(stat)
    except pgmapcss.compiler.CompileError as e:
        print(e)
        sys.exit(1)

    #pp.pprint(style)
    for i in style:
        debug.write("\n***** " + i + " *****\n" + style[i])

    if stat['mode'] == 'database-function':
        pgmapcss.db.install(style_id, style, conn)
        pgmapcss.renderer.process_renderer(style_id, args, stat, conn)
    elif stat['mode'] == 'standalone':
        open(style_id + '.py', 'w').write(style['function_match'])
        os.chmod(style_id + '.py', 0o755)
        print('Created executable {}.py'.format(style_id))

    pgmapcss.icons.process_icons(style_id, args, stat, conn)
    pgmapcss.symbols.process_symbols(style_id, args, stat, conn)

    debug.close()

    if 'unresolvable_properties' in stat:
        print('WARNING: Not all values for the following properties could be guessed (e.g. because they are the result of an eval-expression), so some features in the resulting image(s) may be missing: ' + ', '.join(stat['unresolvable_properties']))

    # copy result xml to original dom
    if tree:
        result_tree = dom.parse(style_id + '.mapnik')
        current = result_tree.getElementsByTagName("Map").item(0).firstChild

        while current:
            if re.search('[^\s]', current.toxml()):
                if not re.match('<!\-\-', current.toxml()):
                    copy = dom.parseString(current.toxml())
                    mapcss.parentNode.insertBefore(copy.firstChild, mapcss)
            current = current.nextSibling

        mapcss.parentNode.removeChild(mapcss)
        open(style_id + '.mapnik', 'w').write(tree.toxml())

    print('Debug output written to ' + style_id + '.output')
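
The language fallback in main() reads $LANG and strips the territory/encoding suffix, e.g. 'en_US.UTF-8' becomes 'en', defaulting to English when the variable is unset. A compact sketch of that chain (the helper name is made up):

import os
import re

def guess_lang(default='en'):
    """Language from $LANG, e.g. 'de_AT.UTF-8' -> 'de'; default if unset."""
    lang = os.environ.get('LANG')
    if not lang:
        return default
    m = re.match(r'(.*)_', lang)
    return m.group(1) if m else lang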

Example 44

Project: terrarium
Source File: terrarium.py
View license
def parse_args():
    sensitive_arguments = [
        's3_access_key',
        's3_secret_key',
        'gcs_client_email',
        'gcs_private_key',
    ]
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '-V', '--version',
        action='version',
        version='%(prog)s ' + terrarium.__version__,
    )
    ap.add_argument(
        '-v', '--verbose',
        action='append_const',
        const=-10,
        default=[logging.INFO],
        dest='v',
        help='Increase verbosity',
    )
    ap.add_argument(
        '-q', '--quiet',
        action='append_const',
        const=10,
        default=[logging.INFO],
        dest='v',
        help='Decrease verbosity',
    )
    ap.add_argument(
        '-t', '--target',
        dest='target',
        default=os.environ.get('VIRTUAL_ENV', None),
        help='''
            Replace or build new environment at this location. If you are
            already within a virtual environment, this option defaults to
            VIRTUAL_ENV.
        ''',
    )
    ap.add_argument(
        '--pip-log-level',
        default=25,
        help='''
        Set the log level for pip
        ''',
    )
    ap.add_argument(
        '--virtualenv-log-level',
        default=25,
        help='''
        Set the log level for virtualenv
        ''',
    )
    ap.add_argument(
        '--no-download',
        default=True,
        action='store_false',
        dest='download',
        help='''
            If an external storage location is specified, terrarium will
            attempt to download an existing terrarium bundle instead of
            building a new one. Using --no-download forces terrarium to build a
            new environment.
        ''',
    )
    ap.add_argument(
        '--require-download',
        default=False,
        action='store_true',
        help='''
            If we fail to download a terrarium bundle from the storage
            location, do not proceed to build one.
        ''',
    )
    ap.add_argument(
        '--no-upload',
        default=True,
        action='store_false',
        dest='upload',
        help='''
            If an external storage location is specified, terrarium will upload
            a new environment after it has been built. Using --no-upload,
            terrarium will not upload the resulting environment to the external
            storage location.
        ''',
    )
    ap.add_argument(
        '--no-backup',
        default=True,
        action='store_false',
        dest='backup',
        help='''
            By default, terrarium preserves the old environment. See
            --backup-suffix. Using this option, terrarium will delete the old
            environment.
        ''',
    )
    ap.add_argument(
        '--backup-suffix',
        default='.bak',
        help='''
            The suffix to use when preserving an old environment. This option
            is ignored if --no-backup is used. Default is .bak.
        '''
    )
    ap.add_argument(
        '--no-compress',
        default=True,
        action='store_false',
        dest='compress',
        help='''
            By default, terrarium compresses the archive using gzip before
            uploading it.
        ''',
    )
    ap.add_argument(
        '--storage-dir',
        default=os.environ.get('TERRARIUM_STORAGE_DIR', None),
        help='''
            Path to a directory in which terrarium bundles will be retrieved
            and stored for speedy re-installation. This will usually be a
            shared drive.
        ''',
    )
    ap.add_argument(
        '--digest-type',
        default='md5',
        help='Choose digest type (md5, sha, see hashlib). Default is md5.',
    )
    ap.add_argument(
        '--no-bootstrap',
        default=True,
        action='store_false',
        dest='bootstrap',
        help='''
            By default, terrarium will create a script called
            'terrarium_bootstrap.py' in the new environment bin directory.
            Running this script will create a new environment at the specified
            location using all of the packages that were defined at the time of
            its creation. To prevent this script from being created, use
            --no-bootstrap.
        ''',
    )
    default_remote_key_format = '''
        %(arch)s-%(python_vmajor)s.%(python_vminor)s-%(digest)s
    '''.strip()
    ap.add_argument(
        '--remote-key-format',
        default=default_remote_key_format,
        help='''
            Key name format to use when storing the archive. Default is "%s"
        ''' % default_remote_key_format.replace('%', '%%'),
    )

    ap.add_argument(
        '--s3-bucket',
        default=os.environ.get('S3_BUCKET', None),
        help='''
            S3 bucket name. Defaults to S3_BUCKET env variable.
        '''
    )
    ap.add_argument(
        '--s3-access-key',
        default=os.environ.get('S3_ACCESS_KEY', None),
        help='''
            Defaults to S3_ACCESS_KEY env variable.
        '''
    )
    ap.add_argument(
        '--s3-secret-key',
        default=os.environ.get('S3_SECRET_KEY', None),
        help='''
            Defaults to S3_SECRET_KEY env variable.
        '''
    )
    ap.add_argument(
        '--s3-max-retries',
        default=os.environ.get('S3_MAX_RETRIES', 3),
        help='''
            Number of times to attempt an S3 operation before giving up.
            Default is 3.
        ''',
    )

    # GCS-relevant arguments
    ap.add_argument(
        '--gcs-bucket',
        default=os.environ.get('GCS_BUCKET', None),
        help='''
            Google Cloud Storage bucket name.
            Defaults to GCS_BUCKET env variable.
        '''
    )
    ap.add_argument(
        '--gcs-project',
        default=os.environ.get('GCS_PROJECT', None),
        help='''
            Google Cloud Storage project.
            Defaults to GCS_PROJECT env variable.
        '''
    )
    ap.add_argument(
        '--gcs-client-email',
        default=os.environ.get('GCS_CLIENT_EMAIL', None),
        help='''
            Google Cloud Storage client email.
            Defaults to GCS_CLIENT_EMAIL env variable.
        '''
    )
    ap.add_argument(
        '--gcs-private-key',
        default=os.environ.get('GCS_PRIVATE_KEY', None),
        help='''
            Google Cloud Storage private key.
            Defaults to GCS_PRIVATE_KEY env variable.
        '''
    )
    ap.add_argument(
        '--gcs-max-retries',
        default=os.environ.get('GCS_MAX_RETRIES', 3),
        help='''
            Number of times to attempt a GCS operation before giving up.
            Default is 3.
        '''
    )

    subparsers = ap.add_subparsers(
        title='Basic Commands',
        dest='command',
    )
    commands = {
        'hash': subparsers.add_parser(
            'hash',
            help='Display digest for current requirement set',
        ),
        'key': subparsers.add_parser(
            'key',
            help='Display remote key for current requirement set and platform',
        ),
        'install': subparsers.add_parser(
            'install',
            help='''
                Replace current environment with the one given by the
                requirement set.
            ''',
        ),
        'revert': subparsers.add_parser(
            'revert',
            help='''
                Restore the most recent backed-up virtualenv, if it exists.
            ''',
        ),
    }

    for command in commands.values():
        command.add_argument('reqs', nargs=argparse.REMAINDER)

    args = ap.parse_args()
    args.add_sensitive_arguments(*sensitive_arguments)

    if not boto and args.s3_bucket is not None:
        ap.error(
            '--s3-bucket requires that you have boto installed, '
            'which does not appear to be the case'
        )

    if not gcs and args.gcs_bucket is not None:
        ap.error(
            '--gcs-bucket requires that you have gcloud installed, '
            'which does not appear to be the case'
        )

    return args
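
The recurring idiom in this parser is wiring argparse defaults to environment variables, so every option can be supplied either on the command line or through the environment. Stripped to its core (a sketch; note the environment is read once, when the parser is built):

import argparse
import os

ap = argparse.ArgumentParser()
ap.add_argument(
    '--s3-bucket',
    default=os.environ.get('S3_BUCKET'),  # env value used when flag omitted
    help='S3 bucket name. Defaults to the S3_BUCKET env variable.',
)
args = ap.parse_args([])  # no CLI args given: the env default wins
print(args.s3_bucket)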

Example 45

View license
def args_from(original_function,
              only=None,
              allexcept=None,
              inject_kwargs=None,
              inject_docs=None,
              wraps=None,
              update_docstring_args=False):
    """
    Decorator to transfer call signatures - helps to hide ugly *args and **kwargs in delegated calls

    Args:
        original_function (callable): the function to take the call signature from
        only (List[str]): only transfer these arguments (incompatible with `allexcept`)
        wraps (bool): Transfer documentation and attributes from original_function to
            decorated_function, using functools.wraps (default: True if call signature is
            unchanged, False otherwise)
        allexcept (List[str]): transfer all except these arguments (incompatible with `only`)
        inject_kwargs (dict): Inject new kwargs into the call signature
            (of the form ``{argname: defaultvalue}``)
        inject_docs (dict): Add or modifies argument documentation (requires google-style
            docstrings) with a dict of the form `{argname: "(type): description"}`
        update_docstring_args (bool): Update "arguments" section of the docstring using the
           original function's documentation (requires google-style docstrings and wraps=False)

    Note:
        To use arguments from a class's __init__ method, pass the class itself as
        ``original_function`` - this will also allow us to inject the documentation

    Returns:
        Decorator function
    """
    # NEWFEATURE - verify arguments?

    if only and allexcept:
        raise ValueError('Error in keyword arguments - '
                         'pass *either* "only" or "allexcept", not both')

    origname = get_qualified_name(original_function)

    if hasattr(original_function, '__signature__'):
        sig = original_function.__signature__.replace()
    else:
        sig = funcsigs.signature(original_function)

    # Modify the call signature if necessary
    if only or allexcept or inject_kwargs:
        wraps = if_not_none(wraps, False)
        newparams = []
        if only:
            for param in only:
                newparams.append(sig.parameters[param])
        elif allexcept:
            for name, param in sig.parameters.iteritems():
                if name not in allexcept:
                    newparams.append(param)
        else:
            newparams = sig.parameters.values()
        if inject_kwargs:
            for name, default in inject_kwargs.iteritems():
                newp = funcsigs.Parameter(name, funcsigs.Parameter.POSITIONAL_OR_KEYWORD,
                                          default=default)
                newparams.append(newp)

        newparams.sort(key=lambda param: param._kind)
        sig = sig.replace(parameters=newparams)

    else:
        wraps = if_not_none(wraps, True)

    # Get the docstring arguments
    if update_docstring_args:
        original_docs = GoogleDocArgumentInjector(original_function.__doc__)
        argument_docstrings = collections.OrderedDict((p.name, original_docs.args[p.name])
                                                      for p in newparams)

    def decorator(f):
        """Modify f's call signature (using the `__signature__` attribute)"""
        if wraps:
            fname = original_function.__name__
            f = functools.wraps(original_function)(f)
            f.__name__ = fname  # revert name change
        else:
            fname = f.__name__
        f.__signature__ = sig

        if update_docstring_args or inject_kwargs:
            if not update_docstring_args:
                argument_docstrings = GoogleDocArgumentInjector(f.__doc__).args
            docs = GoogleDocArgumentInjector(f.__doc__)
            docs.args = argument_docstrings

            if not hasattr(f, '__orig_docs'):
                f.__orig_docs = []
            f.__orig_docs.append(f.__doc__)

            f.__doc__ = docs.new_docstring()

        # Only for building sphinx documentation:
        if os.environ.get('SPHINX_IS_BUILDING_DOCS', ""):
            sigstring = '%s%s\n' % (fname, sig)
            if hasattr(f, '__doc__') and f.__doc__ is not None:
                f.__doc__ = sigstring + f.__doc__
            else:
                f.__doc__ = sigstring
        return f

    return decorator
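
Note the empty-string default in os.environ.get('SPHINX_IS_BUILDING_DOCS', ""): it guarantees a string result, so the value can be tested for truthiness directly, without a None check. In sketch form:

import os

# Unset or empty -> falsy; any non-empty value -> truthy.
if os.environ.get('SPHINX_IS_BUILDING_DOCS', ''):
    print('augmenting docstrings for the docs build')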

Example 46

Project: tp-qemu
Source File: softlockup.py
View license
def run(test, params, env):
    """
    soft lockup/drift test with stress.

    1) Boot up a VM.
    2) Build stress on host and guest.
    3) Run heartbeat with the given options on server and host.
    4) Run for a relatively long time, e.g. 12, 18 or 24 hours.
    5) Output the test result and observe drift.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    stress_setup_cmd = params.get("stress_setup_cmd", None)
    stress_cmd = params.get("stress_cmd")
    server_setup_cmd = params.get("server_setup_cmd")
    drift_cmd = params.get("drift_cmd")
    kill_stress_cmd = params.get("kill_stress_cmd")
    kill_monitor_cmd = params.get("kill_monitor_cmd")

    threshold = int(params.get("stress_threshold"))
    monitor_log_file_server = params.get("monitor_log_file_server")
    monitor_log_file_client = params.get("monitor_log_file_client")
    test_length = int(3600 * float(params.get("test_length")))
    monitor_port = int(params.get("monitor_port"))

    vm = env.get_vm(params["main_vm"])
    login_timeout = int(params.get("login_timeout", 360))
    auto_dir = os.environ.get("AUTODIR", os.environ.get("AUTOTEST_PATH"))
    stress_dir = os.path.join(auto_dir, "tests", "stress")
    monitor_dir = params.get("monitor_dir",
                             data_dir.get_deps_dir("softlockup"))

    def _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd):
        logging.info("Kill stress and monitor on guest")
        try:
            session.cmd(kill_stress_cmd)
        except Exception:
            pass
        try:
            session.cmd(kill_monitor_cmd)
        except Exception:
            pass

    def _kill_host_programs(kill_stress_cmd, kill_monitor_cmd):
        logging.info("Kill stress and monitor on host")
        utils.run(kill_stress_cmd, ignore_status=True)
        utils.run(kill_monitor_cmd, ignore_status=True)

    def host():
        logging.info("Setup monitor server on host")
        # Kill previous instances of the host load programs, if any
        _kill_host_programs(kill_stress_cmd, kill_monitor_cmd)
        # Cleanup previous log instances
        if os.path.isfile(monitor_log_file_server):
            os.remove(monitor_log_file_server)
        # Opening firewall ports on host
        utils.run("iptables -F", ignore_status=True)

        # Run heartbeat on host
        utils.run(server_setup_cmd % (monitor_dir, threshold,
                                      monitor_log_file_server, monitor_port))

        if stress_setup_cmd is not None:
            logging.info("Build stress on host")
            # Uncompress and build stress on host
            utils.run(stress_setup_cmd % stress_dir)

        logging.info("Run stress on host")
        # stress_threads = 2 * n_cpus
        threads_host = 2 * utils.count_cpus()
        # Run stress test on host
        utils.run(stress_cmd % (stress_dir, threads_host))

    def guest():
        try:
            host_ip = socket.gethostbyname(socket.gethostname())
        except socket.error:
            try:
                # Hackish, but works well on standalone (laptop) setups
                # with access to the internet. If this fails, well, then
                # not much else can be done...
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                s.connect(("redhat.com", 80))
                host_ip = s.getsockname()[0]
            except socket.error, (value, e):
                raise error.TestError("Could not determine host IP: %d %s" %
                                      (value, e))

        # Now, starting the guest
        vm.verify_alive()
        session = vm.wait_for_login(timeout=login_timeout)

        # Kill previous instances of the load programs, if any
        _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd)
        # Clean up previous log instances
        session.cmd("rm -f %s" % monitor_log_file_client)

        # Opening firewall ports on guest
        try:
            session.cmd("iptables -F")
        except Exception:
            pass

        # Get monitor files and copy them from host to guest
        monitor_path = os.path.join(data_dir.get_deps_dir(), 'softlockup',
                                    'heartbeat_slu.py')
        vm.copy_files_to(monitor_path, "/tmp")

        logging.info("Setup monitor client on guest")
        # Start heartbeat on guest
        session.cmd(params.get("client_setup_cmd") %
                    ("/tmp", host_ip, monitor_log_file_client, monitor_port))

        if stress_setup_cmd is not None:
            # Copy, uncompress and build stress on guest
            stress_source = params.get("stress_source")
            stress_path = os.path.join(stress_dir, stress_source)
            vm.copy_files_to(stress_path, "/tmp")
            logging.info("Build stress on guest")
            session.cmd(stress_setup_cmd % "/tmp", timeout=200)

        logging.info("Run stress on guest")
        # stress_threads = 2 * n_vcpus
        threads_guest = 2 * int(params.get("smp", 1))
        # Run stress test on guest
        session.cmd(stress_cmd % ("/tmp", threads_guest))

        # Wait and report
        logging.debug("Wait for %d s", test_length)
        time.sleep(test_length)

        # Kill instances of the load programs on both guest and host
        _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd)
        _kill_host_programs(kill_stress_cmd, kill_monitor_cmd)

        # Collect drift
        drift = utils.system_output(drift_cmd % monitor_log_file_server)
        logging.info("Drift noticed: %s", drift)

    host()
    guest()
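
The lookup os.environ.get("AUTODIR", os.environ.get("AUTOTEST_PATH")) above nests one get inside another, so the second variable acts as a fallback for the first. The same idea generalized to any number of candidates (helper name and default path are made up; **kw keeps it Python 2 compatible):

import os

def first_env(*names, **kw):
    """Value of the first set variable in names, else kw['default']."""
    for name in names:
        value = os.environ.get(name)
        if value is not None:
            return value
    return kw.get('default')

# Illustrative default path, not taken from the original.
auto_dir = first_env('AUTODIR', 'AUTOTEST_PATH', default='/usr/local/autotest')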

Example 47

Project: python-beaver
Source File: config.py
View license
    def __init__(self, args, logger=None):
        self._logger = logger or logging.getLogger(__name__)
        self._logger.debug('Processing beaver portion of config file %s' % args.config)

        self._section_defaults = {
            'add_field': '',
            'add_field_env': '',
            'debug': '0',
            'discover_interval': '15',
            'encoding': 'utf_8',

            # should be a python regex of files to remove
            'exclude': '',
            'format': '',

            # throw out empty lines instead of shipping them
            'ignore_empty': '0',

            # allow ignoring copytruncate results
            'ignore_truncate': '0',

            # buffered tokenization
            # we string-escape the delimiter later so that we can put escaped characters in our config file
            'delimiter': '\n',
            'size_limit': '',

            # multiline events support. Default is disabled
            'multiline_regex_after': '',
            'multiline_regex_before': '',

            'message_format': '',
            'sincedb_write_interval': '15',
            'stat_interval': '1',
            'start_position': 'end',
            'tags': '',
            'tail_lines': '0',
            'type': '',
            # Redis specific namespace
            'redis_namespace': ''
        }

        self._main_defaults = {
            'kafka_client_id': os.environ.get('KAFKA_CLIENT_ID', 'beaver-kafka'),
            'kafka_hosts': os.environ.get('KAFKA_HOSTS', 'localhost:9092'),
            'kafka_async': os.environ.get('KAFKA_ASYNC', True),
            'kafka_topic': os.environ.get('KAFKA_TOPIC', 'logstash-topic'),
            'kafka_key': os.environ.get('KAFKA_KEY'),
            'kafka_codec': os.environ.get('KAFKA_CODEC'),
            'kafka_ack_timeout': os.environ.get('KAFKA_ACK_TIMEOUT', 2000),
            'kafka_batch_n': os.environ.get('KAFKA_BATCH_N', 10),
            'kafka_batch_t': os.environ.get('KAFKA_BATCH_T', 10),
            'kafka_round_robin': os.environ.get('KAFKA_ROUND_ROBIN', False),
            'mqtt_clientid': 'paho',
            'mqtt_host': 'localhost',
            'mqtt_port': '1883',
            'mqtt_topic': '/logstash',
            'mqtt_keepalive': '60',
            'rabbitmq_host': os.environ.get('RABBITMQ_HOST', 'localhost'),
            'rabbitmq_port': os.environ.get('RABBITMQ_PORT', '5672'),
            'rabbitmq_ssl': '0',
            'rabbitmq_ssl_key': '',
            'rabbitmq_ssl_cert': '',
            'rabbitmq_ssl_cacert': '',
            'rabbitmq_vhost': os.environ.get('RABBITMQ_VHOST', '/'),
            'rabbitmq_username': os.environ.get('RABBITMQ_USERNAME', 'guest'),
            'rabbitmq_password': os.environ.get('RABBITMQ_PASSWORD', 'guest'),
            'rabbitmq_queue': os.environ.get('RABBITMQ_QUEUE', 'logstash-queue'),
            'rabbitmq_exchange_type': os.environ.get('RABBITMQ_EXCHANGE_TYPE', 'direct'),
            'rabbitmq_exchange_durable': os.environ.get('RABBITMQ_EXCHANGE_DURABLE', '0'),
            'rabbitmq_queue_durable': os.environ.get('RABBITMQ_QUEUE_DURABLE', '0'),
            'rabbitmq_ha_queue': os.environ.get('RABBITMQ_HA_QUEUE', '0'),
            'rabbitmq_key': os.environ.get('RABBITMQ_KEY', 'logstash-key'),
            'rabbitmq_exchange': os.environ.get('RABBITMQ_EXCHANGE', 'logstash-exchange'),
            'rabbitmq_timeout': '1',
            'rabbitmq_delivery_mode': 1,
            'redis_url': os.environ.get('REDIS_URL', 'redis://localhost:6379/0'),
            'redis_namespace': os.environ.get('REDIS_NAMESPACE', 'logstash:beaver'),
            'redis_data_type': os.environ.get('REDIS_DATA_TYPE', 'list'),
            'redis_password': '',
            'sns_aws_access_key': '',
            'sns_aws_secret_key': '',
            'sns_aws_profile_name': '',
            'sns_aws_region': 'us-east-1',
            'sns_aws_topic_arn': '',
            'sqs_aws_access_key': '',
            'sqs_aws_secret_key': '',
            'sqs_aws_profile_name': '',
            'sqs_aws_region': 'us-east-1',
            'sqs_aws_queue': '',
            'sqs_aws_queue_owner_acct_id': '',
            'sqs_bulk_lines': False,
            'kinesis_aws_access_key': '',
            'kinesis_aws_secret_key': '',
            'kinesis_aws_region': 'us-east-1',
            'kinesis_aws_stream': '',
            'kinesis_aws_batch_size_max': '512000',
            'tcp_host': '127.0.0.1',
            'tcp_port': '9999',
            'tcp_ssl_enabled': '0',
            'tcp_ssl_verify': '0',
            'tcp_ssl_cacert': '',
            'tcp_ssl_cert': '',
            'tcp_ssl_key': '',
            'udp_host': os.environ.get('UDP_HOST', '127.0.0.1'),
            'udp_port': os.environ.get('UDP_PORT', '9999'),
            'zeromq_address': os.environ.get('ZEROMQ_ADDRESS', 'tcp://localhost:2120'),
            'zeromq_pattern': 'push',
            'zeromq_hwm': os.environ.get('ZEROMQ_HWM', ''),
            'stomp_host': 'localhost',
            'stomp_port': '61613',
            'stomp_user': 'user',
            'stomp_password': None,
            'stomp_queue': 'queue/logstash',

            # exponential backoff
            'respawn_delay': '3',
            'max_failure': '7',

            # consumer processes
            'number_of_consumer_processes': '1',

            # interprocess queue max size before puts block
            'max_queue_size': '100',

            # time in seconds before updating the file mapping
            'update_file_mapping_time': '',  # deprecated
            'discover_interval': '15',

            # time in seconds from last command sent before a queue kills itself
            'queue_timeout': '60',

            # kill and respawn worker process after given number of seconds
            'refresh_worker_process': '',

            # time in seconds to wait on queue.get() block before raising Queue.Empty exception
            'wait_timeout': '5',

            # path to sincedb sqlite db
            'sincedb_path': '',

            # 0 for logstash version < 1.2, 1 for logstash >= 1.2
            'logstash_version': '',

            # ssh tunnel support
            'ssh_key_file': '',
            'ssh_tunnel': '',
            'ssh_tunnel_port': '',
            'ssh_remote_host': '',
            'ssh_remote_port': '',
            'ssh_options': '',
            'subprocess_poll_sleep': '1',

            # the following can be passed via argparse
            'zeromq_bind': os.environ.get('BEAVER_MODE', 'bind' if os.environ.get('BIND', False) else 'connect'),
            'files': os.environ.get('BEAVER_FILES', ''),
            'format': os.environ.get('BEAVER_FORMAT', 'json'),
            'fqdn': '0',
            'hostname': '',
            'output': '',
            'path': os.environ.get('BEAVER_PATH', '/var/log'),
            'transport': os.environ.get('BEAVER_TRANSPORT', 'stdout'),  # this needs to be passed to the import class somehow

            # Path to individual file configs. These override any sections in the main beaver.ini file
            'confd_path': '/etc/beaver/conf.d',

            # the following are parsed before the config file is parsed
            # but may be useful at runtime
            'config': '/dev/null',
            'debug': '0',
            'daemonize': '0',
            'pid': '',

            # Ignore files older then n days, use 0 to disable
            'ignore_old_files': 0
        }

        self._configfile = args.config
        self._config_parser = GlobSafeConfigParser
        self._globbed = []
        self._parse(args)
        for key in self._beaver_config:
            self._logger.debug('[CONFIG] "{0}" => "{1}"'.format(key, self._beaver_config.get(key)))

        self._update_files()
        self._check_for_deprecated_usage()
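
Nearly every default in this config class is pulled from the environment with a hard-coded fallback, which makes the whole map overridable per deployment. One caveat: environment values are always strings, while a few literal defaults above (e.g. kafka_async's True) are not, so the type can differ depending on where the value came from. A trimmed-down sketch of the shape:

import os

MAIN_DEFAULTS = {
    # each entry: environment override first, hard-coded fallback second
    'redis_url': os.environ.get('REDIS_URL', 'redis://localhost:6379/0'),
    'rabbitmq_host': os.environ.get('RABBITMQ_HOST', 'localhost'),
    'udp_port': os.environ.get('UDP_PORT', '9999'),
}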

Example 48

Project: radical.pilot
Source File: session.py
View license
def fetch_logfiles (sid, dburl=None, client=None, tgt=None, access=None, 
        session=None, skip_existing=False):
    '''
    sid: session for which all logfiles are fetched
    client: dir to look for client session logfiles
    tgt: dir to store the logfile in

    returns list of file names
    '''


    ret = list()

    if not dburl:
        dburl = os.environ.get('RADICAL_PILOT_DBURL')

    if not dburl:
        from radical.pilot.session import default_dburl
        logger.report.warn('using default dburl: %s' % default_dburl)
        dburl = default_dburl

    if not client:
        client = os.getcwd()
            
    if not tgt:
        tgt = os.getcwd()
            
    if not tgt.startswith('/') and '://' not in tgt:
        tgt = "%s/%s" % (os.getcwd(), tgt)

    # we always create a session dir as real target
    tgt_url = saga.Url(tgt)

    # Turn URLs without schema://host into file://localhost,
    # so that they don't get interpreted as relative.
    if not tgt_url.schema:
        tgt_url.schema = 'file'
    if not tgt_url.host:
        tgt_url.host = 'localhost'

    # first fetch session logfile
    # FIXME: should we record pwd or logfile location in db session?  Or create
    #        a sandbox like dir for storing logfiles and logs?
    client_logfile = "%s/%s.log" % (client, sid)

    ftgt = saga.Url('%s/%s' % (tgt_url, os.path.basename(client_logfile)))
    ret.append("%s" % ftgt.path)

    if skip_existing and os.path.isfile(ftgt.path) \
            and os.stat(ftgt.path).st_size > 0:

        logger.report.info("\t- %s\n" % client_logfile.split('/')[-1])

    else:

        if not os.path.isfile(client_logfile):
            print 'skipping client logfile: %s does not exist' % client_logfile

        else:
            logger.report.info("\t+ %s\n" % client_logfile.split('/')[-1])
            log_file = saga.filesystem.File(client_logfile, session=session)
            log_file.copy(ftgt, flags=saga.filesystem.CREATE_PARENTS)
            log_file.close()

    _, db, _, _, _ = ru.mongodb_connect (dburl)

    json_docs = get_session_docs(db, sid)

    pilots = json_docs['pilot']
    num_pilots = len(pilots)
 #  print "Session: %s" % sid
 #  print "Number of pilots in session: %d" % num_pilots

    for pilot in pilots:

      # print "Processing pilot '%s'" % pilot['_id']

        sandbox_url = saga.Url(pilot['sandbox'])

        if access:
            # Allow using a different access scheme than the one used for the run.
            # Useful if you ran from the headnode, but would like to retrieve
            # the logfiles to your desktop (Hello Titan).
            access_url = saga.Url(access)
            sandbox_url.schema = access_url.schema
            sandbox_url.host = access_url.host

          # print "Overriding remote sandbox: %s" % sandbox_url

        sandbox  = saga.filesystem.Directory (sandbox_url, session=session)

        # Try to fetch a tarball of logfiles, so that we can get them all in one (SAGA) go!
        LOGFILES_TARBALL = '%s.log.tgz' % pilot['_id']
        tarball_available = False
        try:
            if sandbox.is_file(LOGFILES_TARBALL):
                print "Logfiles tarball exists!"

                ftgt = saga.Url('%s/%s' % (tgt_url, LOGFILES_TARBALL))

                if skip_existing and os.path.isfile(ftgt.path) \
                        and os.stat(ftgt.path).st_size > 0:

                    print "Skipping fetching of '%s/%s' to '%s'." % (sandbox_url, LOGFILES_TARBALL, tgt_url)
                    tarball_available = True
                else:

                    print "Fetching '%s%s' to '%s'." % (sandbox_url, LOGFILES_TARBALL, tgt_url)
                    log_file = saga.filesystem.File("%s%s" % (sandbox_url, LOGFILES_TARBALL), session=session)
                    log_file.copy(ftgt, flags=saga.filesystem.CREATE_PARENTS)
                    log_file.close()

                    tarball_available = True
            else:
                print "Logfiles tarball doesnt exists!"

        except saga.DoesNotExist:
            print "exception(TODO): logfiles tarball doesnt exists!"

        try:
            os.mkdir("%s/%s" % (tgt_url.path, pilot['_id']))
        except OSError:
            pass

        # We now have a local tarball
        if tarball_available:
            print "Extracting tarball %s into '%s'." % (ftgt.path, tgt_url.path)
            tarball = tarfile.open(ftgt.path)
            tarball.extractall("%s/%s" % (tgt_url.path, pilot['_id']))

            logfiles = glob.glob("%s/%s/*.log" % (tgt_url.path, pilot['_id']))
            print "Tarball %s extracted to '%s/%s/'." % (ftgt.path, tgt_url.path, pilot['_id'])
            ret.extend(logfiles)
            os.unlink(ftgt.path)

            # If extract succeeded, no need to fetch individual logfiles
            continue

        # If we don't have a tarball (for whatever reason), fetch individual logfiles
        logfiles = sandbox.list('*.log')

        for log in logfiles:

            ftgt = saga.Url('%s/%s/%s' % (tgt_url, pilot['_id'], log))
            ret.append("%s" % ftgt.path)

            if skip_existing and os.path.isfile(ftgt.path) \
                             and os.stat(ftgt.path).st_size > 0:

                logger.report.info("\t- %s\n" % str(log).split('/')[-1])
                continue

            logger.report.info("\t+ %s\n" % str(log).split('/')[-1])
            log_file = saga.filesystem.File("%s%s" % (sandbox_url, log), session=session)
            log_file.copy(ftgt, flags=saga.filesystem.CREATE_PARENTS)
            log_file.close()

    return ret
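
fetch_logfiles treats RADICAL_PILOT_DBURL as optional, warning and substituting a library default when it is unset, while get_session_frames in the next example raises instead. Both behaviors reduce to the same lookup; a sketch (the default URL below is illustrative):

import os

def dburl_or_default(default='mongodb://localhost:27017/'):
    # soft fallback: warn and continue with a default
    url = os.environ.get('RADICAL_PILOT_DBURL')
    if not url:
        print('using default dburl: %s' % default)
        url = default
    return url

def dburl_or_raise():
    # hard requirement: fail fast when the variable is missing
    url = os.environ.get('RADICAL_PILOT_DBURL')
    if not url:
        raise RuntimeError('Please set RADICAL_PILOT_DBURL')
    return url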

Example 49

Project: radical.pilot
Source File: session.py
View license
def get_session_frames (sids, db=None, cachedir=None) :

    # use like this: 
    #
    # session_frame, pilot_frame, unit_frame = rpu.get_session_frames (session, db, cachedir)
    # pandas.set_option('display.width', 1000)
    # print session_frame
    # print pilot_frame
    # print unit_frame
    #
    # u_min = unit_frame.ix[unit_frame['started'].idxmin()]['started']
    # u_max = unit_frame.ix[unit_frame['finished'].idxmax()]['finished']
    # print u_min
    # print u_max
    # print u_max - u_min

    mongo = None

    if not db:
        dburl = os.environ.get('RADICAL_PILOT_DBURL')
        if not dburl:
            raise RuntimeError ('Please set RADICAL_PILOT_DBURL')

        mongo, db, _, _, _ = ru.mongodb_connect(dburl)


    if not isinstance (sids, list) :
        sids = [sids]

    session_dicts = list()
    pilot_dicts   = list()
    unit_dicts    = list()

    for sid in sids :

        docs = get_session_docs (db, sid, cachedir=cachedir)

        session       = docs['session']
        session_start = session['created']
        session_dict  = {
            'sid'       : sid,
            'started'   : session['created'],
            'finished'  : None, 
            'n_pilots'  : len(docs['pilot']),
            'n_units'   : 0
            }

        last_pilot_event = 0
        for pilot in docs['pilot'] :

            pid         = pilot['_id']
            description = pilot.get ('description', dict())
            started     = pilot.get ('started')
            finished    = pilot.get ('finished')
            
            cores = 0

            if pilot['nodes'] and pilot['cores_per_node']:
                cores = len(pilot['nodes']) * pilot['cores_per_node']
            else:
                cores = description.get('cores')

            if started  : started  -= session_start
            if finished : finished -= session_start

            pilot_dict = {
                'sid'          : sid,
                'pid'          : pid, 
                'n_units'      : len(pilot.get ('unit_ids', list())), 
                'started'      : started,
                'finished'     : finished,
                'resource'     : description.get ('resource'),
                'cores'        : cores,
                'runtime'      : description.get ('runtime'),
                NEW            : None, 
                PENDING_LAUNCH : None, 
                LAUNCHING      : None, 
                PENDING_ACTIVE : None, 
                ACTIVE         : None, 
                DONE           : None, 
                FAILED         : None, 
                CANCELED       : None
            }

            for entry in pilot.get('statehistory', list()):
                state = entry['state']
                timer = entry['timestamp'] - session_start
                pilot_dict[state] = timer
                last_pilot_event  = max(last_pilot_event, timer)

            if not pilot_dict[NEW]:
                if pilot_dict[PENDING_LAUNCH]:
                    pilot_dict[NEW] = pilot_dict[PENDING_LAUNCH]
                else:
                    pilot_dict[NEW] = pilot_dict[LAUNCHING]

            pilot_dicts.append (pilot_dict)


        for unit in docs['unit']:

            uid         = unit['_id']
            started     = unit.get ('started')
            finished    = unit.get ('finished')
            description = unit.get ('description', dict())

            if started  : started  -= session_start
            if finished : finished -= session_start

            session_dict['n_units'] += 1

            unit_dict = {
                'sid'                  : sid, 
                'pid'                  : unit.get('pilot'), 
                'uid'                  : uid, 
                'started'              : started,
                'finished'             : finished,
                'cores'                : description.get ('cores'),
                'slots'                : unit.get ('slots'),
                NEW                    : None, 
                UNSCHEDULED            : None, 
                PENDING_INPUT_STAGING  : None, 
                STAGING_INPUT          : None, 
                EXECUTING_PENDING      : None,
                SCHEDULING             : None, 
                ALLOCATING             : None, 
                EXECUTING              : None, 
                PENDING_OUTPUT_STAGING : None, 
                STAGING_OUTPUT         : None, 
                DONE                   : None, 
                FAILED                 : None, 
                CANCELED               : None
            }

            for entry in unit.get('statehistory', list()):
                state = entry['state']
                timer = entry['timestamp'] - session_start
                unit_dict[state] = timer

            # FIXME: there is more state mess-up afloat: some states are
            # missing, even though we know they have happened.  For one, we
            # see data being staged w/o having a record of InputStaging
            # states.  Or we find callback history entries for states which
            # are not in the history...
            #
            # We try to clean up to some extent.  The policy is like this,
            # for any [pending_state, state] pair:
            #
            # - if both are in the state hist: great
            # - if one is in the state hist and the other in the cb hist,
            #   use them like that, but ensure that pending_state <= state
            # - if both are in the cb hist, use them, apply the same
            #   ordering check, and use the midpoint if the ordering is wrong
            # - if only one is in the cb hist, use the same value for both
            # - if neither is anywhere, leave both unset
            rec_hist = dict()
            cb_hist  = dict()

            for e in unit.get('statehistory', list()):
                state = e['state']
                timer = e['timestamp'] - session_start
                if state not in rec_hist:
                    rec_hist[state] = list()
                rec_hist[state].append(timer)

            for e in unit.get('callbackhistory', list()):
                state = e['state']
                timer = e['timestamp'] - session_start
                if state not in cb_hist:
                    cb_hist[state] = list()
                cb_hist[state].append(timer)

            statepairs = {STAGING_INPUT  : PENDING_INPUT_STAGING ,
                          STAGING_OUTPUT : PENDING_OUTPUT_STAGING}

            primary_states = [NEW                   ,
                              UNSCHEDULED           ,
                              STAGING_INPUT         ,
                              EXECUTING_PENDING     ,
                              SCHEDULING            ,
                              ALLOCATING            ,
                              EXECUTING             ,
                              STAGING_OUTPUT        ,
                              DONE                  ,
                              CANCELED              ,
                              FAILED                ]

            for state in primary_states:

                pend    = None
                t_state = None
                t_pend  = None

                ts_rec  = rec_hist.get (state) #         state time stamp from state hist
                ts_cb   = cb_hist.get  (state) #         state time stamp from cb    hist
                tp_rec  = None                 # pending state time stamp from state hist
                tp_cb   = None                 # pending state time stamp from cb    hist

                if  state in statepairs:
                    pend   = statepairs[state]
                    tp_rec = rec_hist.get (pend)
                    tp_cb  = cb_hist.get  (pend)

                # try to find a candidate for state timestamp
                if   ts_rec : t_state = ts_rec[0]
                elif ts_cb  : t_state = ts_cb [0]
                elif tp_rec : t_state = tp_rec[0]
                elif tp_cb  : t_state = tp_cb [0]

                # try to find a candidate for pending timestamp
                if   tp_rec : t_pend  = tp_rec[0]
                elif tp_cb  : t_pend  = tp_cb [0]

                # if there is no t_pend, check if there are two state times on
                # record (in the state hist), and if so, reorder
                if pend :
                    if t_state and not t_pend:
                        if ts_rec and len(ts_rec) == 2:
                            t_pend  = min (ts_rec)
                            t_state = max (ts_rec)
                        else:
                            t_pend  = t_state

                # make sure that any pending time comes before state time
                if pend:
                    if t_pend > t_state:
                      # print "%s : %s" % (uid, state)
                        t_med   = (t_pend + t_state) / 2
                        t_pend  = t_med
                        t_state = t_med

                # record the times for the data frame
                unit_dict[state] = t_state

                if pend :
                    unit_dict[pend] = t_pend


            if unit_dict[UNSCHEDULED] and unit_dict[SCHEDULING]:
                unit_dict[UNSCHEDULED] = min(unit_dict[UNSCHEDULED], unit_dict[SCHEDULING])

            if not unit_dict[NEW]:
                # fall back to the earliest recorded state, mirroring the
                # pilot case above (UNSCHEDULED precedes SCHEDULING)
                if unit_dict[UNSCHEDULED]:
                    unit_dict[NEW] = unit_dict[UNSCHEDULED]
                elif unit_dict[SCHEDULING]:
                    unit_dict[NEW] = unit_dict[SCHEDULING]


            unit_dicts.append (unit_dict)
        
        session_dict['finished'] = last_pilot_event
        session_dicts.append (session_dict)

    import pandas 
    session_frame = pandas.DataFrame (session_dicts)
    pilot_frame   = pandas.DataFrame (pilot_dicts)
    unit_frame    = pandas.DataFrame (unit_dicts)

    if mongo:
        mongo.close()

    return session_frame, pilot_frame, unit_frame
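
Note the fail-fast use of os.environ.get at the top of get_session_frames: no default value is passed, and a missing RADICAL_PILOT_DBURL raises a RuntimeError immediately instead of failing later at connect time. A minimal sketch of that pattern as a reusable helper (require_env is a made-up name, not part of radical.pilot):

import os

def require_env(name):
    # fail fast on a missing or empty mandatory environment variable
    value = os.environ.get(name)
    if not value:
        raise RuntimeError('Please set %s' % name)
    return value

# dburl = require_env('RADICAL_PILOT_DBURL')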

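The FIXME block inside get_session_frames describes the timestamp reconciliation policy in prose. The sketch below restates the core of that policy as a standalone function, omitting the two-timestamps-in-one-history reordering case; reconcile_pair is a made-up helper, and the histories are {state: [timestamps]} dicts as built in the example above:

def reconcile_pair(state, pend, rec_hist, cb_hist):
    # prefer the state history, fall back to the callback history
    ts = rec_hist.get(state) or cb_hist.get(state)
    tp = rec_hist.get(pend)  or cb_hist.get(pend)
    t_state = ts[0] if ts else (tp[0] if tp else None)
    # mirror a single known value onto the missing side
    t_pend  = tp[0] if tp else t_state
    # enforce pending <= state; fall back to the midpoint on violation
    if t_pend is not None and t_state is not None and t_pend > t_state:
        t_pend = t_state = (t_pend + t_state) / 2.0
    return t_pend, t_state
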
Example 50

Project: anaconda
Source File: anaconda_argparse.py
View license
def getArgumentParser(version_string, boot_cmdline=None):
    """Return the anaconda argument parser.

       :param str version_string: The version string, e.g. 23.19.5.
       :param pyanaconda.flags.BootArgs boot_cmdline: The boot command line options
       :rtype: AnacondaArgumentParser
    """

    datadir = os.environ.get("ANACONDA_DATADIR", "/usr/share/anaconda")

    # NOTE: for each long option (like '--repo'), AnacondaOptionParser
    # checks the boot arguments for bootarg_prefix+option ('inst.repo').
    # If require_prefix is False, it also accepts the option without the
    # bootarg_prefix ('repo').
    # See anaconda_optparse.py and BootArgs (in flags.py) for details.
    ap = AnacondaArgumentParser(bootarg_prefix="inst.", require_prefix=False)
    help_parser = HelpTextParser(os.path.join(datadir, "anaconda_options.txt"))

    # NOTE: store_false options will *not* get negated when the user does
    # "option=0" on the boot commandline (store_true options do, though).
    # Basically, don't use store_false unless the option starts with "no".

    # YET ANOTHER NOTE: If you change anything here:
    # a) document its usage in docs/boot-options.txt
    # b) be prepared to maintain it for a very long time
    # If this seems like too much trouble, *don't add a new option*!

    # Version
    ap.add_argument('--version', action='version', version="%(prog)s " + version_string)

    class SetCmdlineMode(Action):
        def __call__(self, parser, namespace, values, option_string=None):
            # We need to both set the display mode to text (TUI) and set the noninteractive flag
            setattr(namespace, "display_mode",  DisplayModes.TUI)
            setattr(namespace, "noninteractive", True)

    # Interface
    ap.add_argument("-G", "--graphical", dest="display_mode", action="store_const", const=DisplayModes.GUI,
                    default=DisplayModes.GUI, help=help_parser.help_text("graphical"))
    ap.add_argument("-T", "--text", dest="display_mode", action="store_const", const=DisplayModes.TUI,
                    help=help_parser.help_text("text"))
    ap.add_argument("-C", "--cmdline", action=SetCmdlineMode, nargs=0,
                    help=help_parser.help_text("cmdline"))
    ap.add_argument("--noninteractive", dest="noninteractive", action="store_true",
                    help=help_parser.help_text("noninteractive"))

    # Network
    ap.add_argument("--proxy", metavar='PROXY_URL', help=help_parser.help_text("proxy"))

    # Method of operation
    ap.add_argument("-d", "--debug", dest="debug", action="store_true",
                    default=False, help=help_parser.help_text("debug"))
    ap.add_argument("--ks", dest="ksfile", action="store_const",
                    metavar="KICKSTART_URL", const="/run/install/ks.cfg",
                    help=help_parser.help_text("ks"))
    ap.add_argument("--kickstart", dest="ksfile", metavar="KICKSTART_PATH",
                    help=help_parser.help_text("kickstart"))
    ap.add_argument("--rescue", dest="rescue", action="store_true", default=False,
                    help=help_parser.help_text("rescue"))
    ap.add_argument("--armplatform", dest="armPlatform", type=str, metavar="PLATFORM_ID",
                    help=help_parser.help_text("armplatform"))
    ap.add_argument("--multilib", dest="multiLib", action="store_true", default=False,
                    help=help_parser.help_text("multilib"))

    ap.add_argument("-m", "--method", dest="method", default=None, metavar="METHOD",
                    help=help_parser.help_text("method"))
    ap.add_argument("--askmethod", dest="askmethod", action="store_true", default=False,
                    help=help_parser.help_text("askmethod"))
    ap.add_argument("--repo", dest="method", default=None, metavar="REPO_URL",
                    help=help_parser.help_text("repo"))
    ap.add_argument("--stage2", dest="stage2", default=None, metavar="STAGE2_URL",
                    help=help_parser.help_text("stage2"))
    ap.add_argument("--noverifyssl", action="store_true", default=False,
                    help=help_parser.help_text("noverifyssl"))
    ap.add_argument("--liveinst", action="store_true", default=False,
                    help=help_parser.help_text("liveinst"))

    # Display
    ap.add_argument("--resolution", dest="runres", default=None, metavar="WIDTHxHEIGHT",
                    help=help_parser.help_text("resolution"))
    ap.add_argument("--usefbx", dest="xdriver", action="store_const", const="fbdev",
                    help=help_parser.help_text("usefbx"))
    ap.add_argument("--vnc", action="store_true", default=False,
                    help=help_parser.help_text("vnc"))
    ap.add_argument("--vncconnect", metavar="HOST:PORT", help=help_parser.help_text("vncconnect"))
    ap.add_argument("--vncpassword", default="", metavar="PASSWORD",
                    help=help_parser.help_text("vncpassword"))
    ap.add_argument("--xdriver", dest="xdriver", action="store", type=str,
                    default=None, metavar="DRIVER", help=help_parser.help_text("xdriver"))

    # Language
    ap.add_argument("--keymap", metavar="KEYMAP", help=help_parser.help_text("keymap"))
    ap.add_argument("--lang", metavar="LANG", help=help_parser.help_text("lang"))
    ap.add_argument("--singlelang", action="store_true", default=False,
                    help=help_parser.help_text("singlelang"))

    # Obvious
    ap.add_argument("--loglevel", metavar="LEVEL", help=help_parser.help_text("loglevel"))
    ap.add_argument("--syslog", metavar="HOST[:PORT]", help=help_parser.help_text("syslog"))
    ap.add_argument("--remotelog", metavar="HOST:PORT", help=help_parser.help_text("remotelog"))

    from pykickstart.constants import SELINUX_DISABLED, SELINUX_ENFORCING
    from pyanaconda.constants import SELINUX_DEFAULT
    ap.add_argument("--noselinux", dest="selinux", action="store_const",
                    const=SELINUX_DISABLED, default=SELINUX_DEFAULT,
                    help=help_parser.help_text("noselinux"))

    # Use a custom action to convert --selinux=0 and --selinux=1 into the
    # appropriate constants
    class ParseSelinux(Action):
        def __call__(self, parser, namespace, values, option_string=None):
            if values == "0":
                setattr(namespace, self.dest, SELINUX_DISABLED)
            else:
                setattr(namespace, self.dest, SELINUX_ENFORCING)

    ap.add_argument("--selinux", action=ParseSelinux, nargs="?", help=help_parser.help_text("selinux"))

    ap.add_argument("--nompath", dest="mpath", action="store_false", default=True,
                    help=help_parser.help_text("nompath"))
    ap.add_argument("--mpath", action="store_true", help=help_parser.help_text("mpath"))

    ap.add_argument("--nodmraid", dest="dmraid", action="store_false", default=True,
                    help=help_parser.help_text("nodmraid"))
    ap.add_argument("--dmraid", action="store_true", help=help_parser.help_text("dmraid"))

    ap.add_argument("--noibft", dest="ibft", action="store_false", default=True,
                    help=help_parser.help_text("noibft"))
    ap.add_argument("--ibft", action="store_true", help=help_parser.help_text("ibft"))

    # Geolocation
    ap.add_argument("--geoloc", metavar="PROVIDER_ID", help=help_parser.help_text("geoloc"))

    # legacy stuff
    ap.add_argument("--legacygrub", dest="legacygrub", action="store_true",
                    default=False, help=help_parser.help_text("legacygrub"))

    # Kickstart and log saving
    # - use a custom action to convert the values of the nosave option into appropriate flags
    class ParseNosave(Action):
        def __call__(self, parser, namespace, values, option_string=None):
            options = []
            if values:
                options = values.split(",")
            if "all" in options:
                flags_instance.nosave_input_ks = True
                flags_instance.nosave_output_ks = True
                flags_instance.nosave_logs = True
            else:
                if "all_ks" in options:
                    flags_instance.nosave_input_ks = True
                    flags_instance.nosave_output_ks = True
                else:
                    if "input_ks" in options:
                        flags_instance.nosave_input_ks = True
                    if "output_ks" in options:
                        flags_instance.nosave_output_ks = True
                if "logs" in options:
                    flags_instance.nosave_logs = True

    ap.add_argument("--nosave", action=ParseNosave, nargs="?", help=help_parser.help_text("nosave"))

    # Miscellaneous
    ap.add_argument("--nomount", dest="rescue_nomount", action="store_true", default=False,
                    help=help_parser.help_text("nomount"))
    ap.add_argument("--updates", dest="updateSrc", action="store", type=str,
                    metavar="UPDATES_URL", help=help_parser.help_text("updates"))
    ap.add_argument("--image", action="append", dest="images", default=[],
                    metavar="IMAGE_SPEC", help=help_parser.help_text("image"))
    ap.add_argument("--dirinstall", nargs="?",
                    const=os.environ.get("ANACONDA_ROOT_PATH", "/mnt/sysimage"),
                    help=help_parser.help_text("dirinstall"))
    ap.add_argument("--memcheck", action="store_true", default=True,
                    help=help_parser.help_text("memcheck"))
    ap.add_argument("--nomemcheck", action="store_false", dest="memcheck",
                    help=help_parser.help_text("nomemcheck"))
    ap.add_argument("--leavebootorder", action="store_true", default=False,
                    help=help_parser.help_text("leavebootorder"))
    ap.add_argument("--noeject", action="store_false", dest="eject", default=True,
                    help=help_parser.help_text("noeject"))
    ap.add_argument("--extlinux", action="store_true", default=False,
                    help=help_parser.help_text("extlinux"))
    ap.add_argument("--nombr", action="store_true", default=False,
                    help=help_parser.help_text("nombr"))
    ap.add_argument("--mpathfriendlynames", action="store_true", default=True,
                    help=help_parser.help_text("mpathfriendlynames"))
    ap.add_argument("--kexec", action="store_true", default=False,
                    help=help_parser.help_text("kexec"))

    # some defaults change based on cmdline flags
    if boot_cmdline is not None:
        if "console" in boot_cmdline:
            ap.set_defaults(display_mode=DisplayModes.TUI)

    return ap
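
Both ANACONDA_DATADIR and ANACONDA_ROOT_PATH above show the other common os.environ.get idiom: supplying a hard-coded fallback so the environment variable stays optional, here used to seed argparse defaults that the command line can still override. A minimal sketch of the same wiring (MYAPP_DATADIR is a made-up variable name):

import os
import argparse

parser = argparse.ArgumentParser()
# the environment supplies the default; an explicit --datadir overrides it
parser.add_argument("--datadir",
                    default=os.environ.get("MYAPP_DATADIR", "/usr/share/myapp"))
args = parser.parse_args(["--datadir", "/tmp/myapp"])
print(args.datadir)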