os.path.expanduser

Here are examples of the Python API os.path.expanduser, taken from open source projects.

200 Examples
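
As a quick sketch before the project code: os.path.expanduser replaces a leading "~" (or "~user") with the corresponding home directory and returns any other path unchanged, which is why the examples below use it to resolve user-level paths such as ~/.cookiecutterrc.

import os.path

# "~" expands to the current user's home directory
# (HOME on POSIX; USERPROFILE or HOMEDRIVE + HOMEPATH on Windows).
print(os.path.expanduser("~/.cookiecutterrc"))  # e.g. /home/alice/.cookiecutterrc

# "~user" expands to that user's home directory, when it can be resolved.
print(os.path.expanduser("~root/.ssh"))         # e.g. /root/.ssh

# Paths without a leading "~" are returned unchanged.
print(os.path.expanduser("/etc/hosts"))         # /etc/hosts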

Example 1

Project: pinguino-ide
Source File: pinguino_tools.py
    @Debugger.debug_method
    def compile(self, filename):
        """ Compile.

        NB :    "--opt-code-size"   deprecated
                "--use-non-free"    implicit -I and -L options for non-free headers and libs
                "-I" + os.path.join(self.P8_DIR, '..', 'sdcc', 'include', 'pic16'),\
                "-I" + os.path.join(self.P8_DIR, '..', 'sdcc', 'non-free', 'include', 'pic16'),\
        """

        ERROR = {"c": {},
                 "asm": {},}

        board = self.get_board()

        if board.arch == 32: return 0, None

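        # expanduser resolves a leading "~" in SOURCE_DIR to the user's home
        # directory; paths without "~" pass through unchanged.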
        fichier = open(os.path.join(os.path.expanduser(self.SOURCE_DIR), "stdout"), "w+")

        user_imports = self.get_user_imports_p8()
        #for lib_dir in self.USER_P8_LIBS:
            #user_imports.append("-I" + lib_dir)

        if board.bldr == 'boot2':
            sortie = Popen([self.COMPILER_8BIT,
                "--verbose",\
                "-mpic16",\
                "--denable-peeps",\
                "--obanksel=9",\
                "--optimize-cmp",\
                "--optimize-df",\
                "-p" + board.proc,\
                "-D" + board.board,\
                "-D" + board.bldr,\
                "-DBOARD=\"" + board.board + "\"",\
                "-DPROC=\"" + board.proc + "\"",\
                "-DBOOT_VER=2",\
                "--use-non-free",\
                "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'core'),\
                "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'libraries'),\
                "-I" + os.path.dirname(filename),\
                "--compile-only",\
                "-o" + os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.o'),\
                os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.c')] + user_imports,\
                stdout=fichier, stderr=STDOUT)


        elif board.bldr == 'boot4':
            sortie = Popen([self.COMPILER_8BIT,
                "--verbose",\
                "-mpic16",\
                "--denable-peeps",\
                "--obanksel=9",\
                "--optimize-cmp",\
                "--optimize-df",\
                # Do not remove --ivt-loc option
                "--ivt-loc=" + str(board.memstart),\
                "-p" + board.proc,\
                "-D" + board.board,\
                "-D" + board.bldr,\
                "-DBOARD=\"" + board.board + "\"",\
                "-DPROC=\"" + board.proc + "\"",\
                "-DBOOT_VER=4",\
                "--use-non-free",\
                "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'core'),\
                "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'libraries'),\
                "-I" + os.path.dirname(filename),\
                "--compile-only",\
                os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.c'),\
                "-o" + os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.o')] + user_imports,\
                stdout=fichier, stderr=STDOUT)

        elif board.bldr == 'noboot':
            sortie = Popen([self.COMPILER_8BIT,
                "--verbose",\
                "-mpic16",\
                "--denable-peeps",\
                "--obanksel=9",\
                "--optimize-cmp",\
                "--optimize-df",\
                "-p" + board.proc,\
                "-D" + board.board,\
                "-D" + board.bldr,\
                "-DBOARD=\"" + board.board + "\"",\
                "-DPROC=\"" + board.proc + "\"",\
                "-DBOOT_VER=0",\
                "--use-non-free",\
                "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'core'),\
                "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'libraries'),\
                "-I" + os.path.dirname(filename),\
                "--compile-only",\
                os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.c'),\
                "-o" + os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.o')] + user_imports,\
                stdout=fichier, stderr=STDOUT)


        sortie.communicate()
        if sortie.poll()!=0:
            #
            # Error treatment (RB: fixed 2012-11-15)
            #

            # set the file pointer to the beginning of stdout
            fichier.seek(0)

            # read lines until 'error' or 'Error' is found
            lines = fichier.readlines()
            errors_c = []
            errors_asm = []
            for ligne in lines:
                # C errors
                error_pos = ligne.find('error')
                if (error_pos != -1):
                    if os.name == "nt": err = 2
                    else: err = 1
                    error_line_number = ligne.split(":")[err]
                    error_message = ligne.split(":")[-1]
                    if error_line_number.isdigit():
                        errors_c.append({"line_number": error_line_number,
                                         "message": error_message,})

                # ASM errors
                error_pos = ligne.find('Error')
                if (error_pos != -1):
                    # do not display error line numbers since they come from the ASM file
                    # display error symbol instead
                    error_symbol = ligne[ligne.find("(_") + 2 : ligne.find(").")]
                    error_message = ligne[ error_pos + 13 : ligne.find("(_") - 1]
                    errors_asm.append(error_symbol)

            ERROR["c"] = errors_c
            ERROR["asm"] = errors_asm

        fichier.close()
        return sortie.poll(), ERROR

Example 2

Project: cookiecutter
Source File: conftest.py
@pytest.fixture(scope='function')
def clean_system(request):
    """
    Fixture that simulates a clean system with no config/cloned cookiecutters.

    It runs setup code, much as a unittest TestCase would. It also defines a
    local function that refers to values stored in local variables during
    setup, such as the location of the cookiecutters on disk. This function is
    registered as a teardown hook with `request.addfinalizer` at the very end
    of the fixture. Pytest runs that hook as soon as the fixture goes out of
    scope, in other words when the test has finished.

    During setup:

    * Back up the `~/.cookiecutterrc` config file to `~/.cookiecutterrc.backup`
    * Back up the `~/.cookiecutters/` dir to `~/.cookiecutters.backup/`
    * Back up the `~/.cookiecutter_replay/` dir to
      `~/.cookiecutter_replay.backup/`
    * Starts off a test case with no pre-existing `~/.cookiecutterrc` or
      `~/.cookiecutters/` or `~/.cookiecutter_replay/`

    During teardown:

    * Delete `~/.cookiecutters/` only if a backup is present at
      `~/.cookiecutters.backup/`
    * Delete `~/.cookiecutter_replay/` only if a backup is present at
      `~/.cookiecutter_replay.backup/`
    * Restore the `~/.cookiecutterrc` config file from
      `~/.cookiecutterrc.backup`
    * Restore the `~/.cookiecutters/` dir from `~/.cookiecutters.backup/`
    * Restore the `~/.cookiecutter_replay/` dir from
      `~/.cookiecutter_replay.backup/`

    """

    # If ~/.cookiecutterrc is pre-existing, move it to a temp location
    user_config_path = os.path.expanduser('~/.cookiecutterrc')
    user_config_path_backup = os.path.expanduser(
        '~/.cookiecutterrc.backup'
    )
    if os.path.exists(user_config_path):
        user_config_found = True
        shutil.copy(user_config_path, user_config_path_backup)
        os.remove(user_config_path)
    else:
        user_config_found = False

    # If the default cookiecutters_dir is pre-existing, move it to a
    # temp location
    cookiecutters_dir = os.path.expanduser('~/.cookiecutters')
    cookiecutters_dir_backup = os.path.expanduser('~/.cookiecutters.backup')
    cookiecutters_dir_found = backup_dir(
        cookiecutters_dir, cookiecutters_dir_backup
    )

    # If the default cookiecutter_replay_dir is pre-existing, move it to a
    # temp location
    cookiecutter_replay_dir = os.path.expanduser('~/.cookiecutter_replay')
    cookiecutter_replay_dir_backup = os.path.expanduser(
        '~/.cookiecutter_replay.backup'
    )
    cookiecutter_replay_dir_found = backup_dir(
        cookiecutter_replay_dir, cookiecutter_replay_dir_backup
    )

    def restore_backup():
        # If it existed, restore ~/.cookiecutterrc
        # We never write to ~/.cookiecutterrc, so this logic is simpler.
        if user_config_found and os.path.exists(user_config_path_backup):
            shutil.copy(user_config_path_backup, user_config_path)
            os.remove(user_config_path_backup)

        # Carefully delete the created ~/.cookiecutters dir only in certain
        # conditions.
        restore_backup_dir(
            cookiecutters_dir,
            cookiecutters_dir_backup,
            cookiecutters_dir_found
        )

        # Carefully delete the created ~/.cookiecutter_replay dir only in
        # certain conditions.
        restore_backup_dir(
            cookiecutter_replay_dir,
            cookiecutter_replay_dir_backup,
            cookiecutter_replay_dir_found
        )

    request.addfinalizer(restore_backup)

Example 3

Project: arkc-server
Source File: main.py
def main():
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="show detailed logs")
    parser.add_argument("-vv", action="store_true", dest="vv",
                        help="show debug logs")
    parser.add_argument(
        "--version", dest="version", action="store_true", help="show version number")
    parser.add_argument('-kg', '--keygen', dest="kg", action="store_true",
                        help="Generate a key string and quit, overriding other options")
    parser.add_argument('--get-meek', dest="dlmeek", action="store_true",
                        help="Download meek to home directory, overriding normal options")
    parser.add_argument('-c', '--config', dest="config", default=None,
                        help="specify a configuration file, required for ArkC to start")
    parser.add_argument("-t", action="store_true", dest="transmit",
                        help="use transmit server")

    parser.add_argument('-ep', "--use-external-proxy", action="store_true",
                        help="""use an external proxy server or handler running locally, e.g. polipo, for better performance.
Use this option to support proxy types other than HTTP, or to use authentication at the client-end proxy.
Falls back to the built-in Python proxy server otherwise.""")
    print(
        """ArkC Server V""" + VERSION + """ by ArkC Technology.
This program is distributed under the GNU General Public License, Version 2.
""")

    args = parser.parse_args()
    if args.version:
        print("ArkC Server Version " + VERSION)
        sys.exit()
    elif args.kg:
        print("Generating 2048 bit RSA key.")
        print("Writing to home directory " + os.path.expanduser('~'))
        generate_RSA(os.path.expanduser(
            '~') + os.sep + 'arkc_pri.asc', os.path.expanduser('~') + os.sep + 'arkc_pub.asc')
        print(
            "Please save the above settings to client and server side config files.")
        sys.exit()
    elif args.dlmeek:
        if sys.platform == 'linux2':
            link = "https://github.com/projectarkc/meek/releases/download/v0.2.2/meek-client"
            localfile = os.path.expanduser('~') + os.sep + "meek-client"
        elif sys.platform == 'win32':
            link = "https://github.com/projectarkc/meek/releases/download/v0.2.2/meek-client.exe"
            localfile = os.path.expanduser('~') + os.sep + "meek-client.exe"
        else:
            print(
                "MEEK for ArkC has no compiled executable for your OS platform. Please compile and install from source.")
            print(
                "Get source at https://github.com/projectarkc/meek/tree/master/meek-client")
            sys.exit()
        print(
            "Downloading meek plugin (meek-client) from github to " + localfile)
        urllib.urlretrieve(link, localfile)
        if sys.platform == 'linux2':
            st = os.stat(localfile)
            os.chmod(localfile, st.st_mode | stat.S_IEXEC)
            print("File made executable.")
        print("Finished. If no error, you may change obfs_level and update pt_exec to " +
              localfile + " to use meek.")
        sys.exit()
    elif args.config is None:
        logging.fatal("Config file (-c or --config) must be specified.\n")
        parser.print_help()
        sys.exit()

    # mapping client public sha1 --> (RSA key object, client private sha1)
    certs = dict()

    data = {}

    # Load json configuration file
    try:
        data_file = open(args.config)
        data = json.load(data_file)
        data_file.close()
    except Exception as err:
        logging.error("Fatal error while loading configuration file.")
        print(err)  # TODO: improve error processing
        sys.exit()

    try:
        for client in data["clients"]:
            with open(client[0], "r") as f:
                remote_cert_txt = f.read()
                remote_cert = RSA.importKey(remote_cert_txt)
                remote_cert_txt = remote_cert_txt.strip(
                    ' ').lstrip('\n')
                certs[sha1(remote_cert_txt).hexdigest()] =\
                     [remote_cert, client[1]]
    except KeyError:
        pass
    except Exception as err:
        print ("Fatal error while loading client certificate.")
        print (err)
        sys.exit()

    try:
        certsdbpath = data["clients_db"]
    except KeyError:
        certsdbpath = None

    try:
        certs_db = certstorage(certs, certsdbpath)
    except Exception as err:
        print ("Fatal error while loading clients' certificate.")
        print (err)
        sys.exit()

    if args.transmit:
        try:
            with open(data["central_cert"], "r") as f:
                central_cert_txt = f.read()
                central_cert = RSA.importKey(central_cert_txt)
        except Exception as err:
            print ("Fatal error while loading central certificate.")
            print (err)
            sys.exit()
    else:
        central_cert = None

    try:
        with open(data["local_cert_path"], "r") as f:
            local_cert = RSA.importKey(f.read())
        if not local_cert.has_private():
            print("Fatal error, no private key included in local certificate.")
    except IOError as err:
        print ("Fatal error while loading local certificate.")
        print (err)
        sys.exit()

    if args.vv:
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,
                            format="%(levelname)s: %(asctime)s; %(message)s")
    elif args.verbose:
        logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                            format="%(levelname)s: %(asctime)s; %(message)s")
    else:
        logging.basicConfig(stream=sys.stdout, level=logging.WARNING,
                            format="%(levelname)s: %(asctime)s; %(message)s")

    if not args.use_external_proxy:
        if "proxy_port" not in data:
            data["proxy_port"] = 8100
        start_proxy(data["proxy_port"])
    else:
        if "proxy_port" not in data:
            data["proxy_port"] = 8123

    if "udp_port" not in data:
        if args.transmit:
            data["udp_port"] = 8000
        else:
            data["udp_port"] = 53

    if "socks_proxy" not in data:
        data["socks_proxy"] = None

    if "delegated_domain" not in data:
        data["delegated_domain"] = "public.arkc.org"

    if "self_domain" not in data:
        data["self_domain"] = "freedom.arkc.org"

    if "pt_exec" not in data:
        data["pt_exec"] = "obfs4proxy"

    if "obfs_level" not in data:
        data["obfs_level"] = 0
    elif 1 <= int(data["obfs_level"]) <= 2:
        logging.error(
            "Support for obfs4proxy is experimental with known bugs. Run this mode at your own risk.")

    if "meek_url" not in data:
        data["meek_url"] = "https://arkc-reflect1.appspot.com/"

    # Start the loop
    try:
        reactor.listenUDP(
            data["udp_port"],
            Coordinator(
                data["proxy_port"],
                data["socks_proxy"],
                local_cert,
                certs_db,
                central_cert,
                data["delegated_domain"],
                data["self_domain"],
                data["pt_exec"],
                data["obfs_level"],
                data["meek_url"],
                args.transmit
            )
        )
    except CannotListenError as err:
        print(err.socketError)
        if data["udp_port"] <= 1024 and \
                str(err.socketError) == "[Errno 13] Permission denied":
            print("root privilege may be required to listen to low ports")
        exit()

    try:
        reactor.run()
    except KeyboardInterrupt:
        pass

Example 4

Project: radssh
Source File: shell.py
def radssh_shell_main():
    args = sys.argv[1:]
    defaults = config.load_settings()
    # Keep command line options separately, for reuse in sshconfig defaults
    cmdline_options = config.command_line_settings(args, defaults.get('user.settings'))
    defaults.update(cmdline_options)

    if 'socket.timeout' in defaults:
        socket.setdefaulttimeout(float(defaults['socket.timeout']))

    # Setup Logging
    logformat = '%(asctime)s %(levelname)-8s [%(name)s:%(thread)08X] %(message)s'
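    # The 'logdir' setting may contain strftime patterns and a leading "~";
    # both are expanded here before the directory is created.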
    logdir = os.path.expanduser(time.strftime(defaults.get('logdir', '')))
    if logdir:
        if not os.path.exists(logdir):
            os.mkdir(logdir)
        logging.basicConfig(filename=os.path.join(logdir, 'radssh.log'),
                            format=logformat)
    else:
        logging.basicConfig(format=logformat)
        pass
    try:
        logging.getLogger().setLevel(getattr(logging, defaults['loglevel'].upper()))
    except AttributeError:
        raise RuntimeError('RadSSH setting "loglevel" should be set to one of [CRITICAL,ERROR,WARNING,INFO,DEBUG] instead of "%s"', defaults['loglevel'])
    logger = logging.getLogger('radssh')

    # With logging setup, output any deferred warnings
    for w in paramiko_load_warnings:
        logger.warning(warnings.formatwarning(w.message, w.category, w.filename, w.lineno))

    # Make an AuthManager to handle user authentication
    a = ssh.AuthManager(defaults['username'],
                        auth_file=os.path.expanduser(defaults['authfile']),
                        try_auth_none=(defaults['try_auth_none'] == 'on'))

    # Load Plugins to aid in host lookups and add *commands dynamically
    loaded_plugins = {}
    exe_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    system_plugin_dir = os.path.join(exe_dir, 'plugins')
    disable_plugins = defaults['disable_plugins'].split(',')
    plugin_dirs = [x for x in defaults['plugins'].split(';') if x]
    plugin_dirs.append(system_plugin_dir)

    for x in plugin_dirs:
        plugin_dir = os.path.abspath(os.path.expanduser(x))
        if not os.path.exists(plugin_dir):
            continue
        for module in sorted(os.listdir(plugin_dir)):
            if module.endswith('.py') and not module.startswith('__'):
                plugin = module[:-3]
                # Skip modules found in more than one location, and ones explicitly disabled
                if plugin in loaded_plugins or plugin in disable_plugins:
                    continue
                try:
                    logger.info('Loading plugin module: %s', plugin)
                    this_plugin = radssh.plugins.load_plugin(os.path.join(plugin_dir, module))
                    if hasattr(this_plugin, 'init'):
                        logger.debug('Calling init method for plugin: %s', plugin)
                        this_plugin.init(defaults=defaults, auth=a, plugins=loaded_plugins, star_commands=star.commands, shell=shell)
                    if hasattr(this_plugin, 'star_commands'):
                        logger.debug('Registering *commands for plugin: %s %s', plugin, this_plugin.star_commands.keys())
                        star.commands.update(this_plugin.star_commands)
                    if hasattr(this_plugin, 'settings'):
                        prefix = 'plugin.%s.' % plugin
                        user_settings = {}
                        user_settings = dict([(k[len(prefix):], v) for k, v in defaults.items() if k.startswith(prefix)])
                        logger.info('Updating settings for plugin %s with: %s', plugin, user_settings)
                        this_plugin.settings.update(user_settings)
                    if hasattr(this_plugin, 'command_listener'):
                        command_listeners.append(this_plugin.command_listener)
                    loaded_plugins[plugin] = this_plugin

                except Exception as e:
                    logger.error('Failed to load plugin (%s): %s', plugin, repr(e))

    # Use command line args as connect list, or give user option to supply list now
    if not args:
        print('No command line arguments given.')
        print('You can connect to a number of hosts by hostname or IP')
        if loaded_plugins:
            print('You can also give symbolic names that can be translated by')
            print('the following loaded plugins:')
            for module, plugin in loaded_plugins.items():
                try:
                    lookup_doc = plugin.lookup.__doc__
                    print(module, plugin.__doc__)
                    print('\t%s' % lookup_doc)
                    try:
                        plugin.banner()
                    except AttributeError:
                        pass
                except AttributeError:
                    pass
        connect_list = raw_input('Enter a list of connection destinations: ').split()
    else:
        connect_list = args

    if not connect_list:
        sys.exit(0)

    # Do the connections if needed, offer names to plugin lookup() functions
    hosts = []
    for arg in connect_list:
        for helper, resolver in loaded_plugins.items():
            if hasattr(resolver, 'lookup'):
                try:
                    cluster = resolver.lookup(arg)
                    if cluster:
                        logger.debug('%s expanded by %s', arg, helper)
                        for label, host, conn in cluster:
                            if conn:
                                hosts.append((label, conn))
                            else:
                                hosts.append((label, host))
                        break
                except Exception as e:
                    logger.error('Exception looking up %s via %s: %r', arg, helper, e)
                    cluster = None
        else:
            hosts.append((arg, None))

    # Almost done with all the preliminary setup steps...
    if defaults['loglevel'] not in ('CRITICAL', 'ERROR'):
        print('*** Parallel Shell ***')
        print('Using AuthManager:', a)
        print('Logging to %s' % logdir)
        pprint.pprint(defaults, indent=4)
        print()
        star.star_help()

    # Create a RadSSHConsole instance for screen output
    job_buffer = int(defaults['stalled_job_buffer'])
    if defaults['shell.console'] != 'color' or not sys.stdout.isatty():
        console = RadSSHConsole(formatter=monochrome, retain_recent=job_buffer)
    else:
        console = RadSSHConsole(retain_recent=job_buffer)

    # Finally, we are able to create the Cluster
    print('Connecting to %d hosts...' % len(hosts))
    cluster = ssh.Cluster(hosts, auth=a, console=console, defaults=defaults)
    if defaults['loglevel'] not in ('CRITICAL', 'ERROR'):
        star.star_info(cluster, logdir, '', [])
    else:
        # If cluster is not 100% connected, let user know even if loglevel is not low enough
        ready, disabled, failed_auth, failed_connect, dropped = cluster.connection_summary()
        if any((failed_auth, failed_connect, dropped)):
            print('There were problems connecting to some nodes:')
            if failed_connect:
                print('    %d nodes failed to connect' % failed_connect)
            if failed_auth:
                print('    %d nodes failed authentication' % failed_auth)
            if dropped:
                print('    %d dropped connections' % dropped)
            print('    Use "*info" for connection details.')

    # Command line history support
    if defaults.get('historyfile'):
        histfile = os.path.expanduser(defaults['historyfile'])
        try:
            readline.read_history_file(histfile)
        except IOError:
            pass
        readline.set_history_length(int(os.environ.get('HISTSIZE', 1000)))
        atexit.register(readline.write_history_file, histfile)

    # Add TAB completion for *commands and remote file paths
    tab_completion = radssh_tab_handler(cluster, star)

    # With the cluster object, start interactive session
    shell(cluster=cluster, logdir=logdir, defaults=defaults)

Example 5

Project: bleachbit
Source File: Cleaner.py
    def get_commands(self, option_id):
        # This variable will collect fully expanded file names, and
        # at the end of this function, they will be checked to ensure they exist
        # and processed through Command.Delete().
        files = []

        # cache
        if 'posix' == os.name and 'cache' == option_id:
            dirname = os.path.expanduser("~/.cache/")
            for filename in children_in_directory(dirname, True):
                if self.whitelisted(filename):
                    continue
                files += [filename]

        # custom
        if 'custom' == option_id:
            for (c_type, c_path) in options.get_custom_paths():
                if 'file' == c_type:
                    files += [c_path]
                elif 'folder' == c_type:
                    files += [c_path]
                    for path in children_in_directory(c_path, True):
                        files += [path]
                else:
                    raise RuntimeError(
                        'custom folder has invalid type %s' % c_type)

        # menu
        menu_dirs = ['~/.local/share/applications',
                     '~/.config/autostart',
                     '~/.gnome/apps/',
                     '~/.gnome2/panel2.d/default/launchers',
                     '~/.gnome2/vfolders/applications/',
                     '~/.kde/share/apps/RecentDocuments/',
                     '~/.kde/share/mimelnk',
                     '~/.kde/share/mimelnk/application/ram.desktop',
                     '~/.kde2/share/mimelnk/application/',
                     '~/.kde2/share/applnk']

        if 'posix' == os.name and 'desktop_entry' == option_id:
            for dirname in menu_dirs:
                for filename in [fn for fn in children_in_directory(dirname, False)
                                 if fn.endswith('.desktop')]:
                    if Unix.is_broken_xdg_desktop(filename):
                        yield Command.Delete(filename)

        # unwanted locales
        if 'posix' == os.name and 'localizations' == option_id:
            for path in Unix.locales.localization_paths(locales_to_keep=options.get_languages()):
                if os.path.isdir(path):
                    for f in FileUtilities.children_in_directory(path, True):
                        yield Command.Delete(f)
                yield Command.Delete(path)

        # Windows logs
        if 'nt' == os.name and 'logs' == option_id:
            paths = (
                '$ALLUSERSPROFILE\\Application Data\\Microsoft\\Dr Watson\\*.log',
                '$ALLUSERSPROFILE\\Application Data\\Microsoft\\Dr Watson\\user.dmp',
                '$LocalAppData\\Microsoft\\Windows\\WER\\ReportArchive\\*\\*',
                '$LocalAppData\\Microsoft\\Windows\\WER\\ReportQueue\\*\\*',
                '$programdata\\Microsoft\\Windows\\WER\\ReportArchive\\*\\*',
                '$programdata\\Microsoft\\Windows\\WER\\ReportQueue\\*\\*',
                '$localappdata\\Microsoft\\Internet Explorer\\brndlog.bak',
                '$localappdata\\Microsoft\\Internet Explorer\\brndlog.txt',
                '$windir\\*.log',
                '$windir\\imsins.BAK',
                '$windir\\OEWABLog.txt',
                '$windir\\SchedLgU.txt',
                '$windir\\ntbtlog.txt',
                '$windir\\setuplog.txt',
                '$windir\\REGLOCS.OLD',
                '$windir\\Debug\\*.log',
                '$windir\\Debug\\Setup\\UpdSh.log',
                '$windir\\Debug\\UserMode\\*.log',
                '$windir\\Debug\\UserMode\\ChkAcc.bak',
                '$windir\\Debug\\UserMode\\userenv.bak',
                '$windir\\Microsoft.NET\\Framework\\*\\*.log',
                '$windir\\pchealth\\helpctr\\Logs\\hcupdate.log',
                '$windir\\security\\logs\\*.log',
                '$windir\\security\\logs\\*.old',
                '$windir\\SoftwareDistribution\\*.log',
                '$windir\\SoftwareDistribution\\DataStore\\Logs\\*',
                '$windir\\system32\\TZLog.log',
                '$windir\\system32\\config\\systemprofile\\Application Data\\Microsoft\\Internet Explorer\\brndlog.bak',
                '$windir\\system32\\config\\systemprofile\\Application Data\\Microsoft\\Internet Explorer\\brndlog.txt',
                '$windir\\system32\\LogFiles\\AIT\\AitEventLog.etl.???',
                '$windir\\system32\\LogFiles\\Firewall\\pfirewall.log*',
                '$windir\\system32\\LogFiles\\Scm\\SCM.EVM*',
                '$windir\\system32\\LogFiles\\WMI\\Terminal*.etl',
                '$windir\\system32\\LogFiles\\WMI\\RTBackup\\EtwRT.*etl',
                '$windir\\system32\\wbem\\Logs\\*.lo_',
                '$windir\\system32\\wbem\\Logs\\*.log', )

            for path in paths:
                expanded = expandvars(path)
                for globbed in glob.iglob(expanded):
                    files += [globbed]

        # memory
        if sys.platform.startswith('linux') and 'memory' == option_id:
            yield Command.Function(None, Memory.wipe_memory, _('Memory'))

        # memory dump
        # how to manually create this file
        # http://www.pctools.com/guides/registry/detail/856/
        if 'nt' == os.name and 'memory_dump' == option_id:
            fname = expandvars('$windir\\memory.dmp')
            if os.path.exists(fname):
                files += [fname]
            for fname in glob.iglob(expandvars('$windir\\Minidump\\*.dmp')):
                files += [fname]

        # most recently used documents list
        if 'posix' == os.name and 'recent_documents' == option_id:
            files += [os.path.expanduser("~/.recently-used")]
            # GNOME 2.26 (as seen on Ubuntu 9.04) will retain the list
            # in memory if it is simply deleted, so it must be shredded
            # (or at least truncated).
            #
            # GNOME 2.28.1 (Ubuntu 9.10) and 2.30 (10.04) do not re-read
            # the file after truncation, but do re-read it after
            # shredding.
            #
            # https://bugzilla.gnome.org/show_bug.cgi?id=591404
            for pathname in ["~/.recently-used.xbel", "~/.local/share/recently-used.xbel"]:
                pathname = os.path.expanduser(pathname)
                if os.path.lexists(pathname):
                    yield Command.Shred(pathname)
                    if HAVE_GTK:
                        gtk.RecentManager().purge_items()

        if 'posix' == os.name and 'rotated_logs' == option_id:
            for path in Unix.rotated_logs():
                yield Command.Delete(path)

        # temporary files
        if 'posix' == os.name and 'tmp' == option_id:
            dirnames = ['/tmp', '/var/tmp']
            for dirname in dirnames:
                for path in children_in_directory(dirname, True):
                    is_open = FileUtilities.openfiles.is_open(path)
                    ok = not is_open and os.path.isfile(path) and \
                        not os.path.islink(path) and \
                        FileUtilities.ego_owner(path) and \
                        not self.whitelisted(path)
                    if ok:
                        yield Command.Delete(path)

        # temporary files
        if 'nt' == os.name and 'tmp' == option_id:
            dirname = expandvars(
                "$USERPROFILE\\Local Settings\\Temp\\")
            # whitelist the folder %TEMP%\Low but not its contents
            # https://bugs.launchpad.net/bleachbit/+bug/1421726
            low = os.path.join(dirname, 'low').lower()
            for filename in children_in_directory(dirname, True):
                if not low == filename.lower():
                    yield Command.Delete(filename)
            dirname = expandvars("$windir\\temp\\")
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)

        # trash
        if 'posix' == os.name and 'trash' == option_id:
            dirname = os.path.expanduser("~/.Trash")
            for filename in children_in_directory(dirname, False):
                yield Command.Delete(filename)
            # fixme http://www.ramendik.ru/docs/trashspec.html
            # http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html
            # ~/.local/share/Trash
            # * GNOME 2.22, Fedora 9
            # * KDE 4.1.3, Ubuntu 8.10
            dirname = os.path.expanduser("~/.local/share/Trash/files")
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)
            dirname = os.path.expanduser("~/.local/share/Trash/info")
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)
            dirname = os.path.expanduser("~/.local/share/Trash/expunged")
            # [email protected] tells me that the trash
            # backend puts files in here temporarily, but in some situations
            # the files are stuck.
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)

        # clipboard
        if HAVE_GTK and 'clipboard' == option_id:
            def clear_clipboard():
                gtk.gdk.threads_enter()
                clipboard = gtk.clipboard_get()
                clipboard.set_text("")
                gtk.gdk.threads_leave()
                return 0
            yield Command.Function(None, clear_clipboard, _('Clipboard'))

        # overwrite free space
        shred_drives = options.get_list('shred_drives')
        if 'free_disk_space' == option_id and shred_drives:
            for pathname in shred_drives:
                # TRANSLATORS: 'Free' means 'unallocated.'
                # %s expands to a path such as C:\ or /tmp/
                display = _("Overwrite free disk space %s") % pathname

                def wipe_path_func():
                    for ret in FileUtilities.wipe_path(pathname, idle=True):
                        # Yield control to GTK idle because this process
                        # is very slow.  Also display progress.
                        yield ret
                    yield 0
                yield Command.Function(None, wipe_path_func, display)

        # MUICache
        if 'nt' == os.name and 'muicache' == option_id:
            keys = (
                'HKCU\\Software\\Microsoft\\Windows\\ShellNoRoam\\MUICache',
                'HKCU\\Software\\Classes\\Local Settings\\Software\\Microsoft\\Windows\\Shell\\MuiCache')
            for key in keys:
                yield Command.Winreg(key, None)

        # prefetch
        if 'nt' == os.name and 'prefetch' == option_id:
            for path in glob.iglob(expandvars('$windir\\Prefetch\\*.pf')):
                yield Command.Delete(path)

        # recycle bin
        if 'nt' == os.name and 'recycle_bin' == option_id:
            # This method allows shredding
            for path in Windows.get_recycle_bin():
                yield Command.Delete(path)
            # If there were any files deleted, Windows XP will show the
            # wrong icon for the recycle bin indicating it is not empty.
            # The icon will be incorrect until logging in to Windows again
            # or until it is emptied using the Windows API call for emptying
            # the recycle bin.

            # Windows 10 refreshes the recycle bin icon when the user
            # opens the recycle bin folder.

            # This is a hack to refresh the icon.
            import tempfile
            tmpdir = tempfile.mkdtemp()
            Windows.move_to_recycle_bin(tmpdir)
            try:
                Windows.empty_recycle_bin(None, True)
            except:
                logger = logging.getLogger(__name__)
                logger.info('error in empty_recycle_bin()', exc_info=True)

        # Windows Updates
        if 'nt' == os.name and 'updates' == option_id:
            for wu in Windows.delete_updates():
                yield wu

        # return queued files
        for filename in files:
            if os.path.lexists(filename):
                yield Command.Delete(filename)

Example 6

Project: mock
Source File: util.py
@traceLog()
def set_config_opts_per_cmdline(config_opts, options, args):
    "takes processed cmdline args and sets config options."
    config_opts['verbose'] = options.verbose
    config_opts['print_main_output'] = config_opts['verbose'] > 0 and sys.stderr.isatty()

    # do some other options and stuff
    if options.arch:
        config_opts['target_arch'] = options.arch
    if options.rpmbuild_arch:
        config_opts['rpmbuild_arch'] = options.rpmbuild_arch
    elif config_opts['rpmbuild_arch'] is None:
        config_opts['rpmbuild_arch'] = config_opts['target_arch']

    if not options.clean:
        config_opts['clean'] = options.clean

    if not options.check:
        config_opts['check'] = options.check

    if options.post_install:
        config_opts['post_install'] = options.post_install

    for option in options.rpmwith:
        options.rpmmacros.append("_with_%s --with-%s" %
                                 (option.replace("-", "_"), option))

    for option in options.rpmwithout:
        options.rpmmacros.append("_without_%s --without-%s" %
                                 (option.replace("-", "_"), option))

    for macro in options.rpmmacros:
        try:
            macro = macro.strip()
            k, v = macro.split(" ", 1)
            if not k.startswith('%'):
                k = '%%%s' % k
            config_opts['macros'].update({k: v})
        except:
            raise exception.BadCmdline(
                "Bad option for '--define' (%s).  Use --define 'macro expr'"
                % macro)

    if options.macrofile:
        config_opts['macrofile'] = os.path.expanduser(options.macrofile)
        if not os.path.isfile(config_opts['macrofile']):
            raise exception.BadCmdline(
                "Input rpm macros file does not exist: %s"
                % options.macrofile)

    if options.resultdir:
        config_opts['resultdir'] = os.path.expanduser(options.resultdir)
    if options.rootdir:
        config_opts['rootdir'] = os.path.expanduser(options.rootdir)
    if options.uniqueext:
        config_opts['unique-ext'] = options.uniqueext
    if options.rpmbuild_timeout is not None:
        config_opts['rpmbuild_timeout'] = options.rpmbuild_timeout

    for i in options.disabled_plugins:
        if i not in config_opts['plugins']:
            raise exception.BadCmdline(
                "Bad option for '--disable-plugin=%s'. Expecting one of: %s"
                % (i, config_opts['plugins']))
        config_opts['plugin_conf']['%s_enable' % i] = False
    for i in options.enabled_plugins:
        if i not in config_opts['plugins']:
            raise exception.BadCmdline(
                "Bad option for '--enable-plugin=%s'. Expecting one of: %s"
                % (i, config_opts['plugins']))
        config_opts['plugin_conf']['%s_enable' % i] = True
    for option in options.plugin_opts:
        try:
            p, kv = option.split(":", 1)
            k, v = kv.split("=", 1)
        except:
            raise exception.BadCmdline(
                "Bad option for '--plugin-option' (%s).  Use --plugin-option 'plugin:key=value'"
                % option)
        if p not in config_opts['plugins']:
            raise exception.BadCmdline(
                "Bad option for '--plugin-option' (%s).  No such plugin: %s"
                % (option, p))
        try:
            v = literal_eval(v)
        except:
            pass
        config_opts['plugin_conf'][p + "_opts"].update({k: v})

    global USE_NSPAWN
    USE_NSPAWN = config_opts['use_nspawn']
    if options.old_chroot:
        USE_NSPAWN = False
    if options.new_chroot:
        USE_NSPAWN = True

    if options.mode in ("rebuild",) and len(args) > 1 and not options.resultdir:
        raise exception.BadCmdline(
            "Must specify --resultdir when building multiple RPMS.")

    if options.cleanup_after is False:
        config_opts['cleanup_on_success'] = False
        config_opts['cleanup_on_failure'] = False

    if options.cleanup_after is True:
        config_opts['cleanup_on_success'] = True
        config_opts['cleanup_on_failure'] = True

    check_config(config_opts)
    # can't cleanup unless resultdir is separate from the root dir
    rootdir = os.path.join(config_opts['basedir'], config_opts['root'])
    if is_in_dir(config_opts['resultdir'] % config_opts, rootdir):
        config_opts['cleanup_on_success'] = False
        config_opts['cleanup_on_failure'] = False

    config_opts['cache_alterations'] = options.cache_alterations

    config_opts['online'] = options.online

    if options.pkg_manager:
        config_opts['package_manager'] = options.pkg_manager
    if options.mode == 'yum-cmd':
        config_opts['package_manager'] = 'yum'
    if options.mode == 'dnf-cmd':
        config_opts['package_manager'] = 'dnf'

    if options.short_circuit:
        config_opts['short_circuit'] = options.short_circuit
        config_opts['clean'] = False

    if options.rpmbuild_opts:
        config_opts['rpmbuild_opts'] = options.rpmbuild_opts

    config_opts['enable_disable_repos'] = options.enable_disable_repos

    if options.scm:
        try:
            from . import scm
        except ImportError as e:
            raise exception.BadCmdline(
                "Mock SCM module not installed: %s" % e)

        config_opts['scm'] = options.scm
        for option in options.scm_opts:
            try:
                k, v = option.split("=", 1)
                config_opts['scm_opts'].update({k: v})
            except:
                raise exception.BadCmdline(
                    "Bad option for '--scm-option' (%s).  Use --scm-option 'key=value'"
                    % option)


Example 7

Project: shovel
Source File: runner.py
def run(*args):
    '''Run the normal shovel functionality'''
    import os
    import sys
    import argparse
    import pkg_resources
    # First off, read the arguments
    parser = argparse.ArgumentParser(prog='shovel', description='Rake, for Python')

    parser.add_argument('method', help='The task to run')
    parser.add_argument('--verbose', dest='verbose', action='store_true',
        help='Be extra talkative')
    parser.add_argument('--dry-run', dest='dryRun', action='store_true',
        help='Show the args that would be used')

    ver = pkg_resources.require('shovel')[0].version
    parser.add_argument('--version', action='version',
        version='Shovel v %s' % ver, help='print the version of Shovel.')

    # Parse our arguments
    if args:
        clargs, remaining = parser.parse_known_args(args=args)
    else:  # pragma: no cover
        clargs, remaining = parser.parse_known_args()

    if clargs.verbose:
        logger.setLevel(logging.DEBUG)

    args, kwargs = parse(remaining)

    # Import all of the files we want
    shovel = Shovel()

    # Read in any tasks that have already been defined
    shovel.extend(Task.clear())

    for path in [
        os.path.expanduser('~/.shovel.py'),
        os.path.expanduser('~/.shovel')]:
        if os.path.exists(path):  # pragma: no cover
            shovel.read(path, os.path.expanduser('~/'))

    for path in ['shovel.py', 'shovel']:
        if os.path.exists(path):
            shovel.read(path)

    # If it's help we're looking for, look no further
    if clargs.method == 'help':
        print(help.shovel_help(shovel, *args, **kwargs))
    elif clargs.method == 'tasks':
        tasks = list(v for _, v in shovel.items())
        if not tasks:
            print('No tasks found!')
        else:
            names = list(t.fullname for t in tasks)
            docs = list(t.doc for t in tasks)

            # The width of the screen
            width = 80
            import shutil
            try:
                width, _ = shutil.get_terminal_size(fallback=(0, width))
            except AttributeError:
                pass

            # Create the format with padding for the longest name, and to
            # accommodate the screen width
            format = '%%-%is # %%-%is' % (
                max(len(name) for name in names), width)
            for name, doc in zip(names, docs):
                print(format % (name, doc))
    elif clargs.method:
        # Try to get the first command provided
        try:
            tasks = shovel.tasks(clargs.method)
        except KeyError:
            print('Could not find task "%s"' % clargs.method, file=sys.stderr)
            exit(1)

        if len(tasks) > 1:
            print('Specifier "%s" matches multiple tasks:' % clargs.method, file=sys.stderr)
            for task in tasks:
                print('\t%s' % task.fullname, file=sys.stderr)
            exit(2)

        task = tasks[0]
        if clargs.dryRun:
            print(task.dry(*args, **kwargs))
        else:
            task(*args, **kwargs)

Example 8

Project: glm-parser
Source File: spark_train.py
    def parallel_learn(self,
                       max_iter=-1,
                       dataPool=None,
                       shards=1,
                       fgen=None,
                       parser=None,
                       learner=None,
                       sc=None,
                       d_filename=None,
                       hadoop=False):
        '''
        This is the function which does distributed training using Spark


        :param max_iter: iterations for training the weight vector
        :param dir_name: the output directory storing the sharded data
        :param fgen: feature generator
        :param parser: parser for generating parse tree
        '''

        def create_dp(textString,fgen,format,sign):
            dp = data_pool.DataPool(textString=textString[1],fgen=fgen,format_list=format,comment_sign=sign)
            return dp


        def get_sent_num(dp):
            return dp.get_sent_num()

        if isinstance(fgen, basestring):
            fgen = getClassFromModule('get_local_vector', 'feature', fgen)

        dir_name     = dataPool.loadedPath()
        format_list  = dataPool.get_format_list()
        comment_sign = dataPool.get_comment_sign()


        # By default, when HDFS is configured for Spark, even local mode will try to
        # load from HDFS. The following code works around that.
        if hadoop == True:
            train_files= sc.wholeTextFiles(dir_name, minPartitions=10).cache()
        else:
            dir_name = os.path.abspath(os.path.expanduser(dir_name))
            print dir_name
            train_files= sc.wholeTextFiles("file://" + dir_name, minPartitions=10).cache()

        dp = train_files.map(lambda t: create_dp(t,fgen,format_list,comment_sign)).cache()

        if learner.__class__.__name__== "AveragePerceptronLearner":
            print "[INFO]: Using Averaged Perceptron Learner"
            fv = {}
            total_sent = dp.map(get_sent_num).sum()
            c = total_sent*max_iter
            for iteration in range(max_iter):
                print "[INFO]: Starting Iteration %d"%iteration
                # mapper: compute the weight vector in each shard using avg_perc_train
                feat_vec_list = dp.flatMap(lambda t: learner.parallel_learn(t,fv,parser))
                #reducer: combine the weight vectors from each shard
                feat_vec_list = feat_vec_list.combineByKey(lambda value: (value[0], value[1], 1),
                                 lambda x, value: (x[0] + value[0], x[1] + value[1], x[2] + 1),
                                 lambda x, y: (x[0] + y[0], x[1] + y[1], x[2]+y[2])).collect()

                fv = {}
                for (feat, (a,b,c)) in feat_vec_list:
                    fv[feat] = (float(a)/float(c),b)
                print "[INFO]: Iteration complete, total number of keys: %d"%len(fv.keys())

            self.w_vector.clear()
            for feat in fv.keys():
                self.w_vector[feat] = fv[feat][1]/c

        if learner.__class__.__name__== "PerceptronLearner":
            print "[INFO]: Using Perceptron Learner"
            fv = {}
            for iteration in range(max_iter):
                print "[INFO]: Starting Iteration %d"%iteration
                print "[INFO]: Initial Number of Keys: %d"%len(fv.keys())
                # mapper: compute the weight vector in each shard using avg_perc_train
                feat_vec_list = dp.flatMap(lambda t: learner.parallel_learn(t,fv,parser))
                #reducer: combine the weight vectors from each shard
                feat_vec_list = feat_vec_list.combineByKey(lambda value: (value, 1),
                                 lambda x, value: (x[0] + value, x[1] + 1),
                                 lambda x, y: (x[0] + y[0], x[1] + y[1])).collect()
                #fv = feat_vec_list.map(lambda (label, (value_sum, count)): (label, value_sum / count)).collectAsMap()

                fv = {}
                for (feat,(a,b)) in feat_vec_list:
                    fv[feat] = float(a)/float(b)
                print "[INFO]: Iteration complete"
            self.w_vector.clear()
            self.w_vector.iadd(fv)

        if d_filename is not None:
            if hadoop == False:
                print ("[INFO]: Dumping trained weight vector to local directory: " + os.path.abspath(os.path.expanduser(d_filename)))
                self.w_vector.dump(os.path.abspath(os.path.expanduser(d_filename)) + "_Iter_%d.db"%max_iter)
            else:
                print ("[INFO]: Dumping trained weight vector to HDFS")
                contents = []
                for k, v in self.w_vector.iteritems():
                    contents.append(str(k) + "    " + str(v) + "\n")
                print ("[INFO]: Dumping to: " + fileWrite(d_filename + "_Iter_%d.db" % max_iter, contents, sc))

Example 10

Project: littlechef
Source File: runner.py
View license
def _readconfig():
    """Configures environment variables"""
    config = ConfigParser.SafeConfigParser()
    try:
        found = config.read(littlechef.CONFIGFILE)
    except ConfigParser.ParsingError as e:
        abort(str(e))
    if not len(found):
        try:
            found = config.read(['config.cfg', 'auth.cfg'])
        except ConfigParser.ParsingError as e:
            abort(str(e))
        if len(found):
            print('\nDeprecationWarning: deprecated config file name \'{0}\'.'
                  ' Use {1}'.format(found[0], littlechef.CONFIGFILE))
        else:
            abort('No {0} file found in the current '
                  'directory'.format(littlechef.CONFIGFILE))

    in_a_kitchen, missing = _check_appliances()
    missing_str = lambda m: ' and '.join(', '.join(m).rsplit(', ', 1))
    if not in_a_kitchen:
        abort("Couldn't find {0}. "
              "Are you executing 'fix' outside of a kitchen?\n"
              "To create a new kitchen in the current directory "
              " type 'fix new_kitchen'".format(missing_str(missing)))

    # We expect an ssh_config file here,
    # and/or a user, (password/keyfile) pair
    try:
        env.ssh_config_path = config.get('userinfo', 'ssh-config')
    except ConfigParser.NoSectionError:
        abort('You need to define a "userinfo" section'
              ' in the config file. Refer to the README for help '
              '(http://github.com/tobami/littlechef)')
    except ConfigParser.NoOptionError:
        env.ssh_config_path = None

    if env.ssh_config_path:
        env.ssh_config = _SSHConfig()
        env.ssh_config_path = os.path.expanduser(env.ssh_config_path)
        env.use_ssh_config = True
        try:
            env.ssh_config.parse(open(env.ssh_config_path))
        except IOError:
            abort("Couldn't open the ssh-config file "
                  "'{0}'".format(env.ssh_config_path))
        except Exception:
            abort("Couldn't parse the ssh-config file "
                  "'{0}'".format(env.ssh_config_path))
    else:
        env.ssh_config = None

    # check for a gateway
    try:
        env.gateway = config.get('connection', 'gateway')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.gateway = None

    # check for http_proxy which will be put into solo.rb
    try:
        env.http_proxy = config.get('connection', 'http_proxy')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.http_proxy = None

    try:
        env.https_proxy = config.get('connection', 'https_proxy')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.https_proxy = None

    try:
        env.remove_data_bags = config.get('userinfo', 'remove_data_bags')
    except ConfigParser.NoOptionError:
        env.remove_data_bags = False
        
    # Check for an encrypted_data_bag_secret file and set the env option
    try:
        env.encrypted_data_bag_secret = config.get('userinfo',
                                                   'encrypted_data_bag_secret')
    except ConfigParser.NoOptionError:
        env.encrypted_data_bag_secret = None

    if env.encrypted_data_bag_secret:
        env.encrypted_data_bag_secret = os.path.expanduser(
            env.encrypted_data_bag_secret)
        try:
            open(env.encrypted_data_bag_secret)
        except IOError as e:
            abort("Failed to open encrypted_data_bag_secret file at "
                  "'{0}'".format(env.encrypted_data_bag_secret))

    try:
        sudo_prefix = config.get('ssh', 'sudo_prefix', raw=True)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        pass
    else:
        env.sudo_prefix = sudo_prefix

    try:
        env.user = config.get('userinfo', 'user')
    except ConfigParser.NoOptionError:
        if not env.ssh_config_path:
            msg = 'You need to define a user in the "userinfo" section'
            msg += ' of {0}. Refer to the README for help'
            msg += ' (http://github.com/tobami/littlechef)'
            abort(msg.format(littlechef.CONFIGFILE))
        user_specified = False
    else:
        user_specified = True

    try:
        env.password = config.get('userinfo', 'password') or None
    except ConfigParser.NoOptionError:
        pass

    try:
        # If keypair-file is empty, assign None or fabric will try to read key
        env.key_filename = config.get('userinfo', 'keypair-file') or None
    except ConfigParser.NoOptionError:
        pass

    if (user_specified and not env.password and not env.key_filename
            and not env.ssh_config):
        abort('You need to define a password, keypair file, or ssh-config '
              'file in {0}'.format(littlechef.CONFIGFILE))

    # Node's Chef Solo working directory for storing cookbooks, roles, etc.
    try:
        env.node_work_path = os.path.expanduser(config.get('kitchen',
                                                'node_work_path'))
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        env.node_work_path = littlechef.node_work_path
    else:
        if not env.node_work_path:
            abort('The "node_work_path" option cannot be empty')

    # Follow symlinks
    try:
        env.follow_symlinks = config.getboolean('kitchen', 'follow_symlinks')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        env.follow_symlinks = False

    try:
        env.berksfile = config.get('kitchen', 'berksfile')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:
        env.berksfile = None
    else:
        try:
            env.berksfile_cookbooks_directory = config.get('kitchen', 'berksfile_cookbooks_directory')
            littlechef.cookbook_paths.append(env.berksfile_cookbooks_directory)
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:
            if env.berksfile:
                env.berksfile_cookbooks_directory = tempfile.mkdtemp('littlechef-berks')
                littlechef.cookbook_paths.append(env.berksfile_cookbooks_directory)
            else:
                env.berksfile_cookbooks_directory = None
        chef.ensure_berksfile_cookbooks_are_installed()

    # Upload Directory
    try:
        env.sync_packages_dest_dir = config.get('sync-packages',
                                                'dest-dir')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.sync_packages_dest_dir = None

    # Local Directory
    try:
        env.sync_packages_local_dir = config.get('sync-packages',
                                                 'local-dir')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.sync_packages_local_dir = None

    try:
        env.autodeploy_chef = config.get('userinfo', 'autodeploy_chef') or None
    except ConfigParser.NoOptionError:
        env.autodeploy_chef = None
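
A pattern that recurs throughout _readconfig is: read a path-valued option, treat an empty value as "unset", and pass anything else through os.path.expanduser before touching the filesystem. A minimal stand-alone sketch of that pattern (the "~/.ssh/config" value is illustrative):

import os

def expand_path_option(value, default=None):
    # Empty or missing options fall back to the default; otherwise "~" is expanded.
    if not value:
        return default
    return os.path.expanduser(value)

ssh_config_path = expand_path_option("~/.ssh/config")
if ssh_config_path and os.path.isfile(ssh_config_path):
    with open(ssh_config_path) as f:
        ssh_config_text = f.read()  # hand this to whatever SSH config parser you use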

Example 11

Project: pysftpserver
Source File: proxystorage.py
View license
    def __init__(self, remote,
                 key=None, port=None,
                 ssh_config_path=None, ssh_agent=False,
                 known_hosts_path=None):
        """Home sweet home.

        Init the transport and then the client.
        """
        if '@' in remote:
            self.username, self.hostname = remote.split('@', 1)
        else:
            self.username, self.hostname = None, remote

        self.password = None
        if self.username and ':' in self.username:
            self.username, self.password = self.username.split(':', 1)

        self.port = None

        if ssh_config_path:
            try:
                with open(os.path.expanduser(ssh_config_path)) as c_file:
                    ssh_config = paramiko.SSHConfig()
                    ssh_config.parse(c_file)
                    c = ssh_config.lookup(self.hostname)

                    self.hostname = c.get("hostname", self.hostname)
                    self.username = c.get("user", self.username)
                    self.port = int(c.get("port", port))
                    key = c.get("identityfile", key)
            except Exception as e:
                # it could be safe to continue anyway,
                # because parameters could have been manually specified
                print(
                    "Error while parsing ssh_config file: {}. Trying to continue anyway...".format(e)
                )

        # Set default values
        if not self.username:
            self.username = getuser()  # defaults to current user

        if not self.port:
            self.port = port if port else 22

        self.pkeys = list()
        if ssh_agent:
            try:
                agent = paramiko.agent.Agent()
                self.pkeys.extend(agent.get_keys())

                if not self.pkeys:
                    agent.close()
                    print(
                        "SSH agent didn't provide any valid key. Trying to continue..."
                    )

            except paramiko.SSHException:
                agent.close()
                print(
                    "SSH agent speaks a non-compatible protocol. Ignoring it.")

        if key and not self.password and not self.pkeys:
            key = os.path.expanduser(key)
            try:
                self.pkeys.append(paramiko.RSAKey.from_private_key_file(key))
            except paramiko.PasswordRequiredException:
                print("It seems that your private key is encrypted. Please configure me to use ssh_agent.")
                sys.exit(1)
            except Exception:
                print(
                    "Something went wrong while opening {}. Exiting.".format(
                        key)
                )
                sys.exit(1)
        elif not key and not self.password and not self.pkeys:
            print(
                "You need to specify either a password, an identity or to enable the ssh-agent support."
            )
            sys.exit(1)

        try:
            self.transport = paramiko.Transport((self.hostname, self.port))
        except socket.gaierror:
            print(
                "Hostname not known. Are you sure you inserted it correctly?")
            sys.exit(1)

        try:
            self.transport.start_client()

            if known_hosts_path:
                known_hosts = paramiko.HostKeys()
                known_hosts_path = os.path.realpath(
                    os.path.expanduser(known_hosts_path))

                try:
                    known_hosts.load(known_hosts_path)
                except IOError:
                    print(
                        "Error while loading known hosts file at {}. Exiting...".format(
                            known_hosts_path)
                    )
                    sys.exit(1)

                ssh_host = self.hostname if self.port == 22 else "[{}]:{}".format(
                    self.hostname, self.port)
                pub_k = self.transport.get_remote_server_key()
                if ssh_host in known_hosts.keys() and not known_hosts.check(ssh_host, pub_k):
                    print(
                        "Security warning: "
                        "remote key fingerprint {} for hostname "
                        "{} didn't match the one in known_hosts {}. "
                        "Exiting...".format(
                            pub_k.get_base64(),
                            ssh_host,
                            known_hosts.lookup(self.hostname),
                        )
                    )
                    sys.exit(1)

            if self.password:
                self.transport.auth_password(
                    username=self.username,
                    password=self.password
                )
            else:
                for pkey in self.pkeys:
                    try:
                        self.transport.auth_publickey(
                            username=self.username,
                            key=pkey
                        )
                        break
                    except paramiko.SSHException as e:
                        print(
                            "Authentication with identity {}... failed".format(
                                pkey.get_base64()[:10]
                            )
                        )
                else:  # none of the keys worked
                    raise paramiko.SSHException
        except paramiko.SSHException:
            print(
                "None of the provided authentication methods worked. Exiting."
            )
            self.transport.close()
            sys.exit(1)

        self.client = paramiko.SFTPClient.from_transport(self.transport)

        # Let's retrieve the current dir
        self.client.chdir('.')
        self.home = self.client.getcwd()
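
The ssh_config handling above boils down to: expand the path, parse it with paramiko, and use the lookup result to fill in any connection parameters the caller did not supply. A minimal sketch, assuming paramiko is installed (lookup_host is an illustrative helper name):

import os
import paramiko

def lookup_host(alias, config_path="~/.ssh/config"):
    # ssh config paths are usually given relative to $HOME, so expand "~" before opening.
    with open(os.path.expanduser(config_path)) as f:
        cfg = paramiko.SSHConfig()
        cfg.parse(f)
    entry = cfg.lookup(alias)
    # Fall back to sane defaults when the alias is not mentioned in the config.
    return entry.get("hostname", alias), entry.get("user"), int(entry.get("port", 22))

# hostname, user, port = lookup_host("myserver")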

Example 12

Project: sftpclone
Source File: sftpclone.py
View license
    def __init__(self, local_path, remote_url,
                 identity_files=None, port=None, fix_symlinks=False,
                 ssh_config_path=None, ssh_agent=False,
                 exclude_file=None, known_hosts_path=None,
                 delete=True, allow_unknown=False
                 ):
        """Init the needed parameters and the SFTPClient."""
        self.local_path = os.path.realpath(os.path.expanduser(local_path))
        self.logger = logger or configure_logging()

        if not os.path.exists(self.local_path):
            self.logger.error("Local path MUST exist. Exiting.")
            sys.exit(1)

        if exclude_file:
            with open(exclude_file) as f:
                # As in rsync's exclude from, ignore lines with leading ; and #
                # and treat each path as relative (thus by removing the leading
                # /)
                exclude_list = [
                    line.rstrip().lstrip("/")
                    for line in f
                    if not line.startswith((";", "#"))
                ]

                # actually, this is a set of excluded files
                self.exclude_list = {
                    g
                    for pattern in exclude_list
                    for g in glob.glob(path_join(self.local_path, pattern))
                }
        else:
            self.exclude_list = set()

        username, password, hostname, self.remote_path = parse_username_password_hostname(remote_url)

        identity_files = identity_files or []
        if ssh_config_path:
            try:
                with open(os.path.expanduser(ssh_config_path)) as c_file:
                    ssh_config = paramiko.SSHConfig()
                    ssh_config.parse(c_file)
                    c = ssh_config.lookup(hostname)

                    hostname = c.get("hostname", hostname)
                    username = c.get("user", username)
                    port = int(c.get("port", port))
                    identity_files = c.get("identityfile", identity_files)
            except Exception as e:
                # it could be safe to continue anyway,
                # because parameters could have been manually specified
                self.logger.error(
                    "Error while parsing ssh_config file: %s. Trying to continue anyway...", e
                )

        # Set default values
        if not username:
            username = getuser()  # defaults to current user

        port = port or 22
        allow_unknown = allow_unknown or False

        self.chown = False
        self.fix_symlinks = fix_symlinks or False
        self.delete = delete if delete is not None else True

        agent_keys = list()
        agent = None

        if ssh_agent:
            try:
                agent = paramiko.agent.Agent()
                _agent_keys = agent.get_keys()

                if not _agent_keys:
                    agent.close()
                    self.logger.error(
                        "SSH agent didn't provide any valid key. Trying to continue..."
                    )
                else:
                    agent_keys.extend(_agent_keys)

            except paramiko.SSHException:
                if agent:
                    agent.close()
                self.logger.error(
                    "SSH agent speaks a non-compatible protocol. Ignoring it.")

        if not identity_files and not password and not agent_keys:
            self.logger.error(
                "You need to specify either a password, an identity or to enable the ssh-agent support."
            )
            sys.exit(1)

        # only root can change file owner
        if username == 'root':
            self.chown = True

        try:
            transport = paramiko.Transport((hostname, port))
        except socket.gaierror:
            self.logger.error(
                "Hostname not known. Are you sure you inserted it correctly?")
            sys.exit(1)

        try:
            ssh_host = hostname if port == 22 else "[{}]:{}".format(hostname, port)
            known_hosts = None

            """
            Before starting the transport session, we have to configure it.
            Specifically, we need to configure the preferred PK algorithm.
            If the system already knows a public key of a specific kind for
            a remote host, we have to peek its type as the preferred one.
            """
            if known_hosts_path:
                known_hosts = paramiko.HostKeys()
                known_hosts_path = os.path.realpath(
                    os.path.expanduser(known_hosts_path))

                try:
                    known_hosts.load(known_hosts_path)
                except IOError:
                    self.logger.error(
                        "Error while loading known hosts file at {}. Exiting...".format(
                            known_hosts_path)
                    )
                    sys.exit(1)

                known_keys = known_hosts.lookup(ssh_host)
                if known_keys is not None:
                    # one or more keys are already known
                    # set their type as preferred
                    transport.get_security_options().key_types = \
                        tuple(known_keys.keys())

            transport.start_client()

            if not known_hosts:
                self.logger.warning("Security warning: skipping known hosts check...")
            else:
                pubk = transport.get_remote_server_key()
                if ssh_host in known_hosts.keys():
                    if not known_hosts.check(ssh_host, pubk):
                        self.logger.error(
                            "Security warning: "
                            "remote key fingerprint {} for hostname "
                            "{} didn't match the one in known_hosts {}. "
                            "Exiting...".format(
                                pubk.get_base64(),
                                ssh_host,
                                known_hosts.lookup(hostname),
                            )
                        )
                        sys.exit(1)
                elif not allow_unknown:
                    prompt = ("The authenticity of host '{}' can't be established.\n"
                              "{} key is {}.\n"
                              "Are you sure you want to continue connecting? [y/n] ").format(
                        ssh_host, pubk.get_name(), pubk.get_base64())

                    try:
                        # Renamed to `input` in Python 3.x
                        response = raw_input(prompt)
                    except NameError:
                        response = input(prompt)

                    # Note: we do not modify the user's known_hosts file

                    if not (response == "y" or response == "yes"):
                        self.logger.error(
                            "Host authentication failed."
                        )
                        sys.exit(1)

            def perform_key_auth(pkey):
                try:
                    transport.auth_publickey(
                        username=username,
                        key=pkey
                    )
                    return True
                except paramiko.SSHException:
                    self.logger.warning(
                        "Authentication with identity {}... failed".format(pkey.get_base64()[:10])
                    )
                    return False

            if password:  # Password auth, if specified.
                transport.auth_password(
                    username=username,
                    password=password
                )
            elif agent_keys:  # SSH agent keys have higher priority
                for pkey in agent_keys:
                    if perform_key_auth(pkey):
                        break  # Authentication worked.
                else:  # None of the keys worked.
                    raise paramiko.SSHException
            elif identity_files:  # Then follow identity file (specified from CL or ssh_config)
                # Try identity files one by one, until one works
                for key_path in identity_files:
                    key_path = os.path.expanduser(key_path)

                    try:
                        key = paramiko.RSAKey.from_private_key_file(key_path)
                    except paramiko.PasswordRequiredException:
                        pk_password = getpass(
                            "It seems that your identity from '{}' is encrypted. "
                            "Please enter your password: ".format(key_path)
                        )

                        try:
                            key = paramiko.RSAKey.from_private_key_file(key_path, pk_password)
                        except paramiko.SSHException:
                            self.logger.error(
                                "Incorrect passphrase. Cannot decode private key from '{}'.".format(key_path)
                            )
                            continue
                    except (IOError, paramiko.SSHException):
                        self.logger.error(
                            "Something went wrong while opening '{}'. Skipping it.".format(key_path)
                        )
                        continue

                    if perform_key_auth(key):
                        break  # Authentication worked.

                else:  # None of the keys worked.
                    raise paramiko.SSHException
            else:  # No authentication method specified, we shouldn't arrive here.
                assert False
        except paramiko.SSHException:
            self.logger.error(
                "None of the provided authentication methods worked. Exiting."
            )
            transport.close()
            sys.exit(1)
        finally:
            if agent:
                agent.close()

        self.sftp = paramiko.SFTPClient.from_transport(transport)

        if self.remote_path.startswith("~"):
            # nasty hack to let getcwd work without changing dir!
            self.sftp.chdir('.')
            self.remote_path = self.remote_path.replace(
                "~", self.sftp.getcwd())  # home is the initial sftp dir

Example 14

Project: Veil-Catapult
Source File: update.py
View license
def generateConfig(options):
    
    config = """#!/usr/bin/python

##################################################################################################
#
# Veil-Framework configuration file                                               
#
# Run update.py to automatically set all these options to their defaults.
#
##################################################################################################



#################################################
#
# General system options
#
#################################################

"""
    print "\n Veil-Framework configuration:"

    config += '# OS to use (Kali/Backtrack/Debian/Windows)\n'
    config += 'OPERATING_SYSTEM="' + options['OPERATING_SYSTEM'] + '"\n\n'
    print "\n [*] OPERATING_SYSTEM = " + options['OPERATING_SYSTEM']

    config += '# Terminal clearing method to use\n'
    config += 'TERMINAL_CLEAR="' + options['TERMINAL_CLEAR'] + '"\n\n'
    print " [*] TERMINAL_CLEAR = " + options['TERMINAL_CLEAR']

    config += '# Path to temporary directory\n'
    config += 'TEMP_DIR="' + options["TEMP_DIR"] + '"\n\n'
    print " [*] TEMP_DIR = " + options["TEMP_DIR"]

    config += '# Default options to pass to msfvenom for shellcode creation\n'
    config += 'MSFVENOM_OPTIONS="' + options['MSFVENOM_OPTIONS'] + '"\n\n'
    print " [*] MSFVENOM_OPTIONS = " + options['MSFVENOM_OPTIONS']
    
    config += '# The path to the metasploit framework, for example: /usr/share/metasploit-framework/\n'
    config += 'METASPLOIT_PATH="' + options['METASPLOIT_PATH'] + '"\n\n'
    print " [*] METASPLOIT_PATH = " + options['METASPLOIT_PATH']

    config += '# The path to pyinstaller, for example: /usr/share/pyinstaller/\n'
    config += 'PYINSTALLER_PATH="' + options['PYINSTALLER_PATH'] + '"\n\n'
    print " [*] PYINSTALLER_PATH = " + options['PYINSTALLER_PATH'] + "\n"


    config += """
#################################################
#
# Veil-Evasion specific options
#
#################################################

"""
    config += '# Veil-Evasion install path\n'
    config += 'VEIL_EVASION_PATH="' + options['VEIL_EVASION_PATH'] + '"\n\n'
    print " [*] VEIL_EVASION_PATH = " + options['VEIL_EVASION_PATH']
    
    source_path = os.path.expanduser(options["PAYLOAD_SOURCE_PATH"])
    config += '# Path to output the source of payloads\n'
    config += 'PAYLOAD_SOURCE_PATH="' + source_path + '"\n\n'
    print " [*] PAYLOAD_SOURCE_PATH = " + source_path

    # create the output source path if it doesn't exist
    if not os.path.exists(source_path): 
        os.makedirs(source_path)
        print " [*] Path '" + source_path + "' Created"
    
    compiled_path = os.path.expanduser(options["PAYLOAD_COMPILED_PATH"])
    config += '# Path to output compiled payloads\n'
    config += 'PAYLOAD_COMPILED_PATH="' + compiled_path +'"\n\n'
    print " [*] PAYLOAD_COMPILED_PATH = " + compiled_path

    # create the output compiled path if it doesn't exist
    if not os.path.exists( compiled_path ): 
        os.makedirs( compiled_path )
        print " [*] Path '" + compiled_path + "' Created"

    handler_path = os.path.expanduser(options["HANDLER_PATH"])
    # create the output compiled path if it doesn't exist
    if not os.path.exists( handler_path ): 
        os.makedirs( handler_path )
        print " [*] Path '" + handler_path + "' Created"

    config += '# Whether to generate a msf handler script and where to place it\n'
    config += 'GENERATE_HANDLER_SCRIPT="' + options['GENERATE_HANDLER_SCRIPT'] + '"\n'
    print " [*] GENERATE_HANDLER_SCRIPT = " + options['GENERATE_HANDLER_SCRIPT']
    config += 'HANDLER_PATH="' + handler_path + '"\n\n'
    print " [*] HANDLER_PATH = " + handler_path

    hash_path = os.path.expanduser(options["HASH_LIST"])
    config += '# Running hash list of all payloads generated\n'
    config += 'HASH_LIST="' + hash_path + '"\n\n'
    print " [*] HASH_LIST = " + hash_path + "\n"


    config += """
#################################################
#
# Veil-Catapult specific options
#
#################################################

"""
    config += '# Veil-Catapult install path\n'
    config += 'VEIL_CATAPULT_PATH="' + options['VEIL_CATAPULT_PATH'] + '"\n\n'
    print " [*] VEIL_CATAPULT_PATH = " + options['VEIL_CATAPULT_PATH']

    catapult_resource_path = os.path.expanduser(options["CATAPULT_RESOURCE_PATH"])
    # create the catapult resource path if it doesn't exist
    if not os.path.exists( catapult_resource_path ): 
        os.makedirs( catapult_resource_path )
        print " [*] Path '" + catapult_resource_path + "' Created"
    config += '# Path to output Veil-Catapult resource/cleanup files\n'
    config += 'CATAPULT_RESOURCE_PATH="' + catapult_resource_path + '"\n\n'
    print " [*] CATAPULT_RESOURCE_PATH = " + catapult_resource_path


    config += '# Whether to automatically spawn a handler for a Veil-Evasion produced payloads\n'
    config += 'SPAWN_CATAPULT_HANDLER="' + options['SPAWN_CATAPULT_HANDLER'] + '"\n\n'
    print " [*] SPAWN_CATAPULT_HANDLER = " + options['SPAWN_CATAPULT_HANDLER'] + "\n"


    if platform.system() == "Linux":
        # create the output compiled path if it doesn't exist
        if not os.path.exists("/etc/veil/"): 
            os.makedirs("/etc/veil/")
            print " [*] Path '/etc/veil/' Created"
        f = open("/etc/veil/settings.py", 'w')
        f.write(config)
        f.close()
        print " Configuration File Written To '/etc/veil/settings.py'\n"
    else:
        print " [!] ERROR: PLATFORM NOT CURRENTLY SUPPORTED"
        sys.exit()
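
Several of the output paths in generateConfig follow the same expand-then-create pattern. Isolated, and with an illustrative directory name, it looks like this:

import os

def ensure_dir(path):
    # Expand "~" first; os.makedirs would otherwise create a literal "~" directory
    # relative to the current working directory.
    expanded = os.path.expanduser(path)
    if not os.path.exists(expanded):
        os.makedirs(expanded)
    return expanded

# ensure_dir("~/veil-output/compiled/")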

Example 15

Project: Veil-Evasion
Source File: update.py
View license
def generateConfig(options):

    config = """#!/usr/bin/python

##################################################################################################
#
# Veil-Framework configuration file
#
# Run update.py to automatically set all these options to their defaults.
#
##################################################################################################



#################################################
#
# General system options
#
#################################################

"""
    print "\n Veil-Framework configuration:"

    config += '# OS to use (Kali/Backtrack/Debian/Windows)\n'
    config += 'OPERATING_SYSTEM="' + options['OPERATING_SYSTEM'] + '"\n\n'
    print "\n [*] OPERATING_SYSTEM = " + options['OPERATING_SYSTEM']

    config += '# Terminal clearing method to use (use "false" to disable it)\n'
    config += 'TERMINAL_CLEAR="' + options['TERMINAL_CLEAR'] + '"\n\n'
    print " [*] TERMINAL_CLEAR = " + options['TERMINAL_CLEAR']

    config += '# Wine environment\n'
    config += 'WINEPREFIX="' + options["WINEPREFIX"] + '"\n\n'
    print " [*] WINEPREFIX = " + options["WINEPREFIX"]

    config += '# Path to temporary directory\n'
    config += 'TEMP_DIR="' + options["TEMP_DIR"] + '"\n\n'
    print " [*] TEMP_DIR = " + options["TEMP_DIR"]

    config += '# Default options to pass to msfvenom for shellcode creation\n'
    config += 'MSFVENOM_OPTIONS="' + options['MSFVENOM_OPTIONS'] + '"\n\n'
    print " [*] MSFVENOM_OPTIONS = " + options['MSFVENOM_OPTIONS']

    config += '# The path to the metasploit framework, for example: /usr/share/metasploit-framework/\n'
    config += 'METASPLOIT_PATH="' + options['METASPLOIT_PATH'] + '"\n\n'
    print " [*] METASPLOIT_PATH = " + options['METASPLOIT_PATH']

    config += '# The path to msfvenom for shellcode generation purposes\n'
    config += 'MSFVENOM_PATH="' + options["MSFVENOM_PATH"] + '"\n\n'
    print " [*] MSFVENOM_PATH = " + options["MSFVENOM_PATH"]

    config += '# The path to pyinstaller, for example: /opt/pyinstaller-2.0/\n'
    config += 'PYINSTALLER_PATH="' + options['PYINSTALLER_PATH'] + '"\n\n'
    print " [*] PYINSTALLER_PATH = " + options['PYINSTALLER_PATH'] + "\n"


    config += """
#################################################
#
# Veil-Evasion specific options
#
#################################################

"""
    config += '# Veil-Evasion install path\n'
    config += 'VEIL_EVASION_PATH="' + options['VEIL_EVASION_PATH'] + '"\n\n'
    print " [*] VEIL_EVASION_PATH = " + options['VEIL_EVASION_PATH']

    source_path = os.path.expanduser(options["PAYLOAD_SOURCE_PATH"])
    config += '# Path to output the source of payloads\n'
    config += 'PAYLOAD_SOURCE_PATH="' + source_path + '"\n\n'
    print " [*] PAYLOAD_SOURCE_PATH = " + source_path

    # create the output source path if it doesn't exist
    if not os.path.exists(source_path):
        os.makedirs(source_path)
        print " [*] Path '" + source_path + "' Created"

    compiled_path = os.path.expanduser(options["PAYLOAD_COMPILED_PATH"])
    config += '# Path to output compiled payloads\n'
    config += 'PAYLOAD_COMPILED_PATH="' + compiled_path +'"\n\n'
    print " [*] PAYLOAD_COMPILED_PATH = " + compiled_path

    # create the output compiled path if it doesn't exist
    if not os.path.exists( compiled_path ):
        os.makedirs( compiled_path )
        print " [*] Path '" + compiled_path + "' Created"

    handler_path = os.path.expanduser(options["HANDLER_PATH"])
    # create the output compiled path if it doesn't exist
    if not os.path.exists( handler_path ):
        os.makedirs( handler_path )
        print " [*] Path '" + handler_path + "' Created"

    config += '# Whether to generate a msf handler script and where to place it\n'
    config += 'GENERATE_HANDLER_SCRIPT="' + options['GENERATE_HANDLER_SCRIPT'] + '"\n'
    print " [*] GENERATE_HANDLER_SCRIPT = " + options['GENERATE_HANDLER_SCRIPT']
    config += 'HANDLER_PATH="' + handler_path + '"\n\n'
    print " [*] HANDLER_PATH = " + handler_path

    hash_path = os.path.expanduser(options["HASH_LIST"])
    config += '# Running hash list of all payloads generated\n'
    config += 'HASH_LIST="' + hash_path + '"\n\n'
    print " [*] HASH_LIST = " + hash_path + "\n"


    config += """
#################################################
#
# Veil-Catapult specific options
#
#################################################

"""
    config += '# Veil-Catapult install path\n'
    config += 'VEIL_CATAPULT_PATH="' + options['VEIL_CATAPULT_PATH'] + '"\n\n'
    print " [*] VEIL_CATAPULT_PATH = " + options['VEIL_CATAPULT_PATH']

    catapult_resource_path = os.path.expanduser(options["CATAPULT_RESOURCE_PATH"])
    # create the catapult resource path if it doesn't exist
    if not os.path.exists( catapult_resource_path ):
        os.makedirs( catapult_resource_path )
        print " [*] Path '" + catapult_resource_path + "' Created"
    config += '# Path to output Veil-Catapult resource/cleanup files\n'
    config += 'CATAPULT_RESOURCE_PATH="' + catapult_resource_path + '"\n\n'
    print " [*] CATAPULT_RESOURCE_PATH = " + catapult_resource_path + "\n"


    if platform.system() == "Linux":
        # create the output compiled path if it doesn't exist
        if not os.path.exists("/etc/veil/"):
            # os.makedirs("/etc/veil/")
            os.system("sudo mkdir /etc/veil/")
            os.system("sudo touch /etc/veil/settings.py")
            os.system("sudo chmod 777 /etc/veil/settings.py")
            print " [*] Path '/etc/veil/' Created"
        f = open("/etc/veil/settings.py", 'w')
        f.write(config)
        f.close()
        print " Configuration File Written To '/etc/veil/settings.py'\n"
    else:
        print " [!] ERROR: PLATFORM NOT CURRENTLY SUPPORTED"
        sys.exit()

Example 16

Project: ssh
Source File: client.py
View license
    def _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys):
        """
        Try, in order:

            - The key passed in, if one was passed in.
            - Any key we can find through an SSH agent (if allowed).
            - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed).
            - Plain username/password auth, if a password was given.

        (The password might be needed to unlock a private key.)
        
        The password is required for two-factor authentication.
        """
        saved_exception = None
        two_factor = False
        allowed_types = []

        if pkey is not None:
            try:
                self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint()))
                allowed_types = self._transport.auth_publickey(username, pkey)
                two_factor = (allowed_types == ['password'])
                if not two_factor:
                    return
            except SSHException, e:
                saved_exception = e

        if not two_factor:
            for key_filename in key_filenames:
                for pkey_class in (RSAKey, DSSKey):
                    try:
                        key = pkey_class.from_private_key_file(key_filename, password)
                        self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename))
                        self._transport.auth_publickey(username, key)
                        two_factor = (allowed_types == ['password'])
                        if not two_factor:
                            return
                        break
                    except SSHException, e:
                        saved_exception = e

        if not two_factor and allow_agent:
            if self._agent == None:
                self._agent = Agent()

            for key in self._agent.get_keys():
                try:
                    self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint()))
                    # for 2-factor auth a successfully auth'd key will result in ['password']
                    allowed_types = self._transport.auth_publickey(username, key)
                    two_factor = (allowed_types == ['password'])
                    if not two_factor:
                        return
                    break
                except SSHException, e:
                    saved_exception = e

        if not two_factor:
            keyfiles = []
            rsa_key = os.path.expanduser('~/.ssh/id_rsa')
            dsa_key = os.path.expanduser('~/.ssh/id_dsa')
            if os.path.isfile(rsa_key):
                keyfiles.append((RSAKey, rsa_key))
            if os.path.isfile(dsa_key):
                keyfiles.append((DSSKey, dsa_key))
            # look in ~/ssh/ for windows users:
            rsa_key = os.path.expanduser('~/ssh/id_rsa')
            dsa_key = os.path.expanduser('~/ssh/id_dsa')
            if os.path.isfile(rsa_key):
                keyfiles.append((RSAKey, rsa_key))
            if os.path.isfile(dsa_key):
                keyfiles.append((DSSKey, dsa_key))
    
            if not look_for_keys:
                keyfiles = []
    
            for pkey_class, filename in keyfiles:
                try:
                    key = pkey_class.from_private_key_file(filename, password)
                    self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename))
                    # for 2-factor auth a successfully auth'd key will result in ['password']
                    allowed_types = self._transport.auth_publickey(username, key)
                    two_factor = (allowed_types == ['password'])
                    if not two_factor:
                        return
                    break
                except SSHException, e:
                    saved_exception = e
                except IOError, e:
                    saved_exception = e

        if password is not None:
            try:
                self._transport.auth_password(username, password)
                return
            except SSHException, e:
                saved_exception = e
        elif two_factor:
            raise SSHException('Two-factor authentication requires a password')

        # if we got an auth-failed exception earlier, re-raise it
        if saved_exception is not None:
            raise saved_exception
        raise SSHException('No authentication methods available')
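
The fallback key discovery above is one of the most common uses of os.path.expanduser: build candidate paths under the user's home directory and keep the ones that actually exist. A minimal sketch of just that loop (it returns plain paths rather than loaded key objects):

import os

def discover_default_keys():
    # Both ~/.ssh (POSIX) and ~/ssh (some Windows setups) are checked, as in the example above.
    candidates = ["~/.ssh/id_rsa", "~/.ssh/id_dsa", "~/ssh/id_rsa", "~/ssh/id_dsa"]
    expanded = (os.path.expanduser(p) for p in candidates)
    return [p for p in expanded if os.path.isfile(p)]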

Example 17

Project: cgat
Source File: annotator_distance.py
View license
def main(argv=sys.argv):

    parser = E.OptionParser(
        version="%prog version: $Id: annotator_distance.py 2861 2010-02-23 17:36:32Z andreas $", usage=globals()["__doc__"])

    parser.add_option("-a", "--annotations-tsv-file", dest="filename_annotations", type="string",
                      help="filename mapping gene ids to annotations (a tab-separated table with two-columns) [default=%default].")

    parser.add_option("-r", "--resolution", dest="resolution", type="int",
                      help="resolution of count vector [default=%default].")

    parser.add_option("-b", "--num-bins", dest="num_bins", type="int",
                      help="number of bins in count vector [default=%default].")

    parser.add_option("-i", "--num-samples", dest="num_samples", type="int",
                      help="sample size to compute [default=%default].")

    parser.add_option("-w", "--workspace-bed-file", dest="filename_workspace", type="string",
                      help="filename with workspace information [default=%default].")

    parser.add_option("--workspace-builder", dest="workspace_builder", type="choice",
                      choices=(
                          "gff", "gtf-intergenic", "gtf-intronic", "gtf-genic"),
                      help="given a gff/gtf file build a workspace [default=%default].")

    parser.add_option("--workspace-labels", dest="workspace_labels", type="choice",
                      choices=("none", "direction", "annotation"),
                      help="labels to use for the workspace workspace [default=%default].")

    parser.add_option("--sampler", dest="sampler", type="choice",
                      choices=("permutation", "gaps"),
                      help="sampler to use. The sampler determines the null model of how segments are distributed in the workspace  [default=%default]")

    parser.add_option("--counter", dest="counters", type="choice", action="append",
                      choices=(
                          "transcription", "closest-distance", "all-distances"),
                      help="counter to use. The counter computes the quantity of interest [default=%default]")

    parser.add_option("--analysis", dest="analysis", type="choice", action="append",
                      choices=("proximity", "area-under-curve"),
                      help="analysis to perform [default=%default]")

    parser.add_option("--transform-counts", dest="transform_counts", type="choice",
                      choices=("raw", "cumulative"),
                      help="cumulate counts [default=%default].")

    parser.add_option("-s", "--segments", dest="filename_segments", type="string",
                      help="filename with segment information [default=%default].")

    parser.add_option("--xrange", dest="xrange", type="string",
                      help="xrange to plot [default=%default]")

    parser.add_option("-o", "--logscale", dest="logscale", type="string",
                      help="use logscale on x, y or xy [default=%default]")

    parser.add_option("-p", "--plot", dest="plot", action="store_true",
                      help="output plots [default=%default]")

    parser.add_option("--hardcopy", dest="hardcopy", type="string",
                      help="output hardcopies to file [default=%default]")

    parser.add_option("--no-fdr", dest="do_fdr", action="store_false",
                      help="do not compute FDR rates [default=%default]")

    parser.add_option("--segments-format", dest="segments_format", type="choice",
                      choices=("gtf", "bed"),
                      help="format of segments file [default=%default].")

    parser.add_option("--truncate", dest="truncate", action="store_true",
                      help="truncate segments extending beyond a workspace [default=%default]")

    parser.add_option("--remove-overhangs", dest="remove_overhangs", action="store_true",
                      help="remove segments extending beyond a workspace[default=%default]")

    parser.add_option("--keep-ambiguous", dest="keep_ambiguous", action="store_true",
                      help="keep segments extending to more than one workspace [default=%default]")

    parser.set_defaults(
        filename_annotations=None,
        filename_workspace="workspace.gff",
        filename_segments="FastDown.gtf",
        filename_annotations_gtf="../data/tg1_territories.gff",
        workspace_builder="gff",
        workspace_labels="none",
        sampler="permutation",
        truncate=False,
        num_bins=10000,
        num_samples=10,
        resolution=100,
        plot_samples=False,
        plot_envelope=True,
        counters=[],
        transform_counts="raw",
        xrange=None,
        plot=False,
        logscale=None,
        output_all=False,
        do_test=False,
        analysis=[],
        do_fdr=True,
        hardcopy="%s.png",
        segments_format="gtf",
        remove_overhangs=False,
    )

    (options, args) = E.Start(parser, argv=argv, add_output_options=True)

    ###########################################
    # setup options
    if options.sampler == "permutation":
        sampler = SamplerPermutation
    elif options.sampler == "gaps":
        sampler = SamplerGaps

    if options.xrange:
        options.xrange = list(map(float, options.xrange.split(",")))

    if len(options.counters) == 0:
        raise ValueError("please specify at least one counter.")

    if len(options.analysis) == 0:
        raise ValueError("please specify at least one analysis.")

    if options.workspace_labels == "annotation" and not options.filename_annotations:
        raise ValueError(
            "please specify --annotations-tsv-file is --workspace-labels=annotations.")

    ###########################################
    # read data
    if options.workspace_labels == "annotation":
        def constant_factory(value):
            return itertools.repeat(value).__next__

        def dicttype():
            return collections.defaultdict(constant_factory(("unknown",)))

        map_id2annotations = IOTools.readMultiMap(open(options.filename_annotations, "r"),
                                                  dtype=dicttype)
    else:
        map_id2annotations = {}

    workspace = readWorkspace(open(options.filename_workspace, "r"),
                              options.workspace_builder,
                              options.workspace_labels,
                              map_id2annotations)

    E.info("read workspace for %i contigs" % (len(workspace)))

    indexed_workspace = indexIntervals(workspace, with_values=True)
    segments = readSegments(open(options.filename_segments, "r"), indexed_workspace,
                            format=options.segments_format,
                            keep_ambiguous=options.keep_ambiguous,
                            truncate=options.truncate,
                            remove_overhangs=options.remove_overhangs)

    nsegments = 0
    for contig, vv in segments.items():
        nsegments += len(vv)

    E.info("read %i segments for %i contigs" % (nsegments, len(workspace)))
    indexed_segments = indexIntervals(segments, with_values=False)

    if nsegments == 0:
        E.warn("no segments read - no computation done.")
        E.Stop()
        return

    # build labels
    labels = collections.defaultdict(int)
    for contig, vv in workspace.items():
        for start, end, v in vv:
            for l in v[0]:
                labels[l] += 1
            for l in v[1]:
                labels[l] += 1

    E.info("found %i workspace labels" % len(labels))

    ###########################################
    # setup counting containers
    counters = []
    for cc in options.counters:

        if cc == "transcription":
            counter = CounterTranscription
        elif cc == "closest-distance":
            counter = CounterClosestDistance
        elif cc == "all-distances":
            counter = CounterAllDistances

        if nsegments < 256:
            dtype = numpy.uint8
        elif nsegments < 65536:
            dtype = numpy.uint16
        elif nsegments < 4294967296:
            dtype = numpy.uint32
        else:
            # numpy.int was removed in recent NumPy releases; use a 64-bit type
            dtype = numpy.int64

        E.debug("chosen dtype %s" % str(dtype))

        E.info("samples space is %i bases: %i bins at %i resolution" %
               (options.num_bins * options.resolution,
                options.num_bins,
                options.resolution,
                ))

        E.info("allocating counts: %i bytes (%i labels, %i samples, %i bins)" %
               (options.num_bins * len(labels) * dtype().itemsize * (options.num_samples + 1),
                len(labels),
                options.num_samples,
                options.num_bins,
                ))

        c = CountingResults(labels)
        c.mObservedCounts = counter(
            labels, options.num_bins, options.resolution, dtype=dtype)

        simulated_counts = []
        for x in range(options.num_samples):
            simulated_counts.append(
                counter(labels, options.num_bins, options.resolution, dtype=dtype))
        c.mSimulatedCounts = simulated_counts
        c.mName = c.mObservedCounts.mName

        counters.append(c)

        E.info("allocated memory successfully")

    segments_per_workspace = []
    segment_sizes = []
    segments_per_label = collections.defaultdict(int)
    workspaces_per_label = collections.defaultdict(int)

    ############################################
    # get observed and simulated counts
    nworkspaces, nempty_workspaces, nempty_contigs, nmiddle = 0, 0, 0, 0
    iteration2 = 0
    for contig, vv in workspace.items():

        iteration2 += 1
        E.info("counting %i/%i: %s %i segments" %
               (iteration2,
                len(workspace),
                contig,
                len(vv)))

        if len(vv) == 0:
            continue

        iteration1 = 0
        for work_start, work_end, v in vv:

            left_labels, right_labels = v[0], v[1]

            iteration1 += 1

            # ignore empty segments
            if contig not in indexed_segments:
                nempty_contigs += 1
                continue

            r = indexed_segments[contig].find(work_start, work_end)
            segments_per_workspace.append(len(r))

            if not r:
                nempty_workspaces += 1
                continue

            # collect segments and stats
            nworkspaces += 1
            observed = [(x.start, x.end) for x in r]
            observed.sort()
            segments_per_workspace.append(len(observed))
            segment_sizes.extend([x[1] - x[0] for x in observed])

            # collect basic counts
            for label in list(left_labels) + list(right_labels):
                workspaces_per_label[label] += 1
                segments_per_label[label] += len(observed)

            # add observed counts
            for counter in counters:
                counter.mObservedCounts.addCounts(
                    observed, work_start, work_end, left_labels, right_labels)

            # create sampler
            s = sampler(observed, work_start, work_end)

            # add simulated counts
            for iteration in range(options.num_samples):
                simulated = s.sample()
                for counter in counters:
                    counter.mSimulatedCounts[iteration].addCounts(
                        simulated, work_start, work_end, left_labels, right_labels)

    E.info("counting finished")
    E.info("nworkspaces=%i, nmiddle=%i, nempty_workspaces=%i, nempty_contigs=%i" %
           (nworkspaces, nmiddle, nempty_workspaces, nempty_contigs))

    ######################################################
    # transform counts

    if options.transform_counts == "cumulative":
        transform = cumulative_transform
    elif options.transform_counts == "raw":
        transform = normalize_transform

    ####################################################
    # analysis

    if "proximity" in options.analysis:
        outfile_proximity = E.openOutputFile("proximity")
        outfile_proximity.write("\t".join(("label", "observed", "pvalue",
                                           "expected", "CIlower", "CIupper", "qvalue", "segments", "workspaces")) + "\n")
    else:
        outfile_proximity = None

    if "area-under-curve" in options.analysis:
        outfile_auc = E.openOutputFile("auc")
        outfile_auc.write("label\tobserved\texpected\tCIlower\tCIupper\n")
    else:
        outfile_auc = None

    # qvalue: expected false positives at p-value
    # qvalue = expected false positives /
    if options.do_fdr:
        E.info("computing pvalues for fdr")
        for counter in counters:
            for label in labels:
                E.info("working on counter:%s label:%s" % (counter, label))

                # collect all P-Values of simulated results to compute FDR
                sim_pvalues = []
                medians = counter.getMedians(label)

                for median in medians:
                    pvalue = float(
                        scipy.stats.percentileofscore(medians, median)) / 100.0
                    sim_pvalues.append(pvalue)

        sim_pvalues.sort()
    else:
        sim_pvalues = []

    # compute observed p-values
    for counter in counters:
        counter.update()

    obs_pvalues = []
    for counter in counters:
        for label in labels:
            obs_pvalues.append(counter.mStats[label].pvalue)
        obs_pvalues.sort()

    # compute q-values (FDR) from the observed and simulated p-values
    if options.do_fdr:
        for counter in counters:
            counter.updateFDR(obs_pvalues, sim_pvalues)

    for counter in counters:

        outofbounds_sim, totals_sim = 0, 0
        outofbounds_obs, totals_obs = 0, 0
        for label in labels:
            for sample in range(options.num_samples):
                if counter.mSimulatedCounts[sample].mOutOfBounds[label]:
                    E.debug("out of bounds: sample %i, label %s, counts=%i" %
                            (sample, label, counter.mSimulatedCounts[sample].mOutOfBounds[label]))
                    outofbounds_sim += counter.mSimulatedCounts[
                        sample].mOutOfBounds[label]
                totals_sim += counter.mSimulatedCounts[sample].mTotals[label]

            outofbounds_obs += counter.mObservedCounts.mOutOfBounds[label]
            totals_obs += counter.mObservedCounts.mTotals[label]

        E.info("out of bounds observations: observed=%i/%i (%5.2f%%), simulations=%i/%i (%5.2f%%)" %
               (outofbounds_obs, totals_obs,
                100.0 * outofbounds_obs / totals_obs,
                outofbounds_sim, totals_sim,
                100.0 * outofbounds_sim / totals_sim,
                ))

        for label in labels:

            if outfile_auc:
                mmin, mmax, mmean = counter.getEnvelope(
                    label, transform=normalize_transform)
                obs = normalize_transform(
                    counter.mObservedCounts[label], counter.mObservedCounts.mOutOfBounds[label])

                def block_iterator(a1, a2, a3, num_bins):
                    x = 0
                    while x < num_bins:
                        while x < num_bins and a1[x] <= a2[x]:
                            x += 1
                        start = x
                        while x < num_bins and a1[x] > a2[x]:
                            x += 1
                        end = x
                        total_a1 = a1[start:end].sum()
                        total_a3 = a3[start:end].sum()
                        if total_a1 > total_a3:
                            yield (total_a1 - total_a3, start, end, total_a1, total_a3)

                blocks = list(
                    block_iterator(obs, mmax, mmean, options.num_bins))

                if options.output_all:
                    for delta, start, end, total_obs, total_mean in blocks:
                        if end - start <= 1:
                            continue
                        outfile_auc.write("%s\t%i\t%i\t%i\t%f\t%f\t%f\t%f\t%f\n" %
                                          (label,
                                           start * options.resolution,
                                           end * options.resolution,
                                           (end - start) * options.resolution,
                                           total_obs,
                                           total_mean,
                                           delta,
                                           total_obs / total_mean,
                                           100.0 * (total_obs / total_mean - 1.0)))

                # output best block
                blocks.sort()
                delta, start, end, total_obs, total_mean = blocks[-1]

                outfile_auc.write("%s\t%i\t%i\t%i\t%f\t%f\t%f\t%f\t%f\n" %
                                  (label,
                                   start * options.resolution,
                                   end * options.resolution,
                                   (end - start) * options.resolution,
                                   total_obs,
                                   total_mean,
                                   delta,
                                   total_obs / total_mean,
                                   100.0 * (total_obs / total_mean - 1.0)))

            if outfile_proximity:

                # find error bars at median
                st = counter.mStats[label]
                outfile_proximity.write("%s\t%i\t%f\t%i\t%i\t%i\t%s\t%i\t%i\n" %
                                        (label,
                                         st.observed *
                                         options.resolution,
                                         st.pvalue,
                                         st.expected *
                                         options.resolution,
                                         st.ci95lower *
                                         options.resolution,
                                         st.ci95upper *
                                         options.resolution,
                                         IOTools.val2str(st.qvalue),
                                         segments_per_label[label],
                                         workspaces_per_label[label],
                                         ))

    if options.plot:

        for counter in counters:
            plotCounts(counter, options, transform)

        # plot summary stats
        plt.figure()
        plt.title("distribution of workspace length")
        data = []
        for contig, segs in workspace.items():
            if len(segs) == 0:
                continue
            data.extend([x[1] - x[0] for x in segs])

        vals, bins = numpy.histogram(
            data, bins=numpy.arange(0, max(data), 100))

        t = float(sum(vals))
        plt.plot(bins[:-1], numpy.cumsum(vals) / t)
        plt.gca().set_xscale('log')
        plt.legend()
        t = float(sum(vals))
        plt.xlabel("size of workspace")
        plt.ylabel("cumulative relative frequency")
        if options.hardcopy:
            plt.savefig(
                os.path.expanduser(options.hardcopy % "workspace_size"))

        plt.figure()
        plt.title("segments per block")
        vals, bins = numpy.histogram(segments_per_workspace, bins=numpy.arange(
            0, max(segments_per_workspace), 1))
        plt.plot(bins[:-1], vals)
        plt.xlabel("segments per block")
        plt.ylabel("absolute frequency")
        if options.hardcopy:
            plt.savefig(
                os.path.expanduser(options.hardcopy % "segments_per_block"))

        plt.figure()
        plt.title("workspaces per label")
        plt.barh(
            list(range(0, len(labels))), [workspaces_per_label[x] for x in labels], height=0.5)
        plt.yticks(list(range(0, len(labels))), labels)
        plt.ylabel("workspaces per label")
        plt.xlabel("absolute frequency")
        plt.gca().set_xscale('log')

        if options.hardcopy:
            plt.savefig(
                os.path.expanduser(options.hardcopy % "workspaces_per_label"))

        plt.figure()
        plt.title("segments per label")
        plt.barh(list(range(0, len(labels))), [segments_per_label[x]
                                               for x in labels], height=0.5)
        plt.yticks(list(range(0, len(labels))), labels)
        plt.ylabel("segments per label")
        plt.xlabel("absolute frequency")
        if options.hardcopy:
            plt.savefig(
                os.path.expanduser(options.hardcopy % "segments_per_label"))

        if not options.hardcopy:
            plt.show()

    E.Stop()
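
The expanduser pattern worth noting in the example above: the hardcopy setting is a filename template (default "%s.png"), and every destination is passed through os.path.expanduser before plt.savefig, so values such as ~/plots/%s.png resolve under the user's home directory. A minimal, hedged sketch of the same idea (the template and section names below are illustrative, not taken from the script above):

import os
import matplotlib
matplotlib.use("Agg")  # non-interactive backend so savefig works without a display
import matplotlib.pyplot as plt


def save_hardcopy(template, section):
    """Expand '~' in a filename template such as '~/plots/%s.png' and
    save the current figure there, creating the directory if needed."""
    path = os.path.expanduser(template % section)
    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    plt.savefig(path)
    return path


plt.plot([0, 1, 2], [1, 4, 9])
print(save_hardcopy("~/plots/%s.png", "workspace_size"))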

Example 18

Project: cgat
Source File: bam2geneprofile.py
View license
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.
        """

    if not argv:
        argv = sys.argv

    # setup command line parser
    parser = E.OptionParser(version="%prog version: $Id$",
                            usage=globals()["__doc__"])

    parser.add_option("-m", "--method", dest="methods", type="choice",
                      action="append",
                      choices=("geneprofile", "tssprofile", "utrprofile",
                               "intervalprofile", "midpointprofile",
                               "geneprofilewithintrons",
                               "geneprofileabsolutedistancefromthreeprimeend",
                               "separateexonprofile",
                               "separateexonprofilewithintrons",
                               ),
                      help='counters to use. Counters describe the '
                      'meta-gene structure to use. '
                      'Note using geneprofilewithintrons, or '
                      'geneprofileabsolutedistancefromthreeprimeend will '
                      'automatically turn on the --use-base-accuracy option '
                      '[%default].')

    parser.add_option("-b", "--bam-file", "--bedfile", "--bigwigfile",
                      dest="infiles",
                      metavar="BAM",
                      type="string", action="append",
                      help="BAM/bed/bigwig files to use. Do not mix "
                      "different types [%default]")

    parser.add_option("-c", "--control-bam-file", dest="controlfiles",
                      metavar="BAM",
                      type="string", action="append",
                      help="control/input to use. Should be of the same "
                      "type as the bam/bed/bigwig file"
                      " [%default]")

    parser.add_option("-g", "--gtf-file", dest="gtffile", type="string",
                      metavar="GTF",
                      help="GTF file to use. "
                      "[%default]")

    parser.add_option(
        "--normalize-transcript",
        dest="transcript_normalization",
        type="choice",
        choices=("none", "max", "sum", "total-max", "total-sum"),
        help="normalization to apply on each transcript "
        "profile before adding to meta-gene profile. "
        "[%default]")

    parser.add_option(
        "--normalize-profile",
        dest="profile_normalizations",
        type="choice", action="append",
        choices=("all", "none", "area", "counts", "background"),
        help="normalization to apply on meta-gene "
        "profile normalization. "
        "[%default]")

    parser.add_option(
        "-r", "--reporter", dest="reporter", type="choice",
        choices=("gene", "transcript"),
        help="report results for genes or transcripts."
        " When 'genes` is chosen, exons across all transcripts for"
        " a gene are merged. When 'transcript' is chosen, counts are"
        " computed for each transcript separately with each transcript"
        " contributing equally to the meta-gene profile."
        " [%default]")

    parser.add_option("-i", "--shift-size", dest="shifts", type="int",
                      action="append",
                      help="shift reads in :term:`bam` formatted file "
                      "before computing densities (ChIP-Seq). "
                      "[%default]")

    parser.add_option("-a", "--merge-pairs", dest="merge_pairs",
                      action="store_true",
                      help="merge pairs in :term:`bam` formatted "
                      "file before computing "
                      "densities (ChIP-Seq). "
                      "[%default]")

    parser.add_option("-u", "--use-base-accuracy", dest="base_accuracy",
                      action="store_true",
                      help="compute densities with base accuracy. The default "
                      "is to only use the start and end of the aligned region "
                      "(RNA-Seq) "
                      "[%default]")

    parser.add_option("-e", "--extend", dest="extends", type="int",
                      action="append",
                      help="extend reads in :term:`bam` formatted file "
                      "(ChIP-Seq). "
                      "[%default]")

    parser.add_option("--resolution-upstream", dest="resolution_upstream",
                      type="int",
                      help="resolution of upstream region in bp "
                      "[%default]")

    parser.add_option("--resolution-downstream", dest="resolution_downstream",
                      type="int",
                      help="resolution of downstream region in bp "
                      "[%default]")

    parser.add_option("--resolution-upstream-utr",
                      dest="resolution_upstream_utr",
                      type="int",
                      help="resolution of upstream UTR region in bp "
                      "[%default]")

    parser.add_option("--resolution-downstream-utr",
                      dest="resolution_downstream_utr",
                      type="int",
                      help="resolution of downstream UTR region in bp "
                      "[%default]")

    parser.add_option("--resolution-cds", dest="resolution_cds", type="int",
                      help="resolution of cds region in bp "
                      "[%default]")

    parser.add_option("--resolution-first-exon", dest="resolution_first",
                      type="int",
                      help="resolution of first exon in gene, in bp"
                      "[%default]")

    parser.add_option("--resolution-last-exon", dest="resolution_last",
                      type="int",
                      help="resolution of last exon in gene, in bp"
                      "[%default]")

    parser.add_option("--resolution-introns",
                      dest="resolution_introns", type="int",
                      help="resolution of introns region in bp "
                      "[%default]")

    parser.add_option("--resolution-exons-absolute-distance-topolya",
                      dest="resolution_exons_absolute_distance_topolya",
                      type="int",
                      help="resolution of exons absolute distance "
                      "topolya in bp "
                      "[%default]")

    parser.add_option("--resolution-introns-absolute-distance-topolya",
                      dest="resolution_introns_absolute_distance_topolya",
                      type="int",
                      help="resolution of introns absolute distance "
                      "topolya in bp "
                      "[%default]")

    parser.add_option("--extension-exons-absolute-distance-topolya",
                      dest="extension_exons_absolute_distance_topolya",
                      type="int",
                      help="extension for exons from the absolute "
                      "distance from the topolya in bp "
                      "[%default]")

    parser.add_option(
        "--extension-introns-absolute-distance-topolya",
        dest="extension_introns_absolute_distance_topolya", type="int",
        help="extension for introns from the absolute distance from "
        "the topolya in bp [%default]")

    parser.add_option(
        "--extension-upstream", dest="extension_upstream", type="int",
        help="extension upstream from the first exon in bp"
        "[%default]")

    parser.add_option(
        "--extension-downstream", dest="extension_downstream", type="int",
        help="extension downstream from the last exon in bp"
        "[%default]")

    parser.add_option(
        "--extension-inward", dest="extension_inward", type="int",
        help="extension inward from a TSS start site in bp"
        "[%default]")

    parser.add_option(
        "--extension-outward", dest="extension_outward", type="int",
        help="extension outward from a TSS start site in bp"
        "[%default]")

    parser.add_option("--scale-flank-length", dest="scale_flanks", type="int",
                      help="scale flanks to (integer multiples of) gene length"
                      "[%default]")

    parser.add_option(
        "--control-factor", dest="control_factor", type="float",
        help="factor for normalizing control and foreground data. "
        "Computed from data if not set. "
        "[%default]")

    parser.add_option("--output-all-profiles", dest="output_all_profiles",
                      action="store_true",
                      help="keep individual profiles for each "
                      "transcript and output. "
                      "[%default]")

    parser.add_option("--counts-tsv-file", dest="input_filename_counts",
                      type="string",
                      help="filename with count data for each transcript. "
                      "Use this instead "
                      "of recomputing the profile. Useful for plotting the "
                      "meta-gene profile "
                      "from previously computed counts "
                      "[%default]")

    parser.add_option(
        "--background-region-bins",
        dest="background_region_bins",
        type="int",
        help="number of bins on either end of the profile "
        "to be considered for background meta-gene normalization "
        "[%default]")

    parser.set_defaults(
        remove_rna=False,
        ignore_pairs=False,
        force_output=False,
        bin_size=10,
        extends=[],
        shifts=[],
        sort=[],
        reporter="transcript",
        resolution_cds=1000,
        resolution_introns=1000,
        # 3 kb is a good balance between seeing enough 3' bias and not
        # omitting too many genes. Tim 31st Aug 2013
        resolution_exons_absolute_distance_topolya=3000,
        # introns are only used to assess the noise level, so they do not
        # need a long region; a long region has the side effect of omitting
        # more genes. Tim 31st Aug 2013
        resolution_introns_absolute_distance_topolya=500,
        # the extension can simply be the same as the resolution
        extension_exons_absolute_distance_topolya=3000,
        extension_introns_absolute_distance_topolya=500,
        resolution_upstream_utr=1000,
        resolution_downstream_utr=1000,
        resolution_upstream=1000,
        resolution_downstream=1000,
        resolution_first=1000,
        resolution_last=1000,
        # mean length of transcripts: about 2.5 kb
        extension_upstream=2500,
        extension_downstream=2500,
        extension_inward=3000,
        extension_outward=3000,
        plot=True,
        methods=[],
        infiles=[],
        controlfiles=[],
        gtffile=None,
        profile_normalizations=[],
        transcript_normalization=None,
        scale_flanks=0,
        merge_pairs=False,
        min_insert_size=0,
        max_insert_size=1000,
        base_accuracy=False,
        matrix_format="single",
        control_factor=None,
        output_all_profiles=False,
        background_region_bins=10,
        input_filename_counts=None,
    )

    # add common options (-h/--help, ...) and parse command line
    (options, args) = E.Start(parser, argv=argv, add_output_options=True)

    # Keep for backwards compatibility
    if len(args) == 2:
        infile, gtf = args
        options.infiles.append(infile)
        options.gtffile = gtf

    if not options.gtffile:
        raise ValueError("no GTF file specified")

    if options.gtffile == "-":
        options.gtffile = options.stdin
    else:
        options.gtffile = IOTools.openFile(options.gtffile)

    if len(options.infiles) == 0:
        raise ValueError("no bam/wig/bed files specified")

    for methodsRequiresBaseAccuracy in [
            "geneprofilewithintrons",
            "geneprofileabsolutedistancefromthreeprimeend",
    ]:
        # If you implement a method for which spliced-out introns or exons
        # should not appear to be covered by non-existent reads, let that
        # method imply --use-base-accuracy by adding it here.
        if methodsRequiresBaseAccuracy in options.methods:
            options.base_accuracy = True

    if options.reporter == "gene":
        gtf_iterator = GTF.flat_gene_iterator(GTF.iterator(options.gtffile))
    elif options.reporter == "transcript":
        gtf_iterator = GTF.transcript_iterator(GTF.iterator(options.gtffile))

    # Select rangecounter based on file type
    if len(options.infiles) > 0:
        if options.infiles[0].endswith(".bam"):
            bamfiles = [pysam.Samfile(x, "rb") for x in options.infiles]

            if options.controlfiles:
                controlfiles = [pysam.Samfile(x, "rb")
                                for x in options.controlfiles]
            else:
                controlfiles = None

            format = "bam"
            if options.merge_pairs:
                range_counter = _bam2geneprofile.RangeCounterBAM(
                    bamfiles,
                    shifts=options.shifts,
                    extends=options.extends,
                    merge_pairs=options.merge_pairs,
                    min_insert_size=options.min_insert_size,
                    max_insert_size=options.max_insert_size,
                    controlfiles=controlfiles,
                    control_factor=options.control_factor)

            elif options.shifts or options.extends:
                range_counter = _bam2geneprofile.RangeCounterBAM(
                    bamfiles,
                    shifts=options.shifts,
                    extends=options.extends,
                    controlfiles=controlfiles,
                    control_factor=options.control_factor)

            elif options.base_accuracy:
                range_counter = _bam2geneprofile.RangeCounterBAMBaseAccuracy(
                    bamfiles,
                    controlfiles=controlfiles,
                    control_factor=options.control_factor)
            else:
                range_counter = _bam2geneprofile.RangeCounterBAM(
                    bamfiles,
                    controlfiles=controlfiles,
                    control_factor=options.control_factor)

        elif options.infiles[0].endswith(".bed.gz"):
            bedfiles = [pysam.Tabixfile(x) for x in options.infiles]

            if options.controlfiles:
                controlfiles = [pysam.Tabixfile(x)
                                for x in options.controlfiles]
            else:
                controlfiles = None

            range_counter = _bam2geneprofile.RangeCounterBed(
                bedfiles,
                controlfiles=controlfiles,
                control_factor=options.control_factor)

        elif options.infiles[0].endswith(".bw"):
            wigfiles = [BigWigFile(file=open(x)) for x in options.infiles]
            range_counter = _bam2geneprofile.RangeCounterBigWig(wigfiles)

        else:
            raise NotImplementedError(
                "can't determine file type for %s" % str(options.infiles))

    counters = []
    for method in options.methods:
        if method == "utrprofile":
            counters.append(_bam2geneprofile.UTRCounter(
                range_counter,
                options.resolution_upstream,
                options.resolution_upstream_utr,
                options.resolution_cds,
                options.resolution_downstream_utr,
                options.resolution_downstream,
                options.extension_upstream,
                options.extension_downstream,
            ))

        elif method == "geneprofile":
            counters.append(_bam2geneprofile.GeneCounter(
                range_counter,
                options.resolution_upstream,
                options.resolution_cds,
                options.resolution_downstream,
                options.extension_upstream,
                options.extension_downstream,
                options.scale_flanks))

        elif method == "geneprofilewithintrons":
            counters.append(_bam2geneprofile.GeneCounterWithIntrons(
                range_counter,
                options.resolution_upstream,
                options.resolution_cds,
                options.resolution_introns,
                options.resolution_downstream,
                options.extension_upstream,
                options.extension_downstream,
                options.scale_flanks))

        elif method == "geneprofileabsolutedistancefromthreeprimeend":
            # options.extension_exons_absolute_distance_tostartsite,
            # options.extension_introns_absolute_distance_tostartsite,
            # Tim 31st Aug 2013: a possible future feature, if five-prime
            # bias is of interest (it would require another class). It is
            # not difficult to derive such a class from this one, but it
            # is not implemented yet. That future feature would differ
            # slightly from the TSS profile already implemented, because
            # here introns are skipped.
            counters.append(
                _bam2geneprofile.GeneCounterAbsoluteDistanceFromThreePrimeEnd(
                    range_counter, options.resolution_upstream,
                    options.resolution_downstream,
                    options.resolution_exons_absolute_distance_topolya,
                    options.resolution_introns_absolute_distance_topolya,
                    options.extension_upstream,
                    options.extension_downstream,
                    options.extension_exons_absolute_distance_topolya,
                    options.extension_introns_absolute_distance_topolya,
                    options.scale_flanks))

        elif method == "tssprofile":
            counters.append(_bam2geneprofile.TSSCounter(
                range_counter,
                options.extension_outward,
                options.extension_inward))

        elif method == "intervalprofile":
            counters.append(_bam2geneprofile.RegionCounter(
                range_counter,
                options.resolution_upstream,
                options.resolution_cds,
                options.resolution_downstream,
                options.extension_upstream,
                options.extension_downstream))

        elif method == "midpointprofile":
            counters.append(_bam2geneprofile.MidpointCounter(
                range_counter,
                options.resolution_upstream,
                options.resolution_downstream,
                options.extension_upstream,
                options.extension_downstream))

        # new method to split the first and last exons out;
        # requires a representative transcript for each gene;
        # the GTF should be sorted by gene position
        elif method == "separateexonprofile":
            counters.append(_bam2geneprofile.SeparateExonCounter(
                range_counter,
                options.resolution_upstream,
                options.resolution_first,
                options.resolution_last,
                options.resolution_cds,
                options.resolution_downstream,
                options.extension_upstream,
                options.extension_downstream))

        elif method == "separateexonprofilewithintrons":
            counters.append(_bam2geneprofile.SeparateExonWithIntronCounter(
                range_counter,
                options.resolution_upstream,
                options.resolution_first,
                options.resolution_last,
                options.resolution_cds,
                options.resolution_introns,
                options.resolution_downstream,
                options.extension_upstream,
                options.extension_downstream))

    # set normalization
    for c in counters:
        c.setNormalization(options.transcript_normalization)
        if options.output_all_profiles:
            c.setOutputProfiles(IOTools.openFile(E.getOutputFile(c.name) +
                                                 ".profiles.tsv.gz", "w"))

    if options.input_filename_counts:
        # read counts from file
        E.info("reading counts from %s" % options.input_filename_counts)
        all_counts = pandas.read_csv(
            IOTools.openFile(options.input_filename_counts),
            sep='\t', header=0, index_col=0)

        if len(counters) != 1:
            raise NotImplementedError(
                'counting from matrix only implemented for 1 counter.')
        # build counter based on reference counter
        counter = _bam2geneprofile.UnsegmentedCounter(counters[0])
        counters = [counter]
        _bam2geneprofile.countFromCounts(counters, all_counts)

    else:
        E.info("starting counting with %i counters" % len(counters))
        feature_names = _bam2geneprofile.countFromGTF(counters,
                                                      gtf_iterator)

    # output matrices
    if not options.profile_normalizations:
        options.profile_normalizations.append("none")
    elif "all" in options.profile_normalizations:
        options.profile_normalizations = ["none",
                                          "area",
                                          "counts",
                                          "background"]

    for method, counter in zip(options.methods, counters):
        profiles = []
        for norm in options.profile_normalizations:
            # build matrix, apply normalization
            profile = counter.getProfile(
                normalize=norm,
                background_region_bins=options.background_region_bins)
            profiles.append(profile)

        for x in range(1, len(profiles)):
            assert profiles[0].shape == profiles[x].shape

        # build a single matrix of all profiles for output
        matrix = numpy.concatenate(profiles)
        matrix.shape = len(profiles), len(profiles[0])
        matrix = matrix.transpose()

        with IOTools.openFile(E.getOutputFile(counter.name) +
                              ".matrix.tsv.gz", "w") as outfile:
            outfile.write("bin\tregion\tregion_bin\t%s\n" % "\t".join(
                options.profile_normalizations))
            fields = []
            bins = []
            for field, nbins in zip(counter.fields, counter.nbins):
                fields.extend([field] * nbins)
                bins.extend(list(range(nbins)))

            for row, cols in enumerate(zip(fields, bins, matrix)):
                outfile.write("%i\t%s\t" %
                              (row, "\t".join([str(x) for x in cols[:-1]])))
                outfile.write("%s\n" %
                              ("\t".join([str(x) for x in cols[-1]])))

        with IOTools.openFile(E.getOutputFile(counter.name) +
                              ".lengths.tsv.gz", "w") as outfile:
            counter.writeLengthStats(outfile)

        if options.output_all_profiles:
            counter.closeOutputProfiles()

    if options.plot:

        import matplotlib
        # avoid Tk or any X
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt

        for method, counter in zip(options.methods, counters):

            if method in ("geneprofile",
                          "geneprofilewithintrons",
                          "geneprofileabsolutedistancefromthreeprimeend",
                          "utrprofile",
                          "intervalprofile",
                          "separateexonprofile",
                          "separateexonprofilewithintrons"):

                plt.figure()
                plt.subplots_adjust(wspace=0.05)
                max_scale = max([max(x) for x in counter.aggregate_counts])

                for x, counts in enumerate(counter.aggregate_counts):
                    plt.subplot(6, 1, x + 1)
                    plt.plot(list(range(len(counts))), counts)
                    plt.title(counter.fields[x])
                    plt.ylim(0, max_scale)

                figname = counter.name + ".full"

                fn = E.getOutputFile(figname) + ".png"
                plt.savefig(os.path.expanduser(fn))

                plt.figure()

                points = []
                cuts = []
                for x, counts in enumerate(counter.aggregate_counts):
                    points.extend(counts)
                    cuts.append(len(counts))

                plt.plot(list(range(len(points))), points)

                xx, xxx = 0, []
                for x in cuts:
                    xxx.append(xx + x // 2)
                    xx += x
                    plt.axvline(xx,
                                color="r",
                                ls="--")

                plt.xticks(xxx, counter.fields)

                figname = counter.name + ".detail"

                fn = E.getOutputFile(figname) + ".png"
                plt.savefig(os.path.expanduser(fn))

            elif method == "tssprofile":

                plt.figure()
                plt.subplot(1, 3, 1)
                plt.plot(list(range(-options.extension_outward,
                                    options.extension_inward)),
                         counter.aggregate_counts[0])
                plt.title(counter.fields[0])
                plt.subplot(1, 3, 2)
                plt.plot(list(range(-options.extension_inward,
                                    options.extension_outward)),
                         counter.aggregate_counts[1])
                plt.title(counter.fields[1])
                plt.subplot(1, 3, 3)
                plt.title("combined")
                plt.plot(list(range(-options.extension_outward,
                                    options.extension_inward)),
                         counter.aggregate_counts[0])
                plt.plot(list(range(-options.extension_inward,
                                    options.extension_outward)),
                         counter.aggregate_counts[1])
                plt.legend(counter.fields[:2])

                fn = E.getOutputFile(counter.name) + ".png"
                plt.savefig(os.path.expanduser(fn))

            elif method == "midpointprofile":

                plt.figure()
                plt.plot(numpy.arange(-options.resolution_upstream, 0),
                         counter.aggregate_counts[0])
                plt.plot(numpy.arange(0, options.resolution_downstream),
                         counter.aggregate_counts[1])

                fn = E.getOutputFile(counter.name) + ".png"
                plt.savefig(os.path.expanduser(fn))

    # write footer and output benchmark information.
    E.Stop()
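
As in the previous example, every savefig destination here is wrapped in os.path.expanduser. The reason is that '~' is a shell convention: plain open() and the OS path APIs take it literally, so an unexpanded path points at a directory literally named '~' under the current working directory, which usually does not exist. A small runnable sketch of the difference (the path names are illustrative):

import os

raw = "~/results/profile.png"
print(os.path.exists(raw))             # almost always False: '~' is taken literally here
print(os.path.expanduser(raw))         # e.g. /home/<user>/results/profile.png on Linux
print(os.path.expanduser("profile.png"))  # paths without a leading '~' pass through unchanged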

Example 19

Project: cgat
Source File: gff2plot.py
View license
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.
    """

    if argv is None:
        argv = sys.argv

    parser = E.OptionParser(
        version="%prog version: $Id: gff2plot.py 2781 2009-09-10 11:33:14Z andreas $", usage=globals()["__doc__"])

    parser.add_option("-f", "--file", dest="filenames", type="string",
                      help="files[s] to take data from,stdin = -.")
    parser.add_option("", "--symbols", dest="symbols", type="string",
                      help="symbols to use for each histogram [steps|...].")
    parser.add_option("--slide-show", dest="slide_show", type="choice",
                      choices=("first", "all", "sequence"),
                      help="do a slide show - otherwise, write image to file.")
    parser.add_option("--config", dest="filename_config", type="string",
                      help="filename of track configuration file.")
    parser.add_option("--dpi", dest="dpi", type="int",
                      help="dpi for hardcopy output.")
    parser.add_option("--window-size", dest="window_size", type="int",
                      help="window-size.")
    parser.add_option("--output-filename-pattern", dest="output_pattern_image", type="string",
                      help="output pattern for images. Should contain a '%(contig)s' pattern .")
    parser.add_option("--global-colours", dest="global_colours", action="store_true",
                      help="cycle through colours for all tracks.")

    parser.set_defaults(
        filenames=None,
        symbols="k-,b-,r-,c-,m-,y-,g-",
        output_pattern_image="%(contig)s.png",
        slide_show=None,
        window_size=None,
        filename_config=None,
        dpi=None,
        global_colours=False,
    )

    (options, args) = E.Start(parser)
    options.symbols = options.symbols.split(",")

    #--------------------------------------------------------
    # collect all the data
    # list of data per source and contig
    tracks = {}
    extra_features = {}

    if options.filenames:
        options.filenames = options.filenames.split(",")

        if len(args) > 0:
            options.filenames = args

    if options.filenames:

        for filename in options.filenames:

            if filename == "-":
                infile = sys.stdin
            else:
                infile = IOTools.openFile(filename)

            data = readData(infile)

            if filename != "-":
                infile.close()

            tracks[filename] = Track(title=filename, data=data)

    elif options.filename_config:
        # get track information from config file
        config = configparser.ConfigParser()
        config.read(os.path.expanduser(options.filename_config))

        # first extract special sections
        for section in config.sections():
            if section == "vlines":
                infile = IOTools.openFile(config.get(section, "filename"), "r")
                data = readData(infile)
                infile.close()
                extra_features[section] = Track(title=section,
                                                data=data,
                                                config=config)
                config.remove_section(section)
            elif section in ("figure", "legend"):
                extra_features[section] = Track(title=section,
                                                data=None,
                                                config=config)
                config.remove_section(section)
        n = 0
        for section in config.sections():

            if config.has_option(section, "filename"):
                infile = IOTools.openFile(config.get(section, "filename"), "r")
                data = readData(infile)
                infile.close()

                tracks[section] = Track(title=section,
                                        data=data,
                                        priority=n,
                                        config=config)

            elif config.has_option(section, "tracks"):
                subtracks = config.get(section, "tracks")
                subtracks = [x.strip() for x in subtracks.split(",")]

                tracks[section] = Track(title=section,
                                        data=None,
                                        config=config,
                                        priority=n,
                                        subtracks=subtracks)
            n += 1

    # compile set of all contigs
    contigs = set()
    for track in list(tracks.values()):
        if track.mData:
            contigs = contigs.union(list(track.mData.keys()))

    # re-arrange tracks and subtracks
    tracks = layoutTracks(tracks)

    nplots = 0
    figures = []
    legend = None
    for contig in contigs:
        figure, l = plotContig(contig, tracks, options,
                               plot_legend=legend is None,
                               extra_features=extra_features)
        figures.append(figure)
        if l:
            legend = l

    if options.slide_show:
        if options.slide_show == "first":
            pylab.show()
        elif options.slide_show == "all":
            pylab.show()
        elif options.slide_show == "sequence":
            pylab.show()
    else:

        extra_args = {}
        if options.dpi:
            extra_args['dpi'] = options.dpi

        for contig, figure in zip(contigs, figures):
            params = {'contig': contig}
            filename = options.output_pattern_image % params
            E.info("# creating image: %s" % filename)
            figure.savefig(os.path.expanduser(filename), **extra_args)
        if legend:
            params = {'contig': "legend"}
            filename = options.output_pattern_image % params
            E.info("creating image: %s" % filename)
            legend.savefig(os.path.expanduser(filename), **extra_args)

    E.info("ninput=%i, ncontigs=%i, nplots=%i" %
           (len(tracks), nplots, len(contigs)))

    E.Stop()
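
The expanduser call to note in this example is config.read(os.path.expanduser(options.filename_config)): the --config option can be given as e.g. ~/tracks.ini and still resolve correctly. A hedged sketch of the same configparser idiom (the file name and error handling below are illustrative only):

import configparser
import os


def load_tracks(path):
    """Read an INI-style track configuration file, expanding a leading '~'."""
    config = configparser.ConfigParser()
    parsed = config.read(os.path.expanduser(path))  # returns the list of files actually read
    if not parsed:
        raise FileNotFoundError("no configuration read from %s" % path)
    return config


# usage sketch:
# config = load_tracks("~/tracks.ini")
# for section in config.sections():
#     print(section, dict(config[section]))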

Example 20

Project: taskwarrior-inthe.am
Source File: commands.py
View license
@command("Set up Taskwarrior to sync issues with your Inthe.AM account.")
def setup(config, args, *extra, **kwargs):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data-dir',
        type=os.path.expanduser,
        default='~/.tasks'
    )
    extra_args = parser.parse_args(extra)

    twversion = TaskWarriorShellout.get_version()
    if twversion < LooseVersion('2.3'):
        raise IncompatibleVersionError(
            "Only Taskwarrior versions 2.3 and above support "
            "synchronization with a task server.  Please upgrade "
            "and try again."
        )

    api = get_api_connection(config)
    twconfig = get_taskwarrior_config(args.taskrc)

    # Make sure that none of these settings are already set.
    necessary_settings = ['certificate', 'key', 'ca', 'trust']
    if 'taskd' in twconfig:
        for setting in necessary_settings:
            if setting in twconfig['taskd'] and twconfig['taskd'][setting]:
                raise ConfigurationError(
                    "Cannot configure!  Setting taskd.%s is already "
                    "configured in your TaskRC file at %s." % (
                        setting,
                        args.taskrc
                    )
                )

    # Create the data directory if necessary
    data_location = os.path.expanduser(
        twconfig.get('data', {}).get('location', extra_args.data_dir)
    )
    try:
        os.mkdir(data_location)
        logger.info(
            "Data directory %s created.",
            data_location
        )
    except OSError:
        logger.warning(
            "Data directory %s already exists.",
            data_location
        )

    # Get user information
    status = api.get('https://inthe.am/api/v2/user/status/').json()

    # Write certificate files
    files = {
        'private.cert': '/api/v2/user/my-certificate/',
        'private.key': '/api/v2/user/my-key/',
        'ca.cert.pem': '/api/v2/user/ca-certificate/',
    }
    for filename, url in files.items():
        full_path = os.path.join(data_location, filename)
        with open(full_path, 'w') as out:
            full_url = 'https://inthe.am%s' % url
            content = api.get(full_url).content
            out.write(content)
            logger.info(
                "File '%s' written to %s.",
                filename,
                full_path,
            )

    # Write configuration
    taskrc_path = os.path.expanduser(args.taskrc)
    with open(taskrc_path, 'a') as out:
        lines = []
        if twconfig.get('data', {}).get('location') is None:
            lines.append(
                'data.location=%s' % data_location
            )
        lines.extend([
            'taskd.certificate=%s' % os.path.join(
                data_location,
                'private.cert',
            ),
            'taskd.key=%s' % os.path.join(
                data_location,
                'private.key',
            ),
            'taskd.ca=%s' % os.path.join(
                data_location,
                'ca.cert.pem',
            ),
            'taskd.server=%s' % status['taskd_server'],
            'taskd.credentials=%s' % status['taskd_credentials'],
        ])
        if twversion >= LooseVersion('2.4'):
            lines.append(
                'taskd.trust=ignore hostname'
            )
        for line in lines:
            out.write('%s\n' % line)

        logger.info(
            "Configuration written to %s.",
            taskrc_path,
        )

    # Synchronizing with Inthe.AM
    logger.info(
        "Performing initial sync..."
    )
    warrior = TaskWarriorShellout(
        config_filename=taskrc_path
    )
    warrior.sync()
    logger.info(
        "Taskwarrior has successfully been configured to synchronize with "
        "Inthe.AM; In the future, just run `task sync` to synchronize."
    )
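
A compact variant of the pattern appears in this example: --data-dir is declared with type=os.path.expanduser, so argparse runs the expansion both on a user-supplied value and on the string default '~/.tasks'. A small self-contained sketch of that idiom (option names here are illustrative):

import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument(
    "--data-dir",
    type=os.path.expanduser,  # applied to string defaults as well as supplied values
    default="~/.tasks",
)

print(parser.parse_args([]).data_dir)                                  # e.g. /home/<user>/.tasks
print(parser.parse_args(["--data-dir", "~/projects/tasks"]).data_dir)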

Example 21

Project: cstar_perf
Source File: client.py
View license
    def perform_job(self, job):
        """Perform a job the server gave us, stream output and artifacts to the given websocket."""
        job = copy.deepcopy(job['test_definition'])
        # Cleanup the job structure according to what stress_compare needs:
        for operation in job['operations']:
            operation['type'] = operation['operation']
            del operation['operation']

        job_dir = os.path.join(os.path.expanduser('~'),'.cstar_perf','jobs',job['test_id'])
        mkpath(job_dir)
        stats_path = os.path.join(job_dir,'stats.{test_id}.json'.format(test_id=job['test_id']))
        summary_path = os.path.join(job_dir,'stats_summary.{test_id}.json'.format(test_id=job['test_id']))
        stress_log_path = os.path.join(job_dir,'stress_compare.{test_id}.log'.format(test_id=job['test_id']))

        stress_json = json.dumps(dict(revisions=job['revisions'],
                                      operations=job['operations'],
                                      title=job['title'],
                                      leave_data=job.get('leave_data', False),
                                      log=stats_path))

        # Create a temporary location to store the stress_compare json file:
        stress_json_path = os.path.join(job_dir, 'test.{test_id}.json'.format(test_id=job['test_id']))
        with open(stress_json_path, 'w') as f:
            f.write(stress_json)

        # Inform the server we will be streaming the console output to them:
        command = Command.new(self.__ws_client.socket(), action='stream', test_id=job['test_id'],
                              kind='console', name="stress_compare.{test_id}.log".format(test_id=job['test_id']),
                              eof=EOF_MARKER, keepalive=KEEPALIVE_MARKER)
        response = self.__ws_client.send(command, assertions={'message':'ready'})

        # Start a status checking thread.
        # If a user cancels the job after it's marked in_progress, we
        # need to periodically check for that state change and kill
        # our test:
        cancel_checker = JobCancellationTracker(urlparse.urlparse(self.ws_endpoint).netloc, job['test_id'])
        cancel_checker.start()

        # stats file observer
        # looks for changes to update server with status progress message
        observer = Observer()
        observer.schedule(UpdateServerProgressMessageHandler(job, urlparse.urlparse(self.ws_endpoint).netloc),
                          os.path.join(os.path.expanduser("~"), '.cstar_perf', 'jobs'),
                          recursive=True)
        observer.start()

        # Run stress_compare in a separate process, collecting the
        # output as an artifact:
        try:
            # Run stress_compare with pexpect. subprocess.Popen didn't
            # work due to some kind of tty issue when invoking
            # nodetool.
            stress_proc = pexpect.spawn('cstar_perf_stress {stress_json_path}'.format(stress_json_path=stress_json_path), timeout=None)
            with open(stress_log_path, 'w') as stress_log:
                while True:
                    try:
                        with timeout(25):
                            line = stress_proc.readline()
                            if line == '':
                                break
                            stress_log.write(line)
                            sys.stdout.write(line)
                            self.__ws_client.send(base64.b64encode(line))
                    except TimeoutError:
                        self.__ws_client.send(base64.b64encode(KEEPALIVE_MARKER))
        finally:
            cancel_checker.stop()
            observer.stop()
            self.__ws_client.send(base64.b64encode(EOF_MARKER))

        response = self.__ws_client.receive(response, assertions={'message': 'stream_received', 'done': True})

        # Find the log tarball for each revision by introspecting the stats json:
        system_logs = []
        flamegraph_logs = []
        yourkit_logs = []
        log_dir = CSTAR_PERF_LOGS_DIR
        flamegraph_dir = os.path.join(os.path.expanduser("~"), '.cstar_perf', 'flamegraph')
        yourkit_dir = os.path.join(os.path.expanduser("~"), '.cstar_perf', 'yourkit')
        # Create a stats summary file without voluminous interval data
        if os.path.isfile(stats_path):
            with open(stats_path) as stats:
                stats = json.loads(stats.read())
                for rev in stats['revisions']:
                    last_log_rev_id = rev.get('last_log')
                    if last_log_rev_id:
                        system_logs.append(os.path.join(log_dir, "{name}.tar.gz".format(name=last_log_rev_id)))
                        fg_path = os.path.join(flamegraph_dir, "{name}.tar.gz".format(name=last_log_rev_id))
                        yourkit_path = os.path.join(yourkit_dir, "{name}.tar.gz".format(name=last_log_rev_id))
                        if os.path.exists(fg_path):
                            flamegraph_logs.append(fg_path)
                        if os.path.exists(yourkit_path):
                            yourkit_logs.append(yourkit_path)
                with open(summary_path, 'w') as summary:
                    hadStats = False
                    for op in stats['stats']:
                        if op['type'] == 'stress':
                            try:
                                del op['intervals']
                                hadStats = True
                            except KeyError:
                                pass
                        try:
                            del op['output']
                        except KeyError:
                            pass
                    if hadStats:
                        json.dump(obj=stats, fp=summary, sort_keys=True, indent=4, separators=(',', ': '))

        # Make a new tarball containing all the revision logs:
        tmptardir = tempfile.mkdtemp()
        try:
            startup_log_tarball = self._maybe_get_startup_log_tarball(job['test_id'], log_dir)
            if startup_log_tarball:
                system_logs.append(startup_log_tarball)
            job_log_dir = os.path.join(tmptardir, 'cassandra_logs.{test_id}'.format(test_id=job['test_id']))
            os.mkdir(job_log_dir)
            for x, syslog in enumerate(system_logs, 1):
                with tarfile.open(syslog) as tar:
                    tar.extractall(job_log_dir)
                    os.rename(os.path.join(job_log_dir, tar.getnames()[0]), os.path.join(job_log_dir, 'revision_{x:02d}'.format(x=x)))
            system_logs_path = os.path.join(job_dir, 'cassandra_logs.{test_id}.tar.gz'.format(test_id=job['test_id']))
            with tarfile.open(system_logs_path, 'w:gz') as tar:
                with cd(tmptardir):
                    tar.add('cassandra_logs.{test_id}'.format(test_id=job['test_id']))
            assert os.path.exists(system_logs_path)
        finally:
            shutil.rmtree(tmptardir)

        # Make a new tarball containing all the flamegraph logs and data
        if flamegraph_logs:
            tmptardir = tempfile.mkdtemp()
            try:
                flamegraph_tmp_dir = os.path.join(tmptardir, 'flamegraph_logs.{test_id}'.format(test_id=job['test_id']))
                os.mkdir(flamegraph_tmp_dir)
                for x, flamegraph in enumerate(flamegraph_logs, 1):
                    with tarfile.open(flamegraph) as tar:
                        tar.extractall(flamegraph_tmp_dir)
                        tmp_dir = os.path.join(flamegraph_tmp_dir, tar.getnames()[0])

                        # Copy all flamegraphs as artifacts
                        for node_dir in os.listdir(tmp_dir):
                            glob_match = os.path.join(os.path.join(tmp_dir, node_dir), '*.svg')
                            graphs = glob.glob(glob_match)
                            for graph in graphs:
                                graph_name = os.path.basename(graph).replace(
                                    'flamegraph_', 'flamegraph_{}_{}_'.format(job['test_id'], node_dir))
                                graph_dst_filename = os.path.join(job_dir, graph_name)
                                shutil.copyfile(graph, graph_dst_filename)

                        os.rename(tmp_dir, os.path.join(flamegraph_tmp_dir, 'revision_{x:02d}'.format(x=x)))

                flamegraph_job_path = os.path.join(job_dir, 'flamegraph_logs.{test_id}.tar.gz'.format(test_id=job['test_id']))
                with tarfile.open(flamegraph_job_path, 'w:gz') as tar:
                    with cd(tmptardir):
                        tar.add('flamegraph_logs.{test_id}'.format(test_id=job['test_id']))
                assert os.path.exists(flamegraph_job_path)
            finally:
                shutil.rmtree(tmptardir)

        # Make a new tarball containing all the yourkit logs and data
        if yourkit_logs:
            tmptardir = tempfile.mkdtemp()
            try:
                yourkit_tmp_dir = os.path.join(tmptardir, 'yourkit.{test_id}'.format(test_id=job['test_id']))
                os.mkdir(yourkit_tmp_dir)
                for x, yourkit in enumerate(yourkit_logs, 1):
                    with tarfile.open(yourkit) as tar:
                        tar.extractall(yourkit_tmp_dir)
                        tmp_dir = os.path.join(yourkit_tmp_dir, tar.getnames()[0])
                        os.rename(tmp_dir, os.path.join(yourkit_tmp_dir, 'revision_{x:02d}'.format(x=x)))

                yourkit_job_path = os.path.join(job_dir, 'yourkit.{test_id}.tar.gz'.format(test_id=job['test_id']))
                with tarfile.open(yourkit_job_path, 'w:gz') as tar:
                    with cd(tmptardir):
                        tar.add('yourkit.{test_id}'.format(test_id=job['test_id']))
                assert os.path.exists(yourkit_job_path)
            finally:
                shutil.rmtree(tmptardir)

        ## Stream artifacts
        ## Write final job status to 0.job_status file
        final_status = 'local_complete'
        try:
            # Stream artifacts:
            self.stream_artifacts(job['test_id'])
            if self.__ws_client.in_sync():
                final_status = 'server_complete'

            # Spot check stats to ensure it has the data it should
            # contain. Raises JobFailure if something's amiss.
            try:
                self.__spot_check_stats(job, stats_path)
            except JobFailure, e:
                if final_status == 'server_complete':
                    final_status = 'server_fail'
                else:
                    final_status = 'local_fail'
                raise
        finally:
            with open(os.path.join(job_dir, '0.job_status'), 'w') as f:
                f.write(final_status)

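Every artifact path in the example above is anchored under a per-user dot-directory rather than a hard-coded absolute path. A minimal standalone sketch of that idiom (the directory names follow the example; nothing else is taken from cstar_perf):

import os

jobs_dir = os.path.join(os.path.expanduser("~"), '.cstar_perf', 'jobs')
if not os.path.isdir(jobs_dir):
    os.makedirs(jobs_dir)  # create ~/.cstar_perf/jobs on first use
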
Example 22

Project: facenet
Source File: facenet_train.py
View license
def main(args):
  
    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:
        print('Pre-trained model: %s' % os.path.expanduser(args.pretrained_model))
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
        
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for input images
        images_placeholder = tf.placeholder(tf.float32, shape=(None, args.image_size, args.image_size, 3), name='input')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        # Build the inference graph
        prelogits, _ = network.inference(images_placeholder, args.keep_probability, 
            phase_train=True, weight_decay=args.weight_decay)
        pre_embeddings = slim.fully_connected(prelogits, 128, activation_fn=None, scope='Embeddings', reuse=False)

        # Split example embeddings into anchor, positive and negative and calculate triplet loss
        embeddings = tf.nn.l2_normalize(pre_embeddings, 1, 1e-10, name='embeddings')
        anchor, positive, negative = tf.split(0, 3, embeddings)
        triplet_loss = facenet.triplet_loss(anchor, positive, negative, args.alpha)
        
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([triplet_loss] + regularization_losses, name='total_loss')

        # Create list with variables to restore
        restore_vars = []
        update_gradient_vars = []
        if args.pretrained_model:
            for var in tf.all_variables():
                if not 'Embeddings/' in var.op.name:
                    restore_vars.append(var)
                else:
                    update_gradient_vars.append(var)
        else:
            restore_vars = tf.all_variables()
            update_gradient_vars = tf.all_variables()

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, update_gradient_vars)
        
        # Create a saver
        restore_saver = tf.train.Saver(restore_vars)
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))        

        # Initialize variables
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())

        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                restore_saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                step = train(args, sess, train_set, epoch, images_placeholder, 
                    learning_rate_placeholder, global_step, embeddings, total_loss, train_op, summary_op, summary_writer)
                if args.lfw_dir:
                    _, _, accuracy, val, val_std, far = lfw.validate(sess, lfw_paths,
                        actual_issame, args.seed, 60, images_placeholder, phase_train_placeholder, embeddings, nrof_folds=args.lfw_nrof_folds)
                    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
                    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
                    # Add validation loss and accuracy to summary
                    summary = tf.Summary()
                    #pylint: disable=maybe-no-member
                    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
                    summary.value.add(tag='lfw/val_rate', simple_value=val)
                    summary_writer.add_summary(summary, step)

                # Save the model checkpoint
                print('Saving checkpoint')
                checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    return model_dir

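The directory setup above reduces to one pattern: expand a user-supplied base path (which may start with ~) before creating a timestamped run directory under it. A condensed sketch, with the argument name as a placeholder:

import os
from datetime import datetime

def make_run_dir(base_dir):
    # '~/logs/facenet' -> '/home/<user>/logs/facenet/<YYYYmmdd-HHMMSS>'
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    run_dir = os.path.join(os.path.expanduser(base_dir), subdir)
    if not os.path.isdir(run_dir):
        os.makedirs(run_dir)
    return run_dir
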
Example 23

Project: facenet
Source File: facenet_train_classifier.py
View license
def main(args):
  
    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augument_data(image_list, label_list, args.image_size,
            args.batch_size, args.max_nrof_epochs, args.random_crop, args.random_flip, args.nrof_preprocess_threads)
        print('Total number of classes: %d' % len(train_set))
        print('Total number of examples: %d' % len(image_list))
        
        # Node for input images
        image_batch.set_shape((None, args.image_size, args.image_size, 3))
        image_batch = tf.identity(image_batch, name='input')
        
        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        
        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, weight_decay=args.weight_decay)
        with tf.variable_scope('Logits'):
            n = int(prelogits.get_shape()[1])
            m = len(train_set)
            w = tf.get_variable('w', shape=[n,m], dtype=tf.float32, 
                initializer=tf.truncated_normal_initializer(stddev=0.1), 
                regularizer=slim.l2_regularizer(args.weight_decay),
                trainable=True)
            b = tf.get_variable('b', [m], initializer=None, trainable=True)
            logits = tf.matmul(prelogits, w) + b

        # Add DeCov regularization loss
        if args.decov_loss_factor>0.0:
            logits_decov_loss = facenet.decov_loss(logits) * args.decov_loss_factor
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, logits_decov_loss)
            
        # Add center loss
        update_centers = tf.no_op('update_centers')
        if args.center_loss_factor>0.0:
            prelogits_center_loss, update_centers = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa)
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, label_batch, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.all_variables(), args.log_histograms)

        # Create a saver
        save_variables = list(set(tf.all_variables())-set([w])-set([b]))
        saver = tf.train.Saver(save_variables, max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())
        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if pretrained_model:
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, phase_train_placeholder, learning_rate_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
                    update_centers)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    start_time = time.time()
                    _, _, accuracy, val, val_std, far = lfw.validate(sess, lfw_paths, actual_issame, args.seed, 
                        args.batch_size, image_batch, phase_train_placeholder, embeddings, nrof_folds=args.lfw_nrof_folds)
                    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
                    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
                    lfw_time = time.time() - start_time
                    # Add validation loss and accuracy to summary
                    summary = tf.Summary()
                    #pylint: disable=maybe-no-member
                    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
                    summary.value.add(tag='lfw/val_rate', simple_value=val)
                    summary.value.add(tag='time/lfw', simple_value=lfw_time)
                    summary_writer.add_summary(summary, step)
                    with open(os.path.join(log_dir,'lfw_result.txt'),'at') as f:
                        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))

                
    return model_dir

Example 24

Project: mycli
Source File: main.py
View license
@click.command()
@click.option('-h', '--host', envvar='MYSQL_HOST', help='Host address of the database.')
@click.option('-P', '--port', envvar='MYSQL_TCP_PORT', type=int, help='Port number to use for connection. Honors '
              '$MYSQL_TCP_PORT')
@click.option('-u', '--user', help='User name to connect to the database.')
@click.option('-S', '--socket', envvar='MYSQL_UNIX_PORT', help='The socket file to use for connection.')
@click.option('-p', '--password', 'password', envvar='MYSQL_PWD', type=str,
              help='Password to connect to the database')
@click.option('--pass', 'password', envvar='MYSQL_PWD', type=str,
              help='Password to connect to the database')
@click.option('--ssl-ca', help='CA file in PEM format',
              type=click.Path(exists=True))
@click.option('--ssl-capath', help='CA directory')
@click.option('--ssl-cert', help='X509 cert in PEM format',
              type=click.Path(exists=True))
@click.option('--ssl-key', help='X509 key in PEM format',
              type=click.Path(exists=True))
@click.option('--ssl-cipher', help='SSL cipher to use')
@click.option('--ssl-verify-server-cert', is_flag=True,
              help=('Verify server\'s "Common Name" in its cert against '
                    'hostname used when connecting. This option is disabled '
                    'by default'))
# as of 2016-02-15 revocation list is not supported by underlying PyMySQL
# library (--ssl-crl and --ssl-crlpath options in vanilla mysql client)
@click.option('-v', '--version', is_flag=True, help='Version of mycli.')
@click.option('-D', '--database', 'dbname', help='Database to use.')
@click.option('-R', '--prompt', 'prompt',
              help='Prompt format (Default: "{0}")'.format(
                  MyCli.default_prompt))
@click.option('-l', '--logfile', type=click.File(mode='a', encoding='utf-8'),
              help='Log every query and its results to a file.')
@click.option('--defaults-group-suffix', type=str,
              help='Read config group with the specified suffix.')
@click.option('--defaults-file', type=click.Path(),
              help='Only read default options from the given file')
@click.option('--auto-vertical-output', is_flag=True,
              help='Automatically switch to vertical output mode if the result is wider than the terminal width.')
@click.option('-t', '--table', is_flag=True,
              help='Display batch output in table format.')
@click.option('--warn/--no-warn', default=None,
              help='Warn before running a destructive query.')
@click.option('--local-infile', type=bool,
              help='Enable/disable LOAD DATA LOCAL INFILE.')
@click.option('--login-path', type=str,
              help='Read this path from the login file.')
@click.option('-e', '--execute',  type=str,
              help='Execute query to the database.')
@click.argument('database', default='', nargs=1)
def cli(database, user, host, port, socket, password, dbname,
        version, prompt, logfile, defaults_group_suffix, defaults_file,
        login_path, auto_vertical_output, local_infile, ssl_ca, ssl_capath,
        ssl_cert, ssl_key, ssl_cipher, ssl_verify_server_cert, table, warn,
        execute):

    if version:
        print('Version:', __version__)
        sys.exit(0)

    mycli = MyCli(prompt=prompt, logfile=logfile,
                  defaults_suffix=defaults_group_suffix,
                  defaults_file=defaults_file, login_path=login_path,
                  auto_vertical_output=auto_vertical_output, warn=warn)

    # Choose which ever one has a valid value.
    database = database or dbname

    ssl = {
            'ca': ssl_ca and os.path.expanduser(ssl_ca),
            'cert': ssl_cert and os.path.expanduser(ssl_cert),
            'key': ssl_key and os.path.expanduser(ssl_key),
            'capath': ssl_capath,
            'cipher': ssl_cipher,
            'check_hostname': ssl_verify_server_cert,
            }

    # remove empty ssl options
    ssl = dict((k, v) for (k, v) in ssl.items() if v is not None)
    if database and '://' in database:
        mycli.connect_uri(database, local_infile, ssl)
    else:
        mycli.connect(database, user, password, host, port, socket,
                      local_infile=local_infile, ssl=ssl)

    mycli.logger.debug('Launch Params: \n'
            '\tdatabase: %r'
            '\tuser: %r'
            '\thost: %r'
            '\tport: %r', database, user, host, port)

    #  --execute argument
    if execute:
        try:
            mycli.run_query(execute, table_format=table)
            exit(0)
        except Exception as e:
            click.secho(str(e), err=True, fg='red')
            exit(1)

    if sys.stdin.isatty():
        mycli.run_cli()
    else:
        stdin = click.get_text_stream('stdin')
        stdin_text = stdin.read()

        try:
            sys.stdin = open('/dev/tty')
        except FileNotFoundError:
            mycli.logger.warning('Unable to open TTY as stdin.')

        if (mycli.destructive_warning and
                confirm_destructive_query(stdin_text) is False):
            exit(0)
        try:
            mycli.run_query(stdin_text, table_format=table)
        except Exception as e:
            click.secho(str(e), err=True, fg='red')
            exit(1)

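The SSL block above expands a path only when the corresponding option was actually supplied, then drops the unset entries before passing the dict on. A minimal sketch of that guard, with made-up option values:

import os

ssl_ca = '~/certs/ca.pem'   # set by the user
ssl_key = None              # flag omitted

ssl = {
    'ca': ssl_ca and os.path.expanduser(ssl_ca),
    'key': ssl_key and os.path.expanduser(ssl_key),
}
ssl = dict((k, v) for (k, v) in ssl.items() if v is not None)
# -> {'ca': '/home/<user>/certs/ca.pem'}
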
Example 26

Project: tvnamer
Source File: main.py
View license
def main():
    """Parses command line arguments, displays errors from tvnamer in terminal
    """
    opter = cliarg_parser.getCommandlineParser(defaults)

    opts, args = opter.parse_args()

    if opts.verbose:
        logging.basicConfig(
            level = logging.DEBUG,
            format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    else:
        logging.basicConfig()

    # If a config is specified, load it, update the defaults using the loaded
    # values, then reparse the options with the updated defaults.
    default_configuration = os.path.expanduser("~/.tvnamer.json")

    if opts.loadconfig is not None:
        # Command line overrides loading ~/.tvnamer.json
        configToLoad = opts.loadconfig
    elif os.path.isfile(default_configuration):
        # No --config arg, so load default config if it exists
        configToLoad = default_configuration
    else:
        # No arg, nothing at default config location, don't load anything
        configToLoad = None

    if configToLoad is not None:
        p("Loading config: %s" % (configToLoad))
        try:
            loadedConfig = json.load(open(os.path.expanduser(configToLoad)))
        except ValueError as e:
            p("Error loading config: %s" % e)
            opter.exit(1)
        else:
            # Config loaded, update optparser's defaults and reparse
            defaults.update(loadedConfig)
            opter = cliarg_parser.getCommandlineParser(defaults)
            opts, args = opter.parse_args()

    # Decode args using filesystem encoding (done after config loading
    # as the args are reparsed when the config is loaded)
    if PY2:
        args = [x.decode(sys.getfilesystemencoding()) for x in args]

    # Save config argument
    if opts.saveconfig is not None:
        p("Saving config: %s" % (opts.saveconfig))
        configToSave = dict(opts.__dict__)
        del configToSave['saveconfig']
        del configToSave['loadconfig']
        del configToSave['showconfig']
        json.dump(
            configToSave,
            open(os.path.expanduser(opts.saveconfig), "w+"),
            sort_keys=True,
            indent=4)

        opter.exit(0)

    # Show config argument
    if opts.showconfig:
        print(json.dumps(opts.__dict__, sort_keys=True, indent=2))
        return

    # Process values
    if opts.batch:
        opts.select_first = True
        opts.always_rename = True

    # Update global config object
    Config.update(opts.__dict__)

    if Config["move_files_only"] and not Config["move_files_enable"]:
        p("#" * 20)
        p("Parameter move_files_enable cannot be set to false while parameter move_only is set to true.")
        p("#" * 20)
        opter.exit(0)

    if Config['titlecase_filename'] and Config['lowercase_filename']:
        warnings.warn("Setting 'lowercase_filename' clobbers 'titlecase_filename' option")

    if len(args) == 0:
        opter.error("No filenames or directories supplied")

    try:
        tvnamer(paths = sorted(args))
    except NoValidFilesFoundError:
        opter.error("No valid files were supplied")
    except UserAbort as errormsg:
        opter.error(errormsg)

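The config handling above comes down to three steps: expand a ~-relative default location, check whether a file is actually there, and only then parse it. A standalone sketch (the file name follows the example; the rest is illustrative):

import json
import os

default_configuration = os.path.expanduser("~/.tvnamer.json")
if os.path.isfile(default_configuration):
    with open(default_configuration) as f:
        loaded_config = json.load(f)
else:
    loaded_config = {}
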
Example 27

Project: RDFAlchemy
Source File: __init__.py
View license
def create_engine(url='', identifier="", create=False):
    """
    :returns: returns an opened rdflib ConjunctiveGraph

    :param url: a string of the url
    :param identifier: URIRef of the default context for writing e.g.:

      - create_engine('sleepycat://~/working/rdf_db')
      - create_engine('kyotocabinet://~/working/rdf_db')
      - create_engine('zodb:///var/rdflib/Data.fs')
      - create_engine('zodb://localhost:8672')
      - create_engine(
            'sesame://www.example.com:8080/openrdf-sesame/repositories/Test')
      - create_engine('sparql://www.example.com:2020/sparql')

    for zodb:

    the key in the Zope database is hardcoded as 'rdflib', urls ending in `.fs`
    indicate FileStorage, otherwise ClientStorage is assumed which requires
    a ZEO Server to be running

    for sqlalchemy, prepend the string "sqlalchemy+" to a valid SQLAlchemy dburi
    form:

      - create_engine('sqlalchemy+sqlite://')
      - create_engine('sqlalchemy+sqlite:////absolute/path/to/foo.db')
      - create_engine('sqlalchemy+mysql://user@localhost/rdflibdb')
      - create_engine('sqlalchemy+postgresql://user@localhost/rdflibdb')

    etc.

    """
    if url == '' or url.startswith('IOMemory'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('IOMemory')

    elif url.lower().startswith('sleepycat://'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('Sleepycat', identifier=identifier)
        openstr = os.path.abspath(os.path.expanduser(url[12:]))
        db.open(openstr, create=create)

    elif url.lower().startswith('kyotocabinet://'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('Kyotocabinet', identifier=identifier)
        openstr = os.path.abspath(os.path.expanduser(url[15:]))
        db.open(openstr, create=create)

    elif url.lower().startswith('sqlalchemy+'):
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('SQLAlchemy', identifier=identifier)
        db.open(url[11:], create=create)

    elif url.lower().startswith('zodb://'):
        import ZODB
        # import transaction
        from rdflib import ConjunctiveGraph
        db = ConjunctiveGraph('ZODB')
        if url.endswith('.fs'):
            from ZODB.FileStorage import FileStorage
            openstr = os.path.abspath(os.path.expanduser(url[7:]))
            if not os.path.exists(openstr) and not create:
                raise IOError("File not found: %s" % openstr)
            fs = FileStorage(openstr)
        else:
            from ZEO.ClientStorage import ClientStorage
            schema, opts = _parse_rfc1738_args(url)
            fs = ClientStorage((opts['host'], int(opts['port'])))
        # get the Zope Database
        zdb = ZODB.DB(fs)
        # open it
        conn = zdb.open()
        #get the root
        root = conn.root()
        # get the Conjunctive Graph
        if 'rdflib' not in root and create:
            root['rdflib'] = ConjunctiveGraph('ZODB')
        db = root['rdflib']

    elif url.lower().startswith('sesame://'):
        from rdfalchemy.sparql.sesame2 import SesameGraph
        db = SesameGraph("http://" + url[9:])

    elif url.lower().startswith('sparql://'):
        from rdfalchemy.sparql import SPARQLGraph
        db = SPARQLGraph("http://" + url[9:])

    else:
        raise ValueError("Could not parse string '%s'" % url)
    return db

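For the file-backed stores, the function strips the URL scheme and normalizes what remains with expanduser plus abspath, so a URL such as 'sleepycat://~/working/rdf_db' resolves to a real directory. A minimal sketch of just that step:

import os

url = 'sleepycat://~/working/rdf_db'
openstr = os.path.abspath(os.path.expanduser(url[len('sleepycat://'):]))
# e.g. '/home/<user>/working/rdf_db'
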
Example 28

Project: iOSSecAudit
Source File: locaUtil.py
View license
    def resign_ipa(self, ipa_path, entitlements_path, mobileprovision_path, identity, sign_file=None):
        """
        important: type(sign_file) should be []

        1.unzip ipa file
        2.remove old sign dictionary
        3.copy mobileprovision
        4.sign and inject all sign_file if sign_file is not None
        5.sign app
        6.verify sign
        7.zip as ipa
        8.return new signed ipa_path
        """
        
        ipa_path = os.path.abspath(os.path.expanduser(ipa_path))
        entitlements_path = os.path.abspath(os.path.expanduser(entitlements_path))
        mobileprovision_path = os.path.abspath(os.path.expanduser(mobileprovision_path))
        #check ipa_path, entitlements_path, mobileprovision_path exists and is file
        if not self.local_file_exists(ipa_path):
            G.log(G.INFO, 'File \'%s\' not exists or not a file.' % ipa_path)
            return None
        elif not self.local_file_exists(entitlements_path):
            G.log(G.INFO, 'File \'%s\' not exists or not a file.' % entitlements_path)
            return None
        elif not self.local_file_exists(mobileprovision_path):
            G.log(G.INFO, 'File \'%s\' not exists or not a file.' % mobileprovision_path)
            return None
        
        #mk temp dir
        basename = os.path.basename(ipa_path)
        cmd = '/usr/bin/mktemp -d %s' % os.path.join('/tmp', '_SecAutid_IPA_%s_%s' % (basename, uuid.uuid1()))
        r = self.exec_shell(cmd)
        tmp = r[:-1]
        
        #1.unzip ipa file
        G.log(G.INFO, 'unzip ipa file')
        cmd = 'cd %s && /usr/bin/unzip -q \'%s\'' % (tmp, ipa_path)
        r = self.exec_shell(cmd)
        
        #get *.app path
        app_tmp_path = None
        for f in os.listdir(os.path.join(tmp, 'Payload')):
            if f.endswith('.app'):
                app_tmp_path = os.path.join(tmp, os.path.join('Payload', f))
        
        if app_tmp_path is None:
            G.log(G.INFO, 'unzip error')
            return None
        
        #2.remove old sign dictionary
        G.log(G.INFO, 'remove old sign dictionary')
        old_sign_dir = os.path.join(app_tmp_path, '_CodeSignature')
        if os.path.exists(old_sign_dir):
            cmd = 'rm -rf %s' % (old_sign_dir)
            r = self.exec_shell(cmd)
            
        #3.copy mobileprovision
        G.log(G.INFO, 'copy mobileprovision')
        cmd = 'cp %s %s' % (mobileprovision_path, os.path.join(app_tmp_path, 'embedded.mobileprovision'))
        r = self.exec_shell(cmd)
        
        #4.sign and inject all sign_file if sign_file is not None
        if sign_file is not None:
            if type(sign_file) is list:
                G.log(G.INFO, 'sign all inject file')
                
                #find binary path
                info_plist = os.path.join(app_tmp_path, 'Info.plist')
                import shutil
                info_plist_path = os.path.join(tmp, 'Info.plist')
                shutil.copy2(info_plist, info_plist_path)
                info_plist_util = InfoPlistUtil(info_plist_path)
                binary_name = info_plist_util.get_property("CFBundleExecutable")
                binary_path = os.path.join(app_tmp_path, binary_name)                

                for f in sign_file:
                    #cp f Payload/WeChat.app/
                    #codesign -f -s 'iPhone Developer: Long Chen (3K54S797W6)' --entitlements entitlements.plist Payload/WeChat.app/libeeee.dylib
                    path = os.path.abspath(os.path.expanduser(f))
                    if self.local_file_exists(path):
                        #copy file into *.app
                        cmd = 'cp %s %s' % (path, app_tmp_path)
                        r = self.exec_shell(cmd)
                        #sign inject file
                        cmd = '/usr/bin/codesign -f -s \'%s\' --entitlements %s %s' % (identity, entitlements_path, os.path.join(app_tmp_path, os.path.basename(path)))
                        r = self.exec_shell(cmd)
                        #inject binary
                        self.inject_dylib_to_binary(binary_path, path)
                        
        #5.sign app
        G.log(G.INFO, 'sign app')
        #cmd: codesign -f -s 'iPhone Developer: Name Name (XXXXXXXX)' --entitlements entitlements.plist Payload/WeChat.app
        cmd = '/usr/bin/codesign -f -s \'%s\' --entitlements %s %s' % (identity, entitlements_path, app_tmp_path)
        r = self.exec_shell(cmd)
        
        #6.verify sign
        G.log(G.INFO, 'verify sign')
        cmd = '/usr/bin/codesign --verify %s' % app_tmp_path
        r = self.exec_shell(cmd)
        if r != '':
            G.log(G.INFO, 'verify sign failed')
            return None
        
        #7.zip as ipa
        G.log(G.INFO, 'zip new ipa file')
        new_signed_ipa = os.path.join(os.path.dirname(ipa_path), '%s.resigned.ipa'%basename)
        cmd = 'cd %s && /usr/bin/zip -qr %s %s' % (tmp, new_signed_ipa, 'Payload')
        r = self.exec_shell(cmd)
        
        G.log(G.INFO, 'Resign success')
        G.log(G.INFO, 'resigned ipa: %s' % new_signed_ipa)
        
        return new_signed_ipa

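The method begins by expanding every user-supplied path and bailing out if any of them is not an existing file. A compact sketch of that validation step (the helper name here is hypothetical, not the project's local_file_exists):

import os

def expand_and_check(*paths):
    # Return absolute paths, or None if any argument is not an existing file.
    expanded = [os.path.abspath(os.path.expanduser(p)) for p in paths]
    if all(os.path.isfile(p) for p in expanded):
        return expanded
    return None
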
Example 29

Project: carbon
Source File: conf.py
View license
    def postOptions(self):
        global settings

        program = self.parent.subCommand

        # Use provided pidfile (if any) as default for configuration. If it's
        # set to 'twistd.pid', that means no value was provided and the default
        # was used.
        pidfile = self.parent["pidfile"]
        if pidfile.endswith("twistd.pid"):
            pidfile = None
        self["pidfile"] = pidfile

        # Enforce a default umask of '022' if none was set.
        if not self.parent.has_key("umask") or self.parent["umask"] is None:
            self.parent["umask"] = 022

        # Read extra settings from the configuration file.
        program_settings = read_config(program, self)
        settings.update(program_settings)
        settings["program"] = program

        # Normalize and expand paths
        settings["STORAGE_DIR"] = os.path.normpath(os.path.expanduser(settings["STORAGE_DIR"]))
        settings["LOCAL_DATA_DIR"] = os.path.normpath(os.path.expanduser(settings["LOCAL_DATA_DIR"]))
        settings["WHITELISTS_DIR"] = os.path.normpath(os.path.expanduser(settings["WHITELISTS_DIR"]))
        settings["PID_DIR"] = os.path.normpath(os.path.expanduser(settings["PID_DIR"]))
        settings["LOG_DIR"] = os.path.normpath(os.path.expanduser(settings["LOG_DIR"]))
        settings["pidfile"] = os.path.normpath(os.path.expanduser(settings["pidfile"]))

        # Set process uid/gid by changing the parent config, if a user was
        # provided in the configuration file.
        if settings.USER:
            self.parent["uid"], self.parent["gid"] = (
                pwd.getpwnam(settings.USER)[2:4])

        # Set the pidfile in parent config to the value that was computed by
        # C{read_config}.
        self.parent["pidfile"] = settings["pidfile"]

        storage_schemas = join(settings["CONF_DIR"], "storage-schemas.conf")
        if not exists(storage_schemas):
            print "Error: missing required config %s" % storage_schemas
            sys.exit(1)

        if settings.CACHE_WRITE_STRATEGY not in ('sorted', 'max', 'naive'):
            log.err("%s is not a valid value for CACHE_WRITE_STRATEGY, defaulting to %s" %
                    (settings.CACHE_WRITE_STRATEGY, defaults['CACHE_WRITE_STRATEGY']))
        else:
            log.msg("Using %s write strategy for cache" % settings.CACHE_WRITE_STRATEGY)

        # Database-specific settings
        database = settings.DATABASE
        if database not in TimeSeriesDatabase.plugins:
            print "No database plugin implemented for '%s'" % database
            raise SystemExit(1)

        database_class = TimeSeriesDatabase.plugins[database]
        state.database = database_class(settings)

        settings.CACHE_SIZE_LOW_WATERMARK = settings.MAX_CACHE_SIZE * 0.95

        if not "action" in self:
            self["action"] = "start"
        self.handleAction()

        # If we are not running in debug mode or non-daemon mode, then log to a
        # directory, otherwise log output will go to stdout. If parent options
        # are set to log to syslog, then use that instead.
        if not self["debug"]:
            if self.parent.get("syslog", None):
                log.logToSyslog(self.parent["prefix"])
            elif not self.parent["nodaemon"]:
                logdir = settings.LOG_DIR
                if not isdir(logdir):
                    os.makedirs(logdir)
                    if settings.USER:
                        # We have not yet switched to the specified user,
                        # but that user must be able to create files in this
                        # directory.
                        os.chown(logdir, self.parent["uid"], self.parent["gid"])
                log.logToDir(logdir)

        if self["whitelist"] is None:
            self["whitelist"] = join(settings["CONF_DIR"], "whitelist.conf")
        settings["whitelist"] = self["whitelist"]

        if self["blacklist"] is None:
            self["blacklist"] = join(settings["CONF_DIR"], "blacklist.conf")
        settings["blacklist"] = self["blacklist"]

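Several directory settings are pushed through the same expanduser-plus-normpath combination. A minimal sketch of that normalization, assuming settings behaves like a plain dict (the values here are illustrative):

import os

settings = {'STORAGE_DIR': '~/graphite/storage/',
            'LOG_DIR': '~/graphite/storage/log/'}

for key in ('STORAGE_DIR', 'LOG_DIR'):
    settings[key] = os.path.normpath(os.path.expanduser(settings[key]))
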
Example 30

Project: conky_colors
Source File: conkyBanshee.py
View license
    def getOutputData(self, datatype, ratingchar, statustext, nounknownoutput, maxlength):
        output = u""
        
        if nounknownoutput == True:
            unknown_time = ""
            unknown_number = ""
            unknown_string = ""
        else:
            unknown_time = "0:00"
            unknown_number = "0"
            unknown_string = "Unknown"
        
        try:
                
            bus = dbus.SessionBus()
            if self.musicData == None:
                
                if self.testDBus(bus, 'org.bansheeproject.Banshee'):
                
                    self.logInfo("Calling dbus interface for music data")
    
                    try:
                        self.logInfo("Setting up dbus interface")
                        
                        # setup dbus hooks
                        remote_player = bus.get_object('org.bansheeproject.Banshee', '/org/bansheeproject/Banshee/PlayerEngine')
                        iface_player = dbus.Interface(remote_player, 'org.bansheeproject.Banshee.PlayerEngine')
                        
                        self.logInfo("Calling dbus interface for music data")
                    
                        # prepare song properties for data retrieval
                        volume = str(iface_player.GetVolume())
                        
                        status = self.getStatusText(iface_player.GetCurrentState(), statustext)

                        # grab the data into variables
                        location = iface_player.GetCurrentUri()

                        # handle a file or stream differently for filename
                        if location.find("file://") != -1:
                            filename = location[location.rfind("/")+1:]
                        elif len(location) > 0:
                            filename = location
                        else:
                            filename = ""

                        # try to get all the normal stuff...the props return an empty string if nothing is available

                        props = iface_player.GetCurrentTrack()

                        if "name" in props:
                            title = props["name"]
                        else:
                            title = None
                            
                        if "album" in props:
                            album = props["album"]
                        else:
                            album = None
                            
                        if "artist" in props:
                            artist = props["artist"]
                        else:
                            artist = None
                            
                        if "year" in props:
                            year = str(props["year"])
                        else:
                            year = None
                        
                        if "track-number" in props:
                            tracknumber = str(props["track-number"])
                        else:
                            tracknumber = None

                        if "bit-rate" in props:
                            bitrate = str(props["bit-rate"])+"k/s"
                        else:
                            bitrate = None
                            
                        if year == "0": year = None
                        if tracknumber == "0": tracknumber = None
                        if bitrate == "0": bitrate = None

                        # TODO: get album art working for internet based (if feasible)...
                        # get coverart url or file link
                        if "artwork-id" in props:
                            if os.path.exists(os.path.expanduser("~/.cache/media-art/")):
                                path_prefix = os.path.expanduser("~/.cache/media-art/")
                            else:
                                path_prefix = os.path.expanduser("~/.cache/album-art/")
                            
                            coverart = os.path.join(path_prefix,str(props["artwork-id"]) +".jpg")
                            if coverart.find("http://") != -1:
                                coverart = None
                        else:
                            coverart = None

                        # common details
                        if "genre" in props:
                            genre = props["genre"]
                        else:
                            genre = None
                        
                        length_seconds = int(iface_player.GetLength() / 1000)
                        current_seconds = int(iface_player.GetPosition() / 1000)
                        current_position = str(int(current_seconds/60)).rjust(1,"0")+":"+str(int(current_seconds%60)).rjust(2,"0")

                        if length_seconds > 0:
                            current_position_percent = str(int((float(current_seconds) / float(length_seconds))*100))
                        else:
                            length_seconds = 0
                            current_position_percent = "0"

                        if self.options.secondsoutput == True:
                            length = str(length_seconds)
                            current_position = str(current_seconds)
                        else:
                            length = str(length_seconds/60).rjust(1,"0")+":"+str(length_seconds%60).rjust(2,"0")
                            current_position = str(int(current_seconds/60)).rjust(1,"0")+":"+str(int(current_seconds%60)).rjust(2,"0")
                                
                        rating = int(iface_player.GetRating())
                        #"0" # not supported
                        

                        volume = str(iface_player.GetVolume())

                        self.musicData = MusicData(status,coverart,title,album,length,artist,tracknumber,genre,year,filename,bitrate,current_position_percent,current_position,rating,volume)
                        
                    except Exception, e:
                        self.logError(e.__str__())

            if self.musicData != None:
                
                self.logInfo("Preparing output for datatype:"+datatype)

                if datatype == "ST": #status
                    if self.musicData.status == None or len(self.musicData.status) == 0:
                        output = None
                    else:
                        output = self.musicData.status

                elif datatype == "CA": #coverart
                    if self.musicData.coverart == None or len(self.musicData.coverart) == 0:
                        output = None
                    else:
                        self.logInfo("Copying coverart from %s to %s"%(self.musicData.coverart, self.options.coverartpath))
                        shutil.copy(self.musicData.coverart, self.options.coverartpath)
                        self.musicData.coverart = self.options.coverartpath                        
                        output = self.musicData.coverart
                            
                elif datatype == "TI": #title
                    if self.musicData.title == None or len(self.musicData.title) == 0:
                        output = None
                    else:
                        output = self.musicData.title
                        
                elif datatype == "AL": #album
                    if self.musicData.album == None or len(self.musicData.album) == 0:
                        output = None
                    else:
                        output = self.musicData.album
                        
                elif datatype == "AR": #artist
                    if self.musicData.artist == None or len(self.musicData.artist) == 0:
                        output = None
                    else:
                        output = self.musicData.artist

                elif datatype == "TN": #tracknumber
                    if self.musicData.tracknumber == None or len(self.musicData.tracknumber) == 0:
                        output = None
                    else:
                        output = self.musicData.tracknumber
                        
                elif datatype == "GE": #genre
                    if self.musicData.genre == None or len(self.musicData.genre) == 0:
                        output = None
                    else:
                        output = self.musicData.genre
                        
                elif datatype == "YR": #year
                    if self.musicData.year == None or len(self.musicData.year) == 0:
                        output = None
                    else:
                        output = self.musicData.year
                                                
                elif datatype == "FN": #filename
                    if self.musicData.filename == None or len(self.musicData.filename) == 0:
                        output = None
                    else:
                        output = self.musicData.filename

                elif datatype == "BR": #bitrate
                    if self.musicData.bitrate == None or len(self.musicData.bitrate) == 0:
                        output = None
                    else:
                        output = self.musicData.bitrate
                        
                elif datatype == "LE": # length
                    if self.musicData.length == None or len(self.musicData.length) == 0:
                        output = None
                    else:
                        output = self.musicData.length
                        
                elif datatype == "PP": #current position in percent
                    if self.musicData.current_position_percent == None or len(self.musicData.current_position_percent) == 0:
                        output = None
                    else:
                        output = self.musicData.current_position_percent
                        
                elif datatype == "PT": #current position in time
                    if self.musicData.current_position == None or len(self.musicData.current_position) == 0:
                        output = None
                    else:
                        output = self.musicData.current_position
                        
                elif datatype == "VO": #volume
                    if self.musicData.volume == None or len(self.musicData.volume) == 0:
                        output = None
                    else:
                        output = self.musicData.volume
                        
                elif datatype == "RT": #rating
                    if self.musicData.rating == None or self.isNumeric(self.musicData.rating) == False:
                        output = None
                    else:
                        rating = int(self.musicData.rating)
                        if rating > 0:
                            output = u"".ljust(rating,ratingchar)
                        elif rating == 0:
                            output = u""
                        else:
                            output = None
                else:
                    self.logError("Unknown datatype provided: " + datatype)
                    return u""

            if output == None or self.musicData == None:
                if datatype in ["LE","PT"]:
                    if self.options.secondsoutput == True:
                        output = unknown_number
                    else:
                        output = unknown_time
                elif datatype in ["PP","VO","YR","TN"]:
                    output = unknown_number
                elif datatype == "CA":
                    output = ""                  
                else:
                    output = unknown_string
            
            if maxlength > 0 and len(output) > maxlength:
                output = output[:maxlength-3]+"..."
                
            return output
        
        except SystemExit:
            self.logError("System Exit!")
            return u""
        except Exception, e:
            traceback.print_exc()
            self.logError("Unknown error when calling getOutputData:" + e.__str__())
            return u""

Example 31

Project: Nagstamon
Source File: Config.py
View license
    def __init__(self):
        """
            read config file and set the appropriate attributes
            supposed to be sensible defaults
        """
        # move from minute interval to seconds
        self.update_interval_seconds = 60
        self.short_display = False
        self.long_display = True
        self.show_tooltips = True
        self.show_grid = True
        self.grid_use_custom_intensity = False
        self.grid_alternation_intensity = 10
        self.highlight_new_events = True
        self.default_sort_field = 'status'
        self.default_sort_order = 'descending'
        self.filter_all_down_hosts = False
        self.filter_all_unreachable_hosts = False
        self.filter_all_flapping_hosts = False
        self.filter_all_unknown_services = False
        self.filter_all_warning_services = False
        self.filter_all_critical_services = False
        self.filter_all_flapping_services = False
        self.filter_acknowledged_hosts_services = False
        self.filter_hosts_services_disabled_notifications = False
        self.filter_hosts_services_disabled_checks = False
        self.filter_hosts_services_maintenance = False
        self.filter_services_on_acknowledged_hosts = False
        self.filter_services_on_down_hosts = False
        self.filter_services_on_hosts_in_maintenance = False
        self.filter_services_on_unreachable_hosts = False
        self.filter_hosts_in_soft_state = False
        self.filter_services_in_soft_state = False
        self.position_x = 30
        self.position_y = 30
        self.popup_details_hover = True
        self.popup_details_clicking = False
        self.close_details_hover = True
        self.close_details_clicking = False
        self.connect_by_host = True
        self.connect_by_dns = False
        self.connect_by_ip = False
        self.use_default_browser = True
        self.use_custom_browser = False
        self.custom_browser = ''
        self.debug_mode = False
        self.debug_to_file = False
        self.debug_file = os.path.expanduser('~') + os.sep + "nagstamon.log"
        self.check_for_new_version = True
        self.notification = True
        self.notification_flashing = True
        self.notification_desktop = False
        self.notification_actions = False
        self.notification_sound = True
        self.notification_sound_repeat = False
        self.notification_default_sound = True
        self.notification_custom_sound = False
        self.notification_custom_sound_warning = ''
        self.notification_custom_sound_critical = ''
        self.notification_custom_sound_down = ''
        self.notification_action_warning = False
        self.notification_action_warning_string = ''
        self.notification_action_critical = False
        self.notification_action_critical_string = ''
        self.notification_action_down = False
        self.notification_action_down_string = ''
        self.notification_action_ok = False
        self.notification_action_ok_string = ''
        self.notification_custom_action = False
        self.notification_custom_action_string = ''
        self.notification_custom_action_separator = ''
        self.notification_custom_action_single = False
        self.notify_if_up = False
        self.notify_if_warning = True
        self.notify_if_critical = True
        self.notify_if_unknown = True
        self.notify_if_unreachable = True
        self.notify_if_down = True
        # Regular expression filters
        self.re_host_enabled = False
        self.re_host_pattern = ''
        self.re_host_reverse = False
        self.re_service_enabled = False
        self.re_service_pattern = ''
        self.re_service_reverse = False
        self.re_status_information_enabled = False
        self.re_status_information_pattern = ''
        self.re_status_information_reverse = False
        self.color_ok_text = self.default_color_ok_text = '#FFFFFF'
        self.color_ok_background = self.default_color_ok_background = '#006400'
        self.color_warning_text = self.default_color_warning_text = "#000000"
        self.color_warning_background = self.default_color_warning_background = '#FFFF00'
        self.color_critical_text = self.default_color_critical_text = '#FFFFFF'
        self.color_critical_background = self.default_color_critical_background = '#FF0000'
        self.color_unknown_text = self.default_color_unknown_text = '#000000'
        self.color_unknown_background = self.default_color_unknown_background = '#FFA500'
        self.color_unreachable_text = self.default_color_unreachable_text = '#FFFFFF'
        self.color_unreachable_background = self.default_color_unreachable_background = '#8B0000'
        self.color_down_text = self.default_color_down_text = '#FFFFFF'
        self.color_down_background = self.default_color_down_background = '#000000'
        self.color_error_text = self.default_color_error_text = '#000000'
        self.color_error_background = self.default_color_error_background = '#D3D3D3'
        self.statusbar_floating = True
        self.icon_in_systray = False
        # ##self.appindicator = False
        self.fullscreen = False
        self.fullscreen_display = 0
        self.font = ''
        self.defaults_acknowledge_sticky = False
        self.defaults_acknowledge_send_notification = False
        self.defaults_acknowledge_persistent_comment = False
        self.defaults_acknowledge_all_services = False
        self.defaults_acknowledge_comment = 'acknowledged'
        self.defaults_submit_check_result_comment = 'check result submitted'
        self.defaults_downtime_duration_hours = 2
        self.defaults_downtime_duration_minutes = 0
        self.defaults_downtime_comment = 'scheduled downtime'
        self.defaults_downtime_type_fixed = True
        self.defaults_downtime_type_flexible = False
        # internal flag to determine if keyring is available at all - defaults to False
        # use_system_keyring is checked and defined some lines later after config file was read
        self.keyring_available = False
        # setting for keyring usage
        self.use_system_keyring = False

        # Special FX
        # Centreon
        self.re_criticality_enabled = False
        self.re_criticality_pattern = ''
        self.re_criticality_reverse = False

        # the app is unconfigured by default and will stay so if it
        # would not find a config file
        self.unconfigured = True

        # adding cli args variable
        self.cli_args = {}

        # Parse the command line
        parser = argparse.ArgumentParser(description='Nagstamon for your CLI')
        # might be not necessary anymore - to be tested
        # ##parser.add_argument('-psn', action='store_true',
        # ##    help='force ~/.nagstamon as config folder (used by launchd in MacOSX)')
        # necessary because otherwise setup.py goes crazy because of argparse

        # separate NagstaCLI from
        if len(sys.argv) > 2 or (len(sys.argv) > 1 and sys.argv[1] in ['--help', '-h']):
            parser.add_argument('--servername', type=str, help="name of the (Nagios)server. Look in nagstamon config")
            parser.add_argument('--hostname', type=str)
            parser.add_argument('--comment', type=str, default="")
            parser.add_argument('--service', type=str, default="", help="specify service, if needed. Mostly the whole host goes to downstate")
            parser.add_argument('--fixed', type=str, choices=['y', 'n'], default="y", help="fixed=n means wait for service/host to go down, then start the downtime")
            parser.add_argument('--start_time', type=str, help="start time for downtime")
            parser.add_argument('--hours', type=int, help="amount of hours for downtime")
            parser.add_argument('--minutes', type=int, help="amount of minutes for downtime")
            parser.add_argument('--config', type=str, help="Path for configuration folder")
            parser.add_argument('--output', type=str, choices=['y', 'n'], default="y", help="lists given parameter (for debugging)")
        else:
            parser.add_argument('config', nargs='?', help='Path for configuration folder')

        self.cli_args, unknown = parser.parse_known_args()

        # try to use a given config file - there must be one given
        # if sys.argv is larger than 1
        # ##if args.psn:
        # ##    # new configdir approach
        # ##    self.configdir = os.path.expanduser('~') + os.sep + '.nagstamon'
        # ##elif args.cfgpath:
        if len(sys.argv) < 3 and self.cli_args.config:
            # allow to give a config file
            self.configdir = self.cli_args.config

        # otherwise if there exists a configdir in the current working directory it should be used
        elif os.path.exists(os.getcwd() + os.sep + 'nagstamon.config'):
            self.configdir = os.getcwd() + os.sep + 'nagstamon.config'
        else:
            # ~/.nagstamon/nagstamon.conf is the user conf file
            # os.path.expanduser('~') finds out the user HOME dir where
            # nagstamon expects its conf file to be
            self.configdir = os.path.expanduser('~') + os.sep + '.nagstamon'

        self.configfile = self.configdir + os.sep + 'nagstamon.conf'

        # make path fit for actual os, normcase for letters and normpath for path
        self.configfile = os.path.normpath(os.path.normcase(self.configfile))

        # because the name of the configdir is also stored in the configfile
        # there may be situations where the name gets overwritten by a
        # wrong name so it will be stored here temporarily
        configdir_temp = self.configdir

        # default settings dicts
        self.servers = dict()
        self.actions = dict()

        if os.path.exists(self.configfile):
            # instantiate a configparser to parse the conf file
            # SF.net bug #3304423 could be fixed with allow_no_value argument which
            # is only available since Python 2.7
            # since Python 3 '%' will be interpolated by default which crashes
            # with some URLs
            config = configparser.ConfigParser(allow_no_value=True, interpolation=None)
            config.read(self.configfile)

            # go through all sections of the conf file
            for section in config.sections():
                # go through all items of each section (in fact there is only one
                # section, which has to be there to comply with the .INI file standard)

                for i in config.items(section):
                    # omit config file info as it makes no sense to store its path
                    if not i[0] in ('configfile', 'configdir'):
                        # create a key of every config item with its appropriate value
                        # check first if it is a bool value and convert string if it is
                        if i[1] in BOOLPOOL:
                            object.__setattr__(self, i[0], BOOLPOOL[i[1]])
                        # in case there are numbers intify them to avoid later conversions
                        # treat negative value specially as .isdecimal() will not detect it
                        elif i[1].isdecimal() or \
                                (i[1].startswith('-') and i[1].split('-')[1].isdecimal()):
                            object.__setattr__(self, i[0], int(i[1]))
                        else:
                            object.__setattr__(self, i[0], i[1])

            # because the switch from Nagstamon 1.0 to 1.0.1 brings the use_system_keyring property
            # and all the thousands 1.0 installations do not know it yet it will be more comfortable
            # for most of the Windows users if it is only defined as False after it was checked
            # from config file
            # if not self.__dict__.has_key("use_system_keyring"):
            if 'use_system_keyring' not in self.__dict__.keys():
                if self.unconfigured is True:
                    # an unconfigured system should start with no keyring to prevent crashes
                    self.use_system_keyring = False
                else:
                    # a configured system seemed to be able to run and thus use system keyring
                    if platform.system() in NON_LINUX:
                        self.use_system_keyring = True
                    else:
                        self.use_system_keyring = self.KeyringAvailable()

            # reset self.configdir to temporarily saved value in case it differs from
            # the one read from configfile and so it would fail to save next time
            self.configdir = configdir_temp

            # Servers configuration...
            self.servers = self._LoadServersMultipleConfig()
            # ... and actions
            self.actions = self.LoadMultipleConfig("actions", "action", "Action")

            # seems like there is a config file so the app is not unconfigured anymore
            self.unconfigured = False

        # Load actions if Nagstamon is not unconfigured, otherwise load defaults
        if self.unconfigured is True:
            self.actions = self._DefaultActions()

        # do some conversion stuff needed because of config changes and code cleanup
        self._LegacyAdjustments()
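
The example above resolves the per-user configuration directory by concatenating os.path.expanduser('~') with os.sep. A minimal standalone sketch of the same pattern, using os.path.join to supply the separator (the helper name and default directory name are illustrative, not part of Nagstamon):

import os

def default_config_dir(dirname='.nagstamon'):
    # Expand '~' to the current user's home directory and append the
    # application's dot-directory; os.path.join handles the separator.
    return os.path.join(os.path.expanduser('~'), dirname)

config_file = os.path.join(default_config_dir(), 'nagstamon.conf')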

Example 32

Project: cloud-scheduler
Source File: config.py
View license
def setup(path=None):
    """Setup cloudscheduler using config file.
       setup will look for a configuration file specified on the command line,
       or in ~/.cloudscheduler.conf or /etc/cloudscheduler.conf
    """

    global condor_webservice_url
    global condor_collector_url
    global condor_retrieval_method
    global condor_q_command
    global condor_status_command
    global condor_status_master_command
    global condor_hold_command
    global condor_release_command
    global condor_off_command
    global condor_on_command
    global condor_advertise_command
    global ssh_path
    global openssl_path
    global condor_context_file
    global condor_host
    global condor_host_on_vm
    global vm_lifetime
    global cert_file
    global key_file
    global cert_file_on_vm
    global key_file_on_vm
    global ca_root_certs
    global ca_signing_policies
    global cloudscheduler_ssh_key
    global cloud_resource_config
    global image_attach_device
    global scratch_attach_device
    global info_server_port
    global admin_server_port
    global workspace_path
    global persistence_file
    global user_limit_file
    global target_cloud_alias_file
    global job_ban_timeout
    global ban_tracking
    global ban_file
    global ban_min_track
    global ban_failrate_threshold
    global polling_error_threshold
    global condor_register_time_limit
    global graceful_shutdown
    global graceful_shutdown_method
    global retire_before_lifetime
    global retire_before_lifetime_factor
    global cleanup_missing_vms
    global clean_shutdown_idle
    global getclouds
    global scheduling_metric
    global scheduling_algorithm
    global job_distribution_type
    global high_priority_job_support
    global high_priority_job_weight
    global cpu_distribution_weight
    global memory_distribution_weight
    global storage_distribution_weight
    global cleanup_interval
    global vm_poller_interval
    global job_poller_interval
    global machine_poller_interval
    global scheduler_interval
    global job_proxy_refresher_interval
    global job_proxy_renewal_threshold
    global vm_proxy_refresher_interval
    global vm_proxy_renewal_threshold
    global vm_proxy_shutdown_threshold
    global vm_connection_fail_threshold
    global vm_start_running_timeout
    global vm_idle_threshold
    global max_starting_vm
    global max_keepalive
    global proxy_cache_dir
    global myproxy_logon_command
    global override_vmtype
    global vm_reqs_from_condor_reqs
    global adjust_insufficient_resources
    global use_cloud_init
    global default_yaml
    global validate_yaml
    global retire_reallocate

    global default_VMType
    global default_VMNetwork
    global default_VMCPUArch
    global default_VMHypervisor
    global default_VMName
    global default_VMLoc
    global default_VMAMI
    global default_VMMem
    global default_VMCPUCores
    global default_VMStorage
    global default_VMInstanceType
    global default_VMInstanceTypeList
    global default_VMMaximumPrice
    global default_VMProxyNonBoot
    global default_VMUserData
    global default_TargetClouds
    global default_VMAMIConfig
    global default_VMInjectCA
    global default_VMJobPerCore

    global log_level
    global log_location
    global log_location_cloud_admin
    global admin_log_comments
    global log_stdout
    global log_syslog
    global log_max_size
    global log_format

    global use_pyopenssl

    homedir = os.path.expanduser('~')

    # Find config file
    if not path:
        if os.path.exists(homedir + "/.cloudscheduler/cloud_scheduler.conf"):
            path = homedir + "/.cloudscheduler/cloud_scheduler.conf"
        elif os.path.exists("/etc/cloudscheduler/cloud_scheduler.conf"):
            path = "/etc/cloudscheduler/cloud_scheduler.conf"
        elif os.path.exists("/usr/local/share/cloud-scheduler/cloud_scheduler.conf"):
            path = "/usr/local/share/cloud-scheduler/cloud_scheduler.conf"
        else:
            print >> sys.stderr, "Configuration file problem: There doesn't " \
                  "seem to be a configuration file. " \
                  "You can specify one with the --config-file parameter, " \
                  "or put one in ~/.cloudscheduler/cloud_scheduler.conf or "\
                  "/etc/cloudscheduler/cloud_scheduler.conf"
            sys.exit(1)

    # Read config file
    config_file = ConfigParser.ConfigParser()
    try:
        config_file.read(path)
    except IOError:
        print >> sys.stderr, "Configuration file problem: There was a " \
              "problem reading %s. Check that it is readable," \
              "and that it exists. " % path
        raise
    except ConfigParser.ParsingError:
        print >> sys.stderr, "Configuration file problem: Couldn't " \
              "parse your file. Check for spaces before or after variables."
        raise
    except:
        print "Configuration file problem: There is something wrong with " \
              "your config file."
        raise

    if config_file.has_option("global", "condor_retrieval_method"):
        condor_retrieval_method = config_file.get("global",
                                                "condor_retrieval_method")

    if config_file.has_option("global", "condor_q_command"):
        condor_q_command = config_file.get("global",
                                                "condor_q_command")

    if config_file.has_option("global", "condor_off_command"):
        condor_off_command = config_file.get("global",
                                                "condor_off_command")

    if config_file.has_option("global", "condor_on_command"):
        condor_on_command = config_file.get("global",
                                                "condor_on_command")

    if config_file.has_option("global", "ssh_path"):
        ssh_path = config_file.get("global", "ssh_path")

    if config_file.has_option("global", "openssl_path"):
        openssl_path = config_file.get("global", "openssl_path")

    if config_file.has_option("global", "condor_status_command"):
        condor_status_command = config_file.get("global",
                                                "condor_status_command")

    if config_file.has_option("global", "condor_status_master_command"):
        condor_status_master_command = config_file.get("global",
                                                "condor_status_master_command")

    if config_file.has_option("global", "condor_hold_command"):
        condor_hold_command = config_file.get("global",
                                                "condor_hold_command")

    if config_file.has_option("global", "condor_release_command"):
        condor_release_command = config_file.get("global",
                                                "condor_release_command")

    if config_file.has_option("global", "condor_advertise_command"):
        condor_advertise_command = config_file.get("global",
                                                "condor_advertise_command")
    if config_file.has_option("global", "condor_webservice_url"):
        condor_webservice_url = config_file.get("global",
                                                "condor_webservice_url")

    if config_file.has_option("global", "condor_collector_url"):
        condor_collector_url = config_file.get("global",
                                                "condor_collector_url")

    if config_file.has_option("global", "condor_host_on_vm"):
        condor_host_on_vm = config_file.get("global",
                                                "condor_host_on_vm")

    if config_file.has_option("global", "condor_context_file"):
        condor_context_file = config_file.get("global",
                                                "condor_context_file")

    if config_file.has_option("global", "vm_lifetime"):
        try:
            vm_lifetime = config_file.getint("global", "vm_lifetime")
        except ValueError:
            print "Configuration file problem: vm_lifetime must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "cert_file"):
        cert_file = config_file.get("global", "cert_file")

    if config_file.has_option("global", "key_file"):
        key_file = config_file.get("global", "key_file")

    if config_file.has_option("global", "cert_file_on_vm"):
        cert_file_on_vm = config_file.get("global", "cert_file_on_vm")

    if config_file.has_option("global", "key_file_on_vm"):
        key_file_on_vm = config_file.get("global", "key_file_on_vm")

    if config_file.has_option("global", "ca_root_certs"):
        ca_root_certs = config_file.get("global", "ca_root_certs").split(',')

    if config_file.has_option("global", "ca_signing_policies"):
        ca_signing_policies = config_file.get("global", "ca_signing_policies").split(',')

    if config_file.has_option("global", "cloudscheduler_ssh_key"):
        cloudscheduler_ssh_key = config_file.get("global", "cloudscheduler_ssh_key")

    if config_file.has_option("global", "cloud_resource_config"):
        cloud_resource_config = config_file.get("global",
                                                "cloud_resource_config")

    if config_file.has_option("global", "image_attach_device"):
        image_attach_device = config_file.get("global",
                                                "image_attach_device")

    if config_file.has_option("global", "scratch_attach_device"):
        scratch_attach_device = config_file.get("global",
                                                "scratch_attach_device")

    if config_file.has_option("global", "info_server_port"):
        try:
            info_server_port = config_file.getint("global", "info_server_port")
        except ValueError:
            print "Configuration file problem: info_server_port must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "admin_server_port"):
        try:
            admin_server_port = config_file.getint("global", "admin_server_port")
        except ValueError:
            print "Configuration file problem: admin_server_port must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "workspace_path"):
        workspace_path = config_file.get("global", "workspace_path")

    if config_file.has_option("global", "persistence_file"):
        persistence_file = config_file.get("global", "persistence_file")

    if config_file.has_option("global", "user_limit_file"):
        user_limit_file = config_file.get("global", "user_limit_file")

    if config_file.has_option("global", "target_cloud_alias_file"):
        target_cloud_alias_file = config_file.get("global", "target_cloud_alias_file")

    if config_file.has_option("global", "job_ban_timeout"):
        try:
            job_ban_timeout = 60 * config_file.getint("global", "job_ban_timeout")
        except ValueError:
            print "Configuration file problem: job_ban_timeout must be an " \
                  "integer value in minutes."
            sys.exit(1)

    if config_file.has_option("global", "ban_file"):
        ban_file = config_file.get("global", "ban_file")

    if config_file.has_option("global", "polling_error_threshold"):
        try:
            polling_error_threshold = config_file.getint("global", "polling_error_threshold")
        except ValueError:
            print "Configuration file problem: polling_error_threshold must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "ban_failrate_threshold"):
        try:
            ban_failrate_threshold = config_file.getfloat("global", "ban_failrate_threshold")
            if ban_failrate_threshold == 0:
                print "Please use a float value (0, 1.0]"
                sys.exit(1)
        except ValueError:
            print "Configuration file problem: ban_failrate_threshold must be an " \
                  "float value."
            sys.exit(1)

    if config_file.has_option("global", "ban_min_track"):
        try:
            ban_min_track = config_file.getint("global", "ban_min_track")
        except ValueError:
            print "Configuration file problem: ban_min_track must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "condor_register_time_limit"):
        try:
            condor_register_time_limit = 60*config_file.getint("global", "condor_register_time_limit")
        except ValueError:
            print "Configuration file problem: condor_register_time_limit must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "ban_tracking"):
        try:
            ban_tracking = config_file.getboolean("global", "ban_tracking")
        except ValueError:
            print "Configuration file problem: ban_tracking must be an " \
                  "boolean value."
            sys.exit(1)

    if config_file.has_option("global", "graceful_shutdown"):
        try:
            graceful_shutdown = config_file.getboolean("global", "graceful_shutdown")
        except ValueError:
            print "Configuration file problem: graceful_shutdown must be an " \
                  "boolean value."
            sys.exit(1)

    if config_file.has_option("global", "graceful_shutdown_method"):
        graceful_shutdown_method = config_file.get("global", "graceful_shutdown_method")

    if config_file.has_option("global", "retire_before_lifetime"):
        try:
            retire_before_lifetime = config_file.getboolean("global", "retire_before_lifetime")
        except ValueError:
            print "Configuration file problem: retire_before_lifetime must be an " \
                  "boolean value."
            sys.exit(1)

    if config_file.has_option("global", "retire_before_lifetime_factor"):
        try:
            retire_before_lifetime_factor = config_file.getfloat("global", "retire_before_lifetime_factor")
            if retire_before_lifetime_factor < 1.0:
                print "Please use a float value (1.0, X] for the retire_before_lifetime_factor"
                sys.exit(1)
        except ValueError:
            print "Configuration file problem: retire_before_lifetime_factor must be a " \
                  "float value."
            sys.exit(1)

    if config_file.has_option("global", "cleanup_missing_vms"):
        try:
            cleanup_missing_vms = config_file.getboolean("global", "cleanup_missing_vms")
        except ValueError:
            print "Configuration file problem: cleanup_missing_vms must be an " \
                  "boolean value."
            sys.exit(1)

    if config_file.has_option("global", "clean_shutdown_idle"):
        try:
            clean_shutdown_idle = config_file.getboolean("global", "clean_shutdown_idle")
        except ValueError:
            print "Configuration file problem: clean_shutdown_idle must be an " \
                  "boolean value."
            sys.exit(1)

    if config_file.has_option("global", "getclouds"):
        try:
            getclouds = config_file.getboolean("global", "getclouds")
        except ValueError:
            print "Configuration file problem: getclouds must be an " \
                  "boolean value."
            sys.exit(1)

    if config_file.has_option("global", "scheduling_metric"):
        scheduling_metric = config_file.get("global", "scheduling_metric")

    if config_file.has_option("global", "job_distribution_type"):
        job_distribution_type = config_file.get("global", "job_distribution_type")

    if config_file.has_option("global", "memory_distribution_weight"):
        try:
            memory_distribution_weight = config_file.getfloat("global", "memory_distribution_weight")
            if memory_distribution_weight <= 0:
                print "Please use a float value (0, x]"
                sys.exit(1)
        except ValueError:
            print "Configuration file problem: memory_distribution_weight must be an " \
                  "float value."
            sys.exit(1)

    if config_file.has_option("global", "cpu_distribution_weight"):
        try:
            cpu_distribution_weight = config_file.getfloat("global", "cpu_distribution_weight")
            if cpu_distribution_weight <= 0:
                print "Please use a float value (0, x]"
                sys.exit(1)
        except ValueError:
            print "Configuration file problem: cpu_distribution_weight must be an " \
                  "float value."
            sys.exit(1)

    if config_file.has_option("global", "storage_distribution_weight"):
        try:
            storage_distribution_weight = config_file.getfloat("global", "storage_distribution_weight")
            if storage_distribution_weight <= 0:
                print "Please use a float value (0, x]"
                sys.exit(1)
        except ValueError:
            print "Configuration file problem: storage_distribution_weight must be an " \
                  "float value."
            sys.exit(1)

    if config_file.has_option("global", "scheduling_algorithm"):
        scheduling_algorithm = config_file.get("global", "scheduling_algorithm")

    if config_file.has_option("global", "high_priority_job_support"):
        try:
            high_priority_job_support = config_file.getboolean("global", "high_priority_job_support")
        except ValueError:
            print "Configuration file problem: high_priority_job_support must be an " \
                  "boolean value."
            sys.exit(1)

    if config_file.has_option("global", "high_priority_job_weight"):
        try:
            high_priority_job_weight = config_file.getint("global", "high_priority_job_weight")
        except ValueError:
            print "Configuration file problem: high_priority_job_weight must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "scheduler_interval"):
        try:
            scheduler_interval = config_file.getint("global", "scheduler_interval")
        except ValueError:
            print "Configuration file problem: scheduler_interval must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "vm_poller_interval"):
        try:
            vm_poller_interval = config_file.getint("global", "vm_poller_interval")
        except ValueError:
            print "Configuration file problem: vm_poller_interval must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "job_poller_interval"):
        try:
            job_poller_interval = config_file.getint("global", "job_poller_interval")
        except ValueError:
            print "Configuration file problem: job_poller_interval must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "machine_poller_interval"):
        try:
            machine_poller_interval = config_file.getint("global", "machine_poller_interval")
        except ValueError:
            print "Configuration file problem: machine_poller_interval must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "cleanup_interval"):
        try:
            cleanup_interval = config_file.getint("global", "cleanup_interval")
        except ValueError:
            print "Configuration file problem: cleanup_interval must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "job_proxy_refresher_interval"):
        try:
            job_proxy_refresher_interval = config_file.getint("global", "job_proxy_refresher_interval")
        except ValueError:
            print "Configuration file problem: job_proxy_refresher_interval must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "job_proxy_renewal_threshold"):
        try:
            job_proxy_renewal_threshold = config_file.getint("global", "job_proxy_renewal_threshold")
        except ValueError:
            print "Configuration file problem: job_proxy_renewal_threshold must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "vm_proxy_refresher_interval"):
        try:
            vm_proxy_refresher_interval = config_file.getint("global", "vm_proxy_refresher_interval")
        except ValueError:
            print "Configuration file problem: vm_proxy_refresher_interval must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "vm_proxy_renewal_threshold"):
        try:
            vm_proxy_renewal_threshold = config_file.getint("global", "vm_proxy_renewal_threshold")
        except ValueError:
            print "Configuration file problem: vm_proxy_renewal_threshold must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "vm_proxy_shutdown_threshold"):
        try:
            vm_proxy_shutdown_threshold = config_file.getint("global", "vm_proxy_shutdown_threshold")
        except ValueError:
            print "Configuration file problem: vm_proxy_shutdown_threshold must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "vm_connection_fail_threshold"):
        try:
            vm_connection_fail_threshold = config_file.getint("global", "vm_connection_fail_threshold")
        except ValueError:
            print "Configuration file problem: vm_connection_fail_threshold must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "vm_idle_threshold"):
        try:
            vm_idle_threshold = config_file.getint("global", "vm_idle_threshold")
        except ValueError:
            print "Configuration file problem: vm_idle_threshold must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "vm_start_running_timeout"):
        try:
            vm_start_running_timeout = config_file.getint("global", "vm_start_running_timeout")
        except ValueError:
            print "Configuration file problem: vm_start_running_timeout must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "max_starting_vm"):
        try:
            max_starting_vm = config_file.getint("global", "max_starting_vm")
            if max_starting_vm < -1:
                max_starting_vm = -1
        except ValueError:
            print "Configuration file problem: max_starting_vm must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "max_keepalive"):
        try:
            max_keepalive = config_file.getint("global", "max_keepalive") * 60
            if max_keepalive < 0:
                max_keepalive = 0
        except ValueError:
            print "Configuration file problem: max_keepalive must be an " \
                  "integer value(# of minutes)."
            sys.exit(1)

    if config_file.has_option("global", "max_destroy_threads"):
        try:
            max_destroy_threads = config_file.getint("global", "max_destroy_threads")
            if max_destroy_threads <= 0:
                max_destroy_threads = 1
        except ValueError:
            print "Configuration file problem: max_destroy_threads must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "proxy_cache_dir"):
        proxy_cache_dir = config_file.get("global", "proxy_cache_dir")

    if config_file.has_option("global", "myproxy_logon_command"):
        myproxy_logon_command = config_file.get("global", "myproxy_logon_command")

    if config_file.has_option("global", "override_vmtype"):
        try:
            override_vmtype = config_file.getboolean("global", "override_vmtype")
        except ValueError:
            print "Configuration file problem: override_vmtype must be a" \
                  " Boolean value."

    if config_file.has_option("global", "vm_reqs_from_condor_reqs"):
        try:
            vm_reqs_from_condor_reqs = config_file.getboolean("global", "vm_reqs_from_condor_reqs")
        except ValueError:
            print "Configuration file problem: vm_reqs_from_condor_reqs must be a" \
                  " Boolean value."

    if config_file.has_option("global", "adjust_insufficient_resources"):
        try:
            adjust_insufficient_resources = config_file.getboolean("global", "adjust_insufficient_resources")
        except ValueError:
            print "Configuration file problem: adjust_insufficient_resources must be a" \
                  " Boolean value."

    if config_file.has_option("global", "connection_fail_disable_time"):
        try:
            connection_fail_disable_time = config_file.getint("global", "connection_fail_disable_time")
        except ValueError:
            print "Configuration file problem: connection_fail_disable_time must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("global", "use_cloud_init"):
        try:
            use_cloud_init = config_file.getboolean("global", "use_cloud_init")
        except ValueError:
            print "Configuration file problem: use_cloud_init must be a" \
                  " Boolean value."

    if config_file.has_option("global", "default_yaml"):
        default_yaml = config_file.get("global", "default_yaml")

    if config_file.has_option("global", "validate_yaml"):
        try:
            validate_yaml = config_file.getboolean("global", "validate_yaml")
        except ValueError:
            print "Configuration file problem: validate_yaml must be a" \
                  " Boolean value."

    if config_file.has_option("global", "retire_reallocate"):
        try:
            retire_reallocate = config_file.getboolean("global", "retire_reallocate")
        except ValueError:
            print "Configuration file problem: retire_reallocate must be a" \
                  " Boolean value."


    # Default Logging options
    if config_file.has_option("logging", "log_level"):
        log_level = config_file.get("logging", "log_level")

    if config_file.has_option("logging", "log_location"):
        log_location = os.path.expanduser(config_file.get("logging", "log_location"))

    if config_file.has_option("logging", "log_location_cloud_admin"):
        log_location_cloud_admin = os.path.expanduser(config_file.get("logging", "log_location_cloud_admin"))

    if config_file.has_option("logging", "admin_log_comments"):
        try:
            admin_log_comments = config_file.getboolean("logging", "admin_log_comments")
        except ValueError:
            print "Configuration file problem: admin_log_comments must be a" \
                  " Boolean value."

    if config_file.has_option("logging", "log_stdout"):
        try:
            log_stdout = config_file.getboolean("logging", "log_stdout")
        except ValueError:
            print "Configuration file problem: log_stdout must be a" \
                  " Boolean value."

    if config_file.has_option("logging", "log_syslog"):
        try:
            log_syslog = config_file.getboolean("logging", "log_syslog")
        except ValueError:
            print "Configuration file problem: log_syslog must be a" \
                  " Boolean value."

    if config_file.has_option("logging", "log_max_size"):
        try:
            log_max_size = config_file.getint("logging", "log_max_size")
        except ValueError:
            print "Configuration file problem: log_max_size must be an " \
                  "integer value in bytes."
            sys.exit(1)

    if config_file.has_option("logging", "log_format"):
        log_format = config_file.get("logging", "log_format", raw=True)

    # Default Job options
    if config_file.has_option("job", "default_VMType"):
        default_VMType = config_file.get("job", "default_VMType")

    if config_file.has_option("job", "default_VMNetwork"):
        default_VMNetwork = config_file.get("job", "default_VMNetwork")

    if config_file.has_option("job", "default_VMCPUArch"):
        default_VMCPUArch = config_file.get("job", "default_VMCPUArch")
        
    if config_file.has_option("job", "default_VMHypervisor"):
        default_VMHypervisor = config_file.get("job", "default_VMHypervisor")

    if config_file.has_option("job", "default_VMName"):
        default_VMName = config_file.get("job", "default_VMName")

    if config_file.has_option("job", "default_VMLoc"):
        default_VMLoc = config_file.get("job", "default_VMLoc")

    if config_file.has_option("job", "default_VMAMI"):
        default_VMAMI = config_file.get("job", "default_VMAMI")

    if config_file.has_option("job", "default_VMMem"):
        try:
            default_VMMem = config_file.getint("job", "default_VMMem")
        except ValueError:
            print "Configuration file problem: default_VMMem must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("job", "default_VMCPUCores"):
        try:
            default_VMCPUCores = config_file.getint("job", "default_VMCPUCores")
        except ValueError:
            print "Configuration file problem: default_VMCPUCores must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("job", "default_VMStorage"):
        try:
            default_VMStorage = config_file.getint("job", "default_VMStorage")
        except ValueError:
            print "Configuration file problem: default_VMStorage must be an " \
                  "integer value."
            sys.exit(1)

    if config_file.has_option("job", "default_VMInstanceType"):
        default_VMInstanceType = config_file.get("job", "default_VMInstanceType")

    if config_file.has_option("job", "default_VMInstanceTypeList"):
        default_VMInstanceTypeList = config_file.get("job", "default_VMInstanceTypeList")

    if config_file.has_option("job", "default_VMMaximumPrice"):
        try:
            default_VMMaximumPrice = config_file.getfloat("job", "default_VMMaximumPrice")
        except ValueError:
            print "Configuration file problem: default_VMMaximumPrice must be an " \
                  "floating point value."
            sys.exit(1)

    if config_file.has_option("job", "default_VMProxyNonBoot"):
        try:
            default_VMProxyNonBoot = config_file.getboolean("job", "default_VMProxyNonBoot")
        except ValueError:
            print "Configuration file problem: default_VMProxyNonBoot must be a" \
                  " Boolean value."

    if config_file.has_option("job", "default_VMUserData"):
        default_VMUserData = config_file.get("job", "default_VMUserData").replace(' ', '').strip('"').split(',')
    
    if config_file.has_option("job", "default_TargetClouds"):
        default_TargetClouds = config_file.get("job", "default_TargetClouds")

    if config_file.has_option("job", "default_VMAMIConfig"):
        default_VMAMIConfig = config_file.get("job", "default_VMAMIConfig")

    if config_file.has_option("job", "default_VMInjectCA"):
        try:
            default_VMInjectCA = config_file.getboolean("job", "default_VMInjectCA")
        except ValueError:
            print "Configuration file problem: default_VMInjectCA must be a" \
                  " Boolean value."

    if config_file.has_option("job", "default_VMJobPerCore"):
        try:
            default_VMJobPerCore = config_file.getboolean("job", "default_VMJobPerCore")
        except ValueError:
            print "Configuration file problem: default_VMJobPerCore must be a" \
                  " Boolean value."    

    # Derived options
    if condor_host_on_vm:
        condor_host = condor_host_on_vm
    else:
        condor_host = utilities.get_hostname_from_url(condor_webservice_url)

    if config_file.has_option("global", "use_pyopenssl"):
        use_pyopenssl = config_file.getboolean("global", "use_pyopenssl")
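
The search for the configuration file at the top of setup() checks a per-user location under os.path.expanduser('~') before falling back to system-wide paths. A condensed, hypothetical sketch of that lookup order (file names follow the example; the helper name is illustrative):

import os

def find_config_file(path=None):
    # An explicit path wins; otherwise try the per-user file in the home
    # directory, then the system-wide locations, in the order used above.
    if path:
        return path
    candidates = [
        os.path.join(os.path.expanduser('~'), '.cloudscheduler', 'cloud_scheduler.conf'),
        '/etc/cloudscheduler/cloud_scheduler.conf',
        '/usr/local/share/cloud-scheduler/cloud_scheduler.conf',
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return None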

Example 33

Project: aclhound
Source File: cli.py
View license
def do_init(args, write_config=True):
    """
    Initialise user-specific settings, ask the user for username on
    repository server, location to store aclhound policy, ask to make
    initial clone.

    Usage: aclhound [-d] init [--batch]

    Options:
        --batch     Automatically guess all settings (non-interactive mode).
    """
    if len(args) == 2:
        batch = True if args[1] == "--batch" else False

    if not batch:
        print("""Welcome to ACLHound!

A few user-specific settings are required to set up the proper
environment. The settings can always be changed by editing the
'aclhound/client.conf' file with a text editor.""")

    import getpass
    username = getpass.getuser()
    if not batch:
        username = raw_input("Username on Gerrit server [%s]: "
                             % username) or username

    location = "~/aclhound"
    if not batch:
        location = raw_input("Location for ACLHound datafiles [%s]: "
                             % location) or location
    if not os.path.exists(os.path.expanduser("~/.aclhound")):
        os.mkdir(os.path.expanduser("~/.aclhound"), 0700)
    if not os.path.exists(os.path.expanduser(location)):
        os.mkdir(os.path.expanduser(location), 0700)

    if write_config:
        cfgfile = open("%s/client.conf" % os.path.expanduser("~/.aclhound"), 'w')
        config = ConfigParser.ConfigParser()
        config.add_section('user')
        config.set('user', 'username', username)
        config.set('user', 'location', location)
        config.write(cfgfile)

    if not batch:
        clone = raw_input("Make initial clone of repository data [y]: ") or "y"
    elif batch:
        clone = 'y'
    if clone == 'y':
        cfg = Settings()
        if cfg.getboolean('general', 'local_only'):
            print("INFO: 'local_only' enabled in /etc/aclhound/aclhound.conf.")
            print("HINT: manually copy your data to %s"
                  % os.path.expanduser(location))
            print("INFO: git-review and gerrit intergration are skipped for now")
            return
        os.chdir(os.path.expanduser(location))
        run(['git', 'clone', 'ssh://%s@%s:%s/%s' %
             (username,
              cfg.get('gerrit', 'hostname'),
              cfg.get('gerrit', 'port'),
              cfg.get('gerrit', 'repository')), '.'], 0)

        if not os.path.exists('.gitreview'):
            # create .gitreview file if it does not exist
            gerritcfg = ConfigParser.ConfigParser()
            gerritcfg.add_section('gerrit')
            gerritcfg.set('gerrit', 'host', cfg.get('gerrit', 'hostname'))
            gerritcfg.set('gerrit', 'project', cfg.get('gerrit', 'repository'))
            gerritcfg.write(open('.gitreview', 'w'))
            run(['git', 'add', '.gitreview'], 0)
            run(['git', 'commit', '-am', 'add gitreview'], 0)
            run(['git', 'push'], 0)

        if not os.path.exists('.gitignore'):
            gitignore = open('.gitignore', 'w')
            gitignore.write('networkconfigs/**\n')
            gitignore.close()
            run(['git', 'add', '.gitignore'], 0)
            run(['git', 'commit', '-am', 'add gitreview'], 0)
            run(['git', 'push'], 0)

        # create directories
        for directory in ['objects', 'devices', 'policy', 'networkconfig']:
            if not os.path.exists(directory):
                os.mkdir(directory)

        # setup the review hooks
        run(['git', 'review', '--setup'], 0)

        # Rebase is better to work with in Gerrit, see
        # http://stevenharman.net/git-pull-with-automatic-rebase
        run(['git', 'config', '--local', 'branch.autosetuprebase', 'always'], 0)
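
do_init() above expands both the fixed "~/.aclhound" directory and the user-chosen data location before creating them. A hedged sketch of that create-if-missing pattern (helper name and defaults are illustrative, not from ACLHound):

import os

def ensure_user_dir(path='~/.aclhound', mode=0o700):
    # os.path.expanduser turns a leading '~' into the user's home directory
    # and leaves paths without '~' untouched, so the helper also accepts
    # absolute locations.
    expanded = os.path.expanduser(path)
    if not os.path.exists(expanded):
        os.mkdir(expanded, mode)
    return expanded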

Example 34

Project: mammon
Source File: config.py
View license
    def process(self):
        for k, v in self.config_st.items():
            setattr(self, k, v)

        for k, v in self.config_st['server'].items():
            setattr(self, k, v)

        for l in self.listeners:
            proto = l.get('proto', 'client')

            self.ctx.logger.info('opening listener at {0}:{1} [{2}] {3}'.format(l['host'], l['port'], proto, 'SSL' if l['ssl'] else ''))

            if l['ssl']:
                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)

                try:
                    context.set_ciphers("kEECDH+HIGH:kEDH+HIGH:HIGH:!RC4:!aNULL")
                except ssl.SSLError:
                    print("mammon: error: no ciphers could be selected. SSL is not available for any listener.")
                    break

                keyfile = os.path.expanduser(l.get('keyfile', ''))
                if not keyfile:
                    print('mammon: error: SSL listener {}:{} [{}] does not have a `keyfile`, skipping'.format(l['host'], l['port'], proto))
                    continue

                certfile = os.path.expanduser(l.get('certfile', ''))
                if not certfile:
                    print('mammon: error: SSL listener {}:{} [{}] does not have a `certfile`, skipping'.format(l['host'], l['port'], proto))
                    continue

                if ssl.HAS_ECDH:
                    context.set_ecdh_curve('secp384r1')
                    context.options |= ssl.OP_SINGLE_ECDH_USE

                if 'dhparams' in l:
                    DHparams = os.path.expanduser(l.get('dhparams', ''))

                    if DHparams:
                        context.load_dh_params(DHparams)
                        context.options |= ssl.OP_SINGLE_DH_USE

                context.load_cert_chain(certfile, keyfile=keyfile)

                # disable old protocols
                context.options |= ssl.OP_NO_SSLv2
                context.options |= ssl.OP_NO_SSLv3

                # disable compression because of CRIME attack
                context.options |= ssl.OP_NO_COMPRESSION

                # prefer server's cipher list over the client's
                context.options |= ssl.OP_CIPHER_SERVER_PREFERENCE

                # SSL_OP_NO_TICKET
                # not sure why _ssl doesn't have a bitmask for this, but here's what it really is
                # disable TLS session tickets
                context.options |= 0x00004000

                # XXX - we want to move SSL out-of-process, similar to how charybdis does it,
                #   but for now, just a warning
                print('mammon: note: SSL support is not yet optimized and may cause slowdowns in your server')
            else:
                context = None

            lstn = self.ctx.eventloop.create_server(self.listener_protos[proto], l['host'], l['port'], ssl=context)
            self.ctx.listeners.append(lstn)

        # metadata
        if self.metadata.get('limit', None) is not None:
            try:
                self.metadata['limit'] = int(self.metadata['limit'])
            except:
                print('mammon: error: config key metadata.limit must be an integer or commented out')
                print('mammon: error: setting metadata.limit to default 20')
                self.metadata['limit'] = 20

        if self.metadata.get('restricted_keys', []) is None:
            self.metadata['restricted_keys'] = []
        self.metadata['restricted_keys'] = CaseInsensitiveList(self.metadata['restricted_keys'])

        # roles
        roles = {}
        roles_extending = {
            None: {},
        }

        # get base list of which roles extend from which
        for k, v in self.roles.items():
            extends = v.get('extends', None)
            if extends not in roles_extending:
                roles_extending[extends] = {}
            roles_extending[extends][k] = v

        # load base roles, then roles that extend those
        base_roles = roles_extending[None]
        for k, v in base_roles.items():
            roles[k] = Role(self.ctx, k, roles=roles, **v)
            roles = load_extended_roles(self.ctx, k, roles, roles_extending)

        self.ctx.roles = roles
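
In the listener loop above, every file path taken from the configuration (keyfile, certfile, dhparams) goes through os.path.expanduser before being handed to the ssl module. A small sketch of that step in isolation (listener keys follow the example; the function itself is illustrative):

import os
import ssl

def build_ssl_context(listener):
    # Expand '~' in the configured paths so load_cert_chain receives real
    # filesystem paths; return None when either file is not configured.
    keyfile = os.path.expanduser(listener.get('keyfile', ''))
    certfile = os.path.expanduser(listener.get('certfile', ''))
    if not (keyfile and certfile):
        return None
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.load_cert_chain(certfile, keyfile=keyfile)
    return context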

Example 35

Project: cassandgo
Source File: cluster.py
View license
def createCluster(infosDC,cluster_name):
	"""
	Create a Multi-DC Cassandra cluster
	"""
	clusters = []
	total_nodes = 0
	print
	print "Cluster creation..."
	print "-"*80
	for infoDC in infosDC:
		total_nodes += infoDC['nodes']

		# Params by default
		node_type = infoDC['node_type'] if 'node_type' in infoDC else default_node_type
		nb_seeds_per_dc = infoDC['nb_seeds_per_dc'] if 'nb_seeds_per_dc' in infoDC else default_nb_seeds_per_dc
		opscenter = infoDC['opscenter'] if 'opscenter' in infoDC else True

		if opscenter:
			print "Cluster",cluster_name," in ",(infoDC['region']+'-'+str(infoDC['zone']))," :",infoDC['nodes']," nodes [",node_type,"] ",nb_seeds_per_dc," seed(s) opsCenter:",opscenter
		else:
			print "Cluster",cluster_name," in ",(infoDC['region']+'-'+str(infoDC['zone']))," :",infoDC['nodes']," nodes [",node_type,"] ",nb_seeds_per_dc," seed(s) "
		res = createClusterDC(infoDC['datacenter'],infoDC['region'],str(infoDC['zone']),infoDC['nodes'],node_type,nb_seeds_per_dc,cluster_name,opscenter)
		if res:
			clusters.append(res)
		else:
			logError("Error during cluster creation ! :"+str(infoDC))
			return None

	# Waiting for initalized and running nodes
	print 
	print "Waiting for nodes..."
	print "-"*80
	time.sleep(10);
	bUP = False
	while not bUP:
		for cluster in clusters:
			for instance in cluster['instances']:
				# Instance running ?
				if instance['instance'].state == 'running':
					if 'Ready' not in instance['instance'].tags.keys():
						if checkInstance(instance['instance']):
							# Node is Ready
							total_nodes -= 1
							instance['instance'].add_tag('Ready', 1)
							instance['running'] = datetime.datetime.now()
							dt = instance['running'] - instance['creation']
							duration = (dt.days * 24 * 60 * 60 + dt.seconds)
							instance['creation_duration'] = duration
							print cluster['datacenter'] + ' : Node ' + instance['index'] + ' in ' + instance['AZ'] + ' -> OK [' + str(duration) + 's] IP:' + instance['instance'].ip_address + '/' + instance['instance'].private_ip_address
				else:
					instance['instance'].update()
		time.sleep(2)
		if total_nodes == 0:
			break
	print 
	delay = 180
	print "Waiting for Cassandra warmup on all nodes (",delay,"s)..."
	print "-"*80
	time.sleep(delay)

	print 
	print "Stopping Cassandra, opsCenter and Datastax agent on all nodes..."
	print "-"*80
	for cluster in clusters:
		for instance in cluster['instances']:
			print cluster['datacenter'] + ' : Node ' + instance['index'] + ' in ' + instance['AZ']
			key_path = os.path.join(os.path.expanduser(keysDir),'Key-'+instance['region']+'-'+instance['zone']+'.pem')
			cmd = boto.manage.cmdshell.sshclient_from_instance(instance['instance'],key_path,user_name='ubuntu')
			bRunning = False
			while not bRunning:
				res = server.statusCassandra(cmd)
				if 'is running' in res[1]:
					bRunning = True
				else:
					time.sleep(5)
			server.stopCassandra(cmd)
			server.stopDataStaxAgent(cmd)
			if ((cluster['opscenter'] == True) and (instance['index'] == '1')):
				server.stopOpsCenter(cmd)

	print 
	print "Cleaning cassandra data files on all nodes..."
	print "-"*80
	time.sleep(30)
	for cluster in clusters:
		for instance in cluster['instances']:
			print cluster['datacenter'] + ' : Node ' + instance['index'] + ' in ' + instance['AZ']
			key_path = os.path.join(os.path.expanduser(keysDir),'Key-'+instance['region']+'-'+instance['zone']+'.pem')
			cmd = boto.manage.cmdshell.sshclient_from_instance(instance['instance'],key_path,user_name='ubuntu')
			server.cleanCassandra(cmd)

	# Multiple Region ?
	snitch = "Ec2Snitch"
	regions = set([])
	for cluster in clusters:
		for instance in cluster['instances']:
			regions.add(instance['region']+instance['zone'])
	if (len(regions) > 1):
		# Multiple
		snitch = "Ec2MultiRegionSnitch"

	# Enumerate seeds by DC
	bOpsCenterExists = False
	for cluster in clusters:
		cluster['opsCenterServer'] = None
		cluster['InternalSeeds'] = list()
		cluster['ExternalSeeds'] = list()
		for instance in cluster['instances']:
			if ((cluster['opscenter'] == True) and (instance['index'] == '1')):
				cluster['opsCenterServer'] = {'publicIP':instance['instance'].ip_address,'privateIP':instance['instance'].private_ip_address,'instance':instance['instance'],'regionZone':instance['region']+'-'+instance['zone']}
				bOpsCenterExists = True
			if (len(cluster['InternalSeeds']) < cluster['nb_seeds_per_dc']):
				cluster['InternalSeeds'].append(instance['instance'].private_ip_address)
				cluster['ExternalSeeds'].append(instance['instance'].ip_address)

	# seeds merge
	for i in range(len(clusters)):
		clusters[i]['Seeds'] = list(clusters[i]['InternalSeeds'])
		for j in range(len(clusters)):
			if j != i:
				clusters[i]['Seeds'] += clusters[j]['ExternalSeeds'] 
	print 
	print "Updating cassandra.yaml and DataStax agent conf on all nodes..."
	print "-"*80
	for cluster in clusters:
		for instance in cluster['instances']:
			print cluster['datacenter'] + ' : Node ' + instance['index'] + ' in ' + instance['AZ']
			key_path = os.path.join(os.path.expanduser(keysDir),'Key-'+instance['region']+'-'+instance['zone']+'.pem')
			cmd = boto.manage.cmdshell.sshclient_from_instance(instance['instance'],key_path,user_name='ubuntu')
			server.updateCassandraYaml(cmd,cluster['Seeds'],instance['instance'].ip_address,snitch)
			# Config DataStax agent
			if cluster['opsCenterServer']:
				# There is an OpsCenter Server on this DC, so we use the private ip address for agent stomp_interface
				server.updateDataStaxAgent(cmd,cluster['opsCenterServer']['privateIP'])
			else:
				# no OpsCenter server on this DC, we search an available server in other DCs
				# Priority on opsCenter server on the same Region and Zone
				bStomp = False
				for cluster2 in clusters:
					if cluster2 != cluster:
						if cluster2['opsCenterServer']:
							if (cluster2['opsCenterServer']['regionZone'] == (instance['region']+'-'+instance['zone'])):
								# same Region and Zone, we could use the private ip address for agent stomp_interface
								server.updateDataStaxAgent(cmd,cluster2['opsCenterServer']['privateIP'])
								bStomp = True
								break
				# If not found, we search for an OpsCenter server in another Region Zone
				if not bStomp:
					for cluster2 in clusters:
						if cluster2 != cluster:
							if cluster2['opsCenterServer']:
								# we use the public ip address for agent stomp_interface
								server.updateDataStaxAgent(cmd,cluster2['opsCenterServer']['publicIP'])
								bStomp = True
								break

	print 
	print "Starting Cassandra Seeds nodes..."
	print "-"*80
	for cluster in clusters:
		nb_seeds = 0
		for instance in cluster['instances']:
			if ((nb_seeds < cluster['nb_seeds_per_dc']) and (int(instance['index']) <= cluster['nb_seeds_per_dc'])):
				nb_seeds += 1
				print cluster['datacenter'] + ' : Node ' + instance['index'] + ' in ' + instance['AZ']
				key_path = os.path.join(os.path.expanduser(keysDir),'Key-'+instance['region']+'-'+instance['zone']+'.pem')
				cmd = boto.manage.cmdshell.sshclient_from_instance(instance['instance'],key_path,user_name='ubuntu')
				server.startCassandra(cmd)
				time.sleep(30)

	print 
	print "Starting Cassandra on all other nodes..."
	print "-"*80
	time.sleep(10)
	for cluster in clusters:
		nb_seeds = 0
		for instance in cluster['instances']:
			if ((nb_seeds < cluster['nb_seeds_per_dc']) and (int(instance['index']) <= cluster['nb_seeds_per_dc'])):
				nb_seeds += 1
				server.startDataStaxAgent(cmd)
				time.sleep(5)
			else:
				print cluster['datacenter'] + ' : Node ' + instance['index'] + ' in ' + instance['AZ']
				key_path = os.path.join(os.path.expanduser(keysDir),'Key-'+instance['region']+'-'+instance['zone']+'.pem')
				cmd = boto.manage.cmdshell.sshclient_from_instance(instance['instance'],key_path,user_name='ubuntu')
				server.startCassandra(cmd)
				server.startDataStaxAgent(cmd)
				time.sleep(5)

	if bOpsCenterExists:
		print 
		print "Starting OpsCenter..."
		print "-"*80
		time.sleep(10)
		for cluster in clusters:
			if cluster['opsCenterServer']:
				key_path = os.path.join(os.path.expanduser(keysDir),'Key-'+cluster['opsCenterServer']['regionZone']+'.pem')
				cmd = boto.manage.cmdshell.sshclient_from_instance(cluster['opsCenterServer']['instance'],key_path,user_name='ubuntu')
				server.startOpsCenter(cmd)
				print cluster['datacenter'] + ' : Node 1 in ' + cluster['opsCenterServer']['regionZone']

		print 
		print "Starting DataStax Agents..."
		print "-"*80
		time.sleep(10)
		for cluster in clusters:
			for instance in cluster['instances']:
				print cluster['datacenter'] + ' : Node ' + instance['index'] + ' in ' + instance['AZ']
				key_path = os.path.join(os.path.expanduser(keysDir),'Key-'+instance['region']+'-'+instance['zone']+'.pem')
				cmd = boto.manage.cmdshell.sshclient_from_instance(instance['instance'],key_path,user_name='ubuntu')
				server.startDataStaxAgent(cmd)
				time.sleep(5)

	print 
	print "Cassandra cluster finalization..."
	print "-"*80
	time.sleep(30)

	print 
	print "Intra-Cluster latencies..."
	print "-"*80
	for cluster in clusters:
		latency.getLatenciesRegion(cluster)

	print 
	print "Inter-Cluster latencies..."
	print "-"*80
	for i in range(len(clusters)):
		if (i == 0):
			dcs = []
			for j in range(len(clusters)):
				dcs.append(clusters[j]['datacenter'])
			latency.printLatency(clusters,dcs)
			print
		j = i+2
		while (j < len(clusters)):
			dcs = [clusters[i]['datacenter'],clusters[j]['datacenter']]
			latency.printLatency(clusters,dcs)
			print
			j += 1

	print 
	print "How to connect to "+cluster_name+"'s nodes..."
	print "-"*80
	for cluster in clusters:
		print '['+cluster['datacenter']+'] '+cluster['region']+'-'+cluster['zone']
		print "-"*80
		for instance in cluster['instances']:
			print 'Node '+instance['index']+' : ssh -i '+keysDir+'/Key-'+cluster['region']+'-'+cluster['zone']+'.pem ubuntu@'+instance['instance'].ip_address
		print

	if bOpsCenterExists:
		# Restart OpsCenterServer
		print
		print "Connect to OpsCenter"
		print "-"*80
		for cluster in clusters:
			if cluster['opsCenterServer']:
				key_path = os.path.join(os.path.expanduser(keysDir),'Key-'+cluster['opsCenterServer']['regionZone']+'.pem')
				cmd = boto.manage.cmdshell.sshclient_from_instance(cluster['opsCenterServer']['instance'],key_path,user_name='ubuntu')
				server.restartOpsCenter(cmd)
				print 'http://'+cluster['opsCenterServer']['publicIP']+':8888'

	print
	print "Cluster status..."
	print "-"*80
	instance = clusters[0]['instances'][0]
	key_path = os.path.join(os.path.expanduser(keysDir),'Key-'+instance['region']+'-'+instance['zone']+'.pem')
	cmd = boto.manage.cmdshell.sshclient_from_instance(instance['instance'],key_path,user_name='ubuntu')
	print server.clusterStatus(cmd)
	print

	return clusters
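
Every remote command in this example reaches its node through the same key-path construction: keysDir (which may start with "~") is expanded once and joined with a region/zone-specific filename. A small sketch of just that pattern; the keysDir value and the region/zone arguments below are illustrative, not taken from the example:

import os

keysDir = "~/.cassandgo/keys"  # hypothetical value; the example reads it from its own config

def key_path_for(region, zone):
    # Expand "~" to the caller's home directory, then append the per-DC key file name.
    return os.path.join(os.path.expanduser(keysDir), 'Key-' + region + '-' + zone + '.pem')

print(key_path_for('us-east-1', 'a'))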

Example 37

Project: cloudinit.d
Source File: boot.py
View license
def parse_commands(argv):
    global g_verbose

    u = """[options] <command> [<top level launch plan> | <run name>]
Boot and manage a launch plan
Run with the command 'commands' to see a list of all possible commands
"""
    version = "cloudinitd " + (cloudinitd.Version)
    parser = OptionParser(usage=u, version=version)

    all_opts = []
    opt = bootOpts("verbose", "v", "Print more output", 1, count=True)
    all_opts.append(opt)
    opt.add_opt(parser)
    opt = bootOpts("validate", "x", "Check that boot plan is valid before launching it.", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("dryrun", "y", "Perform dry run on the boot plan.  The IaaS service is never contacted but all other actions are performed.  This option offers an addition level of plan validation of -x.", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("quiet", "q", "Print no output", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("name", "n", "Set the run name, only relevant for boot and reload (by default the system picks)", None)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("database", "d", "Path to the db directory", None)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("logdir", "f", "Path to the base log directory.", None)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("loglevel", "l", "Controls the level of detail in the log file", "info", vals=["debug", "info", "warn", "error"])
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("logstack", "s", "Log stack trace information (extreme debug level)", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("noclean", "c", "Do not delete the database, only relevant for the terminate command", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("safeclean", "C", "Do not delete the database on failed terminate, only relevant for the terminate command", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("kill", "k", "This option only applies to the iceage command.  When on it will terminate all VMs started with IaaS associated with this run to date.  This should be considered an extreme measure to prevent IaaS resource leaks.", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("outstream", "O", SUPPRESS_HELP, None)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("remotedebug", "X", SUPPRESS_HELP, False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("output", "o", "Create an json document which describes the application and write it to the associated file.  Relevant for boot and status", None)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("globalvar", "g", "Add a variable to global variable space", None, append_list=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("globalvarfile", "G", "Add a file to global variable space", None, append_list=True)
    opt.add_opt(parser)
    all_opts.append(opt)


    homedir = os.path.expanduser("~/.cloudinitd")
    try:
        if not os.path.exists(homedir):
            os.mkdir(homedir)
            os.chmod(homedir, stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR)
    except Exception, ex:
        print_chars(0, "Error creating cloudinit.d directort %s : %s" % (homedir, str(ex)))

    (options, args) = parser.parse_args(args=argv)

    _deal_with_cmd_line_globals(options)

    for opt in all_opts:
        opt.validate(options)

    if not options.name:
        options.name = str(uuid.uuid4()).split("-")[0]

    if options.logdir is None:
        options.logdir = os.path.expanduser("~/.cloudinitd/")

    (options.logger, logfile) = cloudinitd.make_logger(options.loglevel, options.name, logdir=options.logdir)
    if not options.database:
        dbdir = os.path.expanduser("~/.cloudinitd")
        options.database = dbdir

    if options.logstack:
        logger = logging.getLogger("stacktracelog")
        logger.propagate = False
        logger.setLevel(logging.DEBUG)
        logdir = os.path.join(options.logdir, options.name)
        if not os.path.exists(logdir):
            try:
                os.mkdir(logdir)
            except OSError:
                pass
        stacklogfile = os.path.join(logdir, "stacktrace.log")
        handler = logging.handlers.RotatingFileHandler(stacklogfile, maxBytes=100*1024*1024, backupCount=5)
        logger.addHandler(handler)
        fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        formatter = logging.Formatter(fmt)
        handler.setFormatter(formatter)


    if options.quiet:
        options.verbose = 0
    g_verbose = options.verbose

    if options.outstream:
        global g_outfile
        g_outfile = open(options.outstream, "w")
    else:
        g_outfile = None

    if options.remotedebug:
        try:
            from pydev import pydevd
            debug_cs = os.environ['CLOUDINITD_DEBUG_CS'].split(':')
            debug_host = debug_cs[0]
            debug_port = int(debug_cs[1])
            pydevd.settrace(debug_host, port=debug_port, stdoutToServer=True, stderrToServer=True)
        except ImportError, e:
            print_chars(0, "Could not import remote debugging library: %s\n" % str(e), color="red", bold=True)
        except KeyError:
            print_chars(0, "If you want to do remote debugging please set the env CLOUDINITD_DEBUG_CS to the contact string of you expected debugger.\n", color="red", bold=True)
        except:
            print_chars(0, "Please verify the format of your contact string to be <hostname>:<port>.\n", color="red", bold=True)

    global g_options
    g_options = options
    return (args, options)
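
parse_commands() above defaults both the log directory and the database directory to ~/.cloudinitd and creates that directory with owner-only permissions on first use. A condensed sketch of that bootstrap step using only the standard library:

import os
import stat

def ensure_private_dir(path="~/.cloudinitd"):
    # Expand "~" and create the directory readable, writable and searchable by the owner only.
    homedir = os.path.expanduser(path)
    if not os.path.exists(homedir):
        os.mkdir(homedir)
        os.chmod(homedir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
    return homedir

print(ensure_private_dir())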

Example 39

View license
    def __init__(self, parent):
        super(ProjectExecution, self).__init__()
        self._parent = parent
        grid = QGridLayout(self)

        grid.addWidget(QLabel(translations.TR_PROJECT_MAIN_FILE), 0, 0)
        self.path = QLineEdit()
        self.path.setPlaceholderText(
            os.path.join(os.path.expanduser("~"), 'path', 'to', 'main.py'))
        ui_tools.LineEditButton(
            self.path, self.path.clear,
            self.style().standardPixmap(self.style().SP_TrashIcon))
        self.path.setText(self._parent.project.main_file)
        self.path.setReadOnly(True)
        self.btnBrowse = QPushButton(QIcon(
            self.style().standardPixmap(self.style().SP_FileIcon)), '')
        grid.addWidget(self.path, 0, 1)
        grid.addWidget(self.btnBrowse, 0, 2)

        # this should be changed, and ALL pythonPath names to
        # python_custom_interpreter or something like that. this is NOT the
        # PYTHONPATH
        self.txtPythonInterpreter = QLineEdit()
        self.txtPythonInterpreter.setText(self._parent.project.python_exec)
        self.txtPythonInterpreter.setCompleter(QCompleter(
            ('python', 'python2', 'python3', 'python.exe', 'pythonw.exe')))
        self.txtPythonInterpreter.setPlaceholderText("python")
        self.btnPythonPath = QPushButton(QIcon(":img/open"), '')
        grid.addWidget(QLabel(
            translations.TR_PROJECT_PYTHON_INTERPRETER), 1, 0)
        grid.addWidget(self.txtPythonInterpreter, 1, 1)
        grid.addWidget(self.btnPythonPath, 1, 2)

        grid.addWidget(QLabel(translations.TR_PROJECT_PYTHON_PATH), 2, 0)
        self.txtPythonPath = QPlainTextEdit()  # TODO : better widget
        self.txtPythonPath.setPlainText(self._parent.project.python_path)
        self.txtPythonPath.setToolTip(translations.TR_PROJECT_PATH_PER_LINE)
        grid.addWidget(self.txtPythonPath, 2, 1)

        # Additional builtins/globals for pyflakes
        grid.addWidget(QLabel(translations.TR_PROJECT_BUILTINS), 3, 0)
        self.additional_builtins = QLineEdit()
        self.additional_builtins.setText(
            ' '.join(self._parent.project.additional_builtins))
        self.additional_builtins.setToolTip(
            translations.TR_PROJECT_BUILTINS_TOOLTIP)
        grid.addWidget(self.additional_builtins, 3, 1)

        self.txtPreExec = QLineEdit()
        ui_tools.LineEditButton(
            self.txtPreExec, self.txtPreExec.clear,
            self.style().standardPixmap(self.style().SP_TrashIcon))
        self.txtPreExec.setReadOnly(True)
        self.txtPreExec.setText(self._parent.project.pre_exec_script)
        self.txtPreExec.setPlaceholderText(
            os.path.join(os.path.expanduser("~"), 'path', 'to', 'script.sh'))
        self.btnPreExec = QPushButton(QIcon(":img/open"), '')
        grid.addWidget(QLabel(translations.TR_PROJECT_PRE_EXEC), 4, 0)
        grid.addWidget(self.txtPreExec, 4, 1)
        grid.addWidget(self.btnPreExec, 4, 2)
        self.txtPostExec = QLineEdit()
        ui_tools.LineEditButton(
            self.txtPostExec, self.txtPostExec.clear,
            self.style().standardPixmap(self.style().SP_TrashIcon))
        self.txtPostExec.setReadOnly(True)
        self.txtPostExec.setText(self._parent.project.post_exec_script)
        self.txtPostExec.setPlaceholderText(
            os.path.join(os.path.expanduser("~"), 'path', 'to', 'script.sh'))
        self.btnPostExec = QPushButton(QIcon(":img/open"), '')
        grid.addWidget(QLabel(translations.TR_PROJECT_POST_EXEC), 5, 0)
        grid.addWidget(self.txtPostExec, 5, 1)
        grid.addWidget(self.btnPostExec, 5, 2)

        grid.addItem(QSpacerItem(5, 10, QSizePolicy.Expanding,
                     QSizePolicy.Expanding), 6, 0)

        # Properties
        grid.addWidget(QLabel(translations.TR_PROJECT_PROPERTIES), 7, 0)
        self.txtParams = QLineEdit()
        self.txtParams.setToolTip(translations.TR_PROJECT_PARAMS_TOOLTIP)
        self.txtParams.setText(self._parent.project.program_params)
        self.txtParams.setPlaceholderText('verbose, debug, force')
        grid.addWidget(QLabel(translations.TR_PROJECT_PARAMS), 8, 0)
        grid.addWidget(self.txtParams, 8, 1)
        #Widgets for virtualenv properties
        self.txtVenvPath = QLineEdit()
        ui_tools.LineEditButton(
            self.txtVenvPath, self.txtVenvPath.clear,
            self.style().standardPixmap(self.style().SP_TrashIcon))
        self.txtVenvPath.setText(self._parent.project.venv)
        self._dir_completer = QCompleter()
        self._dir_completer.setModel(QDirModel(self._dir_completer))
        self.txtVenvPath.setCompleter(self._dir_completer)
        self.txtVenvPath.setPlaceholderText(
            os.path.join(os.path.expanduser("~"), 'path', 'to', 'virtualenv'))
        self.btnVenvPath = QPushButton(QIcon(":img/open"), '')
        grid.addWidget(QLabel(translations.TR_PROJECT_VIRTUALENV), 9, 0)
        grid.addWidget(self.txtVenvPath, 9, 1)
        grid.addWidget(self.btnVenvPath, 9, 2)

        self.connect(self.btnBrowse, SIGNAL("clicked()"), self.select_file)
        self.connect(self.btnPythonPath, SIGNAL("clicked()"),
                     self._load_python_path)
        self.connect(self.btnVenvPath, SIGNAL("clicked()"),
                     self._load_python_venv)
        self.connect(self.btnPreExec, SIGNAL("clicked()"),
                     self.select_pre_exec_script)
        self.connect(self.btnPostExec, SIGNAL("clicked()"),
                     self.select_post_exec_script)
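
The dialog above only uses os.path.expanduser("~") to build realistic placeholder paths for its line edits; nothing is written to disk. Stripped of the Qt widgets, the placeholder construction reduces to:

import os

# Placeholder paths shown in the dialog, rooted at the current user's home directory.
main_placeholder = os.path.join(os.path.expanduser("~"), 'path', 'to', 'main.py')
script_placeholder = os.path.join(os.path.expanduser("~"), 'path', 'to', 'script.sh')
venv_placeholder = os.path.join(os.path.expanduser("~"), 'path', 'to', 'virtualenv')

print(main_placeholder)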

Example 41

Project: cmddocs
Source File: __init__.py
View license
    def read_config(self, sconf, conf):
        """
        All Config Options being read and defaulting
        """

        self.colors = {}
        config = ConfigParser.ConfigParser()

        if not config.read(os.path.expanduser(conf)):
            print("Error: your config %s could not be read" % conf)
            exit(1)

        try:
            self.datadir = os.path.expanduser(config.get("General", "Datadir"))
        except ConfigParser.NoOptionError:
            print("Error: Please set a Datadir in %s" % conf)
            exit(1)

        try:
            self.exclude = os.path.expanduser(config.get("General", "Excludedir"))
        except ConfigParser.NoOptionError:
            self.exclude = os.path.expanduser('.git/')

        try:
            self.default_commit_msg = config.get("General", "Default_Commit_Message")
        except ConfigParser.NoOptionError:
            self.default_commit_msg = "small changes"

        try:
            self.editor = config.get("General", "Editor")
        except ConfigParser.NoOptionError:
            if os.environ.get('EDITOR') is not None:
                self.editor = os.environ.get('EDITOR')
            else:
                print("Error: Could not find usable editor.")
                print("Please specify one in config or set EDITOR in your \
                OS Environment")
                exit(1)

        try:
            self.pager = config.get("General", "Pager")
        except ConfigParser.NoOptionError:
            if os.environ.get('PAGER') is not None:
                self.pager = os.environ.get('PAGER')
            else:
                print("Error: Could not find usable Pager.")
                print("Please specify one in config or set PAGER in your\
                OS Environment")
                exit(1)

        try:
            self.pagerflags = config.get("General", "PagerFlags")
        except ConfigParser.NoOptionError:
            self.pagerflags = False

        try:
            self.editorflags = config.get("General", "EditorFlags")
        except ConfigParser.NoOptionError:
            self.editorflags = False

        try:
            self.prompt = config.get("General", "Prompt")
        except ConfigParser.NoOptionError:
            self.prompt = "cmddocs>"

        try:
            self.promptcol = config.get("General", "Promptcolor")
        except ConfigParser.NoOptionError:
            self.promptcol = "37"

        try:
            self.intro = config.get("General", "Intro_Message")
        except ConfigParser.NoOptionError:
            self.intro = "cmddocs - press ? for help"

        try:
            self.mailfrom = config.get("General", "Mail")
        except ConfigParser.NoOptionError:
            self.mailfrom = "nobody"

        try:
            self.extension = config.get("General", "Default_Extension")
        except ConfigParser.NoOptionError:
            self.extension = "md"

        try:
            self.colors['h1'] = config.get("Colors", "Header12")
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.colors['h1'] = "37"

        try:
            self.colors['h2'] = config.get("Colors", "Header345")
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.colors['h2'] = "92"

        try:
            self.colors['code'] = config.get("Colors", "Codeblock")
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            self.colors['code'] = "92"

        return
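
read_config() expands every user-supplied path (the config file itself, Datadir, Excludedir) before using it and falls back to a default whenever an option is missing. A stripped-down sketch of that read-with-fallback pattern; it assumes a hypothetical ~/.cmddocs config file and uses the Python 3 configparser spelling rather than the Python 2 module shown above:

import os
import configparser

def read_config(conf="~/.cmddocs"):
    config = configparser.ConfigParser()
    if not config.read(os.path.expanduser(conf)):
        raise SystemExit("Error: your config %s could not be read" % conf)

    # Required option: abort if the data directory is not configured.
    try:
        datadir = os.path.expanduser(config.get("General", "Datadir"))
    except configparser.NoOptionError:
        raise SystemExit("Error: Please set a Datadir in %s" % conf)

    # Optional option with a default value.
    try:
        exclude = os.path.expanduser(config.get("General", "Excludedir"))
    except configparser.NoOptionError:
        exclude = ".git/"

    return datadir, exclude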

Example 43

View license
def main():
    ''' Main module function '''
    module = AnsibleModule(
        argument_spec=dict(
            _uses_shell=dict(type='bool', default=False),
            command=dict(required=True),
            chdir=dict(),
            executable=dict(),
            creates=dict(),
            removes=dict(),
            warn=dict(type='bool', default=True),
            lockfile=dict(default='/tmp/delegated_serial_command.lock'),
            timeout=dict(type='int', default=30)
        )
    )

    shell = module.params['_uses_shell']
    chdir = module.params['chdir']
    executable = module.params['executable']
    command = module.params['command']
    creates = module.params['creates']
    removes = module.params['removes']
    warn = module.params['warn']
    lockfile = module.params['lockfile']
    timeout = module.params['timeout']

    if command.strip() == '':
        module.fail_json(rc=256, msg="no command given")

    iterated = 0
    lockfd = open(lockfile, 'w+')
    while iterated < timeout:
        try:
            fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        # pylint: disable=invalid-name
        except IOError as e:
            if e.errno != errno.EAGAIN:
                module.fail_json(msg="I/O Error {0}: {1}".format(e.errno, e.strerror))
            else:
                iterated += 1
                time.sleep(0.1)

    if chdir:
        chdir = os.path.abspath(os.path.expanduser(chdir))
        os.chdir(chdir)

    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists.  This allows idempotence
        # of command executions.
        path = os.path.expanduser(creates)
        if glob.glob(path):
            module.exit_json(
                cmd=command,
                stdout="skipped, since %s exists" % path,
                changed=False,
                stderr=False,
                rc=0
            )

    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist.  This allows idempotence
        # of command executions.
        path = os.path.expanduser(removes)
        if not glob.glob(path):
            module.exit_json(
                cmd=command,
                stdout="skipped, since %s does not exist" % path,
                changed=False,
                stderr=False,
                rc=0
            )

    warnings = list()
    if warn:
        warnings = check_command(command)

    if not shell:
        command = shlex.split(command)
    startd = datetime.datetime.now()

    # pylint: disable=invalid-name
    rc, out, err = module.run_command(command, executable=executable, use_unsafe_shell=shell)

    fcntl.flock(lockfd, fcntl.LOCK_UN)
    lockfd.close()

    endd = datetime.datetime.now()
    delta = endd - startd

    if out is None:
        out = ''
    if err is None:
        err = ''

    module.exit_json(
        cmd=command,
        stdout=out.rstrip("\r\n"),
        stderr=err.rstrip("\r\n"),
        rc=rc,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        changed=True,
        warnings=warnings,
        iterated=iterated
    )
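
The creates/removes handling above is what makes the module idempotent: both paths go through os.path.expanduser and then glob, so "~" and shell wildcards both work. A minimal sketch of that check in isolation, with a hypothetical marker path:

import glob
import os

def should_skip(creates=None, removes=None):
    # Skip when the `creates` target already exists...
    if creates and glob.glob(os.path.expanduser(creates)):
        return "skipped, since %s exists" % creates
    # ...or when the `removes` target is already gone.
    if removes and not glob.glob(os.path.expanduser(removes)):
        return "skipped, since %s does not exist" % removes
    return None

print(should_skip(creates="~/.ssh/id_rsa"))  # hypothetical marker file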

Example 44

Project: gertty
Source File: config.py
View license
    def __init__(self, server=None, palette='default', keymap='default',
                 path=DEFAULT_CONFIG_PATH):
        self.path = os.path.expanduser(path)

        if not os.path.exists(self.path):
            self.printSample()
            exit(1)

        self.config = yaml.load(open(self.path))
        schema = ConfigSchema().getSchema(self.config)
        schema(self.config)
        server = self.getServer(server)
        self.server = server
        url = server['url']
        if not url.endswith('/'):
            url += '/'
        self.url = url
        result = urlparse.urlparse(url)
        self.hostname = result.netloc
        self.username = server['username']
        self.password = server.get('password')
        if self.password is None:
            self.password = getpass.getpass("Password for %s (%s): "
                                            % (self.url, self.username))
        else:
            # Ensure file is only readable by user as password is stored in
            # file.
            mode = os.stat(self.path).st_mode & 0o0777
            if not mode == 0o600:
                print (
                    "Error: Config file '{}' contains a password and does "
                    "not have permissions set to 0600.\n"
                    "Permissions are: {}".format(self.path, oct(mode)))
                exit(1)
        self.auth_type = server.get('auth-type', 'digest')
        self.verify_ssl = server.get('verify-ssl', True)
        if not self.verify_ssl:
            os.environ['GIT_SSL_NO_VERIFY']='true'
        self.ssl_ca_path = server.get('ssl-ca-path', None)
        if self.ssl_ca_path is not None:
            self.ssl_ca_path = os.path.expanduser(self.ssl_ca_path)
            # Gertty itself uses the Requests library
            os.environ['REQUESTS_CA_BUNDLE'] = self.ssl_ca_path
            # And this is to allow Git callouts
            os.environ['GIT_SSL_CAINFO'] = self.ssl_ca_path
        self.git_root = os.path.expanduser(server['git-root'])
        git_url = server.get('git-url', self.url + 'p/')
        if not git_url.endswith('/'):
            git_url += '/'
        self.git_url = git_url
        self.dburi = server.get('dburi',
                                'sqlite:///' + os.path.expanduser('~/.gertty.db'))
        socket_path = server.get('socket', '~/.gertty.sock')
        self.socket_path = os.path.expanduser(socket_path)
        log_file = server.get('log-file', '~/.gertty.log')
        self.log_file = os.path.expanduser(log_file)
        lock_file = server.get('lock-file', '~/.gertty.%s.lock' % server['name'])
        self.lock_file = os.path.expanduser(lock_file)

        self.palettes = {'default': gertty.palette.Palette({}),
                         'light': gertty.palette.Palette(gertty.palette.LIGHT_PALETTE),
                         }
        for p in self.config.get('palettes', []):
            if p['name'] not in self.palettes:
                self.palettes[p['name']] = gertty.palette.Palette(p)
            else:
                self.palettes[p['name']].update(p)
        self.palette = self.palettes[self.config.get('palette', palette)]

        self.keymaps = {'default': gertty.keymap.KeyMap({}),
                        'vi': gertty.keymap.KeyMap(gertty.keymap.VI_KEYMAP)}
        for p in self.config.get('keymaps', []):
            if p['name'] not in self.keymaps:
                self.keymaps[p['name']] = gertty.keymap.KeyMap(p)
            else:
                self.keymaps[p['name']].update(p)
        self.keymap = self.keymaps[self.config.get('keymap', keymap)]

        self.commentlinks = [gertty.commentlink.CommentLink(c)
                             for c in self.config.get('commentlinks', [])]
        self.commentlinks.append(
            gertty.commentlink.CommentLink(dict(
                    match="(?P<url>https?://\\S*)",
                    replacements=[
                        dict(link=dict(
                                text="{url}",
                                url="{url}"))])))

        self.project_change_list_query = self.config.get('change-list-query', 'status:open')

        self.diff_view = self.config.get('diff-view', 'side-by-side')

        self.dashboards = OrderedDict()
        for d in self.config.get('dashboards', []):
            self.dashboards[d['key']] = d

        self.reviewkeys = OrderedDict()
        for k in self.config.get('reviewkeys', []):
            self.reviewkeys[k['key']] = k

        self.hide_comments = []
        for h in self.config.get('hide-comments', []):
            self.hide_comments.append(re.compile(h['author']))

        self.thread_changes = self.config.get('thread-changes', True)
        self.utc = self.config.get('display-times-in-utc', False)
        self.breadcrumbs = self.config.get('breadcrumbs', True)
        self.handle_mouse = self.config.get('handle-mouse', True)

        change_list_options = self.config.get('change-list-options', {})
        self.change_list_options = {
            'sort-by': change_list_options.get('sort-by', 'number'),
            'reverse': change_list_options.get('reverse', False)}

        self.expire_age = self.config.get('expire-age', '2 months')
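
Worth noting in this example: every path taken from the YAML config (git-root, dburi, socket, log-file, lock-file) goes through os.path.expanduser before use, and the config path itself is expanded before the existence check. A minimal sketch of that pattern, with hypothetical names that are not part of gertty:

import os

DEFAULT_CONFIG_PATH = '~/.example.yaml'  # hypothetical default

def resolve_config_path(path=DEFAULT_CONFIG_PATH):
    # '~' expands to the current user's home directory
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        raise FileNotFoundError(path)
    return path

def resolve_server_path(server, key, default):
    # values such as '~/.example.log' coming from the config are expanded too
    return os.path.expanduser(server.get(key, default))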

Example 45

Project: paramiko
Source File: client.py
View license
    def _auth(self, username, password, pkey, key_filenames, allow_agent,
              look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host):
        """
        Try, in order:

            - The key passed in, if one was passed in.
            - Any key we can find through an SSH agent (if allowed).
            - Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in ~/.ssh/
              (if allowed).
            - Plain username/password auth, if a password was given.

        (The password might be needed to unlock a private key, or for
        two-factor authentication [for which it is required].)
        """
        saved_exception = None
        two_factor = False
        allowed_types = set()
        two_factor_types = set(['keyboard-interactive','password'])

        # If GSS-API support is available and GSS-API Key Exchange was
        # performed, we attempt authentication with gssapi-keyex.
        if gss_kex and self._transport.gss_kex_used:
            try:
                self._transport.auth_gssapi_keyex(username)
                return
            except Exception as e:
                saved_exception = e

        # Try GSS-API authentication (gssapi-with-mic) only if GSS-API Key
        # Exchange was not performed, because in that case a fully established
        # GSS-API context already exists and there is no need to set one up
        # again.
        if gss_auth:
            try:
                self._transport.auth_gssapi_with_mic(username, gss_host,
                                                     gss_deleg_creds)
                return
            except Exception as e:
                saved_exception = e

        if pkey is not None:
            try:
                self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint()))
                allowed_types = set(self._transport.auth_publickey(username, pkey))
                two_factor = (allowed_types & two_factor_types)
                if not two_factor:
                    return
            except SSHException as e:
                saved_exception = e

        if not two_factor:
            for key_filename in key_filenames:
                for pkey_class in (RSAKey, DSSKey, ECDSAKey):
                    try:
                        key = pkey_class.from_private_key_file(key_filename, password)
                        self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename))
                        allowed_types = set(self._transport.auth_publickey(username, key))
                        two_factor = (allowed_types & two_factor_types)
                        if not two_factor:
                            return
                        break
                    except SSHException as e:
                        saved_exception = e

        if not two_factor and allow_agent:
            if self._agent is None:
                self._agent = Agent()

            for key in self._agent.get_keys():
                try:
                    self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint()))
                    # for 2-factor auth, a successfully auth'd key will return
                    # the allowed second-factor auth methods
                    allowed_types = set(self._transport.auth_publickey(username, key))
                    two_factor = (allowed_types & two_factor_types)
                    if not two_factor:
                        return
                    break
                except SSHException as e:
                    saved_exception = e

        if not two_factor:
            keyfiles = []
            rsa_key = os.path.expanduser('~/.ssh/id_rsa')
            dsa_key = os.path.expanduser('~/.ssh/id_dsa')
            ecdsa_key = os.path.expanduser('~/.ssh/id_ecdsa')
            if os.path.isfile(rsa_key):
                keyfiles.append((RSAKey, rsa_key))
            if os.path.isfile(dsa_key):
                keyfiles.append((DSSKey, dsa_key))
            if os.path.isfile(ecdsa_key):
                keyfiles.append((ECDSAKey, ecdsa_key))
            # look in ~/ssh/ for windows users:
            rsa_key = os.path.expanduser('~/ssh/id_rsa')
            dsa_key = os.path.expanduser('~/ssh/id_dsa')
            ecdsa_key = os.path.expanduser('~/ssh/id_ecdsa')
            if os.path.isfile(rsa_key):
                keyfiles.append((RSAKey, rsa_key))
            if os.path.isfile(dsa_key):
                keyfiles.append((DSSKey, dsa_key))
            if os.path.isfile(ecdsa_key):
                keyfiles.append((ECDSAKey, ecdsa_key))

            if not look_for_keys:
                keyfiles = []

            for pkey_class, filename in keyfiles:
                try:
                    key = pkey_class.from_private_key_file(filename, password)
                    self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename))
                    # for 2-factor auth a successfully auth'd key will result in ['password']
                    allowed_types = set(self._transport.auth_publickey(username, key))
                    two_factor = (allowed_types & two_factor_types)
                    if not two_factor:
                        return
                    break
                except (SSHException, IOError) as e:
                    saved_exception = e

        if password is not None:
            try:
                self._transport.auth_password(username, password)
                return
            except SSHException as e:
                saved_exception = e
        elif two_factor:
            try:
                self._transport.auth_interactive_dumb(username)
                return
            except SSHException as e:
                saved_exception = e

        # if we got an auth-failed exception earlier, re-raise it
        if saved_exception is not None:
            raise saved_exception
        raise SSHException('No authentication methods available')
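
The expanduser-specific part of this example is the discovery of default key files: each candidate path under ~/.ssh/ (and ~/ssh/ for some Windows setups) is expanded and kept only if it exists. A condensed sketch of that step, assuming nothing beyond the standard library:

import os

def discover_default_keys():
    """Return paths of default private keys that exist on this machine."""
    found = []
    for directory in ('~/.ssh', '~/ssh'):          # '~/ssh' covers some Windows setups
        for name in ('id_rsa', 'id_dsa', 'id_ecdsa'):
            path = os.path.expanduser(os.path.join(directory, name))
            if os.path.isfile(path):
                found.append(path)
    return found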

Example 46

Project: alot
Source File: globals.py
View license
    @inlineCallbacks
    def apply(self, ui):
        if self.envelope is None:
            if self.rest:
                if self.rest.startswith('mailto'):
                    self.envelope = mailto_to_envelope(self.rest)
                else:
                    self.envelope = Envelope()
                    self.envelope.add('To', self.rest)
            else:
                self.envelope = Envelope()
        if self.template is not None:
            # get location of tempdir, containing message templates
            tempdir = settings.get('template_dir')
            tempdir = os.path.expanduser(tempdir)
            if not tempdir:
                xdgdir = os.environ.get('XDG_CONFIG_HOME',
                                        os.path.expanduser('~/.config'))
                tempdir = os.path.join(xdgdir, 'alot', 'templates')

            path = os.path.expanduser(self.template)
            if not os.path.dirname(path):  # use tempdir
                if not os.path.isdir(tempdir):
                    ui.notify('no templates directory: %s' % tempdir,
                              priority='error')
                    return
                path = os.path.join(tempdir, path)

            if not os.path.isfile(path):
                ui.notify('could not find template: %s' % path,
                          priority='error')
                return
            try:
                self.envelope.parse_template(open(path).read())
            except Exception as e:
                ui.notify(str(e), priority='error')
                return

        # set forced headers
        for key, value in self.headers.items():
            self.envelope.add(key, value)

        # set forced headers for separate parameters
        if self.sender:
            self.envelope.add('From', self.sender)
        if self.subject:
            self.envelope.add('Subject', self.subject)
        if self.to:
            self.envelope.add('To', ','.join(self.to))
        if self.cc:
            self.envelope.add('Cc', ','.join(self.cc))
        if self.bcc:
            self.envelope.add('Bcc', ','.join(self.bcc))

        # get missing From header
        if 'From' not in self.envelope.headers:
            accounts = settings.get_accounts()
            if len(accounts) == 1:
                a = accounts[0]
                fromstring = "%s <%s>" % (a.realname, a.address)
                self.envelope.add('From', fromstring)
            else:
                cmpl = AccountCompleter()
                fromaddress = yield ui.prompt('From', completer=cmpl,
                                              tab=1)
                if fromaddress is None:
                    raise CommandCanceled()

                self.envelope.add('From', fromaddress)

        # add signature
        if not self.omit_signature:
            name, addr = email.Utils.parseaddr(self.envelope['From'])
            account = settings.get_account_by_address(addr)
            if account is not None:
                if account.signature:
                    logging.debug('has signature')
                    sig = os.path.expanduser(account.signature)
                    if os.path.isfile(sig):
                        logging.debug('is file')
                        if account.signature_as_attachment:
                            name = account.signature_filename or None
                            self.envelope.attach(sig, filename=name)
                            logging.debug('attached')
                        else:
                            sigcontent = open(sig).read()
                            enc = helper.guess_encoding(sigcontent)
                            mimetype = helper.guess_mimetype(sigcontent)
                            if mimetype.startswith('text'):
                                sigcontent = helper.string_decode(sigcontent,
                                                                  enc)
                                self.envelope.body += '\n' + sigcontent
                    else:
                        ui.notify('could not locate signature: %s' % sig,
                                  priority='error')
                        if (yield ui.choice('send without signature?', 'yes',
                                            'no')) == 'no':
                            return

        # Figure out whether we should GPG sign messages by default
        # and look up key if so
        sender = self.envelope.get('From')
        name, addr = email.Utils.parseaddr(sender)
        account = settings.get_account_by_address(addr)
        if account:
            self.envelope.sign = account.sign_by_default
            self.envelope.sign_key = account.gpg_key

        # get missing To header
        if 'To' not in self.envelope.headers:
            allbooks = not settings.get('complete_matching_abook_only')
            logging.debug(allbooks)
            if account is not None:
                abooks = settings.get_addressbooks(order=[account],
                                                   append_remaining=allbooks)
                logging.debug(abooks)
                completer = ContactsCompleter(abooks)
            else:
                completer = None
            to = yield ui.prompt('To',
                                 completer=completer)
            if to is None:
                raise CommandCanceled()

            self.envelope.add('To', to.strip(' \t\n,'))

        if settings.get('ask_subject') and \
                'Subject' not in self.envelope.headers:
            subject = yield ui.prompt('Subject')
            logging.debug('SUBJECT: "%s"' % subject)
            if subject is None:
                raise CommandCanceled()

            self.envelope.add('Subject', subject)

        if settings.get('compose_ask_tags'):
            comp = TagsCompleter(ui.dbman)
            tagsstring = yield ui.prompt('Tags', completer=comp)
            tags = filter(lambda x: x, tagsstring.split(','))
            if tags is None:
                raise CommandCanceled()

            self.envelope.tags = tags

        if self.attach:
            for gpath in self.attach:
                for a in glob.glob(gpath):
                    self.envelope.attach(a)
                    logging.debug('attaching: ' + a)

        # set encryption if needed
        if self.encrypt or account.encrypt_by_default:
            yield self._set_encrypt(ui, self.envelope)

        cmd = commands.envelope.EditCommand(envelope=self.envelope,
                                            spawn=self.force_spawn,
                                            refocus=False)
        ui.apply_command(cmd)
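
The template lookup above expands both the configured template_dir and the template name given by the user, falling back to a directory under $XDG_CONFIG_HOME (itself defaulting to ~/.config). A small sketch of that lookup; the 'myapp' directory name and the helper are hypothetical, not alot's API:

import os

def resolve_template(name, template_dir=None):
    tempdir = os.path.expanduser(template_dir) if template_dir else ''
    if not tempdir:
        xdgdir = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
        tempdir = os.path.join(xdgdir, 'myapp', 'templates')  # 'myapp' is hypothetical
    path = os.path.expanduser(name)
    if not os.path.dirname(path):      # bare names are looked up in tempdir
        path = os.path.join(tempdir, path)
    return path if os.path.isfile(path) else None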

Example 47

Project: pinguino-ide
Source File: pinguino_tools.py
View license
    @Debugger.debug_method
    def link(self, filename):
        """Link.

        NB :  "--opt-code-size"   deprecated
              "--use-non-free"    implicit -I and -L options for non-free headers and libs
                    "-I" + os.path.join(self.P8_DIR, 'sdcc', 'include', 'pic16'),\
                    "-I" + os.path.join(self.P8_DIR, 'sdcc', 'non-free', 'include', 'pic16'),\
                    "-I" + os.path.join(self.P8_DIR, 'pinguino', 'core'),\
                    "-I" + os.path.join(self.P8_DIR, 'pinguino', 'libraries'),\
                    "-L" + os.path.join(self.P8_DIR, 'sdcc', 'lib', 'pic16'),\
                    "-L" + os.path.join(self.P8_DIR, 'sdcc', 'non-free', 'lib', 'pic16'),\
        """

        error = []
        board = self.get_board()
        fichier = open(os.path.join(os.path.expanduser(self.SOURCE_DIR), "stdout"), "w+")

        user_imports = self.get_user_imports_p8()
        #for lib_dir in self.USER_P8_LIBS:
            #user_imports.append("-I" + lib_dir)

        if board.arch == 8:

            if board.bldr == 'boot2':
                sortie = Popen([self.COMPILER_8BIT,
                    "--verbose",\
                    "-mpic16",\
                    "--denable-peeps",\
                    "--obanksel=9",\
                    "--optimize-cmp",\
                    "--optimize-df",\
                    "--no-crt",\
                    "-Wl-s" + os.path.join(self.P8_DIR, 'lkr', board.bldr + '.' + board.proc + '.lkr') + ",-m",\
                    "-p" + board.proc,\
                    "-D" + board.bldr,\
                    "-D" + board.board,\
                    "-DBOARD=\"" + board.board + "\"",\
                    "-DPROC=\"" + board.proc + "\"",\
                    "-DBOOT_VER=2",\
                    "--use-non-free",\
                    "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'core'),\
                    "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'libraries'),\
                    'libio' + board.proc + '.lib',\
                    'libdev' + board.proc + '.lib',\
                    'libc18f.lib',\
                    'libm18f.lib',\
                    'libsdcc.lib',\
                    "-o" + os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.hex'),\
                    os.path.join(self.P8_DIR, 'obj', 'application_iface.o'),\
                    os.path.join(self.P8_DIR, 'obj', 'boot_iface.o'),\
                    os.path.join(self.P8_DIR, 'obj', 'usb_descriptors.o'),\
                    os.path.join(self.P8_DIR, 'obj', 'crt0ipinguino.o'),\
                    os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.o')] + user_imports,\
                    stdout=fichier, stderr=STDOUT)

            elif board.bldr == 'boot4':
                sortie = Popen([self.COMPILER_8BIT,
                    "--verbose", "-V",\
                    "-mpic16",\
                    # optimization
                    "--denable-peeps",\
                    "--obanksel=9",\
                    "--optimize-cmp",\
                    "--optimize-df",\
                    # don't want to link default crt0i.o but crt0i.c
                    "--no-crt",\
                    # move all int. vectors after bootloader code
                    "--ivt-loc=" + str(board.memstart),\
                    # link memory map
                    "-Wl-s" + os.path.join(self.P8_DIR, 'lkr', board.bldr + '.' + board.proc + '.lkr') + ",-m",\
                    "-p" + board.proc,\
                    "-D" + board.bldr,\
                    "-D" + board.board,\
                    "-DBOARD=\"" + board.board + "\"",\
                    "-DPROC=\"" + board.proc + "\"",\
                    "-DBOOT_VER=4",\
                    "--use-non-free",\
                    "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'core'),\
                    "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'libraries'),\
                    os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.o'),\
                    'libio' + board.proc + '.lib',\
                    'libdev' + board.proc + '.lib',\
                    'libc18f.lib',\
                    'libm18f.lib',\
                    # link the default run-time module (crt0i.o)
                    # except when "-no-crt" option is used
                    'libsdcc.lib',\
                    "-o" + os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.hex'),\
                    ] + user_imports,\
                    stdout=fichier, stderr=STDOUT)

            elif board.bldr == 'noboot':
                sortie = Popen([self.COMPILER_8BIT,
                    "--verbose",\
                    "-mpic16",\
                    "--denable-peeps",\
                    "--obanksel=9",\
                    "--optimize-cmp",\
                    "--optimize-df",\
                    #"--no-crt",\ we use default run-time module inside libsdcc.lib
                    "-Wl-s" + os.path.join(self.P8_DIR, 'lkr', board.proc + '_g.lkr') + ",-m",\
                    "-p" + board.proc,\
                    "-D" + board.bldr,\
                    "-D" + board.board,\
                    "-DBOARD=\"" + board.board + "\"",\
                    "-DPROC=\"" + board.proc + "\"",\
                    "-DBOOT_VER=0",\
                    "--use-non-free",\
                    "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'core'),\
                    "-I" + os.path.join(self.P8_DIR, 'include', 'pinguino', 'libraries'),\
                    'libio' + board.proc + '.lib',\
                    'libdev' + board.proc + '.lib',\
                    'libc18f.lib',\
                    'libm18f.lib',\
                    # link the default run-time module
                    'libsdcc.lib',\
                    "-o" + os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.hex'),\
                    os.path.join(os.path.expanduser(self.SOURCE_DIR), 'main.o')] + user_imports,\
                    stdout=fichier, stderr=STDOUT)

        else:#if board.arch == 32:

            makefile = os.path.join(os.path.expanduser(self.SOURCE_DIR), 'Makefile32.'+os.getenv("PINGUINO_OS_NAME"))

            user_imports32 = self.get_user_imports_p32()

            if user_imports32: _IDE_USERLIBS_ = ["_IDE_USERLIBS_=" + user_imports32]
            else: _IDE_USERLIBS_ = []

            #self.report(makefile)

            sortie = Popen([self.MAKE,
                            "--makefile=" + makefile,
                            "_IDE_PDEDIR_=" + os.path.dirname(filename),
                            "_IDE_PROC_=" + board.proc,
                            "_IDE_BOARD_=" + board.board,
                            "_IDE_BINDIR_=" + self.P32_BIN,  #default /usr/bin
                            "_IDE_P32DIR_=" + self.P32_DIR,  #default /usr/share/pinguino-11.0/p32
                            "_IDE_SRCDIR_=" + self.SOURCE_DIR,
                            "_IDE_USERHOMEDIR_=" + os.getenv("PINGUINO_USER_PATH"),  #default ~/.pinguino
                            "_IDE_OSARCH_=" + os.getenv("PINGUINO_OS_ARCH"),
                            "_IDE_HEAP_SIZE_=" + self.HEAPSIZE,
                            "_IDE_MIPS16_ENABLE_=" + self.MIPS16,
                            "_IDE_OPTIMIZATION_=" + self.OPTIMIZATION,

                         ] + _IDE_USERLIBS_,

                         stdout=fichier, stderr=STDOUT)

        sortie.communicate()

        fichier.seek(0)
        # Check if child process has terminated
        if sortie.poll() != 0:
            for ligne in fichier:
                if ligne.find("error") != -1:
                    error.append(ligne)
        fichier.close()

        if sys.platform == "win32":

            if board.board in ["PIC32_PINGUINO_220", "Pinguino32MX220", "Pinguino32MX250", "Pinguino32MX270"]:
                badrecord = ":040000059D0040001A\n"
            else:
                badrecord = ":040000059D006000FA\n"

            if os.path.exists(os.path.join(os.path.expanduser(self.SOURCE_DIR), "main32tmp.hex")):
                fichiersource = open(os.path.join(os.path.expanduser(self.SOURCE_DIR), "main32tmp.hex"), "r")
                fichierdest = open(os.path.join(os.path.expanduser(self.SOURCE_DIR), "main32.hex"), "w+")
                for line in fichiersource:
                    if line != badrecord:
                        fichierdest.writelines(line)
                fichiersource.close()
                fichierdest.close()
                os.remove(os.path.join(os.path.expanduser(self.SOURCE_DIR), "main32tmp.hex"))

        return sortie.poll(), error
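
Here os.path.expanduser(self.SOURCE_DIR) is recomputed every time an argument is built. The pattern reduces to expanding a '~'-prefixed directory setting before composing paths for the toolchain; a tiny sketch with hypothetical names, expanding the directory once up front:

import os

SOURCE_DIR = '~/.myide/source'   # hypothetical, mirrors the '~'-style setting above

def build_link_paths():
    src = os.path.expanduser(SOURCE_DIR)   # expand once, reuse below
    return [
        '-o' + os.path.join(src, 'main.hex'),
        os.path.join(src, 'main.o'),
    ]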

Example 48

Project: powerline
Source File: __init__.py
View license
def check(paths=None, debug=False, echoerr=echoerr, require_ext=None):
	'''Check configuration sanity

	:param list paths:
		Paths from which configuration should be loaded.
	:param bool debug:
		Determines whether information useful for debugging the linter should
		be output.
	:param function echoerr:
		Function that will be used to echo the error(s). Should accept four 
		optional keyword parameters: ``problem`` and ``problem_mark``, and 
		``context`` and ``context_mark``.
	:param str require_ext:
		Require configuration for some extension to be present.

	:return:
		``False`` if user configuration seems to be completely sane and ``True`` 
		if some problems were found.
	'''
	hadproblem = False

	register_common_names()
	search_paths = paths or get_config_paths()
	find_config_files = generate_config_finder(lambda: search_paths)

	logger = logging.getLogger('powerline-lint')
	logger.setLevel(logging.DEBUG if debug else logging.ERROR)
	logger.addHandler(logging.StreamHandler())

	ee = EchoErr(echoerr, logger)

	if require_ext:
		used_main_spec = main_spec.copy()
		try:
			used_main_spec['ext'][require_ext].required()
		except KeyError:
			used_main_spec['ext'][require_ext] = ext_spec()
	else:
		used_main_spec = main_spec

	lhadproblem = [False]
	load_json_config = generate_json_config_loader(lhadproblem)

	config_loader = ConfigLoader(run_once=True, load=load_json_config)

	lists = {
		'colorschemes': set(),
		'themes': set(),
		'exts': set(),
	}
	found_dir = {
		'themes': False,
		'colorschemes': False,
	}
	config_paths = defaultdict(lambda: defaultdict(dict))
	loaded_configs = defaultdict(lambda: defaultdict(dict))
	for d in chain(
		find_all_ext_config_files(search_paths, 'colorschemes'),
		find_all_ext_config_files(search_paths, 'themes'),
	):
		if d['error']:
			hadproblem = True
			ee(problem=d['error'])
			continue
		if d['hadproblem']:
			hadproblem = True
		if d['ext']:
			found_dir[d['type']] = True
			lists['exts'].add(d['ext'])
			if d['name'] == '__main__':
				pass
			elif d['name'].startswith('__') or d['name'].endswith('__'):
				hadproblem = True
				ee(problem='File name is not supposed to start or end with “__”: {0}'.format(
					d['path']))
			else:
				lists[d['type']].add(d['name'])
			config_paths[d['type']][d['ext']][d['name']] = d['path']
			loaded_configs[d['type']][d['ext']][d['name']] = d['config']
		else:
			config_paths[d['type']][d['name']] = d['path']
			loaded_configs[d['type']][d['name']] = d['config']

	for typ in ('themes', 'colorschemes'):
		if not found_dir[typ]:
			hadproblem = True
			ee(problem='Subdirectory {0} was not found in paths {1}'.format(typ, ', '.join(search_paths)))

	diff = set(config_paths['colorschemes']) - set(config_paths['themes'])
	if diff:
		hadproblem = True
		for ext in diff:
			typ = 'colorschemes' if ext in config_paths['themes'] else 'themes'
			if not config_paths['top_' + typ] or typ == 'themes':
				ee(problem='{0} extension {1} not present in {2}'.format(
					ext,
					'configuration' if (
						ext in loaded_configs['themes'] and ext in loaded_configs['colorschemes']
					) else 'directory',
					typ,
				))

	try:
		main_config = load_config('config', find_config_files, config_loader)
	except IOError:
		main_config = {}
		ee(problem='Configuration file not found: config.json')
		hadproblem = True
	except MarkedError as e:
		main_config = {}
		ee(problem=str(e))
		hadproblem = True
	else:
		if used_main_spec.match(
			main_config,
			data={'configs': config_paths, 'lists': lists},
			context=Context(main_config),
			echoerr=ee
		)[1]:
			hadproblem = True

	import_paths = [os.path.expanduser(path) for path in main_config.get('common', {}).get('paths', [])]

	try:
		colors_config = load_config('colors', find_config_files, config_loader)
	except IOError:
		colors_config = {}
		ee(problem='Configuration file not found: colors.json')
		hadproblem = True
	except MarkedError as e:
		colors_config = {}
		ee(problem=str(e))
		hadproblem = True
	else:
		if colors_spec.match(colors_config, context=Context(colors_config), echoerr=ee)[1]:
			hadproblem = True

	if lhadproblem[0]:
		hadproblem = True

	top_colorscheme_configs = dict(loaded_configs['top_colorschemes'])
	data = {
		'ext': None,
		'top_colorscheme_configs': top_colorscheme_configs,
		'ext_colorscheme_configs': {},
		'colors_config': colors_config
	}
	for colorscheme, config in loaded_configs['top_colorschemes'].items():
		data['colorscheme'] = colorscheme
		if top_colorscheme_spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
			hadproblem = True

	ext_colorscheme_configs = dict2(loaded_configs['colorschemes'])
	for ext, econfigs in ext_colorscheme_configs.items():
		data = {
			'ext': ext,
			'top_colorscheme_configs': top_colorscheme_configs,
			'ext_colorscheme_configs': ext_colorscheme_configs,
			'colors_config': colors_config,
		}
		for colorscheme, config in econfigs.items():
			data['colorscheme'] = colorscheme
			if ext == 'vim':
				spec = vim_colorscheme_spec
			elif ext == 'shell':
				spec = shell_colorscheme_spec
			else:
				spec = colorscheme_spec
			if spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
				hadproblem = True

	colorscheme_configs = {}
	for ext in lists['exts']:
		colorscheme_configs[ext] = {}
		for colorscheme in lists['colorschemes']:
			econfigs = ext_colorscheme_configs[ext]
			ecconfigs = econfigs.get(colorscheme)
			mconfigs = (
				top_colorscheme_configs.get(colorscheme),
				econfigs.get('__main__'),
				ecconfigs,
			)
			if not (mconfigs[0] or mconfigs[2]):
				continue
			config = None
			for mconfig in mconfigs:
				if not mconfig:
					continue
				if config:
					config = mergedicts_copy(config, mconfig)
				else:
					config = mconfig
			colorscheme_configs[ext][colorscheme] = config

	theme_configs = dict2(loaded_configs['themes'])
	top_theme_configs = dict(loaded_configs['top_themes'])
	for ext, configs in theme_configs.items():
		data = {
			'ext': ext,
			'colorscheme_configs': colorscheme_configs,
			'import_paths': import_paths,
			'main_config': main_config,
			'top_themes': top_theme_configs,
			'ext_theme_configs': configs,
			'colors_config': colors_config
		}
		for theme, config in configs.items():
			data['theme'] = theme
			if theme == '__main__':
				data['theme_type'] = 'main'
				spec = main_theme_spec
			else:
				data['theme_type'] = 'regular'
				spec = theme_spec
			if spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
				hadproblem = True

	for top_theme, config in top_theme_configs.items():
		data = {
			'ext': None,
			'colorscheme_configs': colorscheme_configs,
			'import_paths': import_paths,
			'main_config': main_config,
			'theme_configs': theme_configs,
			'ext_theme_configs': None,
			'colors_config': colors_config
		}
		data['theme_type'] = 'top'
		data['theme'] = top_theme
		if top_theme_spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
			hadproblem = True

	return hadproblem
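
The single expanduser call in this linter expands every entry of common.paths from the main config before those paths are used for imports. The same step in isolation, with main_config being a plain dict parsed from JSON as above:

import os

def expand_import_paths(main_config):
    # each configured path may start with '~' and is expanded before use
    return [os.path.expanduser(p)
            for p in main_config.get('common', {}).get('paths', [])]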

Example 49

Project: ochothon
Source File: cli.py
View license
def cli(args):

    tmp = tempfile.mkdtemp()
    try:

        class Shell(cmd.Cmd):

            def __init__(self, ip, token=None):
                cmd.Cmd.__init__(self)
                self.prompt = '%s > ' % ip
                self.ruler = '-'
                self.token = token

            def precmd(self, line):
                return 'shell %s' % line if line not in ['exit'] else line

            def emptyline(self):
                pass

            def do_exit(self, _):
                raise KeyboardInterrupt

            def do_shell(self, line):
                if line:
                    tokens = line.split(' ')

                    #
                    # - update from @stphung -> reformat the input line to handle indirect paths transparently
                    # - for instance ../foo.bar will become foo.bar with the actual file included in the multi-part post
                    #
                    files = {}
                    substituted = tokens[:1]
                    for token in tokens[1:]:
                        expanded = expanduser(token)
                        full = abspath(expanded)
                        tag = basename(full)
                        if isfile(expanded):

                            #
                            # - if the token maps to a local file upload it
                            # - this is for instance what happens when you do 'deploy foo.yml'
                            #
                            files[tag] = abspath(full)
                            substituted += [tag]

                        elif isdir(expanded):

                            #
                            # - if the token maps to a local directory TGZ & upload it
                            # - this is typically used to upload settings & script for our CD pipeline
                            # - the TGZ is stored in our temp. directory
                            #
                            path = join(tmp, '%s.tgz' % tag)
                            shell('tar zcf %s *' % path, cwd=full)
                            files['%s.tgz' % tag] = path
                            substituted += ['%s.tgz' % tag]

                        else:
                            substituted += [token]

                    #
                    # - compute the SHA1 signature if we have a token
                    # - prep the CURL statement and run it
                    # - we should always get a HTTP 200 back with some UTF-8 json payload
                    # - parse & print
                    #
                    line = ' '.join(substituted)
                    unrolled = ['-F %s=@%s' % (k, v) for k, v in files.items()]
                    digest = 'sha1=' + hmac.new(self.token, line, hashlib.sha1).hexdigest() if self.token else ''
                    snippet = 'curl -X POST -H "X-Shell:%s" -H "X-Signature:%s" %s %s:9000/shell' % (line, digest, ' '.join(unrolled), ip)
                    code, out = shell(snippet, cwd=tmp)
                    js = json.loads(out.decode('utf-8'))
                    print(js['out'] if code == 0 else 'i/o failure (is the proxy down ?)')

        #
        # - partition ip and args by looking for OCHOPOD_PROXY first
        # - if OCHOPOD_PROXY is not used, treat the first argument as the ip
        #
        ip = None
        if 'OCHOPOD_PROXY' in os.environ:
            ip = os.environ['OCHOPOD_PROXY']
        elif len(args):
            ip = args[0]
            args = args[1:] if len(args) > 1 else []

        #
        # - fail if left undefined
        #
        assert ip is not None, 'either set $OCHOPOD_PROXY or pass the proxy IP as an argument'

        #
        # - set the secret token if specified via the $OCHOPOD_TOKEN variable
        # - if not defined or set to an empty string the SHA1 signature will not be performed
        #
        token = os.environ['OCHOPOD_TOKEN'] if 'OCHOPOD_TOKEN' in os.environ else None

        #
        # - determine whether to run in interactive or non-interactive mode
        #
        if len(args):
            command = " ".join(args)
            Shell(ip, token).do_shell(command)
        else:
            print('welcome to the ocho CLI ! (CTRL-C or exit to get out)')
            if token is None:
                print('warning, $OCHOPOD_TOKEN is undefined')
            Shell(ip, token).cmdloop()

    except KeyboardInterrupt:
        exit(0)

    except Exception as failure:
        print('internal failure <- %s' % str(failure))
        exit(1)

    finally:
        shutil.rmtree(tmp)
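
In this CLI each command token is expanded so that arguments like '~/foo.yml' are recognised as local files (or directories) to upload rather than literal strings. A stripped-down sketch of that classification step; the function name is hypothetical:

import os

def classify_token(token):
    expanded = os.path.expanduser(token)
    if os.path.isfile(expanded):
        return 'file', os.path.abspath(expanded)
    if os.path.isdir(expanded):
        return 'dir', os.path.abspath(expanded)
    return 'literal', token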

Example 50

Project: Pyfa
Source File: itemDiff.py
View license
def main(old, new, groups=True, effects=True, attributes=True, renames=True):
    # Open both databases and get their cursors
    old_db = sqlite3.connect(os.path.expanduser(old))
    old_cursor = old_db.cursor()
    new_db = sqlite3.connect(os.path.expanduser(new))
    new_cursor = new_db.cursor()

    # Force some of the items to make them published
    FORCEPUB_TYPES = ("Ibis", "Impairor", "Velator", "Reaper",
    "Amarr Tactical Destroyer Propulsion Mode",
    "Amarr Tactical Destroyer Sharpshooter Mode",
    "Amarr Tactical Destroyer Defense Mode")
    OVERRIDES_TYPEPUB = 'UPDATE invtypes SET published = 1 WHERE typeName = ?'
    for typename in FORCEPUB_TYPES:
        old_cursor.execute(OVERRIDES_TYPEPUB, (typename,))
        new_cursor.execute(OVERRIDES_TYPEPUB, (typename,))

    # Initialization of a few things used by both the changed and renamed effects lists
    script_dir = os.path.dirname(__file__)
    effectspath = os.path.join(script_dir, "..", "eos", "effects")
    implemented = set()

    for filename in os.listdir(effectspath):
        basename, extension = filename.rsplit('.', 1)
        # Ignore non-py files and exclude implementation-specific 'effect'
        if extension == "py" and basename not in ("__init__",):
            implemented.add(basename)

    # Effects' names are used w/o any special symbols by eos
    stripspec = "[^A-Za-z0-9]"

    # Helper that tells whether an effect is implemented in eos
    def geteffst(effectname):
        eosname = re.sub(stripspec, "", effectname).lower()
        if eosname in implemented:
            impstate = True
        else:
            impstate = False
        return impstate

    def findrenames(ren_dict, query, strip=False):

        old_namedata = {}
        new_namedata = {}

        for cursor, dictionary in ((old_cursor, old_namedata), (new_cursor, new_namedata)):
            cursor.execute(query)
            for row in cursor:
                id = row[0]
                name = row[1]
                if strip is True:
                    name = re.sub(stripspec, "", name)
                dictionary[id] = name

        for id in set(old_namedata.keys()).intersection(new_namedata.keys()):
            oldname = old_namedata[id]
            newname = new_namedata[id]
            if oldname != newname:
                ren_dict[id] = (oldname, newname)
        return

    def printrenames(ren_dict, title, implementedtag=False):
        if len(ren_dict) > 0:
            print('\nRenamed ' + title + ':')
            for id in sorted(ren_dict):
                couple = ren_dict[id]
                if implementedtag:
                    print("\n[{0}] \"{1}\"\n[{2}] \"{3}\"".format(geteffst(couple[0]), couple[0], geteffst(couple[1]), couple[1]))
                else:
                    print("    \"{0}\": \"{1}\",".format(couple[0].encode('utf-8'), couple[1].encode('utf-8')))

    groupcats = {}
    def getgroupcat(grp):
        """Get group category from the new db"""
        if grp in groupcats:
            cat = groupcats[grp]
        else:
            query = 'SELECT categoryID FROM invgroups WHERE groupID = ?'
            new_cursor.execute(query, (grp,))
            cat = 0
            for row in new_cursor:
                cat = row[0]
            groupcats[grp] = cat
        return cat

    itemnames = {}
    def getitemname(item):
        """Get item name from the new db"""
        if item in itemnames:
            name = itemnames[item]
        else:
            query = 'SELECT typeName FROM invtypes WHERE typeID = ?'
            new_cursor.execute(query, (item,))
            name = ""
            for row in new_cursor:
                name = row[0]
            if not name:
                old_cursor.execute(query, (item,))
                for row in old_cursor:
                    name = row[0]
            itemnames[item] = name
        return name

    groupnames = {}
    def getgroupname(grp):
        """Get group name from the new db"""
        if grp in groupnames:
            name = groupnames[grp]
        else:
            query = 'SELECT groupName FROM invgroups WHERE groupID = ?'
            new_cursor.execute(query, (grp,))
            name = ""
            for row in new_cursor:
                name = row[0]
            if not name:
                old_cursor.execute(query, (grp,))
                for row in old_cursor:
                    name = row[0]
            groupnames[grp] = name
        return name

    effectnames = {}
    def geteffectname(effect):
        """Get effect name from the new db"""
        if effect in effectnames:
            name = effectnames[effect]
        else:
            query = 'SELECT effectName FROM dgmeffects WHERE effectID = ?'
            new_cursor.execute(query, (effect,))
            name = ""
            for row in new_cursor:
                name = row[0]
            if not name:
                old_cursor.execute(query, (effect,))
                for row in old_cursor:
                    name = row[0]
            effectnames[effect] = name
        return name

    attrnames = {}
    def getattrname(attr):
        """Get attribute name from the new db"""
        if attr in attrnames:
            name = attrnames[attr]
        else:
            query = 'SELECT attributeName FROM dgmattribs WHERE attributeID = ?'
            new_cursor.execute(query, (attr,))
            name = ""
            for row in new_cursor:
                name = row[0]
            if not name:
                old_cursor.execute(query, (attr,))
                for row in old_cursor:
                    name = row[0]
            attrnames[attr] = name
        return name

    # State table
    S = {"unchanged": 0,
         "removed": 1,
         "changed": 2,
         "added": 3 }

    if effects or attributes or groups:
        # Format:
        # Key: item id
        # Value: [groupID, set(effects), {attribute id : value}]
        old_itmdata = {}
        new_itmdata = {}

        for cursor, dictionary in ((old_cursor, old_itmdata), (new_cursor, new_itmdata)):
            # Compose list of items we're interested in, filtered by category
            query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID INNER JOIN invcategories AS ic ON ig.categoryID = ic.categoryID WHERE it.published = 1 AND ic.categoryName IN ("Ship", "Module", "Charge", "Skill", "Drone", "Implant", "Subsystem")'
            cursor.execute(query)
            for row in cursor:
                itemid = row[0]
                groupID = row[1]
                # Initialize container for the data for each item with empty stuff besides groupID
                dictionary[itemid] = [groupID, set(), {}]
            # Add items filtered by group
            query = 'SELECT it.typeID, it.groupID FROM invtypes AS it INNER JOIN invgroups AS ig ON it.groupID = ig.groupID WHERE it.published = 1 AND ig.groupName IN ("Effect Beacon", "Ship Modifiers")'
            cursor.execute(query)
            for row in cursor:
                itemid = row[0]
                groupID = row[1]
                dictionary[itemid] = [groupID, set(), {}]

            if effects:
                # Pull all eff
                query = 'SELECT it.typeID, de.effectID FROM invtypes AS it INNER JOIN dgmtypeeffects AS dte ON dte.typeID = it.typeID INNER JOIN dgmeffects AS de ON de.effectID = dte.effectID WHERE it.published = 1'
                cursor.execute(query)
                for row in cursor:
                    itemid = row[0]
                    effectID = row[1]
                    # Process only items we need
                    if itemid in dictionary:
                        # Add effect to the set
                        effectSet = dictionary[itemid][1]
                        effectSet.add(effectID)

            if attributes:
                # Add base attributes to our data
                query = 'SELECT it.typeID, it.mass, it.capacity, it.volume FROM invtypes AS it'
                cursor.execute(query)
                for row in cursor:
                    itemid = row[0]
                    if itemid in dictionary:
                        attrdict = dictionary[itemid][2]
                        # Add base attributes: mass (4), capacity (38) and volume (161)
                        attrdict[4] = row[1]
                        attrdict[38] = row[2]
                        attrdict[161] = row[3]

                # Add attribute data for other attributes
                query = 'SELECT dta.typeID, dta.attributeID, dta.value FROM dgmtypeattribs AS dta'
                cursor.execute(query)
                for row in cursor:
                    itemid = row[0]
                    if itemid in dictionary:
                        attrid = row[1]
                        attrval = row[2]
                        attrdict = dictionary[itemid][2]
                        attrdict[attrid] = attrval

        # Get set of IDs from both dictionaries
        items_old = set(old_itmdata.keys())
        items_new = set(new_itmdata.keys())

        # Format:
        # Key: item state
        # Value: {item id: ((group state, old group, new group), {effect state: set(effects)}, {attribute state: {attributeID: (old value, new value)}})}
        global_itmdata = {}

        # Initialize it
        for state in S:
            global_itmdata[S[state]] = {}


        # Fill all the data for removed items
        for item in items_old.difference(items_new):
            # Set item state to removed
            state = S["removed"]
            # Set only old group for item
            oldgroup = old_itmdata[item][0]
            groupdata = (S["unchanged"], oldgroup, None)
            # Set old set of effects and mark all as unchanged
            effectsdata = {}
            effectsdata[S["unchanged"]] = set()
            if effects:
                oldeffects = old_itmdata[item][1]
                effectsdata[S["unchanged"]].update(oldeffects)
            # Set old set of attributes and mark all as unchanged
            attrdata = {}
            attrdata[S["unchanged"]] = {}
            if attributes:
                oldattrs = old_itmdata[item][2]
                for attr in oldattrs:
                    # NULL will mean there's no such attribute in db
                    attrdata[S["unchanged"]][attr] = (oldattrs[attr], "NULL")
            # Fill global dictionary with data we've got
            global_itmdata[state][item] = (groupdata, effectsdata, attrdata)


        # Now, for added items
        for item in items_new.difference(items_old):
            # Set item state to added
            state = S["added"]
            # Set only new group for item
            newgroup = new_itmdata[item][0]
            groupdata = (S["unchanged"], None, newgroup)
            # Set new set of effects and mark all as unchanged
            effectsdata = {}
            effectsdata[S["unchanged"]] = set()
            if effects:
                neweffects = new_itmdata[item][1]
                effectsdata[S["unchanged"]].update(neweffects)
            # Set new set of attributes and mark all as unchanged
            attrdata = {}
            attrdata[S["unchanged"]] = {}
            if attributes:
                newattrs = new_itmdata[item][2]
                for attr in newattrs:
                    # NULL will mean there's no such attribute in db
                    attrdata[S["unchanged"]][attr] = ("NULL", newattrs[attr])
            # Fill global dictionary with data we've got
            global_itmdata[state][item] = (groupdata, effectsdata, attrdata)

        # Now, check all the items which exist in both databases
        for item in items_old.intersection(items_new):
            # Set group data for an item
            oldgroup = old_itmdata[item][0]
            newgroup = new_itmdata[item][0]
            # If we're not asked to compare groups, mark them as unchanged anyway
            groupdata = (S["changed"] if oldgroup != newgroup and groups else S["unchanged"], oldgroup, newgroup)
            # Fill effects data into appropriate groups
            effectsdata = {}
            for state in S:
                # We do not have changed effects whatsoever
                if state != "changed":
                    effectsdata[S[state]] = set()
            if effects:
                oldeffects = old_itmdata[item][1]
                neweffects = new_itmdata[item][1]
                effectsdata[S["unchanged"]].update(oldeffects.intersection(neweffects))
                effectsdata[S["removed"]].update(oldeffects.difference(neweffects))
                effectsdata[S["added"]].update(neweffects.difference(oldeffects))
            # Go through all attributes, filling global data dictionary
            attrdata = {}
            for state in S:
                attrdata[S[state]] = {}
            if attributes:
                oldattrs = old_itmdata[item][2]
                newattrs = new_itmdata[item][2]
                for attr in set(oldattrs.keys()).union(newattrs.keys()):
                    # NULL will mean there's no such attribute in db
                    oldattr = oldattrs.get(attr, "NULL")
                    newattr = newattrs.get(attr, "NULL")
                    attrstate = S["unchanged"]
                    if oldattr == "NULL" and newattr != "NULL":
                        attrstate = S["added"]
                    elif oldattr != "NULL" and newattr == "NULL":
                        attrstate = S["removed"]
                    elif oldattr != newattr:
                        attrstate = S["changed"]
                    attrdata[attrstate][attr] = (oldattr, newattr)
            # Consider item as unchanged by default and set it to change when we see any changes in sub-items
            state = S["unchanged"]
            if state == S["unchanged"] and groupdata[0] != S["unchanged"]:
                state = S["changed"]
            if state == S["unchanged"] and (len(effectsdata[S["removed"]]) > 0 or len(effectsdata[S["added"]]) > 0):
                state = S["changed"]
            if state == S["unchanged"] and (len(attrdata[S["removed"]]) > 0 or len(attrdata[S["changed"]]) > 0 or len(attrdata[S["added"]]) > 0):
                state = S["changed"]
            # Fill global dictionary with data we've got
            global_itmdata[state][item] = (groupdata, effectsdata, attrdata)

    # As eos uses names as unique IDs in lot of places, we have to keep track of name changes
    if renames:
        ren_effects = {}
        query = 'SELECT effectID, effectName FROM dgmeffects'
        findrenames(ren_effects, query, strip = True)

        ren_attributes = {}
        query = 'SELECT attributeID, attributeName FROM dgmattribs'
        findrenames(ren_attributes, query)

        ren_categories = {}
        query = 'SELECT categoryID, categoryName FROM invcategories'
        findrenames(ren_categories, query)

        ren_groups = {}
        query = 'SELECT groupID, groupName FROM invgroups'
        findrenames(ren_groups, query)

        ren_marketgroups = {}
        query = 'SELECT marketGroupID, marketGroupName FROM invmarketgroups'
        findrenames(ren_marketgroups, query)

        ren_items = {}
        query = 'SELECT typeID, typeName FROM invtypes'
        findrenames(ren_items, query)

    try:
        # Get db metadata
        old_meta = {}
        new_meta = {}
        query = 'SELECT field_name, field_value FROM metadata WHERE field_name LIKE "client_build"'
        old_cursor.execute(query)
        for row in old_cursor:
            old_meta[row[0]] = row[1]
        new_cursor.execute(query)
        for row in new_cursor:
            new_meta[row[0]] = row[1]
    except:
        pass
    # Print jobs
    print("Comparing databases:\n{0} -> {1}\n".format(old_meta.get("client_build"), new_meta.get("client_build")))

    if renames:
        title = 'effects'
        printrenames(ren_effects, title, implementedtag=True)

        title = 'attributes'
        printrenames(ren_attributes, title)

        title = 'categories'
        printrenames(ren_categories, title)

        title = 'groups'
        printrenames(ren_groups, title)

        title = 'market groups'
        printrenames(ren_marketgroups, title)

        title = 'items'
        printrenames(ren_items, title)

    if effects or attributes or groups:
        # Print legend only when there're any interesting changes
        if len(global_itmdata[S["removed"]]) > 0 or len(global_itmdata[S["changed"]]) > 0 or len(global_itmdata[S["added"]]) > 0:
            genleg = "[+] - new item\n[-] - removed item\n[*] - changed item\n"
            grpleg = "(x => y) - group changes\n" if groups else ""
            attreffleg = "  [+] - effect or attribute has been added to item\n  [-] - effect or attribute has been removed from item\n" if attributes or effects else ""
            effleg = "  [y] - effect is implemented\n  [n] - effect is not implemented\n" if effects else ""
            print("{0}{1}{2}{3}\nItems:".format(genleg, grpleg, attreffleg, effleg))

            # Make sure our states are sorted
            stateorder = sorted(global_itmdata)

            TG = {S["unchanged"]: "+", S["changed"]: "*",
                  S["removed"]: "-",
                  S["added"]: "+"}

            # Cycle through states
            for itmstate in stateorder:
                # Skip unchanged items
                if itmstate == S["unchanged"]:
                    continue
                items = global_itmdata[itmstate]
                # Sort by name first
                itemorder = sorted(items, key=lambda item: getitemname(item))
                # Then by group id
                itemorder = sorted(itemorder, key=lambda item: items[item][0][2] or items[item][0][1])
                # Then by category id
                itemorder = sorted(itemorder, key=lambda item: getgroupcat(items[item][0][2] or items[item][0][1]))

                for item in itemorder:
                    groupdata = items[item][0]
                    groupstr = " ({0} => {1})".format(getgroupname(groupdata[1]), getgroupname(groupdata[2])) if groupdata[0] == S["changed"] else ""
                    print("\n[{0}] {1}{2}".format(TG[itmstate], getitemname(item).encode('utf-8'), groupstr))

                    effdata = items[item][1]
                    for effstate in stateorder:
                        # Skip unchanged effect sets, but always include them for added or removed ships
                        # Also, always skip empty data
                        if (effstate == S["unchanged"] and itmstate not in (S["removed"], S["added"])) or effstate not in effdata:
                            continue
                        effects = effdata[effstate]
                        efforder = sorted(effects, key=lambda eff: geteffectname(eff))
                        for eff in efforder:
                            # Take tag from item if item was added or removed
                            tag = TG[effstate] if itmstate not in (S["removed"], S["added"]) else TG[itmstate]
                            print("  [{0}|{1}] {2}".format(tag, "y" if geteffst(geteffectname(eff)) else "n", geteffectname(eff)))

                    attrdata = items[item][2]
                    for attrstate in stateorder:
                        # Skip unchanged and empty attribute sets, also skip attributes display for added and removed items
                        if (attrstate == S["unchanged"] and itmstate != S["added"]) or itmstate in (S["removed"], ) or attrstate not in attrdata:
                            continue
                        attrs = attrdata[attrstate]
                        attrorder = sorted(attrs, key=lambda attr: getattrname(attr))
                        for attr in attrorder:
                            valline = ""
                            if attrs[attr][0] == "NULL" or itmstate == S["added"]:
                                valline = "{0}".format(attrs[attr][1] or 0)
                            elif attrs[attr][1] == "NULL":
                                valline = "{0}".format(attrs[attr][0] or 0)
                            else:
                                valline = "{0} => {1}".format(attrs[attr][0] or 0, attrs[attr][1] or 0)
                            print("  [{0}] {1}: {2}".format(TG[attrstate], getattrname(attr), valline))